diff --git a/platform/dbops/binaries/build/share/bison/m4sugar/foreach.m4 b/platform/dbops/binaries/build/share/bison/m4sugar/foreach.m4 new file mode 100644 index 0000000000000000000000000000000000000000..2052d44c2a759e9540fc08dd452c8d4f80b302d9 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/m4sugar/foreach.m4 @@ -0,0 +1,362 @@ +# -*- Autoconf -*- +# This file is part of Autoconf. +# foreach-based replacements for recursive functions. +# Speeds up GNU M4 1.4.x by avoiding quadratic $@ recursion, but penalizes +# GNU M4 1.6 by requiring more memory and macro expansions. +# +# Copyright (C) 2008-2017, 2020 Free Software Foundation, Inc. + +# This file is part of Autoconf. This program is free +# software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# Under Section 7 of GPL version 3, you are granted additional +# permissions described in the Autoconf Configure Script Exception, +# version 3.0, as published by the Free Software Foundation. +# +# You should have received a copy of the GNU General Public License +# and a copy of the Autoconf Configure Script Exception along with +# this program; see the files COPYINGv3 and COPYING.EXCEPTION +# respectively. If not, see . + +# Written by Eric Blake. + +# In M4 1.4.x, every byte of $@ is rescanned. This means that an +# algorithm on n arguments that recurses with one less argument each +# iteration will scan n * (n + 1) / 2 arguments, for O(n^2) time. In +# M4 1.6, this was fixed so that $@ is only scanned once, then +# back-references are made to information stored about the scan. 
+# Thus, n iterations need only scan n arguments, for O(n) time. +# Additionally, in M4 1.4.x, recursive algorithms did not clean up +# memory very well, requiring O(n^2) memory rather than O(n) for n +# iterations. +# +# This file is designed to overcome the quadratic nature of $@ +# recursion by writing a variant of m4_foreach that uses m4_for rather +# than $@ recursion to operate on the list. This involves more macro +# expansions, but avoids the need to rescan a quadratic number of +# arguments, making these replacements very attractive for M4 1.4.x. +# On the other hand, in any version of M4, expanding additional macros +# costs additional time; therefore, in M4 1.6, where $@ recursion uses +# fewer macros, these replacements actually pessimize performance. +# Additionally, the use of $10 to mean the tenth argument violates +# POSIX; although all versions of m4 1.4.x support this meaning, a +# future m4 version may switch to take it as the first argument +# concatenated with a literal 0, so the implementations in this file +# are not future-proof. Thus, this file is conditionally included as +# part of m4_init(), only when it is detected that M4 probably has +# quadratic behavior (ie. it lacks the macro __m4_version__). +# +# Please keep this file in sync with m4sugar.m4. + +# _m4_foreach(PRE, POST, IGNORED, ARG...) +# --------------------------------------- +# Form the common basis of the m4_foreach and m4_map macros. For each +# ARG, expand PRE[ARG]POST[]. The IGNORED argument makes recursion +# easier, and must be supplied rather than implicit. +# +# This version minimizes the number of times that $@ is evaluated by +# using m4_for to generate a boilerplate into _m4_f then passing $@ to +# that temporary macro. Thus, the recursion is done in m4_for without +# reparsing any user input, and is not quadratic. 
For an idea of how +# this works, note that m4_foreach(i,[1,2],[i]) calls +# _m4_foreach([m4_define([i],],[)i],[],[1],[2]) +# which defines _m4_f: +# $1[$4]$2[]$1[$5]$2[]_m4_popdef([_m4_f]) +# then calls _m4_f([m4_define([i],],[)i],[],[1],[2]) for a net result: +# m4_define([i],[1])i[]m4_define([i],[2])i[]_m4_popdef([_m4_f]). +m4_define([_m4_foreach], +[m4_if([$#], [3], [], + [m4_pushdef([_m4_f], _m4_for([4], [$#], [1], + [$0_([1], [2],], [)])[_m4_popdef([_m4_f])])_m4_f($@)])]) + +m4_define([_m4_foreach_], +[[$$1[$$3]$$2[]]]) + +# m4_case(SWITCH, VAL1, IF-VAL1, VAL2, IF-VAL2, ..., DEFAULT) +# ----------------------------------------------------------- +# Find the first VAL that SWITCH matches, and expand the corresponding +# IF-VAL. If there are no matches, expand DEFAULT. +# +# Use m4_for to create a temporary macro in terms of a boilerplate +# m4_if with final cleanup. If $# is even, we have DEFAULT; if it is +# odd, then rounding the last $# up in the temporary macro is +# harmless. For example, both m4_case(1,2,3,4,5) and +# m4_case(1,2,3,4,5,6) result in the intermediate _m4_case being +# m4_if([$1],[$2],[$3],[$1],[$4],[$5],_m4_popdef([_m4_case])[$6]) +m4_define([m4_case], +[m4_if(m4_eval([$# <= 2]), [1], [$2], +[m4_pushdef([_$0], [m4_if(]_m4_for([2], m4_eval([($# - 1) / 2 * 2]), [2], + [_$0_(], [)])[_m4_popdef( + [_$0])]m4_dquote($m4_eval([($# + 1) & ~1]))[)])_$0($@)])]) + +m4_define([_m4_case_], +[$0_([1], [$1], m4_incr([$1]))]) + +m4_define([_m4_case__], +[[[$$1],[$$2],[$$3],]]) + +# m4_bmatch(SWITCH, RE1, VAL1, RE2, VAL2, ..., DEFAULT) +# ----------------------------------------------------- +# m4 equivalent of +# +# if (SWITCH =~ RE1) +# VAL1; +# elif (SWITCH =~ RE2) +# VAL2; +# elif ... +# ... +# else +# DEFAULT +# +# We build the temporary macro _m4_b: +# m4_define([_m4_b], _m4_defn([_m4_bmatch]))_m4_b([$1], [$2], [$3])... 
+# _m4_b([$1], [$m-1], [$m])_m4_b([], [], [$m+1]_m4_popdef([_m4_b])) +# then invoke m4_unquote(_m4_b($@)), for concatenation with later text. +m4_define([m4_bmatch], +[m4_if([$#], 0, [m4_fatal([$0: too few arguments: $#])], + [$#], 1, [m4_fatal([$0: too few arguments: $#: $1])], + [$#], 2, [$2], + [m4_pushdef([_m4_b], [m4_define([_m4_b], + _m4_defn([_$0]))]_m4_for([3], m4_eval([($# + 1) / 2 * 2 - 1]), + [2], [_$0_(], [)])[_m4_b([], [],]m4_dquote([$]m4_eval( + [($# + 1) / 2 * 2]))[_m4_popdef([_m4_b]))])m4_unquote(_m4_b($@))])]) + +m4_define([_m4_bmatch], +[m4_if(m4_bregexp([$1], [$2]), [-1], [], [[$3]m4_define([$0])])]) + +m4_define([_m4_bmatch_], +[$0_([1], m4_decr([$1]), [$1])]) + +m4_define([_m4_bmatch__], +[[_m4_b([$$1], [$$2], [$$3])]]) + + +# m4_cond(TEST1, VAL1, IF-VAL1, TEST2, VAL2, IF-VAL2, ..., [DEFAULT]) +# ------------------------------------------------------------------- +# Similar to m4_if, except that each TEST is expanded when encountered. +# If the expansion of TESTn matches the string VALn, the result is IF-VALn. +# The result is DEFAULT if no tests passed. This macro allows +# short-circuiting of expensive tests, where it pays to arrange quick +# filter tests to run first. +# +# m4_cond already guarantees either 3*n or 3*n + 1 arguments, 1 <= n. +# We only have to speed up _m4_cond, by building the temporary _m4_c: +# m4_define([_m4_c], _m4_defn([m4_unquote]))_m4_c([m4_if(($1), [($2)], +# [[$3]m4_define([_m4_c])])])_m4_c([m4_if(($4), [($5)], +# [[$6]m4_define([_m4_c])])])..._m4_c([m4_if(($m-2), [($m-1)], +# [[$m]m4_define([_m4_c])])])_m4_c([[$m+1]]_m4_popdef([_m4_c])) +# We invoke m4_unquote(_m4_c($@)), for concatenation with later text. 
+m4_define([_m4_cond], +[m4_pushdef([_m4_c], [m4_define([_m4_c], + _m4_defn([m4_unquote]))]_m4_for([2], m4_eval([$# / 3 * 3 - 1]), [3], + [$0_(], [)])[_m4_c(]m4_dquote(m4_dquote( + [$]m4_eval([$# / 3 * 3 + 1])))[_m4_popdef([_m4_c]))])m4_unquote(_m4_c($@))]) + +m4_define([_m4_cond_], +[$0_(m4_decr([$1]), [$1], m4_incr([$1]))]) + +m4_define([_m4_cond__], +[[_m4_c([m4_if(($$1), [($$2)], [[$$3]m4_define([_m4_c])])])]]) + +# m4_bpatsubsts(STRING, RE1, SUBST1, RE2, SUBST2, ...) +# ---------------------------------------------------- +# m4 equivalent of +# +# $_ = STRING; +# s/RE1/SUBST1/g; +# s/RE2/SUBST2/g; +# ... +# +# m4_bpatsubsts already validated an odd number of arguments; we only +# need to speed up _m4_bpatsubsts. To avoid nesting, we build the +# temporary _m4_p: +# m4_define([_m4_p], [$1])m4_define([_m4_p], +# m4_bpatsubst(m4_dquote(_m4_defn([_m4_p])), [$2], [$3]))m4_define([_m4_p], +# m4_bpatsubst(m4_dquote(_m4_defn([_m4_p])), [$4], [$5]))m4_define([_m4_p],... +# m4_bpatsubst(m4_dquote(_m4_defn([_m4_p])), [$m-1], [$m]))m4_unquote( +# _m4_defn([_m4_p])_m4_popdef([_m4_p])) +m4_define([_m4_bpatsubsts], +[m4_pushdef([_m4_p], [m4_define([_m4_p], + ]m4_dquote([$]1)[)]_m4_for([3], [$#], [2], [$0_(], + [)])[m4_unquote(_m4_defn([_m4_p])_m4_popdef([_m4_p]))])_m4_p($@)]) + +m4_define([_m4_bpatsubsts_], +[$0_(m4_decr([$1]), [$1])]) + +m4_define([_m4_bpatsubsts__], +[[m4_define([_m4_p], +m4_bpatsubst(m4_dquote(_m4_defn([_m4_p])), [$$1], [$$2]))]]) + +# m4_shiftn(N, ...) +# ----------------- +# Returns ... shifted N times. Useful for recursive "varargs" constructs. +# +# m4_shiftn already validated arguments; we only need to speed up +# _m4_shiftn. If N is 3, then we build the temporary _m4_s, defined as +# ,[$5],[$6],...,[$m]_m4_popdef([_m4_s]) +# before calling m4_shift(_m4_s($@)). 
+m4_define([_m4_shiftn], +[m4_if(m4_incr([$1]), [$#], [], [m4_pushdef([_m4_s], + _m4_for(m4_eval([$1 + 2]), [$#], [1], + [[,]m4_dquote($], [)])[_m4_popdef([_m4_s])])m4_shift(_m4_s($@))])]) + +# m4_do(STRING, ...) +# ------------------ +# This macro invokes all its arguments (in sequence, of course). It is +# useful for making your macros more structured and readable by dropping +# unnecessary dnl's and have the macros indented properly. +# +# Here, we use the temporary macro _m4_do, defined as +# $1[]$2[]...[]$n[]_m4_popdef([_m4_do]) +m4_define([m4_do], +[m4_if([$#], [0], [], + [m4_pushdef([_$0], _m4_for([1], [$#], [1], + [$], [[[]]])[_m4_popdef([_$0])])_$0($@)])]) + +# m4_dquote_elt(ARGS) +# ------------------- +# Return ARGS as an unquoted list of double-quoted arguments. +# +# _m4_foreach to the rescue. +m4_define([m4_dquote_elt], +[m4_if([$#], [0], [], [[[$1]]_m4_foreach([,m4_dquote(], [)], $@)])]) + +# m4_reverse(ARGS) +# ---------------- +# Output ARGS in reverse order. +# +# Invoke _m4_r($@) with the temporary _m4_r built as +# [$m], [$m-1], ..., [$2], [$1]_m4_popdef([_m4_r]) +m4_define([m4_reverse], +[m4_if([$#], [0], [], [$#], [1], [[$1]], +[m4_pushdef([_m4_r], [[$$#]]_m4_for(m4_decr([$#]), [1], [-1], + [[, ]m4_dquote($], [)])[_m4_popdef([_m4_r])])_m4_r($@)])]) + + +# m4_map_args_pair(EXPRESSION, [END-EXPR = EXPRESSION], ARG...) +# ------------------------------------------------------------- +# Perform a pairwise grouping of consecutive ARGs, by expanding +# EXPRESSION([ARG1], [ARG2]). If there are an odd number of ARGs, the +# final argument is expanded with END-EXPR([ARGn]). 
+# +# Build the temporary macro _m4_map_args_pair, with the $2([$m+1]) +# only output if $# is odd: +# $1([$3], [$4])[]$1([$5], [$6])[]...$1([$m-1], +# [$m])[]m4_default([$2], [$1])([$m+1])[]_m4_popdef([_m4_map_args_pair]) +m4_define([m4_map_args_pair], +[m4_if([$#], [0], [m4_fatal([$0: too few arguments: $#])], + [$#], [1], [m4_fatal([$0: too few arguments: $#: $1])], + [$#], [2], [], + [$#], [3], [m4_default([$2], [$1])([$3])[]], + [m4_pushdef([_$0], _m4_for([3], + m4_eval([$# / 2 * 2 - 1]), [2], [_$0_(], [)])_$0_end( + [1], [2], [$#])[_m4_popdef([_$0])])_$0($@)])]) + +m4_define([_m4_map_args_pair_], +[$0_([1], [$1], m4_incr([$1]))]) + +m4_define([_m4_map_args_pair__], +[[$$1([$$2], [$$3])[]]]) + +m4_define([_m4_map_args_pair_end], +[m4_if(m4_eval([$3 & 1]), [1], [[m4_default([$$2], [$$1])([$$3])[]]])]) + +# m4_join(SEP, ARG1, ARG2...) +# --------------------------- +# Produce ARG1SEPARG2...SEPARGn. Avoid back-to-back SEP when a given ARG +# is the empty string. No expansion is performed on SEP or ARGs. +# +# Use a self-modifying separator, since we don't know how many +# arguments might be skipped before a separator is first printed, but +# be careful if the separator contains $. _m4_foreach to the rescue. +m4_define([m4_join], +[m4_pushdef([_m4_sep], [m4_define([_m4_sep], _m4_defn([m4_echo]))])]dnl +[_m4_foreach([_$0([$1],], [)], $@)_m4_popdef([_m4_sep])]) + +m4_define([_m4_join], +[m4_if([$2], [], [], [_m4_sep([$1])[$2]])]) + +# m4_joinall(SEP, ARG1, ARG2...) +# ------------------------------ +# Produce ARG1SEPARG2...SEPARGn. An empty ARG results in back-to-back SEP. +# No expansion is performed on SEP or ARGs. +# +# A bit easier than m4_join. _m4_foreach to the rescue. +m4_define([m4_joinall], +[[$2]m4_if(m4_eval([$# <= 2]), [1], [], + [_m4_foreach([$1], [], m4_shift($@))])]) + +# m4_list_cmp(A, B) +# ----------------- +# Compare the two lists of integer expressions A and B. 
+# +# m4_list_cmp takes care of any side effects; we only override +# _m4_list_cmp_raw, where we can safely expand lists multiple times. +# First, insert padding so that both lists are the same length; the +# trailing +0 is necessary to handle a missing list. Next, create a +# temporary macro to perform pairwise comparisons until an inequality +# is found. For example, m4_list_cmp([1], [1,2]) creates _m4_cmp as +# m4_if(m4_eval([($1) != ($3)]), [1], [m4_cmp([$1], [$3])], +# m4_eval([($2) != ($4)]), [1], [m4_cmp([$2], [$4])], +# [0]_m4_popdef([_m4_cmp])) +# then calls _m4_cmp([1+0], [0*2], [1], [2+0]) +m4_define([_m4_list_cmp_raw], +[m4_if([$1], [$2], 0, + [_m4_list_cmp($1+0_m4_list_pad(m4_count($1), m4_count($2)), + $2+0_m4_list_pad(m4_count($2), m4_count($1)))])]) + +m4_define([_m4_list_pad], +[m4_if(m4_eval($1 < $2), [1], + [_m4_for(m4_incr([$1]), [$2], [1], [,0*])])]) + +m4_define([_m4_list_cmp], +[m4_pushdef([_m4_cmp], [m4_if(]_m4_for( + [1], m4_eval([$# >> 1]), [1], [$0_(], [,]m4_eval([$# >> 1])[)])[ + [0]_m4_popdef([_m4_cmp]))])_m4_cmp($@)]) + +m4_define([_m4_list_cmp_], +[$0_([$1], m4_eval([$1 + $2]))]) + +m4_define([_m4_list_cmp__], +[[m4_eval([($$1) != ($$2)]), [1], [m4_cmp([$$1], [$$2])], +]]) + +# m4_max(EXPR, ...) +# m4_min(EXPR, ...) +# ----------------- +# Return the decimal value of the maximum (or minimum) in a series of +# integer expressions. +# +# _m4_foreach to the rescue; we only need to replace _m4_minmax. Here, +# we need a temporary macro to track the best answer so far, so that +# the foreach expression is tractable. +m4_define([_m4_minmax], +[m4_pushdef([_m4_best], m4_eval([$2]))_m4_foreach( + [m4_define([_m4_best], $1(_m4_best,], [))], m4_shift($@))]dnl +[_m4_best[]_m4_popdef([_m4_best])]) + +# m4_set_add_all(SET, VALUE...) +# ----------------------------- +# Add each VALUE into SET. This is O(n) in the number of VALUEs, and +# can be faster than calling m4_set_add for each VALUE. +# +# _m4_foreach to the rescue. 
If no deletions have occurred, then +# avoid the speed penalty of m4_set_add. +m4_define([m4_set_add_all], +[m4_if([$#], [0], [], [$#], [1], [], + [m4_define([_m4_set_size($1)], m4_eval(m4_set_size([$1]) + + m4_len(_m4_foreach(m4_ifdef([_m4_set_cleanup($1)], + [[m4_set_add]], [[_$0]])[([$1],], [)], $@))))])]) + +m4_define([_m4_set_add_all], +[m4_ifdef([_m4_set([$1],$2)], [], + [m4_define([_m4_set([$1],$2)], + [1])m4_pushdef([_m4_set([$1])], [$2])-])]) diff --git a/platform/dbops/binaries/build/share/bison/m4sugar/m4sugar.m4 b/platform/dbops/binaries/build/share/bison/m4sugar/m4sugar.m4 new file mode 100644 index 0000000000000000000000000000000000000000..b42fc1a6de0b816b3e972e544950810b975dfa82 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/m4sugar/m4sugar.m4 @@ -0,0 +1,3329 @@ +divert(-1)# -*- Autoconf -*- +# This file is part of Autoconf. +# Base M4 layer. +# Requires GNU M4. +# +# Copyright (C) 1999-2017, 2020 Free Software Foundation, Inc. + +# This file is part of Autoconf. This program is free +# software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# Under Section 7 of GPL version 3, you are granted additional +# permissions described in the Autoconf Configure Script Exception, +# version 3.0, as published by the Free Software Foundation. +# +# You should have received a copy of the GNU General Public License +# and a copy of the Autoconf Configure Script Exception along with +# this program; see the files COPYINGv3 and COPYING.EXCEPTION +# respectively. If not, see . + +# Written by Akim Demaille. 
+ +# Set the quotes, whatever the current quoting system. +changequote() +changequote([, ]) + +# Some old m4's don't support m4exit. But they provide +# equivalent functionality by core dumping because of the +# long macros we define. +ifdef([__gnu__], , +[errprint(M4sugar requires GNU M4. Install it before installing M4sugar or +set the M4 environment variable to its absolute file name.) +m4exit(2)]) + + +## ------------------------------- ## +## 1. Simulate --prefix-builtins. ## +## ------------------------------- ## + +# m4_define +# m4_defn +# m4_undefine +define([m4_define], defn([define])) +define([m4_defn], defn([defn])) +define([m4_undefine], defn([undefine])) + +m4_undefine([define]) +m4_undefine([defn]) +m4_undefine([undefine]) + + +# m4_copy(SRC, DST) +# ----------------- +# Define DST as the definition of SRC. +# What's the difference between: +# 1. m4_copy([from], [to]) +# 2. m4_define([to], [from($@)]) +# Well, obviously 1 is more expensive in space. Maybe 2 is more expensive +# in time, but because of the space cost of 1, it's not that obvious. +# Nevertheless, one huge difference is the handling of `$0'. If `from' +# uses `$0', then with 1, `to''s `$0' is `to', while it is `from' in 2. +# The user would certainly prefer to see `to'. +# +# This definition is in effect during m4sugar initialization, when +# there are no pushdef stacks; later on, we redefine it to something +# more powerful for all other clients to use. +m4_define([m4_copy], +[m4_define([$2], m4_defn([$1]))]) + + +# m4_rename(SRC, DST) +# ------------------- +# Rename the macro SRC to DST. +m4_define([m4_rename], +[m4_copy([$1], [$2])m4_undefine([$1])]) + + +# m4_rename_m4(MACRO-NAME) +# ------------------------ +# Rename MACRO-NAME to m4_MACRO-NAME. +m4_define([m4_rename_m4], +[m4_rename([$1], [m4_$1])]) + + +# m4_copy_unm4(m4_MACRO-NAME) +# --------------------------- +# Copy m4_MACRO-NAME to MACRO-NAME. 
+m4_define([m4_copy_unm4], +[m4_copy([$1], m4_bpatsubst([$1], [^m4_\(.*\)], [[\1]]))]) + + +# Some m4 internals have names colliding with tokens we might use. +# Rename them a` la `m4 --prefix-builtins'. Conditionals first, since +# some subsequent renames are conditional. +m4_rename_m4([ifdef]) +m4_rename([ifelse], [m4_if]) + +m4_rename_m4([builtin]) +m4_rename_m4([changecom]) +m4_rename_m4([changequote]) +m4_ifdef([changeword],dnl conditionally available in 1.4.x +[m4_undefine([changeword])]) +m4_rename_m4([debugfile]) +m4_rename_m4([debugmode]) +m4_rename_m4([decr]) +m4_rename_m4([divnum]) +m4_rename_m4([dumpdef]) +m4_rename_m4([errprint]) +m4_rename_m4([esyscmd]) +m4_rename_m4([eval]) +m4_rename_m4([format]) +m4_undefine([include]) +m4_rename_m4([incr]) +m4_rename_m4([index]) +m4_rename_m4([indir]) +m4_rename_m4([len]) +m4_rename([m4exit], [m4_exit]) +m4_undefine([m4wrap]) +m4_ifdef([mkstemp],dnl added in M4 1.4.8 +[m4_rename_m4([mkstemp]) +m4_copy([m4_mkstemp], [m4_maketemp]) +m4_undefine([maketemp])], +[m4_rename_m4([maketemp]) +m4_copy([m4_maketemp], [m4_mkstemp])]) +m4_rename([patsubst], [m4_bpatsubst]) +m4_rename_m4([popdef]) +m4_rename_m4([pushdef]) +m4_rename([regexp], [m4_bregexp]) +m4_rename_m4([shift]) +m4_undefine([sinclude]) +m4_rename_m4([substr]) +m4_ifdef([symbols],dnl present only in alpha-quality 1.4o +[m4_rename_m4([symbols])]) +m4_rename_m4([syscmd]) +m4_rename_m4([sysval]) +m4_rename_m4([traceoff]) +m4_rename_m4([traceon]) +m4_rename_m4([translit]) + +# _m4_defn(ARG) +# ------------- +# _m4_defn is for internal use only - it bypasses the wrapper, so it +# must only be used on one argument at a time, and only on macros +# known to be defined. Make sure this still works if the user renames +# m4_defn but not _m4_defn. +m4_copy([m4_defn], [_m4_defn]) + +# _m4_divert_raw(NUM) +# ------------------- +# _m4_divert_raw is for internal use only. Use this instead of +# m4_builtin([divert], NUM), so that tracing diversion flow is easier. 
+m4_rename([divert], [_m4_divert_raw]) + +# _m4_popdef(ARG...) +# ------------------ +# _m4_popdef is for internal use only - it bypasses the wrapper, so it +# must only be used on macros known to be defined. Make sure this +# still works if the user renames m4_popdef but not _m4_popdef. +m4_copy([m4_popdef], [_m4_popdef]) + +# _m4_undefine(ARG...) +# -------------------- +# _m4_undefine is for internal use only - it bypasses the wrapper, so +# it must only be used on macros known to be defined. Make sure this +# still works if the user renames m4_undefine but not _m4_undefine. +m4_copy([m4_undefine], [_m4_undefine]) + +# _m4_undivert(NUM...) +# -------------------- +# _m4_undivert is for internal use only, and should always be given +# arguments. Use this instead of m4_builtin([undivert], NUM...), so +# that tracing diversion flow is easier. +m4_rename([undivert], [_m4_undivert]) + + +## ------------------- ## +## 2. Error messages. ## +## ------------------- ## + + +# m4_location +# ----------- +# Output the current file, colon, and the current line number. +m4_define([m4_location], +[__file__:__line__]) + + +# m4_errprintn(MSG) +# ----------------- +# Same as `errprint', but with the missing end of line. +m4_define([m4_errprintn], +[m4_errprint([$1 +])]) + + +# m4_warning(MSG) +# --------------- +# Warn the user. +m4_define([m4_warning], +[m4_errprintn(m4_location[: warning: $1])]) + + +# m4_fatal(MSG, [EXIT-STATUS]) +# ---------------------------- +# Fatal the user. :) +m4_define([m4_fatal], +[m4_errprintn(m4_location[: error: $1] +m4_expansion_stack)m4_exit(m4_if([$2],, 1, [$2]))]) + + +# m4_assert(EXPRESSION, [EXIT-STATUS = 1]) +# ---------------------------------------- +# This macro ensures that EXPRESSION evaluates to true, and exits if +# EXPRESSION evaluates to false. +m4_define([m4_assert], +[m4_if(m4_eval([$1]), 0, + [m4_fatal([assert failed: $1], [$2])])]) + + + +## ------------- ## +## 3. Warnings. 
## +## ------------- ## + + +# _m4_warn(CATEGORY, MESSAGE, [STACK-TRACE]) +# ------------------------------------------ +# Report a MESSAGE to the user if the CATEGORY of warnings is enabled. +# This is for traces only. +# If present, STACK-TRACE is a \n-separated list of "LOCATION: MESSAGE", +# where the last line (and no other) ends with "the top level". +# +# Within m4, the macro is a no-op. This macro really matters +# when autom4te post-processes the trace output. +m4_define([_m4_warn], []) + + +# m4_warn(CATEGORY, MESSAGE) +# -------------------------- +# Report a MESSAGE to the user if the CATEGORY of warnings is enabled. +m4_define([m4_warn], +[_m4_warn([$1], [$2], +m4_ifdef([_m4_expansion_stack], [m4_expansion_stack]))]) + + + +## ------------------- ## +## 4. File inclusion. ## +## ------------------- ## + + +# We also want to neutralize include (and sinclude for symmetry), +# but we want to extend them slightly: warn when a file is included +# several times. This is, in general, a dangerous operation, because +# too many people forget to quote the first argument of m4_define. +# +# For instance in the following case: +# m4_define(foo, [bar]) +# then a second reading will turn into +# m4_define(bar, [bar]) +# which is certainly not what was meant. + +# m4_include_unique(FILE) +# ----------------------- +# Declare that the FILE was loading; and warn if it has already +# been included. +m4_define([m4_include_unique], +[m4_ifdef([m4_include($1)], + [m4_warn([syntax], [file `$1' included several times])])dnl +m4_define([m4_include($1)])]) + + +# m4_include(FILE) +# ---------------- +# Like the builtin include, but warns against multiple inclusions. +m4_define([m4_include], +[m4_include_unique([$1])dnl +m4_builtin([include], [$1])]) + + +# m4_sinclude(FILE) +# ----------------- +# Like the builtin sinclude, but warns against multiple inclusions. 
+m4_define([m4_sinclude], +[m4_include_unique([$1])dnl +m4_builtin([sinclude], [$1])]) + + + +## ------------------------------------ ## +## 5. Additional branching constructs. ## +## ------------------------------------ ## + +# Both `m4_ifval' and `m4_ifset' tests against the empty string. The +# difference is that `m4_ifset' is specialized on macros. +# +# In case of arguments of macros, eg. $1, it makes little difference. +# In the case of a macro `FOO', you don't want to check `m4_ifval(FOO, +# TRUE)', because if `FOO' expands with commas, there is a shifting of +# the arguments. So you want to run `m4_ifval([FOO])', but then you just +# compare the *string* `FOO' against `', which, of course fails. +# +# So you want the variation `m4_ifset' that expects a macro name as $1. +# If this macro is both defined and defined to a non empty value, then +# it runs TRUE, etc. + + +# m4_ifblank(COND, [IF-BLANK], [IF-TEXT]) +# m4_ifnblank(COND, [IF-TEXT], [IF-BLANK]) +# ---------------------------------------- +# If COND is empty, or consists only of blanks (space, tab, newline), +# then expand IF-BLANK, otherwise expand IF-TEXT. This differs from +# m4_ifval only if COND has just whitespace, but it helps optimize in +# spite of users who mistakenly leave trailing space after what they +# thought was an empty argument: +# macro( +# [] +# ) +# +# Writing one macro in terms of the other causes extra overhead, so +# we inline both definitions. +m4_define([m4_ifblank], +[m4_if(m4_translit([[$1]], [ ][ ][ +]), [], [$2], [$3])]) + +m4_define([m4_ifnblank], +[m4_if(m4_translit([[$1]], [ ][ ][ +]), [], [$3], [$2])]) + + +# m4_ifval(COND, [IF-TRUE], [IF-FALSE]) +# ------------------------------------- +# If COND is not the empty string, expand IF-TRUE, otherwise IF-FALSE. +# Comparable to m4_ifdef. +m4_define([m4_ifval], +[m4_if([$1], [], [$3], [$2])]) + + +# m4_n(TEXT) +# ---------- +# If TEXT is not empty, return TEXT and a new line, otherwise nothing. 
+m4_define([m4_n], +[m4_if([$1], + [], [], + [$1 +])]) + + +# m4_ifvaln(COND, [IF-TRUE], [IF-FALSE]) +# -------------------------------------- +# Same as `m4_ifval', but add an extra newline to IF-TRUE or IF-FALSE +# unless that argument is empty. +m4_define([m4_ifvaln], +[m4_if([$1], + [], [m4_n([$3])], + [m4_n([$2])])]) + + +# m4_ifset(MACRO, [IF-TRUE], [IF-FALSE]) +# -------------------------------------- +# If MACRO has no definition, or of its definition is the empty string, +# expand IF-FALSE, otherwise IF-TRUE. +m4_define([m4_ifset], +[m4_ifdef([$1], + [m4_ifval(_m4_defn([$1]), [$2], [$3])], + [$3])]) + + +# m4_ifndef(NAME, [IF-NOT-DEFINED], [IF-DEFINED]) +# ----------------------------------------------- +m4_define([m4_ifndef], +[m4_ifdef([$1], [$3], [$2])]) + + +# m4_case(SWITCH, VAL1, IF-VAL1, VAL2, IF-VAL2, ..., DEFAULT) +# ----------------------------------------------------------- +# m4 equivalent of +# switch (SWITCH) +# { +# case VAL1: +# IF-VAL1; +# break; +# case VAL2: +# IF-VAL2; +# break; +# ... +# default: +# DEFAULT; +# break; +# }. +# All the values are optional, and the macro is robust to active +# symbols properly quoted. +# +# Please keep foreach.m4 in sync with any adjustments made here. +m4_define([m4_case], +[m4_if([$#], 0, [], + [$#], 1, [], + [$#], 2, [$2], + [$1], [$2], [$3], + [$0([$1], m4_shift3($@))])]) + + +# m4_bmatch(SWITCH, RE1, VAL1, RE2, VAL2, ..., DEFAULT) +# ----------------------------------------------------- +# m4 equivalent of +# +# if (SWITCH =~ RE1) +# VAL1; +# elif (SWITCH =~ RE2) +# VAL2; +# elif ... +# ... +# else +# DEFAULT +# +# All the values are optional, and the macro is robust to active symbols +# properly quoted. +# +# Please keep foreach.m4 in sync with any adjustments made here. 
+m4_define([m4_bmatch], +[m4_if([$#], 0, [m4_fatal([$0: too few arguments: $#])], + [$#], 1, [m4_fatal([$0: too few arguments: $#: $1])], + [$#], 2, [$2], + [m4_if(m4_bregexp([$1], [$2]), -1, [$0([$1], m4_shift3($@))], + [$3])])]) + +# m4_argn(N, ARGS...) +# ------------------- +# Extract argument N (greater than 0) from ARGS. Example: +# m4_define([b], [B]) +# m4_argn([2], [a], [b], [c]) => b +# +# Rather than using m4_car(m4_shiftn([$1], $@)), we exploit the fact that +# GNU m4 can directly reference any argument, through an indirect macro. +m4_define([m4_argn], +[m4_assert([0 < $1])]dnl +[m4_pushdef([_$0], [_m4_popdef([_$0])]m4_dquote([$]m4_incr([$1])))_$0($@)]) + + +# m4_car(ARGS...) +# m4_cdr(ARGS...) +# --------------- +# Manipulate m4 lists. m4_car returns the first argument. m4_cdr +# bundles all but the first argument into a quoted list. These two +# macros are generally used with list arguments, with quoting removed +# to break the list into multiple m4 ARGS. +m4_define([m4_car], [[$1]]) +m4_define([m4_cdr], +[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], + [$#], 1, [], + [m4_dquote(m4_shift($@))])]) + +# _m4_cdr(ARGS...) +# ---------------- +# Like m4_cdr, except include a leading comma unless only one argument +# remains. Why? Because comparing a large list against [] is more +# expensive in expansion time than comparing the number of arguments; so +# _m4_cdr can be used to reduce the number of arguments when it is time +# to end recursion. +m4_define([_m4_cdr], +[m4_if([$#], 1, [], + [, m4_dquote(m4_shift($@))])]) + + + +# m4_cond(TEST1, VAL1, IF-VAL1, TEST2, VAL2, IF-VAL2, ..., [DEFAULT]) +# ------------------------------------------------------------------- +# Similar to m4_if, except that each TEST is expanded when encountered. +# If the expansion of TESTn matches the string VALn, the result is IF-VALn. +# The result is DEFAULT if no tests passed. 
This macro allows +# short-circuiting of expensive tests, where it pays to arrange quick +# filter tests to run first. +# +# For an example, consider a previous implementation of _AS_QUOTE_IFELSE: +# +# m4_if(m4_index([$1], [\]), [-1], [$2], +# m4_eval(m4_index([$1], [\\]) >= 0), [1], [$2], +# m4_eval(m4_index([$1], [\$]) >= 0), [1], [$2], +# m4_eval(m4_index([$1], [\`]) >= 0), [1], [$3], +# m4_eval(m4_index([$1], [\"]) >= 0), [1], [$3], +# [$2]) +# +# Here, m4_index is computed 5 times, and m4_eval 4, even if $1 contains +# no backslash. It is more efficient to do: +# +# m4_cond([m4_index([$1], [\])], [-1], [$2], +# [m4_eval(m4_index([$1], [\\]) >= 0)], [1], [$2], +# [m4_eval(m4_index([$1], [\$]) >= 0)], [1], [$2], +# [m4_eval(m4_index([$1], [\`]) >= 0)], [1], [$3], +# [m4_eval(m4_index([$1], [\"]) >= 0)], [1], [$3], +# [$2]) +# +# In the common case of $1 with no backslash, only one m4_index expansion +# occurs, and m4_eval is avoided altogether. +# +# Please keep foreach.m4 in sync with any adjustments made here. +m4_define([m4_cond], +[m4_if([$#], [0], [m4_fatal([$0: cannot be called without arguments])], + [$#], [1], [$1], + m4_eval([$# % 3]), [2], [m4_fatal([$0: missing an argument])], + [_$0($@)])]) + +m4_define([_m4_cond], +[m4_if(($1), [($2)], [$3], + [$#], [3], [], + [$#], [4], [$4], + [$0(m4_shift3($@))])]) + + +## ---------------------------------------- ## +## 6. Enhanced version of some primitives. ## +## ---------------------------------------- ## + +# m4_bpatsubsts(STRING, RE1, SUBST1, RE2, SUBST2, ...) +# ---------------------------------------------------- +# m4 equivalent of +# +# $_ = STRING; +# s/RE1/SUBST1/g; +# s/RE2/SUBST2/g; +# ... +# +# All the values are optional, and the macro is robust to active symbols +# properly quoted. +# +# I would have liked to name this macro `m4_bpatsubst', unfortunately, +# due to quotation problems, I need to double quote $1 below, therefore +# the anchors are broken :( I can't let users be trapped by that. 
+# +# Recall that m4_shift3 always results in an argument. Hence, we need +# to distinguish between a final deletion vs. ending recursion. +# +# Please keep foreach.m4 in sync with any adjustments made here. +m4_define([m4_bpatsubsts], +[m4_if([$#], 0, [m4_fatal([$0: too few arguments: $#])], + [$#], 1, [m4_fatal([$0: too few arguments: $#: $1])], + [$#], 2, [m4_unquote(m4_builtin([patsubst], [[$1]], [$2]))], + [$#], 3, [m4_unquote(m4_builtin([patsubst], [[$1]], [$2], [$3]))], + [_$0($@m4_if(m4_eval($# & 1), 0, [,]))])]) +m4_define([_m4_bpatsubsts], +[m4_if([$#], 2, [$1], + [$0(m4_builtin([patsubst], [[$1]], [$2], [$3]), + m4_shift3($@))])]) + + +# m4_copy(SRC, DST) +# ----------------- +# Define the pushdef stack DST as a copy of the pushdef stack SRC; +# give an error if DST is already defined. This is particularly nice +# for copying self-modifying pushdef stacks, where the top definition +# includes one-shot initialization that is later popped to the normal +# definition. This version intentionally does nothing if SRC is +# undefined. +# +# Some macros simply can't be renamed with this method: namely, anything +# involved in the implementation of m4_stack_foreach_sep. +m4_define([m4_copy], +[m4_ifdef([$2], [m4_fatal([$0: won't overwrite defined macro: $2])], + [m4_stack_foreach_sep([$1], [m4_pushdef([$2],], [)])])]dnl +[m4_ifdef([m4_location($1)], [m4_define([m4_location($2)], m4_location)])]) + + +# m4_copy_force(SRC, DST) +# m4_rename_force(SRC, DST) +# ------------------------- +# Like m4_copy/m4_rename, except blindly overwrite any existing DST. +# Note that m4_copy_force tolerates undefined SRC, while m4_rename_force +# does not. +m4_define([m4_copy_force], +[m4_ifdef([$2], [_m4_undefine([$2])])m4_copy($@)]) + +m4_define([m4_rename_force], +[m4_ifdef([$2], [_m4_undefine([$2])])m4_rename($@)]) + + +# m4_define_default(MACRO, VALUE) +# ------------------------------- +# If MACRO is undefined, set it to VALUE. 
+m4_define([m4_define_default], +[m4_ifndef([$1], [m4_define($@)])]) + + +# m4_default(EXP1, EXP2) +# m4_default_nblank(EXP1, EXP2) +# ----------------------------- +# Returns EXP1 if not empty/blank, otherwise EXP2. Expand the result. +# +# m4_default is called on hot paths, so inline the contents of m4_ifval, +# for one less round of expansion. +m4_define([m4_default], +[m4_if([$1], [], [$2], [$1])]) + +m4_define([m4_default_nblank], +[m4_ifblank([$1], [$2], [$1])]) + + +# m4_default_quoted(EXP1, EXP2) +# m4_default_nblank_quoted(EXP1, EXP2) +# ------------------------------------ +# Returns EXP1 if non empty/blank, otherwise EXP2. Leave the result quoted. +# +# For comparison: +# m4_define([active], [ACTIVE]) +# m4_default([active], [default]) => ACTIVE +# m4_default([], [active]) => ACTIVE +# -m4_default([ ], [active])- => - - +# -m4_default_nblank([ ], [active])- => -ACTIVE- +# m4_default_quoted([active], [default]) => active +# m4_default_quoted([], [active]) => active +# -m4_default_quoted([ ], [active])- => - - +# -m4_default_nblank_quoted([ ], [active])- => -active- +# +# m4_default macro is called on hot paths, so inline the contents of m4_ifval, +# for one less round of expansion. +m4_define([m4_default_quoted], +[m4_if([$1], [], [[$2]], [[$1]])]) + +m4_define([m4_default_nblank_quoted], +[m4_ifblank([$1], [[$2]], [[$1]])]) + + +# m4_defn(NAME) +# ------------- +# Like the original, except guarantee a warning when using something which is +# undefined (unlike M4 1.4.x). This replacement is not a full-featured +# replacement: if any of the defined macros contain unbalanced quoting, but +# when pasted together result in a well-quoted string, then only native m4 +# support is able to get it correct. But that's where quadrigraphs come in +# handy, if you really need unbalanced quotes inside your macros. +# +# This macro is called frequently, so minimize the amount of additional +# expansions by skipping m4_ifndef. 
Better yet, if __m4_version__ exists, +# (added in M4 1.6), then let m4 do the job for us (see m4_init). +m4_define([m4_defn], +[m4_if([$#], [0], [[$0]], + [$#], [1], [m4_ifdef([$1], [_m4_defn([$1])], + [m4_fatal([$0: undefined macro: $1])])], + [m4_map_args([$0], $@)])]) + + +# m4_dumpdef(NAME...) +# ------------------- +# In m4 1.4.x, dumpdef writes to the current debugfile, rather than +# stderr. This in turn royally confuses autom4te; so we follow the +# lead of newer m4 and always dump to stderr. Unlike the original, +# this version requires an argument, since there is no convenient way +# in m4 1.4.x to grab the names of all defined macros. Newer m4 +# always dumps to stderr, regardless of the current debugfile; it also +# provides m4symbols as a way to grab all current macro names. But +# dumpdefs is not frequently called, so we don't need to worry about +# conditionally using these newer features. Also, this version +# doesn't sort multiple arguments. +# +# If we detect m4 1.6 or newer, then provide an alternate definition, +# installed during m4_init, that allows builtins through. +# Unfortunately, there is no nice way in m4 1.4.x to dump builtins. +m4_define([m4_dumpdef], +[m4_if([$#], [0], [m4_fatal([$0: missing argument])], + [$#], [1], [m4_ifdef([$1], [m4_errprintn( + [$1: ]m4_dquote(_m4_defn([$1])))], [m4_fatal([$0: undefined macro: $1])])], + [m4_map_args([$0], $@)])]) + +m4_define([_m4_dumpdef], +[m4_if([$#], [0], [m4_fatal([$0: missing argument])], + [$#], [1], [m4_builtin([dumpdef], [$1])], + [m4_map_args_sep([m4_builtin([dumpdef],], [)], [], $@)])]) + + +# m4_dumpdefs(NAME...) +# -------------------- +# Similar to `m4_dumpdef(NAME)', but if NAME was m4_pushdef'ed, display its +# value stack (most recent displayed first). Also, this version silently +# ignores undefined macros, rather than erroring out. 
+# +# This macro cheats, because it relies on the current definition of NAME +# while the second argument of m4_stack_foreach_lifo is evaluated (which +# would be undefined according to the API). +m4_define([m4_dumpdefs], +[m4_if([$#], [0], [m4_fatal([$0: missing argument])], + [$#], [1], [m4_stack_foreach_lifo([$1], [m4_dumpdef([$1])m4_ignore])], + [m4_map_args([$0], $@)])]) + +# m4_esyscmd_s(COMMAND) +# --------------------- +# Like m4_esyscmd, except strip any trailing newlines, thus behaving +# more like shell command substitution. +m4_define([m4_esyscmd_s], +[m4_chomp_all(m4_esyscmd([$1]))]) + + +# m4_popdef(NAME) +# --------------- +# Like the original, except guarantee a warning when using something which is +# undefined (unlike M4 1.4.x). +# +# This macro is called frequently, so minimize the amount of additional +# expansions by skipping m4_ifndef. Better yet, if __m4_version__ exists, +# (added in M4 1.6), then let m4 do the job for us (see m4_init). +m4_define([m4_popdef], +[m4_if([$#], [0], [[$0]], + [$#], [1], [m4_ifdef([$1], [_m4_popdef([$1])], + [m4_fatal([$0: undefined macro: $1])])], + [m4_map_args([$0], $@)])]) + + +# m4_shiftn(N, ...) +# ----------------- +# Returns ... shifted N times. Useful for recursive "varargs" constructs. +# +# Autoconf does not use this macro, because it is inherently slower than +# calling the common cases of m4_shift2 or m4_shift3 directly. But it +# might as well be fast for other clients, such as Libtool. One way to +# do this is to expand $@ only once in _m4_shiftn (otherwise, for long +# lists, the expansion of m4_if takes twice as much memory as what the +# list itself occupies, only to throw away the unused branch). The end +# result is strictly equivalent to +# m4_if([$1], 1, [m4_shift(,m4_shift(m4_shift($@)))], +# [_m4_shiftn(m4_decr([$1]), m4_shift(m4_shift($@)))]) +# but with the final `m4_shift(m4_shift($@)))' shared between the two +# paths. The first leg uses a no-op m4_shift(,$@) to balance out the (). 
+# +# Please keep foreach.m4 in sync with any adjustments made here. +m4_define([m4_shiftn], +[m4_assert(0 < $1 && $1 < $#)_$0($@)]) + +m4_define([_m4_shiftn], +[m4_if([$1], 1, [m4_shift(], + [$0(m4_decr([$1])]), m4_shift(m4_shift($@)))]) + +# m4_shift2(...) +# m4_shift3(...) +# -------------- +# Returns ... shifted twice, and three times. Faster than m4_shiftn. +m4_define([m4_shift2], [m4_shift(m4_shift($@))]) +m4_define([m4_shift3], [m4_shift(m4_shift(m4_shift($@)))]) + +# _m4_shift2(...) +# _m4_shift3(...) +# --------------- +# Like m4_shift2 or m4_shift3, except include a leading comma unless shifting +# consumes all arguments. Why? Because in recursion, it is nice to +# distinguish between 1 element left and 0 elements left, based on how many +# arguments this shift expands to. +m4_define([_m4_shift2], +[m4_if([$#], [2], [], + [, m4_shift(m4_shift($@))])]) +m4_define([_m4_shift3], +[m4_if([$#], [3], [], + [, m4_shift(m4_shift(m4_shift($@)))])]) + + +# m4_undefine(NAME) +# ----------------- +# Like the original, except guarantee a warning when using something which is +# undefined (unlike M4 1.4.x). +# +# This macro is called frequently, so minimize the amount of additional +# expansions by skipping m4_ifndef. Better yet, if __m4_version__ exists, +# (added in M4 1.6), then let m4 do the job for us (see m4_init). +m4_define([m4_undefine], +[m4_if([$#], [0], [[$0]], + [$#], [1], [m4_ifdef([$1], [_m4_undefine([$1])], + [m4_fatal([$0: undefined macro: $1])])], + [m4_map_args([$0], $@)])]) + +# _m4_wrap(PRE, POST) +# ------------------- +# Helper macro for m4_wrap and m4_wrap_lifo. Allows nested calls to +# m4_wrap within wrapped text. Use _m4_defn and _m4_popdef for speed. 
+m4_define([_m4_wrap], +[m4_ifdef([$0_text], + [m4_define([$0_text], [$1]_m4_defn([$0_text])[$2])], + [m4_builtin([m4wrap], [m4_unquote( + _m4_defn([$0_text])_m4_popdef([$0_text]))])m4_define([$0_text], [$1$2])])]) + +# m4_wrap(TEXT) +# ------------- +# Append TEXT to the list of hooks to be executed at the end of input. +# Whereas the order of the original may be LIFO in the underlying m4, +# this version is always FIFO. +m4_define([m4_wrap], +[_m4_wrap([], [$1[]])]) + +# m4_wrap_lifo(TEXT) +# ------------------ +# Prepend TEXT to the list of hooks to be executed at the end of input. +# Whereas the order of m4_wrap may be FIFO in the underlying m4, this +# version is always LIFO. +m4_define([m4_wrap_lifo], +[_m4_wrap([$1[]])]) + +## ------------------------- ## +## 7. Quoting manipulation. ## +## ------------------------- ## + + +# m4_apply(MACRO, LIST) +# --------------------- +# Invoke MACRO, with arguments provided from the quoted list of +# comma-separated quoted arguments. If LIST is empty, invoke MACRO +# without arguments. The expansion will not be concatenated with +# subsequent text. +m4_define([m4_apply], +[m4_if([$2], [], [$1], [$1($2)])[]]) + +# _m4_apply(MACRO, LIST) +# ---------------------- +# Like m4_apply, except do nothing if LIST is empty. +m4_define([_m4_apply], +[m4_if([$2], [], [], [$1($2)[]])]) + + +# m4_count(ARGS) +# -------------- +# Return a count of how many ARGS are present. +m4_define([m4_count], [$#]) + + +# m4_curry(MACRO, ARG...) +# ----------------------- +# Perform argument currying. The expansion of this macro is another +# macro that takes exactly one argument, appends it to the end of the +# original ARG list, then invokes MACRO. 
For example: +# m4_curry([m4_curry], [m4_reverse], [1])([2])([3]) => 3, 2, 1 +# Not quite as practical as m4_incr, but you could also do: +# m4_define([add], [m4_eval(([$1]) + ([$2]))]) +# m4_define([add_one], [m4_curry([add], [1])]) +# add_one()([2]) => 3 +m4_define([m4_curry], [$1(m4_shift($@,)_$0]) +m4_define([_m4_curry], [[$1])]) + + +# m4_do(STRING, ...) +# ------------------ +# This macro invokes all its arguments (in sequence, of course). It is +# useful for making your macros more structured and readable by dropping +# unnecessary dnl's and have the macros indented properly. No concatenation +# occurs after a STRING; use m4_unquote(m4_join(,STRING)) for that. +# +# Please keep foreach.m4 in sync with any adjustments made here. +m4_define([m4_do], +[m4_if([$#], 0, [], + [$#], 1, [$1[]], + [$1[]$0(m4_shift($@))])]) + + +# m4_dquote(ARGS) +# --------------- +# Return ARGS as a quoted list of quoted arguments. +m4_define([m4_dquote], [[$@]]) + + +# m4_dquote_elt(ARGS) +# ------------------- +# Return ARGS as an unquoted list of double-quoted arguments. +# +# Please keep foreach.m4 in sync with any adjustments made here. +m4_define([m4_dquote_elt], +[m4_if([$#], [0], [], + [$#], [1], [[[$1]]], + [[[$1]],$0(m4_shift($@))])]) + + +# m4_echo(ARGS) +# ------------- +# Return the ARGS, with the same level of quoting. Whitespace after +# unquoted commas are consumed. +m4_define([m4_echo], [$@]) + + +# m4_expand(ARG) +# _m4_expand(ARG) +# --------------- +# Return the expansion of ARG as a single string. Unlike +# m4_quote($1), this preserves whitespace following single-quoted +# commas that appear within ARG. It also deals with shell case +# statements. 
+# +# m4_define([active], [ACT, IVE]) +# m4_define([active2], [[ACT, IVE]]) +# m4_quote(active, active2) +# => ACT,IVE,ACT, IVE +# m4_expand([active, active2]) +# => ACT, IVE, ACT, IVE +# +# Unfortunately, due to limitations in m4, ARG must expand to +# something with balanced quotes (use quadrigraphs to get around +# this), and should not contain the unlikely delimiters -=<{( or +# )}>=-. It is possible to have unbalanced quoted `(' or `)', as well +# as unbalanced unquoted `)'. m4_expand can handle unterminated +# comments or dnl on the final line, at the expense of speed; it also +# aids in detecting attempts to incorrectly change the current +# diversion inside ARG. Meanwhile, _m4_expand is faster but must be +# given a terminated expansion, and has no safety checks for +# mis-diverted text. +# +# Exploit that extra unquoted () will group unquoted commas and the +# following whitespace. m4_bpatsubst can't handle newlines inside $1, +# and m4_substr strips quoting. So we (ab)use m4_changequote, using +# temporary quotes to remove the delimiters that conveniently included +# the unquoted () that were added prior to the changequote. +# +# Thanks to shell case statements, too many people are prone to pass +# underquoted `)', so we try to detect that by passing a marker as a +# fourth argument; if the marker is not present, then we assume that +# we encountered an early `)', and re-expand the first argument, but +# this time with one more `(' in the second argument and in the +# open-quote delimiter. We must also ignore the slop from the +# previous try. The final macro is thus half line-noise, half art. 
+m4_define([m4_expand], +[m4_pushdef([m4_divert], _m4_defn([_m4_divert_unsafe]))]dnl +[m4_pushdef([m4_divert_push], _m4_defn([_m4_divert_unsafe]))]dnl +[m4_chomp(_$0([$1 +]))_m4_popdef([m4_divert], [m4_divert_push])]) + +m4_define([_m4_expand], [$0_([$1], [(], -=<{($1)}>=-, [}>=-])]) + +m4_define([_m4_expand_], +[m4_if([$4], [}>=-], + [m4_changequote([-=<{$2], [)}>=-])$3m4_changequote([, ])], + [$0([$1], [($2], -=<{($2$1)}>=-, [}>=-])m4_ignore$2])]) + + +# m4_ignore(ARGS) +# --------------- +# Expands to nothing. Useful for conditionally ignoring an arbitrary +# number of arguments (see _m4_list_cmp for an example). +m4_define([m4_ignore]) + + +# m4_make_list(ARGS) +# ------------------ +# Similar to m4_dquote, this creates a quoted list of quoted ARGS. This +# version is less efficient than m4_dquote, but separates each argument +# with a comma and newline, rather than just comma, for readability. +# When developing an m4sugar algorithm, you could temporarily use +# m4_pushdef([m4_dquote],m4_defn([m4_make_list])) +# around your code to make debugging easier. +m4_define([m4_make_list], [m4_join([, +], m4_dquote_elt($@))]) + + +# m4_noquote(STRING) +# ------------------ +# Return the result of ignoring all quotes in STRING and invoking the +# macros it contains. Among other things, this is useful for enabling +# macro invocations inside strings with [] blocks (for instance regexps +# and help-strings). On the other hand, since all quotes are disabled, +# any macro expanded during this time that relies on nested [] quoting +# will likely crash and burn. This macro is seldom useful; consider +# m4_unquote or m4_expand instead. +m4_define([m4_noquote], +[m4_changequote([-=<{(],[)}>=-])$1-=<{()}>=-m4_changequote([,])]) + + +# m4_quote(ARGS) +# -------------- +# Return ARGS as a single argument. Any whitespace after unquoted commas +# is stripped. There is always output, even when there were no arguments. 
+# +# It is important to realize the difference between `m4_quote(exp)' and +# `[exp]': in the first case you obtain the quoted *result* of the +# expansion of EXP, while in the latter you just obtain the string +# `exp'. +m4_define([m4_quote], [[$*]]) + + +# _m4_quote(ARGS) +# --------------- +# Like m4_quote, except that when there are no arguments, there is no +# output. For conditional scenarios (such as passing _m4_quote as the +# macro name in m4_mapall), this feature can be used to distinguish between +# one argument of the empty string vs. no arguments. However, in the +# normal case with arguments present, this is less efficient than m4_quote. +m4_define([_m4_quote], +[m4_if([$#], [0], [], [[$*]])]) + + +# m4_reverse(ARGS) +# ---------------- +# Output ARGS in reverse order. +# +# Please keep foreach.m4 in sync with any adjustments made here. +m4_define([m4_reverse], +[m4_if([$#], [0], [], [$#], [1], [[$1]], + [$0(m4_shift($@)), [$1]])]) + + +# m4_unquote(ARGS) +# ---------------- +# Remove one layer of quotes from each ARG, performing one level of +# expansion. For one argument, m4_unquote([arg]) is more efficient than +# m4_do([arg]), but for multiple arguments, the difference is that +# m4_unquote separates arguments with commas while m4_do concatenates. +# Follow this macro with [] if concatenation with subsequent text is +# undesired. +m4_define([m4_unquote], [$*]) + + +## -------------------------- ## +## 8. Implementing m4 loops. ## +## -------------------------- ## + + +# m4_for(VARIABLE, FIRST, LAST, [STEP = +/-1], EXPRESSION) +# -------------------------------------------------------- +# Expand EXPRESSION defining VARIABLE to FROM, FROM + 1, ..., TO with +# increments of STEP. Both limits are included, and bounds are +# checked for consistency. The algorithm is robust to indirect +# VARIABLE names. Changing VARIABLE inside EXPRESSION will not impact +# the number of iterations. +# +# Uses _m4_defn for speed, and avoid dnl in the macro body. 
Factor +# the _m4_for call so that EXPRESSION is only parsed once. +m4_define([m4_for], +[m4_pushdef([$1], m4_eval([$2]))]dnl +[m4_cond([m4_eval(([$3]) > ([$2]))], 1, + [m4_pushdef([_m4_step], m4_eval(m4_default_quoted([$4], + 1)))m4_assert(_m4_step > 0)_$0(_m4_defn([$1]), + m4_eval((([$3]) - ([$2])) / _m4_step * _m4_step + ([$2])), _m4_step,], + [m4_eval(([$3]) < ([$2]))], 1, + [m4_pushdef([_m4_step], m4_eval(m4_default_quoted([$4], + -1)))m4_assert(_m4_step < 0)_$0(_m4_defn([$1]), + m4_eval((([$2]) - ([$3])) / -(_m4_step) * _m4_step + ([$2])), _m4_step,], + [m4_pushdef([_m4_step])_$0(_m4_defn([$1]), _m4_defn([$1]), 0,])]dnl +[[m4_define([$1],], [)$5])m4_popdef([_m4_step], [$1])]) + +# _m4_for(COUNT, LAST, STEP, PRE, POST) +# ------------------------------------- +# Core of the loop, no consistency checks, all arguments are plain +# numbers. Expand PRE[COUNT]POST, then alter COUNT by STEP and +# iterate if COUNT is not LAST. +m4_define([_m4_for], +[$4[$1]$5[]m4_if([$1], [$2], [], + [$0(m4_eval([$1 + $3]), [$2], [$3], [$4], [$5])])]) + + +# Implementing `foreach' loops in m4 is much more tricky than it may +# seem. For example, the old M4 1.4.4 manual had an incorrect example, +# which looked like this (when translated to m4sugar): +# +# | # foreach(VAR, (LIST), STMT) +# | m4_define([foreach], +# | [m4_pushdef([$1])_foreach([$1], [$2], [$3])m4_popdef([$1])]) +# | m4_define([_arg1], [$1]) +# | m4_define([_foreach], +# | [m4_if([$2], [()], , +# | [m4_define([$1], _arg1$2)$3[]_foreach([$1], (m4_shift$2), [$3])])]) +# +# But then if you run +# +# | m4_define(a, 1) +# | m4_define(b, 2) +# | m4_define(c, 3) +# | foreach([f], [([a], [(b], [c)])], [echo f +# | ]) +# +# it gives +# +# => echo 1 +# => echo (2,3) +# +# which is not what is expected. +# +# Of course the problem is that many quotes are missing. So you add +# plenty of quotes at random places, until you reach the expected +# result. 
Alternatively, if you are a quoting wizard, you directly +# reach the following implementation (but if you really did, then +# apply to the maintenance of m4sugar!). +# +# | # foreach(VAR, (LIST), STMT) +# | m4_define([foreach], [m4_pushdef([$1])_foreach($@)m4_popdef([$1])]) +# | m4_define([_arg1], [[$1]]) +# | m4_define([_foreach], +# | [m4_if($2, [()], , +# | [m4_define([$1], [_arg1$2])$3[]_foreach([$1], [(m4_shift$2)], [$3])])]) +# +# which this time answers +# +# => echo a +# => echo (b +# => echo c) +# +# Bingo! +# +# Well, not quite. +# +# With a better look, you realize that the parens are more a pain than +# a help: since anyway you need to quote properly the list, you end up +# with always using an outermost pair of parens and an outermost pair +# of quotes. Rejecting the parens both eases the implementation, and +# simplifies the use: +# +# | # foreach(VAR, (LIST), STMT) +# | m4_define([foreach], [m4_pushdef([$1])_foreach($@)m4_popdef([$1])]) +# | m4_define([_arg1], [$1]) +# | m4_define([_foreach], +# | [m4_if($2, [], , +# | [m4_define([$1], [_arg1($2)])$3[]_foreach([$1], [m4_shift($2)], [$3])])]) +# +# +# Now, just replace the `$2' with `m4_quote($2)' in the outer `m4_if' +# to improve robustness, and you come up with a nice implementation +# that doesn't require extra parentheses in the user's LIST. +# +# But wait - now the algorithm is quadratic, because every recursion of +# the algorithm keeps the entire LIST and merely adds another m4_shift to +# the quoted text. If the user has a lot of elements in LIST, you can +# bring the system to its knees with the memory m4 then requires, or trip +# the m4 --nesting-limit recursion factor. The only way to avoid +# quadratic growth is ensure m4_shift is expanded prior to the recursion. +# Hence the design below. +# +# The M4 manual now includes a chapter devoted to this issue, with +# the lessons learned from m4sugar. 
And still, this design is only +# optimal for M4 1.6; see foreach.m4 for yet more comments on why +# M4 1.4.x uses yet another implementation. + + +# m4_foreach(VARIABLE, LIST, EXPRESSION) +# -------------------------------------- +# +# Expand EXPRESSION assigning each value of the LIST to VARIABLE. +# LIST should have the form `item_1, item_2, ..., item_n', i.e. the +# whole list must *quoted*. Quote members too if you don't want them +# to be expanded. +# +# This macro is robust to active symbols: +# | m4_define(active, [ACT, IVE]) +# | m4_foreach(Var, [active, active], [-Var-]) +# => -ACT--IVE--ACT--IVE- +# +# | m4_foreach(Var, [[active], [active]], [-Var-]) +# => -ACT, IVE--ACT, IVE- +# +# | m4_foreach(Var, [[[active]], [[active]]], [-Var-]) +# => -active--active- +# +# This macro is called frequently, so avoid extra expansions such as +# m4_ifval and dnl. Also, since $2 might be quite large, try to use it +# as little as possible in _m4_foreach; each extra use requires that much +# more memory for expansion. So, rather than directly compare $2 against +# [] and use m4_car/m4_cdr for recursion, we instead unbox the list (which +# requires swapping the argument order in the helper), insert an ignored +# third argument, and use m4_shift3 to detect when recursion is complete, +# at which point this looks very much like m4_map_args. +m4_define([m4_foreach], +[m4_if([$2], [], [], + [m4_pushdef([$1])_$0([m4_define([$1],], [)$3], [], + $2)m4_popdef([$1])])]) + +# _m4_foreach(PRE, POST, IGNORED, ARG...) +# --------------------------------------- +# Form the common basis of the m4_foreach and m4_map macros. For each +# ARG, expand PRE[ARG]POST[]. The IGNORED argument makes recursion +# easier, and must be supplied rather than implicit. +# +# Please keep foreach.m4 in sync with any adjustments made here. 
+m4_define([_m4_foreach], +[m4_if([$#], [3], [], + [$1[$4]$2[]$0([$1], [$2], m4_shift3($@))])]) + + +# m4_foreach_w(VARIABLE, LIST, EXPRESSION) +# ---------------------------------------- +# Like m4_foreach, but the list is whitespace separated. Depending on +# EXPRESSION, it may be more efficient to use m4_map_args_w. +# +# This macro is robust to active symbols: +# m4_foreach_w([Var], [ active +# b act\ +# ive ], [-Var-])end +# => -active--b--active-end +# +# This used to use a slower implementation based on m4_foreach: +# m4_foreach([$1], m4_split(m4_normalize([$2]), [ ]), [$3]) +m4_define([m4_foreach_w], +[m4_pushdef([$1])m4_map_args_w([$2], + [m4_define([$1],], [)$3])m4_popdef([$1])]) + + +# m4_map(MACRO, LIST) +# m4_mapall(MACRO, LIST) +# ---------------------- +# Invoke MACRO($1), MACRO($2) etc. where $1, $2... are the elements of +# LIST. $1, $2... must in turn be lists, appropriate for m4_apply. +# If LIST contains an empty sublist, m4_map skips the expansion of +# MACRO, while m4_mapall expands MACRO with no arguments. +# +# Since LIST may be quite large, we want to minimize how often it +# appears in the expansion. Rather than use m4_car/m4_cdr iteration, +# we unbox the list, and use _m4_foreach for iteration. For m4_map, +# an empty list behaves like an empty sublist and gets ignored; for +# m4_mapall, we must special-case the empty list. +m4_define([m4_map], +[_m4_foreach([_m4_apply([$1],], [)], [], $2)]) + +m4_define([m4_mapall], +[m4_if([$2], [], [], + [_m4_foreach([m4_apply([$1],], [)], [], $2)])]) + + +# m4_map_sep(MACRO, [SEPARATOR], LIST) +# m4_mapall_sep(MACRO, [SEPARATOR], LIST) +# --------------------------------------- +# Invoke MACRO($1), SEPARATOR, MACRO($2), ..., MACRO($N) where $1, +# $2... $N are the elements of LIST, and are in turn lists appropriate +# for m4_apply. SEPARATOR is expanded, in order to allow the creation +# of a list of arguments by using a single-quoted comma as the +# separator. 
For each empty sublist, m4_map_sep skips the expansion +# of MACRO and SEPARATOR, while m4_mapall_sep expands MACRO with no +# arguments. +# +# For m4_mapall_sep, merely expand the first iteration without the +# separator, then include separator as part of subsequent recursion; +# but avoid extra expansion of LIST's side-effects via a helper macro. +# For m4_map_sep, things are trickier - we don't know if the first +# list element is an empty sublist, so we must define a self-modifying +# helper macro and use that as the separator instead. +m4_define([m4_map_sep], +[m4_pushdef([m4_Sep], [m4_define([m4_Sep], _m4_defn([m4_unquote]))])]dnl +[_m4_foreach([_m4_apply([m4_Sep([$2])[]$1],], [)], [], $3)m4_popdef([m4_Sep])]) + +m4_define([m4_mapall_sep], +[m4_if([$3], [], [], [_$0([$1], [$2], $3)])]) + +m4_define([_m4_mapall_sep], +[m4_apply([$1], [$3])_m4_foreach([m4_apply([$2[]$1],], [)], m4_shift2($@))]) + +# m4_map_args(EXPRESSION, ARG...) +# ------------------------------- +# Expand EXPRESSION([ARG]) for each argument. More efficient than +# m4_foreach([var], [ARG...], [EXPRESSION(m4_defn([var]))]) +# Shorthand for m4_map_args_sep([EXPRESSION(], [)], [], ARG...). +m4_define([m4_map_args], +[m4_if([$#], [0], [m4_fatal([$0: too few arguments: $#])], + [$#], [1], [], + [$#], [2], [$1([$2])[]], + [_m4_foreach([$1(], [)], $@)])]) + + +# m4_map_args_pair(EXPRESSION, [END-EXPR = EXPRESSION], ARG...) +# ------------------------------------------------------------- +# Perform a pairwise grouping of consecutive ARGs, by expanding +# EXPRESSION([ARG1], [ARG2]). If there are an odd number of ARGs, the +# final argument is expanded with END-EXPR([ARGn]). +# +# For example: +# m4_define([show], [($*)m4_newline])dnl +# m4_map_args_pair([show], [], [a], [b], [c], [d], [e])dnl +# => (a,b) +# => (c,d) +# => (e) +# +# Please keep foreach.m4 in sync with any adjustments made here. 
+m4_define([m4_map_args_pair], +[m4_if([$#], [0], [m4_fatal([$0: too few arguments: $#])], + [$#], [1], [m4_fatal([$0: too few arguments: $#: $1])], + [$#], [2], [], + [$#], [3], [m4_default([$2], [$1])([$3])[]], + [$#], [4], [$1([$3], [$4])[]], + [$1([$3], [$4])[]$0([$1], [$2], m4_shift(m4_shift3($@)))])]) + + +# m4_map_args_sep([PRE], [POST], [SEP], ARG...) +# --------------------------------------------- +# Expand PRE[ARG]POST for each argument, with SEP between arguments. +m4_define([m4_map_args_sep], +[m4_if([$#], [0], [m4_fatal([$0: too few arguments: $#])], + [$#], [1], [], + [$#], [2], [], + [$#], [3], [], + [$#], [4], [$1[$4]$2[]], + [$1[$4]$2[]_m4_foreach([$3[]$1], [$2], m4_shift3($@))])]) + + +# m4_map_args_w(STRING, [PRE], [POST], [SEP]) +# ------------------------------------------- +# Perform the expansion of PRE[word]POST[] for each word in STRING +# separated by whitespace. More efficient than: +# m4_foreach_w([var], [STRING], [PRE[]m4_defn([var])POST]) +# Additionally, expand SEP between words. +# +# As long as we have to use m4_bpatsubst to split the string, we might +# as well make it also apply PRE and POST; this avoids iteration +# altogether. But we must be careful of any \ in PRE or POST. +# _m4_strip returns a quoted string, but that's okay, since it also +# supplies an empty leading and trailing argument due to our +# intentional whitespace around STRING. We use m4_substr to strip the +# empty elements and remove the extra layer of quoting. +m4_define([m4_map_args_w], +[_$0(_m4_split([ ]m4_flatten([$1])[ ], [[ ]+], + m4_if(m4_index([$2$3$4], [\]), [-1], [[$3[]$4[]$2]], + [m4_bpatsubst([[$3[]$4[]$2]], [\\], [\\\\])])), + m4_len([[]$3[]$4]), m4_len([$4[]$2[]]))]) + +m4_define([_m4_map_args_w], +[m4_substr([$1], [$2], m4_eval(m4_len([$1]) - [$2] - [$3]))]) + + +# m4_stack_foreach(MACRO, FUNC) +# m4_stack_foreach_lifo(MACRO, FUNC) +# ---------------------------------- +# Pass each stacked definition of MACRO to the one-argument macro FUNC. 
+# m4_stack_foreach proceeds in FIFO order, while m4_stack_foreach_lifo +# processes the topmost definitions first. In addition, FUNC should +# not push or pop definitions of MACRO, and should not expect anything about +# the active definition of MACRO (it will not be the topmost, and may not +# be the one passed to FUNC either). +# +# Some macros simply can't be examined with this method: namely, +# anything involved in the implementation of _m4_stack_reverse. +m4_define([m4_stack_foreach], +[_m4_stack_reverse([$1], [m4_tmp-$1])]dnl +[_m4_stack_reverse([m4_tmp-$1], [$1], [$2(_m4_defn([m4_tmp-$1]))])]) + +m4_define([m4_stack_foreach_lifo], +[_m4_stack_reverse([$1], [m4_tmp-$1], [$2(_m4_defn([m4_tmp-$1]))])]dnl +[_m4_stack_reverse([m4_tmp-$1], [$1])]) + +# m4_stack_foreach_sep(MACRO, [PRE], [POST], [SEP]) +# m4_stack_foreach_sep_lifo(MACRO, [PRE], [POST], [SEP]) +# ------------------------------------------------------ +# Similar to m4_stack_foreach and m4_stack_foreach_lifo, in that every +# definition of a pushdef stack will be visited. But rather than +# passing the definition as a single argument to a macro, this variant +# expands the concatenation of PRE[]definition[]POST, and expands SEP +# between consecutive expansions. Note that m4_stack_foreach([a], [b]) +# is equivalent to m4_stack_foreach_sep([a], [b(], [)]). +m4_define([m4_stack_foreach_sep], +[_m4_stack_reverse([$1], [m4_tmp-$1])]dnl +[_m4_stack_reverse([m4_tmp-$1], [$1], [$2[]_m4_defn([m4_tmp-$1])$3], [$4[]])]) + +m4_define([m4_stack_foreach_sep_lifo], +[_m4_stack_reverse([$1], [m4_tmp-$1], [$2[]_m4_defn([m4_tmp-$1])$3], [$4[]])]dnl +[_m4_stack_reverse([m4_tmp-$1], [$1])]) + + +# _m4_stack_reverse(OLD, NEW, [ACTION], [SEP]) +# -------------------------------------------- +# A recursive worker for pushdef stack manipulation. Destructively +# copy the OLD stack into the NEW, and expanding ACTION for each +# iteration. 
After the first iteration, SEP is promoted to the front +# of ACTION (note that SEP should include a trailing [] if it is to +# avoid interfering with ACTION). The current definition is examined +# after the NEW has been pushed but before OLD has been popped; this +# order is important, as ACTION is permitted to operate on either +# _m4_defn([OLD]) or _m4_defn([NEW]). Since the operation is +# destructive, this macro is generally used twice, with a temporary +# macro name holding the swapped copy. +m4_define([_m4_stack_reverse], +[m4_ifdef([$1], [m4_pushdef([$2], + _m4_defn([$1]))$3[]_m4_popdef([$1])$0([$1], [$2], [$4$3])])]) + + + +## --------------------------- ## +## 9. More diversion support. ## +## --------------------------- ## + + +# m4_cleardivert(DIVERSION-NAME...) +# --------------------------------- +# Discard any text in DIVERSION-NAME. +# +# This works even inside m4_expand. +m4_define([m4_cleardivert], +[m4_if([$#], [0], [m4_fatal([$0: missing argument])], + [_m4_divert_raw([-1])m4_undivert($@)_m4_divert_raw( + _m4_divert(_m4_defn([_m4_divert_diversion]), [-]))])]) + + +# _m4_divert(DIVERSION-NAME or NUMBER, [NOWARN]) +# ---------------------------------------------- +# If DIVERSION-NAME is the name of a diversion, return its number, +# otherwise if it is a NUMBER return it. Issue a warning about +# the use of a number instead of a name, unless NOWARN is provided. +m4_define([_m4_divert], +[m4_ifdef([_m4_divert($1)], + [m4_indir([_m4_divert($1)])], + [m4_if([$2], [], [m4_warn([syntax], + [prefer named diversions])])$1])]) + +# KILL is only used to suppress output. +m4_define([_m4_divert(KILL)], -1) + +# The empty diversion name is a synonym for 0. +m4_define([_m4_divert()], 0) + + +# m4_divert_stack +# --------------- +# Print the diversion stack, if it's nonempty. The caller is +# responsible for any leading or trailing newline. 
+m4_define([m4_divert_stack],
+[m4_stack_foreach_sep_lifo([_m4_divert_stack], [], [], [
+])])
+
+
+# m4_divert_stack_push(MACRO-NAME, DIVERSION-NAME)
+# ------------------------------------------------
+# Form an entry of the diversion stack from caller MACRO-NAME
+# entering DIVERSION-NAME, and push it.
+m4_define([m4_divert_stack_push],
+[m4_pushdef([_m4_divert_stack], m4_location[: $1: $2])])
+
+
+# m4_divert(DIVERSION-NAME)
+# -------------------------
+# Change the diversion stream to DIVERSION-NAME.
+m4_define([m4_divert],
+[m4_popdef([_m4_divert_stack])]dnl
+[m4_define([_m4_divert_diversion], [$1])]dnl
+[m4_divert_stack_push([$0], [$1])]dnl
+[_m4_divert_raw(_m4_divert([$1]))])
+
+
+# m4_divert_push(DIVERSION-NAME, [NOWARN])
+# ----------------------------------------
+# Change the diversion stream to DIVERSION-NAME, while stacking old values.
+# For internal use only: if NOWARN is not empty, DIVERSION-NAME can be a
+# number instead of a name.
+m4_define([m4_divert_push],
+[m4_divert_stack_push([$0], [$1])]dnl
+[m4_pushdef([_m4_divert_diversion], [$1])]dnl
+[_m4_divert_raw(_m4_divert([$1], [$2]))])
+
+
+# m4_divert_pop([DIVERSION-NAME])
+# -------------------------------
+# Change the diversion stream to its previous value, unstacking it.
+# If specified, verify we left DIVERSION-NAME.
+# When we pop the last value from the stack, we divert to -1.
+m4_define([m4_divert_pop],
+[m4_if([$1], [], [],
+       [$1], _m4_defn([_m4_divert_diversion]), [],
+       [m4_fatal([$0($1): diversion mismatch:
+]m4_divert_stack)])]dnl
+[_m4_popdef([_m4_divert_stack], [_m4_divert_diversion])]dnl
+[m4_ifdef([_m4_divert_diversion], [],
+	  [m4_fatal([too many m4_divert_pop])])]dnl
+[_m4_divert_raw(_m4_divert(_m4_defn([_m4_divert_diversion]), [-]))])
+
+
+# m4_divert_text(DIVERSION-NAME, CONTENT)
+# ---------------------------------------
+# Output CONTENT into DIVERSION-NAME (which may actually be a number).
+# An end of line is appended for free to CONTENT.
+m4_define([m4_divert_text],
+[m4_divert_push([$1])$2
+m4_divert_pop([$1])])
+
+
+# m4_divert_once(DIVERSION-NAME, CONTENT)
+# ---------------------------------------
+# Output CONTENT into DIVERSION-NAME once, if not already there.
+# An end of line is appended for free to CONTENT.
+m4_define([m4_divert_once],
+[m4_expand_once([m4_divert_text([$1], [$2])])])
+
+
+# _m4_divert_unsafe(DIVERSION-NAME)
+# ---------------------------------
+# Issue a warning that the attempt to change the current diversion to
+# DIVERSION-NAME is unsafe, because this macro is being expanded
+# during argument collection of m4_expand.
+m4_define([_m4_divert_unsafe],
+[m4_fatal([$0: cannot change diversion to `$1' inside m4_expand])])
+
+
+# m4_undivert(DIVERSION-NAME...)
+# ------------------------------
+# Undivert DIVERSION-NAME.  Unlike the M4 builtin, this requires at
+# least one DIVERSION-NAME; also, due to support for named diversions,
+# this should not be used to undivert files.
+m4_define([m4_undivert],
+[m4_if([$#], [0], [m4_fatal([$0: missing argument])],
+       [$#], [1], [_m4_undivert(_m4_divert([$1]))],
+       [m4_map_args([$0], $@)])])
+
+
+## --------------------------------------------- ##
+## 10. Defining macros with bells and whistles. ##
+## --------------------------------------------- ##
+
+# `m4_defun' is basically `m4_define' but it equips the macro with the
+# needed machinery for `m4_require'.  A macro must be m4_defun'd if
+# either it is m4_require'd, or it m4_require's.
+#
+# Two things deserve attention and are detailed below:
+#  1. Implementation of m4_require
+#  2. Keeping track of the expansion stack
+#
+# 1. Implementation of m4_require
+# ===============================
+#
+# Of course m4_defun calls m4_provide, so that a macro which has
+# been expanded is not expanded again when m4_require'd, but the
+# difficult part is the proper expansion of macros when they are
+# m4_require'd.
+#
+# The implementation is based on three ideas, (i) using diversions to
+# prepare the expansion of the macro and its dependencies (by Franc,ois
+# Pinard), (ii) expand the most recently m4_require'd macros _after_
+# the previous macros (by Axel Thimm), and (iii) track instances of
+# provide before require (by Eric Blake).
+#
+#
+# The first idea: why use diversions?
+# -----------------------------------
+#
+# When a macro requires another, the other macro is expanded in new
+# diversion, GROW.  When the outer macro is fully expanded, we first
+# undivert the most nested diversions (GROW - 1...), and finally
+# undivert GROW.  To understand why we need several diversions,
+# consider the following example:
+#
+# | m4_defun([TEST1], [Test...m4_require([TEST2])1])
+# | m4_defun([TEST2], [Test...m4_require([TEST3])2])
+# | m4_defun([TEST3], [Test...3])
+#
+# Because m4_require is not required to be first in the outer macros, we
+# must keep the expansions of the various levels of m4_require separated.
+# Right before executing the epilogue of TEST1, we have:
+#
+#	GROW - 2: Test...3
+#	GROW - 1: Test...2
+#	GROW:     Test...1
+#	BODY:
+#
+# Finally the epilogue of TEST1 undiverts GROW - 2, GROW - 1, and
+# GROW into the regular flow, BODY.
+#
+#	GROW - 2:
+#	GROW - 1:
+#	GROW:
+#	BODY:        Test...3; Test...2; Test...1
+#
+# (The semicolons are here for clarification, but of course are not
+# emitted.)  This is what Autoconf 2.0 (I think) to 2.13 (I'm sure)
+# implement.
+#
+#
+# The second idea: first required first out
+# -----------------------------------------
+#
+# The natural implementation of the idea above is buggy and produces
+# very surprising results in some situations.  Let's consider the
+# following example to explain the bug:
+#
+# | m4_defun([TEST1], [m4_require([TEST2a])m4_require([TEST2b])])
+# | m4_defun([TEST2a], [])
+# | m4_defun([TEST2b], [m4_require([TEST3])])
+# | m4_defun([TEST3], [m4_require([TEST2a])])
+# |
+# | AC_INIT
+# | TEST1
+#
+# The dependencies between the macros are:
+#
+#		 3 --- 2b
+#	       /        \              is m4_require'd by
+#	      /          \       left -------------------- right
+#	    2a ------------ 1
+#
+# If you strictly apply the rules given in the previous section you get:
+#
+#	GROW - 2: TEST3
+#	GROW - 1: TEST2a; TEST2b
+#	GROW:     TEST1
+#	BODY:
+#
+# (TEST2a, although required by TEST3 is not expanded in GROW - 3
+# because it has already been expanded before in GROW - 1, so it has
+# been AC_PROVIDE'd, so it is not expanded again) so when you undivert
+# the stack of diversions, you get:
+#
+#	GROW - 2:
+#	GROW - 1:
+#	GROW:
+#	BODY:        TEST3; TEST2a; TEST2b; TEST1
+#
+# i.e., TEST2a is expanded after TEST3 although the latter required the
+# former.
+#
+# Starting from 2.50, we use an implementation provided by Axel Thimm.
+# The idea is simple: the order in which macros are emitted must be the
+# same as the one in which macros are expanded.  (The bug above can
+# indeed be described as: a macro has been m4_provide'd before its
+# dependent, but it is emitted after: the lack of correlation between
+# emission and expansion order is guilty).
+#
+# How to do that?  You keep the stack of diversions to elaborate the
+# macros, but each time a macro is fully expanded, emit it immediately.
+#
+# In the example above, when TEST2a is expanded, but its epilogue is
+# not run yet, you have:
+#
+#	GROW - 2:
+#	GROW - 1: TEST2a
+#	GROW:     Elaboration of TEST1
+#	BODY:
+#
+# The epilogue of TEST2a emits it immediately:
+#
+#	GROW - 2:
+#	GROW - 1:
+#	GROW:     Elaboration of TEST1
+#	BODY:     TEST2a
+#
+# TEST2b then requires TEST3, so right before the epilogue of TEST3, you
+# have:
+#
+#	GROW - 2: TEST3
+#	GROW - 1: Elaboration of TEST2b
+#	GROW:     Elaboration of TEST1
+#	BODY:     TEST2a
+#
+# The epilogue of TEST3 emits it:
+#
+#	GROW - 2:
+#	GROW - 1: Elaboration of TEST2b
+#	GROW:     Elaboration of TEST1
+#	BODY:     TEST2a; TEST3
+#
+# TEST2b is now completely expanded, and emitted:
+#
+#	GROW - 2:
+#	GROW - 1:
+#	GROW:     Elaboration of TEST1
+#	BODY:     TEST2a; TEST3; TEST2b
+#
+# and finally, TEST1 is finished and emitted:
+#
+#	GROW - 2:
+#	GROW - 1:
+#	GROW:
+#	BODY:     TEST2a; TEST3; TEST2b; TEST1
+#
+# The idea is simple, but the implementation is a bit involved.  If
+# you are like me, you will want to see the actual functioning of this
+# implementation to be convinced.  The next section gives the full
+# details.
+#
+#
+# The Axel Thimm implementation at work
+# -------------------------------------
+#
+# We consider the macros above, and this configure.ac:
+#
+#	    AC_INIT
+#	    TEST1
+#
+# You should keep the definitions of _m4_defun_pro, _m4_defun_epi, and
+# m4_require at hand to follow the steps.
+#
+# This implementation tries not to assume that the current diversion is
+# BODY, so as soon as a macro (m4_defun'd) is expanded, we first
+# record the current diversion under the name _m4_divert_dump (denoted
+# DUMP below for short).  This introduces an important difference with
+# the previous versions of Autoconf: you cannot use m4_require if you
+# are not inside an m4_defun'd macro, and especially, you cannot
+# m4_require directly from the top level.
+#
+# We have not tried to simulate the old behavior (better yet, we
+# diagnose it), because it is too dangerous: a macro m4_require'd from
+# the top level is expanded before the body of `configure', i.e., before
+# any other test was run.  I let you imagine the result of requiring
+# AC_STDC_HEADERS for instance, before AC_PROG_CC was actually run....
+#
+# After AC_INIT was run, the current diversion is BODY.
+# * AC_INIT was run
+#   DUMP:                undefined
+#   diversion stack:     BODY |-
+#
+# * TEST1 is expanded
+# The prologue of TEST1 sets _m4_divert_dump, which is the diversion
+# where the current elaboration will be dumped, to the current
+# diversion.  It also m4_divert_push to GROW, where the full
+# expansion of TEST1 and its dependencies will be elaborated.
+#   DUMP:        BODY
+#   BODY:        empty
+#   diversions:  GROW, BODY |-
+#
+# * TEST1 requires TEST2a
+# _m4_require_call m4_divert_pushes another temporary diversion,
+# GROW - 1, and expands TEST2a in there.
+#   DUMP:        BODY
+#   BODY:        empty
+#   GROW - 1:    TEST2a
+#   diversions:  GROW - 1, GROW, BODY |-
+# Then the content of the temporary diversion is moved to DUMP and the
+# temporary diversion is popped.
+#   DUMP:        BODY
+#   BODY:        TEST2a
+#   diversions:  GROW, BODY |-
+#
+# * TEST1 requires TEST2b
+# Again, _m4_require_call pushes GROW - 1 and heads to expand TEST2b.
+#   DUMP:        BODY
+#   BODY:        TEST2a
+#   diversions:  GROW - 1, GROW, BODY |-
+#
+# * TEST2b requires TEST3
+# _m4_require_call pushes GROW - 2 and expands TEST3 here.
+# (TEST3 requires TEST2a, but TEST2a has already been m4_provide'd, so
+# nothing happens.)
+#   DUMP:        BODY
+#   BODY:        TEST2a
+#   GROW - 2:    TEST3
+#   diversions:  GROW - 2, GROW - 1, GROW, BODY |-
+# Then the diversion is appended to DUMP, and popped.
+#   DUMP:        BODY
+#   BODY:        TEST2a; TEST3
+#   diversions:  GROW - 1, GROW, BODY |-
+#
+# * TEST1 requires TEST2b (contd.)
+# The content of TEST2b is expanded...
+#   DUMP:        BODY
+#   BODY:        TEST2a; TEST3
+#   GROW - 1:    TEST2b,
+#   diversions:  GROW - 1, GROW, BODY |-
+# ... and moved to DUMP.
+#   DUMP:        BODY
+#   BODY:        TEST2a; TEST3; TEST2b
+#   diversions:  GROW, BODY |-
+#
+# * TEST1 is expanded: epilogue
+# TEST1's own content is in GROW...
+#   DUMP:        BODY
+#   BODY:        TEST2a; TEST3; TEST2b
+#   GROW:        TEST1
+#   diversions:  BODY |-
+# ... and its epilogue moves it to DUMP and then undefines DUMP.
+#   DUMP:        undefined
+#   BODY:        TEST2a; TEST3; TEST2b; TEST1
+#   diversions:  BODY |-
+#
+#
+# The third idea: track macros provided before they were required
+# ---------------------------------------------------------------
+#
+# Using just the first two ideas, Autoconf 2.50 through 2.63 still had
+# a subtle bug for more than seven years.  Let's consider the
+# following example to explain the bug:
+#
+# | m4_defun([TEST1], [1])
+# | m4_defun([TEST2], [2[]m4_require([TEST1])])
+# | m4_defun([TEST3], [3 TEST1 m4_require([TEST2])])
+# | TEST3
+#
+# After the prologue of TEST3, we are collecting text in GROW with the
+# intent of dumping it in BODY during the epilogue.  Next, we
+# encounter the direct invocation of TEST1, which provides the macro
+# in place in GROW.  From there, we encounter a requirement for TEST2,
+# which must be collected in a new diversion.  While expanding TEST2,
+# we encounter a requirement for TEST1, but since it has already been
+# expanded, the Axel Thimm algorithm states that we can treat it as a
+# no-op.  But that would lead to an end result of `2 3 1', meaning
+# that we have once again output a macro (TEST2) prior to its
+# requirements (TEST1).
+#
+# The problem can only occur if a single defun'd macro first provides,
+# then later indirectly requires, the same macro.  Note that directly
+# expanding then requiring a macro is okay: because the dependency was
+# met, the require phase can be a no-op.  For that matter, the outer
+# macro can even require two helpers, where the first helper expands
+# the macro, and the second helper indirectly requires the macro.
+# Out-of-order expansion is only present if the inner macro is
+# required by something that will be hoisted in front of where the
+# direct expansion occurred.  In other words, we must be careful not
+# to warn on:
+#
+# | m4_defun([TEST4], [4])
+# | m4_defun([TEST5], [5 TEST4 m4_require([TEST4])])
+# | TEST5 => 5 4
+#
+# or even the more complex:
+#
+# | m4_defun([TEST6], [6])
+# | m4_defun([TEST7], [7 TEST6])
+# | m4_defun([TEST8], [8 m4_require([TEST6])])
+# | m4_defun([TEST9], [9 m4_require([TEST8])])
+# | m4_defun([TEST10], [10 m4_require([TEST7]) m4_require([TEST9])])
+# | TEST10 => 7 6 8 9 10
+#
+# So, to detect whether a require was direct or indirect, m4_defun and
+# m4_require track the name of the macro that caused a diversion to be
+# created (using the stack _m4_diverting, coupled with an O(1) lookup
+# _m4_diverting([NAME])), and m4_provide stores the name associated
+# with the diversion at which a macro was provided.  A require call is
+# direct if it occurs within the same diversion where the macro was
+# provided, or if the diversion associated with the providing context
+# has been collected.
+#
+# The implementation of the warning involves tracking the set of
+# macros which have been provided since the start of the outermost
+# defun'd macro (the set is named _m4_provide).  When starting an
+# outermost macro, the set is emptied; when a macro is provided, it is
+# added to the set; when require expands the body of a macro, it is
+# removed from the set; and when a macro is indirectly required, the
+# set is checked.  If a macro is in the set, then it has been provided
+# before it was required, and we satisfy dependencies by expanding the
+# macro as if it had never been provided; in the example given above,
+# this means we now output `1 2 3 1'.  Meanwhile, a warning is issued
+# to inform the user that her macros trigger the bug in older autoconf
+# versions, and that her output file now contains redundant contents
+# (and possibly new problems, if the repeated macro was not
+# idempotent).  Meanwhile, macros defined by m4_defun_once instead of
+# m4_defun are idempotent, avoiding any warning or duplicate output.
+#
+#
+# 2. Keeping track of the expansion stack
+# =======================================
+#
+# When M4 expansion goes wrong it is often extremely hard to find the
+# path amongst macros that led to the failure.  What is needed is
+# the stack of macro `calls'.  One could imagine that GNU M4 would
+# maintain a stack of macro expansions, unfortunately it doesn't, so
+# we do it by hand.  This is of course extremely costly, but the help
+# this stack provides is worth it.  Nevertheless to limit the
+# performance penalty this is implemented only for m4_defun'd macros,
+# not for define'd macros.
+#
+# Each time we enter an m4_defun'd macro, we add a definition in
+# _m4_expansion_stack, and when we exit the macro, we remove it (thanks
+# to pushdef/popdef).  m4_stack_foreach is used to print the expansion
+# stack in the rare cases when it's needed.
+#
+# In addition, we want to detect circular m4_require dependencies.
+# Each time we expand a macro FOO we define _m4_expanding(FOO); and
+# m4_require(BAR) simply checks whether _m4_expanding(BAR) is defined.
+
+
+# m4_expansion_stack
+# ------------------
+# Expands to the entire contents of the expansion stack.  The caller
+# must supply a trailing newline.  This macro always prints a
+# location; check whether _m4_expansion_stack is defined to filter out
+# the case when no defun'd macro is in force.
+m4_define([m4_expansion_stack],
+[m4_stack_foreach_sep_lifo([_$0], [_$0_entry(], [)
+])m4_location[: the top level]])
+
+# _m4_expansion_stack_entry(MACRO)
+# --------------------------------
+# Format an entry for MACRO found on the expansion stack.
+m4_define([_m4_expansion_stack_entry],
+[_m4_defn([m4_location($1)])[: $1 is expanded from...]])
+
+# m4_expansion_stack_push(MACRO)
+# ------------------------------
+# Form an entry of the expansion stack on entry to MACRO and push it.
+m4_define([m4_expansion_stack_push],
+[m4_pushdef([_m4_expansion_stack], [$1])])
+
+
+# _m4_divert(GROW)
+# ----------------
+# This diversion is used by the m4_defun/m4_require machinery.  It is
+# important to keep room before GROW because for each nested
+# AC_REQUIRE we use an additional diversion (i.e., two m4_require's
+# will use GROW - 2.  More than 3 levels have never seemed to be
+# needed.)
+#
+# ...
+# - GROW - 2
+#   m4_require'd code, 2 level deep
+# - GROW - 1
+#   m4_require'd code, 1 level deep
+# - GROW
+#   m4_defun'd macros are elaborated here.
+
+m4_define([_m4_divert(GROW)],       10000)
+
+
+# _m4_defun_pro(MACRO-NAME)
+# -------------------------
+# The prologue for Autoconf macros.
+#
+# This is called frequently, so minimize the number of macro invocations
+# by avoiding dnl and m4_defn overhead.
+m4_define([_m4_defun_pro],
+[m4_ifdef([_m4_expansion_stack], [], [_m4_defun_pro_outer([$1])])]dnl
+[m4_expansion_stack_push([$1])m4_pushdef([_m4_expanding($1)])])
+
+m4_define([_m4_defun_pro_outer],
+[m4_set_delete([_m4_provide])]dnl
+[m4_pushdef([_m4_diverting([$1])])m4_pushdef([_m4_diverting], [$1])]dnl
+[m4_pushdef([_m4_divert_dump], m4_divnum)m4_divert_push([GROW])])
+
+# _m4_defun_epi(MACRO-NAME)
+# -------------------------
+# The epilogue for Autoconf macros.  MACRO-NAME only helps tracing
+# the PRO/EPI pairs.
+#
+# This is called frequently, so minimize the number of macro invocations
+# by avoiding dnl and m4_popdef overhead.
+m4_define([_m4_defun_epi],
+[_m4_popdef([_m4_expanding($1)], [_m4_expansion_stack])]dnl
+[m4_ifdef([_m4_expansion_stack], [], [_m4_defun_epi_outer([$1])])]dnl
+[m4_provide([$1])])
+
+m4_define([_m4_defun_epi_outer],
+[_m4_popdef([_m4_divert_dump], [_m4_diverting([$1])], [_m4_diverting])]dnl
+[m4_divert_pop([GROW])m4_undivert([GROW])])
+
+
+# _m4_divert_dump
+# ---------------
+# If blank, we are outside of any defun'd macro.  Otherwise, expands
+# to the diversion number (not name) where require'd macros should be
+# moved once completed.
+m4_define([_m4_divert_dump])
+
+
+# m4_divert_require(DIVERSION, NAME-TO-CHECK, [BODY-TO-EXPAND])
+# -------------------------------------------------------------
+# Same as m4_require, but BODY-TO-EXPAND goes into the named DIVERSION;
+# requirements still go into the current diversion, though.
+#
+m4_define([m4_divert_require],
+[m4_ifdef([_m4_expanding($2)],
+  [m4_fatal([$0: circular dependency of $2])])]dnl
+[m4_if(_m4_divert_dump, [],
+  [m4_fatal([$0($2): cannot be used outside of an m4_defun'd macro])])]dnl
+[m4_provide_if([$2], [],
+  [_m4_require_call([$2], [$3], _m4_divert([$1], [-]))])])
+
+
+# m4_defun(NAME, EXPANSION, [MACRO = m4_define])
+# ----------------------------------------------
+# Define a macro NAME which automatically provides itself.  Add
+# machinery so the macro automatically switches expansion to the
+# diversion stack if it is not already using it, prior to EXPANSION.
+# In this case, once finished, it will bring back all the code
+# accumulated in the diversion stack.  This, combined with m4_require,
+# achieves the topological ordering of macros.  We don't use this
+# macro to define some frequently called macros that are not involved
+# in ordering constraints, to save m4 processing.
+#
+# MACRO is an undocumented argument; when set to m4_pushdef, and NAME
+# is already defined, the new definition is added to the pushdef
+# stack, rather than overwriting the current definition.  It can thus
+# be used to write self-modifying macros, which pop themselves to a
+# previously m4_define'd definition so that subsequent use of the
+# macro is faster.
+m4_define([m4_defun],
+[m4_define([m4_location($1)], m4_location)]dnl
+[m4_default([$3], [m4_define])([$1],
+  [_m4_defun_pro(]m4_dquote($[0])[)$2[]_m4_defun_epi(]m4_dquote($[0])[)])])
+
+
+# m4_defun_init(NAME, INIT, COMMON)
+# ---------------------------------
+# Like m4_defun, but split EXPANSION into two portions: INIT which is
+# done only the first time NAME is invoked, and COMMON which is
+# expanded every time.
+#
+# For now, the COMMON definition is always m4_define'd, giving an even
+# lighter-weight definition.  m4_defun allows self-providing, but once
+# a macro is provided, m4_require no longer cares if it is m4_define'd
+# or m4_defun'd.  m4_defun also provides location tracking to identify
+# dependency bugs, but once the INIT has been expanded, we know there
+# are no dependency bugs.  However, if a future use needs COMMON to be
+# m4_defun'd, we can add a parameter, similar to the third parameter
+# to m4_defun.
+m4_define([m4_defun_init],
+[m4_define([$1], [$3[]])m4_defun([$1],
+  [$2[]_m4_popdef(]m4_dquote($[0])[)m4_indir(]m4_dquote($[0])dnl
+[m4_if(]m4_dquote($[#])[, [0], [], ]m4_dquote([,$]@)[))], [m4_pushdef])])
+
+
+# m4_defun_once(NAME, EXPANSION)
+# ------------------------------
+# Like m4_defun, but guarantee that EXPANSION happens at most once
+# (thereafter, using NAME is a no-op).
+#
+# If _m4_divert_dump is empty, we are called at the top level;
+# otherwise, we must ensure that we are required in front of the
+# current defun'd macro.  Use a helper macro so that EXPANSION need
+# only occur once in the definition of NAME, since it might be large.
+m4_define([m4_defun_once],
+[m4_define([m4_location($1)], m4_location)]dnl
+[m4_define([$1], [_m4_defun_once([$1], [$2], m4_if(_m4_divert_dump, [],
+  [[_m4_defun_pro([$1])m4_unquote(], [)_m4_defun_epi([$1])]],
+m4_ifdef([_m4_diverting([$1])], [-]), [-], [[m4_unquote(], [)]],
+  [[_m4_require_call([$1],], [, _m4_divert_dump)]]))])])
+
+m4_define([_m4_defun_once],
+[m4_pushdef([$1])$3[$2[]m4_provide([$1])]$4])
+
+
+# m4_pattern_forbid(ERE, [WHY])
+# -----------------------------
+# Declare that no token matching the forbidden perl extended regular
+# expression ERE should be seen in the output unless...
+m4_define([m4_pattern_forbid], [])
+
+
+# m4_pattern_allow(ERE)
+# ---------------------
+# ... that token also matches the allowed extended regular expression ERE.
+# Both are used via traces, by autom4te post-processing (no-ops in M4).
+m4_define([m4_pattern_allow], [])
+
+
+## --------------------------------- ##
+## 11. Dependencies between macros. ##
+## --------------------------------- ##
+
+
+# m4_before(THIS-MACRO-NAME, CALLED-MACRO-NAME)
+# ---------------------------------------------
+# Issue a warning if CALLED-MACRO-NAME was called before THIS-MACRO-NAME.
+m4_define([m4_before],
+[m4_provide_if([$2],
+	       [m4_warn([syntax], [$2 was called before $1])])])
+
+
+# m4_require(NAME-TO-CHECK, [BODY-TO-EXPAND = NAME-TO-CHECK])
+# -----------------------------------------------------------
+# If NAME-TO-CHECK has never been expanded (actually, if it is not
+# m4_provide'd), expand BODY-TO-EXPAND *before* the current macro
+# expansion; follow the expansion with a newline.  Once expanded, emit
+# it in _m4_divert_dump.  Keep track of the m4_require chain in
+# _m4_expansion_stack.
+#
+# The normal cases are:
+#
+# - NAME-TO-CHECK == BODY-TO-EXPAND
+#   Which you can use for regular macros with or without arguments, e.g.,
+#     m4_require([AC_PROG_CC], [AC_PROG_CC])
+#     m4_require([AC_CHECK_HEADERS(threads.h)], [AC_CHECK_HEADERS(threads.h)])
+#   which is just the same as
+#     m4_require([AC_PROG_CC])
+#     m4_require([AC_CHECK_HEADERS(threads.h)])
+#
+# - BODY-TO-EXPAND == m4_indir([NAME-TO-CHECK])
+#   In the case of macros with irregular names.  For instance:
+#     m4_require([AC_LANG_COMPILER(C)], [indir([AC_LANG_COMPILER(C)])])
+#   which means `if the macro named `AC_LANG_COMPILER(C)' (the parens are
+#   part of the name, it is not an argument) has not been run, then
+#   call it.'
+#   Had you used
+#     m4_require([AC_LANG_COMPILER(C)], [AC_LANG_COMPILER(C)])
+#   then m4_require would have tried to expand `AC_LANG_COMPILER(C)', i.e.,
+#   call the macro `AC_LANG_COMPILER' with `C' as argument.
+#
+#   You could argue that `AC_LANG_COMPILER', when it receives an argument
+#   such as `C' should dispatch the call to `AC_LANG_COMPILER(C)'.  But this
+#   `extension' prevents `AC_LANG_COMPILER' from having actual arguments that
+#   it passes to `AC_LANG_COMPILER(C)'.
+#
+# This is called frequently, so minimize the number of macro invocations
+# by avoiding dnl and other overhead on the common path.
+m4_define([m4_require],
+[m4_ifdef([_m4_expanding($1)],
+  [m4_fatal([$0: circular dependency of $1])])]dnl
+[m4_if(_m4_divert_dump, [],
+  [m4_fatal([$0($1): cannot be used outside of an ]dnl
+m4_if([$0], [m4_require], [[m4_defun]], [[AC_DEFUN]])['d macro])])]dnl
+[m4_provide_if([$1], [m4_set_contains([_m4_provide], [$1],
+    [_m4_require_check([$1], _m4_defn([m4_provide($1)]), [$0])], [m4_ignore])],
+  [_m4_require_call])([$1], [$2], _m4_divert_dump)])
+
+
+# _m4_require_call(NAME-TO-CHECK, [BODY-TO-EXPAND = NAME-TO-CHECK],
+#                  DIVERSION-NUMBER)
+# -----------------------------------------------------------------
+# If m4_require decides to expand the body, it calls this macro.  The
+# expansion is placed in DIVERSION-NUMBER.
+#
+# This is called frequently, so minimize the number of macro invocations
+# by avoiding dnl and other overhead on the common path.
+# The use of a witness macro protecting the warning allows aclocal
+# to silence any warnings when probing for what macros are required
+# and must therefore be located, when using the Autoconf-without-aclocal-m4
+# autom4te language.  For more background, see:
+# https://lists.gnu.org/archive/html/automake-patches/2012-11/msg00035.html
+m4_define([_m4_require_call],
+[m4_pushdef([_m4_divert_grow], m4_decr(_m4_divert_grow))]dnl
+[m4_pushdef([_m4_diverting([$1])])m4_pushdef([_m4_diverting], [$1])]dnl
+[m4_divert_push(_m4_divert_grow, [-])]dnl
+[m4_if([$2], [], [$1], [$2])
+m4_provide_if([$1], [m4_set_remove([_m4_provide], [$1])],
+	      [m4_ifndef([m4_require_silent_probe],
+	[m4_warn([syntax], [$1 is m4_require'd but not m4_defun'd])])])]dnl
+[_m4_divert_raw($3)_m4_undivert(_m4_divert_grow)]dnl
+[m4_divert_pop(_m4_divert_grow)_m4_popdef([_m4_divert_grow],
+[_m4_diverting([$1])], [_m4_diverting])])
+
+
+# _m4_require_check(NAME-TO-CHECK, OWNER, CALLER)
+# -----------------------------------------------
+# NAME-TO-CHECK has been identified as previously expanded in the
+# diversion owned by OWNER.  If this is a problem, warn on behalf of
+# CALLER and return _m4_require_call; otherwise return m4_ignore.
+m4_define([_m4_require_check],
+[m4_if(_m4_defn([_m4_diverting]), [$2], [m4_ignore],
+       m4_ifdef([_m4_diverting([$2])], [-]), [-], [m4_warn([syntax],
+	[$3: `$1' was expanded before it was required
+https://www.gnu.org/software/autoconf/manual/autoconf.html#Expanded-Before-Required])_m4_require_call],
+       [m4_ignore])])
+
+
+# _m4_divert_grow
+# ---------------
+# The counter for _m4_require_call.
+m4_define([_m4_divert_grow], _m4_divert([GROW]))
+
+
+# m4_expand_once(TEXT, [WITNESS = TEXT])
+# --------------------------------------
+# If TEXT has never been expanded, expand it *here*.  Use WITNESS as
+# a memory that TEXT has already been expanded.
+m4_define([m4_expand_once],
+[m4_provide_if(m4_default_quoted([$2], [$1]),
+	       [],
+	       [m4_provide(m4_default_quoted([$2], [$1]))[]$1])])
+
+
+# m4_provide(MACRO-NAME)
+# ----------------------
+m4_define([m4_provide],
+[m4_ifdef([m4_provide($1)], [],
+[m4_set_add([_m4_provide], [$1], [m4_define([m4_provide($1)],
+  m4_ifdef([_m4_diverting], [_m4_defn([_m4_diverting])]))])])])
+
+
+# m4_provide_if(MACRO-NAME, IF-PROVIDED, IF-NOT-PROVIDED)
+# -------------------------------------------------------
+# If MACRO-NAME is provided do IF-PROVIDED, else IF-NOT-PROVIDED.
+# The purpose of this macro is to provide the user with a means to
+# check macros which are provided without letting her know how the
+# information is coded.
+m4_define([m4_provide_if],
+[m4_ifdef([m4_provide($1)],
+	  [$2], [$3])])
+
+
+## --------------------- ##
+## 12. Text processing. ##
+## --------------------- ##
+
+
+# m4_cr_letters
+# m4_cr_LETTERS
+# m4_cr_Letters
+# -------------
+m4_define([m4_cr_letters], [abcdefghijklmnopqrstuvwxyz])
+m4_define([m4_cr_LETTERS], [ABCDEFGHIJKLMNOPQRSTUVWXYZ])
+m4_define([m4_cr_Letters],
+m4_defn([m4_cr_letters])dnl
+m4_defn([m4_cr_LETTERS])dnl
+)
+
+
+# m4_cr_digits
+# ------------
+m4_define([m4_cr_digits], [0123456789])
+
+
+# m4_cr_alnum
+# -----------
+m4_define([m4_cr_alnum],
+m4_defn([m4_cr_Letters])dnl
+m4_defn([m4_cr_digits])dnl
+)
+
+
+# m4_cr_symbols1
+# m4_cr_symbols2
+# --------------
+m4_define([m4_cr_symbols1],
+m4_defn([m4_cr_Letters])dnl
+_)
+
+m4_define([m4_cr_symbols2],
+m4_defn([m4_cr_symbols1])dnl
+m4_defn([m4_cr_digits])dnl
+)
+
+# m4_cr_all
+# ---------
+# The character range representing everything, with `-' as the last
+# character, since it is special to m4_translit.  Use with care, because
+# it contains characters special to M4 (fortunately, both ASCII and EBCDIC
+# have [] in order, so m4_defn([m4_cr_all]) remains a valid string).  It
+# also contains characters special to terminals, so it should never be
+# displayed in an error message.  Also, attempts to map [ and ] to other
+# characters via m4_translit must deal with the fact that m4_translit does
+# not add quotes to the output.
+#
+# In EBCDIC, $ is immediately followed by *, which leads to problems
+# if m4_cr_all is inlined into a macro definition; so swap them.
+#
+# It is mainly useful in generating inverted character range maps, for use
+# in places where m4_translit is faster than an equivalent m4_bpatsubst;
+# the regex `[^a-z]' is equivalent to:
+#   m4_translit(m4_dquote(m4_defn([m4_cr_all])), [a-z])
+m4_define([m4_cr_all],
+m4_translit(m4_dquote(m4_format(m4_dquote(m4_for(
+  ,1,255,,[[%c]]))m4_for([i],1,255,,[,i]))), [$*-], [*$])-)
+
+
+# _m4_define_cr_not(CATEGORY)
+# ---------------------------
+# Define m4_cr_not_CATEGORY as the inverse of m4_cr_CATEGORY.
+m4_define([_m4_define_cr_not],
+[m4_define([m4_cr_not_$1],
+	   m4_translit(m4_dquote(m4_defn([m4_cr_all])),
+		       m4_defn([m4_cr_$1])))])
+
+
+# m4_cr_not_letters
+# m4_cr_not_LETTERS
+# m4_cr_not_Letters
+# m4_cr_not_digits
+# m4_cr_not_alnum
+# m4_cr_not_symbols1
+# m4_cr_not_symbols2
+# ------------------
+# Inverse character sets
+_m4_define_cr_not([letters])
+_m4_define_cr_not([LETTERS])
+_m4_define_cr_not([Letters])
+_m4_define_cr_not([digits])
+_m4_define_cr_not([alnum])
+_m4_define_cr_not([symbols1])
+_m4_define_cr_not([symbols2])
+
+
+# m4_newline([STRING])
+# --------------------
+# Expands to a newline, possibly followed by STRING.  Exists mostly for
+# formatting reasons.
+m4_define([m4_newline], [
+$1])
+
+
+# m4_re_escape(STRING)
+# --------------------
+# Escape RE active characters in STRING.
+m4_define([m4_re_escape],
+[m4_bpatsubst([$1],
+	      [[][*+.?\^$]], [\\\&])])
+
+
+# m4_re_string
+# ------------
+# Regexp for `[a-zA-Z_0-9]*'
+# m4_dquote provides literal [] for the character class.
+m4_define([m4_re_string],
+m4_dquote(m4_defn([m4_cr_symbols2]))dnl
+[*]dnl
+)
+
+
+# m4_re_word
+# ----------
+# Regexp for `[a-zA-Z_][a-zA-Z_0-9]*'
+m4_define([m4_re_word],
+m4_dquote(m4_defn([m4_cr_symbols1]))dnl
+m4_defn([m4_re_string])dnl
+)
+
+
+# m4_tolower(STRING)
+# m4_toupper(STRING)
+# ------------------
+# These macros convert STRING to lowercase or uppercase.
+#
+# Rather than expand the m4_defn each time, we inline them up front.
+m4_define([m4_tolower],
+[m4_translit([[$1]], ]m4_dquote(m4_defn([m4_cr_LETTERS]))[,
+	     ]m4_dquote(m4_defn([m4_cr_letters]))[)])
+m4_define([m4_toupper],
+[m4_translit([[$1]], ]m4_dquote(m4_defn([m4_cr_letters]))[,
+	     ]m4_dquote(m4_defn([m4_cr_LETTERS]))[)])
+
+
+# m4_split(STRING, [REGEXP])
+# --------------------------
+# Split STRING into an m4 list of quoted elements.  The elements are
+# quoted with [ and ].  Leading and trailing spaces *are kept*.
+# Use m4_strip to remove them.
+#
+# REGEXP specifies where to split.  Default is [\t ]+.
+#
+# If STRING is empty, the result is an empty list.
+#
+# Pay attention to the m4_changequotes.  When m4 reads the definition of
+# m4_split, it still has quotes set to [ and ].  Luckily, these are matched
+# in the macro body, so the definition is stored correctly.  Use the same
+# alternate quotes as m4_noquote; it must be unlikely to appear in $1.
+#
+# Also, notice that $1 is quoted twice, since we want the result to
+# be quoted.  Then you should understand that the argument of
+# patsubst is -=<{(STRING)}>=- (i.e., with additional -=<{( and )}>=-).
+#
+# This macro is safe on active symbols, i.e.:
+#   m4_define(active, ACTIVE)
+#   m4_split([active active ])end
+#   => [active], [active], []end
+#
+# Optimize on regex of ` ' (space), since m4_foreach_w already guarantees
+# that the list contains single space separators, and a common case is
+# splitting a single-element list.  This macro is called frequently,
+# so avoid unnecessary dnl inside the definition.
+m4_define([m4_split], +[m4_if([$1], [], [], + [$2], [ ], [m4_if(m4_index([$1], [ ]), [-1], [[[$1]]], + [_$0([$1], [$2], [, ])])], + [$2], [], [_$0([$1], [[ ]+], [, ])], + [_$0([$1], [$2], [, ])])]) + +m4_define([_m4_split], +[m4_changequote([-=<{(],[)}>=-])]dnl +[[m4_bpatsubst(-=<{(-=<{($1)}>=-)}>=-, -=<{($2)}>=-, + -=<{(]$3[)}>=-)]m4_changequote([, ])]) + + +# m4_chomp(STRING) +# m4_chomp_all(STRING) +# -------------------- +# Return STRING quoted, but without a trailing newline. m4_chomp +# removes at most one newline, while m4_chomp_all removes all +# consecutive trailing newlines. Embedded newlines are not touched, +# and a trailing backslash-newline leaves just a trailing backslash. +# +# m4_bregexp is slower than m4_index, and we don't always want to +# remove all newlines; hence the two variants. We massage characters +# to give a nicer pattern to match, particularly since m4_bregexp is +# line-oriented. Both versions must guarantee a match, to avoid bugs +# with precision -1 in m4_format in older m4. +m4_define([m4_chomp], +[m4_format([[%.*s]], m4_index(m4_translit([[$1]], [ +/.], [/ ])[./.], [/.]), [$1])]) + +m4_define([m4_chomp_all], +[m4_format([[%.*s]], m4_bregexp(m4_translit([[$1]], [ +/], [/ ]), [/*$]), [$1])]) + + +# m4_flatten(STRING) +# ------------------ +# If STRING contains end of lines, replace them with spaces. If there +# are backslashed end of lines, remove them. This macro is safe with +# active symbols. +# m4_define(active, ACTIVE) +# m4_flatten([active +# act\ +# ive])end +# => active activeend +# +# In m4, m4_bpatsubst is expensive, so first check for a newline. +m4_define([m4_flatten], +[m4_if(m4_index([$1], [ +]), [-1], [[$1]], + [m4_translit(m4_bpatsubst([[[$1]]], [\\ +]), [ +], [ ])])]) + + +# m4_strip(STRING) +# ---------------- +# Expands into STRING with tabs and spaces singled out into a single +# space, and removing leading and trailing spaces. +# +# This macro is robust to active symbols. 
+# m4_define(active, ACTIVE) +# m4_strip([ active active ])end +# => active activeend +# +# First, notice that we guarantee trailing space. Why? Because regular +# expressions are greedy, and `.* ?' would always group the space into the +# .* portion. The algorithm is simpler by avoiding `?' at the end. The +# algorithm correctly strips everything if STRING is just ` '. +# +# Then notice the second pattern: it is in charge of removing the +# leading/trailing spaces. Why not just `[^ ]'? Because they are +# applied to over-quoted strings, i.e. more or less [STRING], due +# to the limitations of m4_bpatsubsts. So the leading space in STRING +# is the *second* character; equally for the trailing space. +m4_define([m4_strip], +[m4_bpatsubsts([$1 ], + [[ ]+], [ ], + [^. ?\(.*\) .$], [[[\1]]])]) + + +# m4_normalize(STRING) +# -------------------- +# Apply m4_flatten and m4_strip to STRING. +# +# The argument is quoted, so that the macro is robust to active symbols: +# +# m4_define(active, ACTIVE) +# m4_normalize([ act\ +# ive +# active ])end +# => active activeend + +m4_define([m4_normalize], +[m4_strip(m4_flatten([$1]))]) + + +# m4_validate_w(STRING) +# --------------------- +# Expands into m4_normalize(m4_expand([STRING])), but if that is not +# the same as just m4_normalize([STRING]), issue a warning. +# +# This is used in several Autoconf macros that take a +# whitespace-separated list of symbols as an argument. Ideally that +# list would not be expanded before use, but several packages used +# `dnl' to put comments inside those lists, so they must be expanded +# for compatibility's sake. 
+m4_define([m4_validate_w], +[_m4_validate_w(m4_normalize([$1]), m4_normalize(m4_expand([$1])))]) + +m4_define([_m4_validate_w], +[m4_if([$1], [$2], [], + [m4_warn([obsolete], [whitespace-separated list contains macros; +in a future version of Autoconf they will not be expanded]dnl +m4_if(m4_bregexp([$1], [\bdn[l]\b]), -1, [], [ +note: `dn@&t@l' is a macro]))])dnl +[$2]]) + + +# m4_join(SEP, ARG1, ARG2...) +# --------------------------- +# Produce ARG1SEPARG2...SEPARGn. Avoid back-to-back SEP when a given ARG +# is the empty string. No expansion is performed on SEP or ARGs. +# +# Since the number of arguments to join can be arbitrarily long, we +# want to avoid having more than one $@ in the macro definition; +# otherwise, the expansion would require twice the memory of the already +# long list. Hence, m4_join merely looks for the first non-empty element, +# and outputs just that element; while _m4_join looks for all non-empty +# elements, and outputs them following a separator. The final trick to +# note is that we decide between recursing with $0 or _$0 based on the +# nested m4_if ending with `_'. +# +# Please keep foreach.m4 in sync with any adjustments made here. +m4_define([m4_join], +[m4_if([$#], [1], [], + [$#], [2], [[$2]], + [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift2($@))])]) +m4_define([_m4_join], +[m4_if([$#$2], [2], [], + [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift2($@))])]) + +# m4_joinall(SEP, ARG1, ARG2...) +# ------------------------------ +# Produce ARG1SEPARG2...SEPARGn. An empty ARG results in back-to-back SEP. +# No expansion is performed on SEP or ARGs. +# +# Please keep foreach.m4 in sync with any adjustments made here. +m4_define([m4_joinall], [[$2]_$0([$1], m4_shift($@))]) +m4_define([_m4_joinall], +[m4_if([$#], [2], [], [[$1$3]$0([$1], m4_shift2($@))])]) + +# m4_combine([SEPARATOR], PREFIX-LIST, [INFIX], SUFFIX...) 
+# -------------------------------------------------------- +# Produce the pairwise combination of every element in the quoted, +# comma-separated PREFIX-LIST with every element from the SUFFIX arguments. +# Each pair is joined with INFIX, and pairs are separated by SEPARATOR. +# No expansion occurs on SEPARATOR, INFIX, or elements of either list. +# +# For example: +# m4_combine([, ], [[a], [b], [c]], [-], [1], [2], [3]) +# => a-1, a-2, a-3, b-1, b-2, b-3, c-1, c-2, c-3 +# +# This definition is a bit hairy; the thing to realize is that we want +# to construct m4_map_args_sep([[prefix$3]], [], [[$1]], m4_shift3($@)) +# as the inner loop, using each prefix generated by the outer loop, +# and without recalculating m4_shift3 every outer iteration. +m4_define([m4_combine], +[m4_if([$2], [], [], m4_eval([$# > 3]), [1], +[m4_map_args_sep([m4_map_args_sep(m4_dquote(], [)[[$3]], [], [[$1]],]]]dnl +[m4_dquote(m4_dquote(m4_shift3($@)))[[)], [[$1]], $2)])]) + + +# m4_append(MACRO-NAME, STRING, [SEPARATOR]) +# ------------------------------------------ +# Redefine MACRO-NAME to hold its former content plus `SEPARATOR`'STRING' +# at the end. It is valid to use this macro with MACRO-NAME undefined, +# in which case no SEPARATOR is added. Be aware that the criterion is +# `not being defined', and not `not being empty'. +# +# Note that neither STRING nor SEPARATOR are expanded here; rather, when +# you expand MACRO-NAME, they will be expanded at that point in time. +# +# This macro is robust to active symbols. It can be used to grow +# strings. +# +# | m4_define(active, ACTIVE)dnl +# | m4_append([sentence], [This is an])dnl +# | m4_append([sentence], [ active ])dnl +# | m4_append([sentence], [symbol.])dnl +# | sentence +# | m4_undefine([active])dnl +# | sentence +# => This is an ACTIVE symbol. +# => This is an active symbol. +# +# It can be used to define hooks. 
+# +# | m4_define(active, ACTIVE)dnl +# | m4_append([hooks], [m4_define([act1], [act2])])dnl +# | m4_append([hooks], [m4_define([act2], [active])])dnl +# | m4_undefine([active])dnl +# | act1 +# | hooks +# | act1 +# => act1 +# => +# => active +# +# It can also be used to create lists, although this particular usage was +# broken prior to autoconf 2.62. +# | m4_append([list], [one], [, ])dnl +# | m4_append([list], [two], [, ])dnl +# | m4_append([list], [three], [, ])dnl +# | list +# | m4_dquote(list) +# => one, two, three +# => [one],[two],[three] +# +# Note that m4_append can benefit from amortized O(n) m4 behavior, if +# the underlying m4 implementation is smart enough to avoid copying existing +# contents when enlarging a macro's definition into any pre-allocated storage +# (m4 1.4.x unfortunately does not implement this optimization). We do +# not implement m4_prepend, since it is inherently O(n^2) (pre-allocated +# storage only occurs at the end of a macro, so the existing contents must +# always be moved). +# +# Use _m4_defn for speed. +m4_define([m4_append], +[m4_define([$1], m4_ifdef([$1], [_m4_defn([$1])[$3]])[$2])]) + + +# m4_append_uniq(MACRO-NAME, STRING, [SEPARATOR], [IF-UNIQ], [IF-DUP]) +# -------------------------------------------------------------------- +# Like `m4_append', but append only if not yet present. Additionally, +# expand IF-UNIQ if STRING was appended, or IF-DUP if STRING was already +# present. Also, warn if SEPARATOR is not empty and occurs within STRING, +# as the algorithm no longer guarantees uniqueness. +# +# Note that while m4_append can be O(n) (depending on the quality of the +# underlying M4 implementation), m4_append_uniq is inherently O(n^2) +# because each append operation searches the entire string. 
+m4_define([m4_append_uniq], +[m4_ifval([$3], [m4_if(m4_index([$2], [$3]), [-1], [], + [m4_warn([syntax], + [$0: `$2' contains `$3'])])])_$0($@)]) +m4_define([_m4_append_uniq], +[m4_ifdef([$1], + [m4_if(m4_index([$3]_m4_defn([$1])[$3], [$3$2$3]), [-1], + [m4_append([$1], [$2], [$3])$4], [$5])], + [m4_define([$1], [$2])$4])]) + +# m4_append_uniq_w(MACRO-NAME, STRINGS) +# ------------------------------------- +# For each of the words in the whitespace separated list STRINGS, append +# only the unique strings to the definition of MACRO-NAME. +# +# Use _m4_defn for speed. +m4_define([m4_append_uniq_w], +[m4_map_args_w([$2], [_m4_append_uniq([$1],], [, [ ])])]) + + +# m4_escape(STRING) +# ----------------- +# Output quoted STRING, but with embedded #, $, [ and ] turned into +# quadrigraphs. +# +# It is faster to check if STRING is already good using m4_translit +# than to blindly perform four m4_bpatsubst. +# +# Because the translit is stripping quotes, it must also neutralize +# anything that might be in a macro name, as well as comments, commas, +# and parentheses. All the problem characters are unified so that a +# single m4_index can scan the result. +# +# Rather than expand m4_defn every time m4_escape is expanded, we +# inline its expansion up front. +m4_define([m4_escape], +[m4_if(m4_index(m4_translit([$1], + [[]#,()]]m4_dquote(m4_defn([m4_cr_symbols2]))[, [$$$]), [$]), + [-1], [m4_echo], [_$0])([$1])]) + +m4_define([_m4_escape], +[m4_changequote([-=<{(],[)}>=-])]dnl +[m4_bpatsubst(m4_bpatsubst(m4_bpatsubst(m4_bpatsubst( + -=<{(-=<{(-=<{(-=<{(-=<{($1)}>=-)}>=-)}>=-)}>=-)}>=-, + -=<{(#)}>=-, -=<{(@%:@)}>=-), + -=<{(\[)}>=-, -=<{(@<:@)}>=-), + -=<{(\])}>=-, -=<{(@:>@)}>=-), + -=<{(\$)}>=-, -=<{(@S|@)}>=-)m4_changequote([,])]) + + +# m4_text_wrap(STRING, [PREFIX], [FIRST-PREFIX], [WIDTH]) +# ------------------------------------------------------- +# Expands into STRING wrapped to hold in WIDTH columns (default = 79). 
+# If PREFIX is given, each line is prefixed with it. If FIRST-PREFIX is +# specified, then the first line is prefixed with it. As a special case, +# if the length of FIRST-PREFIX is greater than that of PREFIX, then +# FIRST-PREFIX will be left alone on the first line. +# +# No expansion occurs on the contents STRING, PREFIX, or FIRST-PREFIX, +# although quadrigraphs are correctly recognized. More precisely, +# you may redefine m4_qlen to recognize whatever escape sequences that +# you will post-process. +# +# Typical outputs are: +# +# m4_text_wrap([Short string */], [ ], [/* ], 20) +# => /* Short string */ +# +# m4_text_wrap([Much longer string */], [ ], [/* ], 20) +# => /* Much longer +# => string */ +# +# m4_text_wrap([Short doc.], [ ], [ --short ], 30) +# => --short Short doc. +# +# m4_text_wrap([Short doc.], [ ], [ --too-wide ], 30) +# => --too-wide +# => Short doc. +# +# m4_text_wrap([Super long documentation.], [ ], [ --too-wide ], 30) +# => --too-wide +# => Super long +# => documentation. +# +# FIXME: there is no checking of a longer PREFIX than WIDTH, but do +# we really want to bother with people trying each single corner +# of a software? +# +# This macro does not leave a trailing space behind the last word of a line, +# which complicates it a bit. The algorithm is otherwise stupid and simple: +# all the words are preceded by m4_Separator which is defined to empty for +# the first word, and then ` ' (single space) for all the others. +# +# The algorithm uses a helper that uses $2 through $4 directly, rather than +# using local variables, to avoid m4_defn overhead, or expansion swallowing +# any $. It also bypasses m4_popdef overhead with _m4_popdef since no user +# macro expansion occurs in the meantime. Also, the definition is written +# with m4_do, to avoid time wasted on dnl during expansion (since this is +# already a time-consuming macro). 
+m4_define([m4_text_wrap], +[_$0(m4_escape([$1]), [$2], m4_default_quoted([$3], [$2]), + m4_default_quoted([$4], [79]))]) + +m4_define([_m4_text_wrap], +m4_do(dnl set up local variables, to avoid repeated calculations +[[m4_pushdef([m4_Indent], m4_qlen([$2]))]], +[[m4_pushdef([m4_Cursor], m4_qlen([$3]))]], +[[m4_pushdef([m4_Separator], [m4_define([m4_Separator], [ ])])]], +dnl expand the first prefix, then check its length vs. regular prefix +dnl same length: nothing special +dnl prefix1 longer: output on line by itself, and reset cursor +dnl prefix1 shorter: pad to length of prefix, and reset cursor +[[[$3]m4_cond([m4_Cursor], m4_Indent, [], + [m4_eval(m4_Cursor > m4_Indent)], [1], [ +[$2]m4_define([m4_Cursor], m4_Indent)], + [m4_format([%*s], m4_max([0], + m4_eval(m4_Indent - m4_Cursor)), [])m4_define([m4_Cursor], m4_Indent)])]], +dnl now, for each word, compute the cursor after the word is output, then +dnl check if the cursor would exceed the wrap column +dnl if so, reset cursor, and insert newline and prefix +dnl if not, insert the separator (usually a space) +dnl either way, insert the word +[[m4_map_args_w([$1], [$0_word(], [, [$2], [$4])])]], +dnl finally, clean up the local variables +[[_m4_popdef([m4_Separator], [m4_Cursor], [m4_Indent])]])) + +m4_define([_m4_text_wrap_word], +[m4_define([m4_Cursor], m4_eval(m4_Cursor + m4_qlen([$1]) + 1))]dnl +[m4_if(m4_eval(m4_Cursor > ([$3])), + [1], [m4_define([m4_Cursor], m4_eval(m4_Indent + m4_qlen([$1]) + 1)) +[$2]], + [m4_Separator[]])[$1]]) + +# m4_text_box(MESSAGE, [FRAME-CHARACTER = `-']) +# --------------------------------------------- +# Turn MESSAGE into: +# ## ------- ## +# ## MESSAGE ## +# ## ------- ## +# using FRAME-CHARACTER in the border. +# +# Quadrigraphs are correctly recognized. More precisely, you may +# redefine m4_qlen to recognize whatever escape sequences that you +# will post-process. 
+m4_define([m4_text_box], +[m4_pushdef([m4_Border], + m4_translit(m4_format([[[%*s]]], m4_decr(m4_qlen(_m4_expand([$1 +]))), []), [ ], m4_default_quoted([$2], [-])))]dnl +[[##] _m4_defn([m4_Border]) [##] +[##] $1 [##] +[##] _m4_defn([m4_Border]) [##]_m4_popdef([m4_Border])]) + + +# m4_qlen(STRING) +# --------------- +# Expands to the length of STRING after autom4te converts all quadrigraphs. +# +# If you use some other means of post-processing m4 output rather than +# autom4te, then you may redefine this macro to recognize whatever +# escape sequences your post-processor will handle. For that matter, +# m4_define([m4_qlen], m4_defn([m4_len])) is sufficient if you don't +# do any post-processing. +# +# Avoid bpatsubsts for the common case of no quadrigraphs. Cache +# results, as configure scripts tend to ask about lengths of common +# strings like `/*' and `*/' rather frequently. Minimize the number +# of times that $1 occurs in m4_qlen, so there is less text to parse +# on a cache hit. +m4_define([m4_qlen], +[m4_ifdef([$0-$1], [_m4_defn([$0-]], [_$0(])[$1])]) +m4_define([_m4_qlen], +[m4_define([m4_qlen-$1], +m4_if(m4_index([$1], [@]), [-1], [m4_len([$1])], + [m4_len(m4_bpatsubst([[$1]], + [@\(\(<:\|:>\|S|\|%:\|\{:\|:\}\)\(@\)\|&t@\)], + [\3]))]))_m4_defn([m4_qlen-$1])]) + +# m4_copyright_condense(TEXT) +# --------------------------- +# Condense the copyright notice in TEXT to only display the final +# year, wrapping the results to fit in 80 columns. +m4_define([m4_copyright_condense], +[m4_text_wrap(m4_bpatsubst(m4_flatten([[$1]]), +[(C)[- ,0-9]*\([1-9][0-9][0-9][0-9]\)], [(C) \1]))]) + +## ----------------------- ## +## 13. Number processing. ## +## ----------------------- ## + +# m4_cmp(A, B) +# ------------ +# Compare two integer expressions. +# A < B -> -1 +# A = B -> 0 +# A > B -> 1 +m4_define([m4_cmp], +[m4_eval((([$1]) > ([$2])) - (([$1]) < ([$2])))]) + + +# m4_list_cmp(A, B) +# ----------------- +# +# Compare the two lists of integer expressions A and B. 
For instance: +# m4_list_cmp([1, 0], [1]) -> 0 +# m4_list_cmp([1, 0], [1, 0]) -> 0 +# m4_list_cmp([1, 2], [1, 0]) -> 1 +# m4_list_cmp([1, 2, 3], [1, 2]) -> 1 +# m4_list_cmp([1, 2, -3], [1, 2]) -> -1 +# m4_list_cmp([1, 0], [1, 2]) -> -1 +# m4_list_cmp([1], [1, 2]) -> -1 +# m4_define([xa], [oops])dnl +# m4_list_cmp([[0xa]], [5+5]) -> 0 +# +# Rather than face the overhead of m4_case, we use a helper function whose +# expansion includes the name of the macro to invoke on the tail, either +# m4_ignore or m4_unquote. This is particularly useful when comparing +# long lists, since less text is being expanded for deciding when to end +# recursion. The recursion is between a pair of macros that alternate +# which list is trimmed by one element; this is more efficient than +# calling m4_cdr on both lists from a single macro. Guarantee exactly +# one expansion of both lists' side effects. +# +# Please keep foreach.m4 in sync with any adjustments made here. +m4_define([m4_list_cmp], +[_$0_raw(m4_dquote($1), m4_dquote($2))]) + +m4_define([_m4_list_cmp_raw], +[m4_if([$1], [$2], [0], [_m4_list_cmp_1([$1], $2)])]) + +m4_define([_m4_list_cmp], +[m4_if([$1], [], [0m4_ignore], [$2], [0], [m4_unquote], [$2m4_ignore])]) + +m4_define([_m4_list_cmp_1], +[_m4_list_cmp_2([$2], [m4_shift2($@)], $1)]) + +m4_define([_m4_list_cmp_2], +[_m4_list_cmp([$1$3], m4_cmp([$3+0], [$1+0]))( + [_m4_list_cmp_1(m4_dquote(m4_shift3($@)), $2)])]) + +# m4_max(EXPR, ...) +# m4_min(EXPR, ...) +# ----------------- +# Return the decimal value of the maximum (or minimum) in a series of +# integer expressions. +# +# M4 1.4.x doesn't provide ?:. Hence this huge m4_eval. Avoid m4_eval +# if both arguments are identical, but be aware of m4_max(0xa, 10) (hence +# the use of <=, not just <, in the second multiply). +# +# Please keep foreach.m4 in sync with any adjustments made here. 
+m4_define([m4_max], +[m4_if([$#], [0], [m4_fatal([too few arguments to $0])], + [$#], [1], [m4_eval([$1])], + [$#$1], [2$2], [m4_eval([$1])], + [$#], [2], [_$0($@)], + [_m4_minmax([_$0], $@)])]) + +m4_define([_m4_max], +[m4_eval((([$1]) > ([$2])) * ([$1]) + (([$1]) <= ([$2])) * ([$2]))]) + +m4_define([m4_min], +[m4_if([$#], [0], [m4_fatal([too few arguments to $0])], + [$#], [1], [m4_eval([$1])], + [$#$1], [2$2], [m4_eval([$1])], + [$#], [2], [_$0($@)], + [_m4_minmax([_$0], $@)])]) + +m4_define([_m4_min], +[m4_eval((([$1]) < ([$2])) * ([$1]) + (([$1]) >= ([$2])) * ([$2]))]) + +# _m4_minmax(METHOD, ARG1, ARG2...) +# --------------------------------- +# Common recursion code for m4_max and m4_min. METHOD must be _m4_max +# or _m4_min, and there must be at least two arguments to combine. +# +# Please keep foreach.m4 in sync with any adjustments made here. +m4_define([_m4_minmax], +[m4_if([$#], [3], [$1([$2], [$3])], + [$0([$1], $1([$2], [$3]), m4_shift3($@))])]) + + +# m4_sign(A) +# ---------- +# The sign of the integer expression A. +m4_define([m4_sign], +[m4_eval((([$1]) > 0) - (([$1]) < 0))]) + + + +## ------------------------ ## +## 14. Version processing. ## +## ------------------------ ## + + +# m4_version_unletter(VERSION) +# ---------------------------- +# Normalize beta version numbers with letters to numeric expressions, which +# can then be handed to m4_eval for the purpose of comparison. +# +# Nl -> (N+1).-1.(l#) +# +# for example: +# [2.14a] -> [0,2,14+1,-1,[0r36:a]] -> 2.15.-1.10 +# [2.14b] -> [0,2,15+1,-1,[0r36:b]] -> 2.15.-1.11 +# [2.61aa.b] -> [0,2.61,1,-1,[0r36:aa],+1,-1,[0r36:b]] -> 2.62.-1.370.1.-1.11 +# [08] -> [0,[0r10:0]8] -> 8 +# +# This macro expects reasonable version numbers, but can handle double +# letters and does not expand any macros. Original version strings can +# use both `.' and `-' separators. +# +# Inline constant expansions, to avoid m4_defn overhead. 
+# _m4_version_unletter is the real workhorse used by m4_version_compare, +# but since [0r36:a] and commas are less readable than 10 and dots, we +# provide a wrapper for human use. +m4_define([m4_version_unletter], +[m4_substr(m4_map_args([.m4_eval], m4_unquote(_$0([$1]))), [3])]) +m4_define([_m4_version_unletter], +[m4_bpatsubst(m4_bpatsubst(m4_translit([[[[0,$1]]]], [.-], [,,]),]dnl +m4_dquote(m4_dquote(m4_defn([m4_cr_Letters])))[[+], + [+1,-1,[0r36:\&]]), [,0], [,[0r10:0]])]) + + +# m4_version_compare(VERSION-1, VERSION-2) +# ---------------------------------------- +# Compare the two version numbers and expand into +# -1 if VERSION-1 < VERSION-2 +# 0 if = +# 1 if > +# +# Since _m4_version_unletter does not output side effects, we can +# safely bypass the overhead of m4_version_cmp. +m4_define([m4_version_compare], +[_m4_list_cmp_raw(_m4_version_unletter([$1]), _m4_version_unletter([$2]))]) + + +# m4_PACKAGE_NAME +# m4_PACKAGE_TARNAME +# m4_PACKAGE_VERSION +# m4_PACKAGE_STRING +# m4_PACKAGE_BUGREPORT +# -------------------- +# If m4sugar/version.m4 is present, then define version strings. This +# file is optional, provided by Autoconf but absent in Bison. +m4_sinclude([m4sugar/version.m4]) + + +# m4_version_prereq(VERSION, [IF-OK], [IF-NOT = FAIL]) +# ---------------------------------------------------- +# Check this Autoconf version against VERSION. +m4_define([m4_version_prereq], +m4_ifdef([m4_PACKAGE_VERSION], +[[m4_if(m4_version_compare(]m4_dquote(m4_defn([m4_PACKAGE_VERSION]))[, [$1]), + [-1], + [m4_default([$3], + [m4_fatal([Autoconf version $1 or higher is required], + [63])])], + [$2])]], +[[m4_fatal([m4sugar/version.m4 not found])]])) + + +## ------------------ ## +## 15. Set handling. ## +## ------------------ ## + +# Autoconf likes to create arbitrarily large sets; for example, as of +# this writing, the configure.ac for coreutils tracks a set of more +# than 400 AC_SUBST. How do we track all of these set members, +# without introducing duplicates? 
We could use m4_append_uniq, with +# the set NAME residing in the contents of the macro NAME. +# Unfortunately, m4_append_uniq is quadratic for set creation, because +# it costs O(n) to search the string for each of O(n) insertions; not +# to mention that with m4 1.4.x, even using m4_append is slow, costing +# O(n) rather than O(1) per insertion. Other set operations, not used +# by Autoconf but still possible by manipulation of the definition +# tracked in macro NAME, include O(n) deletion of one element and O(n) +# computation of set size. Because the set is exposed to the user via +# the definition of a single macro, we cannot cache any data about the +# set without risking the cache being invalidated by the user +# redefining NAME. +# +# Can we do better? Yes, because m4 gives us an O(1) search function +# for free: ifdef. Additionally, even m4 1.4.x gives us an O(1) +# insert operation for free: pushdef. But to use these, we must +# represent the set via a group of macros; to keep the set consistent, +# we must hide the set so that the user can only manipulate it through +# accessor macros. The contents of the set are maintained through two +# access points; _m4_set([name]) is a pushdef stack of values in the +# set, useful for O(n) traversal of the set contents; while the +# existence of _m4_set([name],value) with no particular value is +# useful for O(1) querying of set membership. And since the user +# cannot externally manipulate the set, we are free to add additional +# caching macros for other performance improvements. Deletion can be +# O(1) per element rather than O(n), by reworking the definition of +# _m4_set([name],value) to be 0 or 1 based on current membership, and +# adding _m4_set_cleanup(name) to defer the O(n) cleanup of +# _m4_set([name]) until we have another reason to do an O(n) +# traversal. 
The existence of _m4_set_cleanup(name) can then be used +# elsewhere to determine if we must dereference _m4_set([name],value), +# or assume that definition implies set membership. Finally, size can +# be tracked in an O(1) fashion with _m4_set_size(name). +# +# The quoting in _m4_set([name],value) is chosen so that there is no +# ambiguity with a set whose name contains a comma, and so that we can +# supply the value via _m4_defn([_m4_set([name])]) without needing any +# quote manipulation. + +# m4_set_add(SET, VALUE, [IF-UNIQ], [IF-DUP]) +# ------------------------------------------- +# Add VALUE as an element of SET. Expand IF-UNIQ on the first +# addition, and IF-DUP if it is already in the set. Addition of one +# element is O(1), such that overall set creation is O(n). +# +# We do not want to add a duplicate for a previously deleted but +# unpruned element, but it is just as easy to check existence directly +# as it is to query _m4_set_cleanup($1). +m4_define([m4_set_add], +[m4_ifdef([_m4_set([$1],$2)], + [m4_if(m4_indir([_m4_set([$1],$2)]), [0], + [m4_define([_m4_set([$1],$2)], + [1])_m4_set_size([$1], [m4_incr])$3], [$4])], + [m4_define([_m4_set([$1],$2)], + [1])m4_pushdef([_m4_set([$1])], + [$2])_m4_set_size([$1], [m4_incr])$3])]) + +# m4_set_add_all(SET, VALUE...) +# ----------------------------- +# Add each VALUE into SET. This is O(n) in the number of VALUEs, and +# can be faster than calling m4_set_add for each VALUE. +# +# Implement two recursion helpers; the check variant is slower but +# handles the case where an element has previously been removed but +# not pruned. The recursion helpers ignore their second argument, so +# that we can use the faster m4_shift2 and 2 arguments, rather than +# _m4_shift2 and one argument, as the signal to end recursion. +# +# Please keep foreach.m4 in sync with any adjustments made here. 
+m4_define([m4_set_add_all], +[m4_define([_m4_set_size($1)], m4_eval(m4_set_size([$1]) + + m4_len(m4_ifdef([_m4_set_cleanup($1)], [_$0_check], [_$0])([$1], $@))))]) + +m4_define([_m4_set_add_all], +[m4_if([$#], [2], [], + [m4_ifdef([_m4_set([$1],$3)], [], + [m4_define([_m4_set([$1],$3)], [1])m4_pushdef([_m4_set([$1])], + [$3])-])$0([$1], m4_shift2($@))])]) + +m4_define([_m4_set_add_all_check], +[m4_if([$#], [2], [], + [m4_set_add([$1], [$3])$0([$1], m4_shift2($@))])]) + +# m4_set_contains(SET, VALUE, [IF-PRESENT], [IF-ABSENT]) +# ------------------------------------------------------ +# Expand IF-PRESENT if SET contains VALUE, otherwise expand IF-ABSENT. +# This is always O(1). +m4_define([m4_set_contains], +[m4_ifdef([_m4_set_cleanup($1)], + [m4_if(m4_ifdef([_m4_set([$1],$2)], + [m4_indir([_m4_set([$1],$2)])], [0]), [1], [$3], [$4])], + [m4_ifdef([_m4_set([$1],$2)], [$3], [$4])])]) + +# m4_set_contents(SET, [SEP]) +# --------------------------- +# Expand to a single string containing all the elements in SET, +# separated by SEP, without modifying SET. No provision is made for +# disambiguating set elements that contain non-empty SEP as a +# sub-string, or for recognizing a set that contains only the empty +# string. Order of the output is not guaranteed. If any elements +# have been previously removed from the set, this action will prune +# the unused memory. This is O(n) in the size of the set before +# pruning. +# +# Use _m4_popdef for speed. The existence of _m4_set_cleanup($1) +# determines which version of _1 helper we use. +m4_define([m4_set_contents], +[m4_set_map_sep([$1], [], [], [[$2]])]) + +# _m4_set_contents_1(SET) +# _m4_set_contents_1c(SET) +# _m4_set_contents_2(SET, [PRE], [POST], [SEP]) +# --------------------------------------------- +# Expand to a list of quoted elements currently in the set, each +# surrounded by PRE and POST, and moving SEP in front of PRE on +# recursion. 
To avoid nesting limit restrictions, the algorithm must +# be broken into two parts; _1 destructively copies the stack in +# reverse into _m4_set_($1), producing no output; then _2 +# destructively copies _m4_set_($1) back into the stack in reverse. +# If no elements were deleted, then this visits the set in the order +# that elements were inserted. Behavior is undefined if PRE/POST/SEP +# tries to recursively list or modify SET in any way other than +# calling m4_set_remove on the current element. Use _1 if all entries +# in the stack are guaranteed to be in the set, and _1c to prune +# removed entries. Uses _m4_defn and _m4_popdef for speed. +m4_define([_m4_set_contents_1], +[_m4_stack_reverse([_m4_set([$1])], [_m4_set_($1)])]) + +m4_define([_m4_set_contents_1c], +[m4_ifdef([_m4_set([$1])], + [m4_set_contains([$1], _m4_defn([_m4_set([$1])]), + [m4_pushdef([_m4_set_($1)], _m4_defn([_m4_set([$1])]))], + [_m4_popdef([_m4_set([$1],]_m4_defn( + [_m4_set([$1])])[)])])_m4_popdef([_m4_set([$1])])$0([$1])], + [_m4_popdef([_m4_set_cleanup($1)])])]) + +m4_define([_m4_set_contents_2], +[_m4_stack_reverse([_m4_set_($1)], [_m4_set([$1])], + [$2[]_m4_defn([_m4_set_($1)])$3], [$4[]])]) + +# m4_set_delete(SET) +# ------------------ +# Delete all elements in SET, and reclaim any memory occupied by the +# set. This is O(n) in the set size. +# +# Use _m4_defn and _m4_popdef for speed. +m4_define([m4_set_delete], +[m4_ifdef([_m4_set([$1])], + [_m4_popdef([_m4_set([$1],]_m4_defn([_m4_set([$1])])[)], + [_m4_set([$1])])$0([$1])], + [m4_ifdef([_m4_set_cleanup($1)], + [_m4_popdef([_m4_set_cleanup($1)])])m4_ifdef( + [_m4_set_size($1)], + [_m4_popdef([_m4_set_size($1)])])])]) + +# m4_set_difference(SET1, SET2) +# ----------------------------- +# Produce a LIST of quoted elements that occur in SET1 but not SET2. +# Output a comma prior to any elements, to distinguish the empty +# string from no elements. 
This can be directly used as a series of +# arguments, such as for m4_join, or wrapped inside quotes for use in +# m4_foreach. Order of the output is not guaranteed. +# +# Short-circuit the idempotence relation. +m4_define([m4_set_difference], +[m4_if([$1], [$2], [], [m4_set_map_sep([$1], [_$0([$2],], [)])])]) + +m4_define([_m4_set_difference], +[m4_set_contains([$1], [$2], [], [,[$2]])]) + +# m4_set_dump(SET, [SEP]) +# ----------------------- +# Expand to a single string containing all the elements in SET, +# separated by SEP, then delete SET. In general, if you only need to +# list the contents once, this is faster than m4_set_contents. No +# provision is made for disambiguating set elements that contain +# non-empty SEP as a sub-string. Order of the output is not +# guaranteed. This is O(n) in the size of the set before pruning. +# +# Use _m4_popdef for speed. Use existence of _m4_set_cleanup($1) to +# decide if more expensive recursion is needed. +m4_define([m4_set_dump], +[m4_ifdef([_m4_set_size($1)], + [_m4_popdef([_m4_set_size($1)])])m4_ifdef([_m4_set_cleanup($1)], + [_$0_check], [_$0])([$1], [], [$2])]) + +# _m4_set_dump(SET, [SEP], [PREP]) +# _m4_set_dump_check(SET, [SEP], [PREP]) +# -------------------------------------- +# Print SEP and the current element, then delete the element and +# recurse with empty SEP changed to PREP. The check variant checks +# whether the element has been previously removed. Use _m4_defn and +# _m4_popdef for speed. 
+m4_define([_m4_set_dump], +[m4_ifdef([_m4_set([$1])], + [[$2]_m4_defn([_m4_set([$1])])_m4_popdef([_m4_set([$1],]_m4_defn( + [_m4_set([$1])])[)], [_m4_set([$1])])$0([$1], [$2$3])])]) + +m4_define([_m4_set_dump_check], +[m4_ifdef([_m4_set([$1])], + [m4_set_contains([$1], _m4_defn([_m4_set([$1])]), + [[$2]_m4_defn([_m4_set([$1])])])_m4_popdef( + [_m4_set([$1],]_m4_defn([_m4_set([$1])])[)], + [_m4_set([$1])])$0([$1], [$2$3])], + [_m4_popdef([_m4_set_cleanup($1)])])]) + +# m4_set_empty(SET, [IF-EMPTY], [IF-ELEMENTS]) +# -------------------------------------------- +# Expand IF-EMPTY if SET has no elements, otherwise IF-ELEMENTS. +m4_define([m4_set_empty], +[m4_ifdef([_m4_set_size($1)], + [m4_if(m4_indir([_m4_set_size($1)]), [0], [$2], [$3])], [$2])]) + +# m4_set_foreach(SET, VAR, ACTION) +# -------------------------------- +# For each element of SET, define VAR to the element and expand +# ACTION. ACTION should not recursively list SET's contents, add +# elements to SET, nor delete any element from SET except the one +# currently in VAR. The order that the elements are visited in is not +# guaranteed. This is faster than the corresponding m4_foreach([VAR], +# m4_indir([m4_dquote]m4_set_listc([SET])), [ACTION]) +m4_define([m4_set_foreach], +[m4_pushdef([$2])m4_set_map_sep([$1], +[m4_define([$2],], [)$3])m4_popdef([$2])]) + +# m4_set_intersection(SET1, SET2) +# ------------------------------- +# Produce a LIST of quoted elements that occur in both SET1 or SET2. +# Output a comma prior to any elements, to distinguish the empty +# string from no elements. This can be directly used as a series of +# arguments, such as for m4_join, or wrapped inside quotes for use in +# m4_foreach. Order of the output is not guaranteed. +# +# Iterate over the smaller set, and short-circuit the idempotence +# relation. 
+m4_define([m4_set_intersection], +[m4_if([$1], [$2], [m4_set_listc([$1])], + m4_eval(m4_set_size([$2]) < m4_set_size([$1])), [1], [$0([$2], [$1])], + [m4_set_map_sep([$1], [_$0([$2],], [)])])]) + +m4_define([_m4_set_intersection], +[m4_set_contains([$1], [$2], [,[$2]])]) + +# m4_set_list(SET) +# m4_set_listc(SET) +# ----------------- +# Produce a LIST of quoted elements of SET. This can be directly used +# as a series of arguments, such as for m4_join or m4_set_add_all, or +# wrapped inside quotes for use in m4_foreach or m4_map. With +# m4_set_list, there is no way to distinguish an empty set from a set +# containing only the empty string; with m4_set_listc, a leading comma +# is output if there are any elements. +m4_define([m4_set_list], +[m4_set_map_sep([$1], [], [], [,])]) + +m4_define([m4_set_listc], +[m4_set_map_sep([$1], [,])]) + +# m4_set_map(SET, ACTION) +# ----------------------- +# For each element of SET, expand ACTION with a single argument of the +# current element. ACTION should not recursively list SET's contents, +# add elements to SET, nor delete any element from SET except the one +# passed as an argument. The order that the elements are visited in +# is not guaranteed. This is faster than either of the corresponding +# m4_map_args([ACTION]m4_set_listc([SET])) +# m4_set_foreach([SET], [VAR], [ACTION(m4_defn([VAR]))]) +m4_define([m4_set_map], +[m4_set_map_sep([$1], [$2(], [)])]) + +# m4_set_map_sep(SET, [PRE], [POST], [SEP]) +# ----------------------------------------- +# For each element of SET, expand PRE[value]POST[], and expand SEP +# between elements. +m4_define([m4_set_map_sep], +[m4_ifdef([_m4_set_cleanup($1)], [_m4_set_contents_1c], + [_m4_set_contents_1])([$1])_m4_set_contents_2($@)]) + +# m4_set_remove(SET, VALUE, [IF-PRESENT], [IF-ABSENT]) +# ---------------------------------------------------- +# If VALUE is an element of SET, delete it and expand IF-PRESENT. +# Otherwise expand IF-ABSENT. 
Deleting a single value is O(1), +# although it leaves memory occupied until the next O(n) traversal of +# the set which will compact the set. +# +# Optimize if the element being removed is the most recently added, +# since defining _m4_set_cleanup($1) slows down so many other macros. +# In particular, this plays well with m4_set_foreach and m4_set_map. +m4_define([m4_set_remove], +[m4_set_contains([$1], [$2], [_m4_set_size([$1], + [m4_decr])m4_if(_m4_defn([_m4_set([$1])]), [$2], + [_m4_popdef([_m4_set([$1],$2)], [_m4_set([$1])])], + [m4_define([_m4_set_cleanup($1)])m4_define( + [_m4_set([$1],$2)], [0])])$3], [$4])]) + +# m4_set_size(SET) +# ---------------- +# Expand to the number of elements currently in SET. This operation +# is O(1), and thus more efficient than m4_count(m4_set_list([SET])). +m4_define([m4_set_size], +[m4_ifdef([_m4_set_size($1)], [m4_indir([_m4_set_size($1)])], [0])]) + +# _m4_set_size(SET, ACTION) +# ------------------------- +# ACTION must be either m4_incr or m4_decr, and the size of SET is +# changed accordingly. If the set is empty, ACTION must not be +# m4_decr. +m4_define([_m4_set_size], +[m4_define([_m4_set_size($1)], + m4_ifdef([_m4_set_size($1)], [$2(m4_indir([_m4_set_size($1)]))], + [1]))]) + +# m4_set_union(SET1, SET2) +# ------------------------ +# Produce a LIST of double quoted elements that occur in either SET1 +# or SET2, without duplicates. Output a comma prior to any elements, +# to distinguish the empty string from no elements. This can be +# directly used as a series of arguments, such as for m4_join, or +# wrapped inside quotes for use in m4_foreach. Order of the output is +# not guaranteed. +# +# We can rely on the fact that m4_set_listc prunes SET1, so we don't +# need to check _m4_set([$1],element) for 0. Short-circuit the +# idempotence relation. 
+m4_define([m4_set_union], +[m4_set_listc([$1])m4_if([$1], [$2], [], + [m4_set_map_sep([$2], [_$0([$1],], [)])])]) + +m4_define([_m4_set_union], +[m4_ifdef([_m4_set([$1],$2)], [], [,[$2]])]) + + +## ------------------- ## +## 16. File handling. ## +## ------------------- ## + + +# It is a real pity that M4 comes with no macros to bind a diversion +# to a file. So we have to deal without, which makes us a lot more +# fragile than we should. + + +# m4_file_append(FILE-NAME, CONTENT) +# ---------------------------------- +m4_define([m4_file_append], +[m4_syscmd([cat >>$1 <<_m4eof +$2 +_m4eof +]) +m4_if(m4_sysval, [0], [], + [m4_fatal([$0: cannot write: $1])])]) + + + +## ------------------------ ## +## 17. Setting M4sugar up. ## +## ------------------------ ## + +# _m4_divert_diversion should be defined. +m4_divert_push([KILL]) + +# m4_init +# ------- +# Initialize the m4sugar language. +m4_define([m4_init], +[# All the M4sugar macros start with `m4_', except `dnl' kept as is +# for sake of simplicity. +m4_pattern_forbid([^_?m4_]) +m4_pattern_forbid([^dnl$]) + +# If __m4_version__ is defined, we assume that we are being run by M4 +# 1.6 or newer, thus $@ recursion is linear, and debugmode(+do) +# is available for faster checks of dereferencing undefined macros +# and forcing dumpdef to print to stderr regardless of debugfile. +# But if it is missing, we assume we are being run by M4 1.4.x, that +# $@ recursion is quadratic, and that we need foreach-based +# replacement macros. Also, m4 prior to 1.4.8 loses track of location +# during m4wrap text; __line__ should never be 0. +# +# Use the raw builtin to avoid tripping up include tracing. +# Meanwhile, avoid m4_copy, since it temporarily undefines m4_defn. 
+m4_ifdef([__m4_version__], +[m4_debugmode([+do]) +m4_define([m4_defn], _m4_defn([_m4_defn])) +m4_define([m4_dumpdef], _m4_defn([_m4_dumpdef])) +m4_define([m4_popdef], _m4_defn([_m4_popdef])) +m4_define([m4_undefine], _m4_defn([_m4_undefine]))], +[m4_builtin([include], [m4sugar/foreach.m4]) +m4_wrap_lifo([m4_if(__line__, [0], [m4_pushdef([m4_location], +]]m4_dquote(m4_dquote(m4_dquote(__file__:__line__)))[[)])])]) + +# Rewrite the first entry of the diversion stack. +m4_divert([KILL]) + +# Check the divert push/pop perfect balance. +# Some users are prone to also use m4_wrap to register last-minute +# m4_divert_text; so after our diversion cleanups, we restore +# KILL as the bottom of the diversion stack. +m4_wrap([m4_popdef([_m4_divert_diversion])m4_ifdef( + [_m4_divert_diversion], [m4_fatal([$0: unbalanced m4_divert_push: +]m4_divert_stack)])_m4_popdef([_m4_divert_stack])m4_divert_push([KILL])]) +]) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/bison.m4 b/platform/dbops/binaries/build/share/bison/skeletons/bison.m4 new file mode 100644 index 0000000000000000000000000000000000000000..b7bf5c5c7bd153ea9c7359a04040dd94f3c95835 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/bison.m4 @@ -0,0 +1,1241 @@ + -*- Autoconf -*- + +# Language-independent M4 Macros for Bison. + +# Copyright (C) 2002, 2004-2015, 2018-2021 Free Software Foundation, +# Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + + +# m4_gsub(STRING, RE1, SUBST1, RE2, SUBST2, ...) +# ---------------------------------------------- +# m4 equivalent of +# +# $_ = STRING; +# s/RE1/SUBST1/g; +# s/RE2/SUBST2/g; +# ... +# +# Really similar to m4_bpatsubsts, but behaves properly with quotes. +# See m4.at's "Generating Comments". Super inelegant, but so far, I +# did not find any better solution. +m4_define([b4_gsub], +[m4_bpatsubst(m4_bpatsubst(m4_bpatsubst([[[[$1]]]], + [$2], [$3]), + [$4], [$5]), + [$6], [$7])]) + +# m4_shift2 and m4_shift3 are provided by m4sugar. +m4_define([m4_shift4], [m4_shift(m4_shift(m4_shift(m4_shift($@))))]) + + +## ---------------- ## +## Identification. ## +## ---------------- ## + +# b4_generated_by +# --------------- +m4_define([b4_generated_by], +[b4_comment([A Bison parser, made by GNU Bison b4_version_string.]) +]) + +# b4_copyright(TITLE, [YEARS]) +# ---------------------------- +# If YEARS are not defined, use b4_copyright_years. +m4_define([b4_copyright], +[b4_generated_by +b4_comment([$1 + +]m4_dquote(m4_text_wrap([Copyright (C) +]m4_ifval([$2], [[$2]], [m4_defn([b4_copyright_years])])[ +Free Software Foundation, Inc.]))[ + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. 
If not, see .]) + +b4_comment([As a special exception, you may create a larger work that contains +part or all of the Bison parser skeleton and distribute that work +under terms of your choice, so long as that work isn't itself a +parser generator using the skeleton or a modified version thereof +as a parser skeleton. Alternatively, if you modify or redistribute +the parser skeleton itself, you may (at your option) remove this +special exception, which will cause the skeleton and the resulting +Bison output files to be licensed under the GNU General Public +License without this special exception. + +This special exception was added by the Free Software Foundation in +version 2.2 of Bison.]) +]) + + +# b4_disclaimer +# ------------- +# Issue a warning about private implementation details. +m4_define([b4_disclaimer], +[b4_comment([DO NOT RELY ON FEATURES THAT ARE NOT DOCUMENTED in the manual, +especially those whose name start with YY_ or yy_. They are +private implementation details that can be changed or removed.]) +]) + + + +# b4_required_version_if(VERSION, IF_NEWER, IF_OLDER) +# --------------------------------------------------- +# If the version %require'd by the user is VERSION (or newer) expand +# IF_NEWER, otherwise IF_OLDER. VERSION should be an integer, e.g., +# 302 for 3.2. +m4_define([b4_required_version_if], +[m4_if(m4_eval($1 <= b4_required_version), + [1], [$2], [$3])]) + + +## -------- ## +## Output. ## +## -------- ## + +# b4_output_begin(FILE1, FILE2) +# ----------------------------- +# Enable output, i.e., send to diversion 0, expand after "#", and +# generate the tag to output into FILE. Must be followed by EOL. +# FILE is FILE1 concatenated to FILE2. FILE2 can be empty, or be +# absolute: do the right thing. +m4_define([b4_output_begin], +[m4_changecom() +m4_divert_push(0)dnl +@output(m4_unquote([$1])@,m4_unquote([$2])@)@dnl +]) + + +# b4_output_end +# ------------- +# Output nothing, restore # as comment character (no expansions after #). 
+m4_define([b4_output_end], +[m4_divert_pop(0) +m4_changecom([#]) +]) + + +# b4_divert_kill(CODE) +# -------------------- +# Expand CODE for its side effects, discard its output. +m4_define([b4_divert_kill], +[m4_divert_text([KILL], [$1])]) + + +# b4_define_silent(MACRO, CODE) +# ----------------------------- +# Same as m4_define, but throw away the expansion of CODE. +m4_define([b4_define_silent], +[m4_define([$1], [b4_divert_kill([$2])])]) + + +## ---------------- ## +## Error handling. ## +## ---------------- ## + +# The following error handling macros print error directives that should not +# become arguments of other macro invocations since they would likely then be +# mangled. Thus, they print to stdout directly. + +# b4_cat(TEXT) +# ------------ +# Write TEXT to stdout. Precede the final newline with an @ so that it's +# escaped. For example: +# +# b4_cat([[@complain(invalid input@)]]) +m4_define([b4_cat], +[m4_syscmd([cat <<'_m4eof' +]m4_bpatsubst(m4_dquote($1), [_m4eof], [_m4@`eof])[@ +_m4eof +])dnl +m4_if(m4_sysval, [0], [], [m4_fatal([$0: cannot write to stdout])])]) + +# b4_error(KIND, START, END, FORMAT, [ARG1], [ARG2], ...) +# ------------------------------------------------------- +# Write @KIND(START@,END@,FORMAT@,ARG1@,ARG2@,...@) to stdout. +# +# For example: +# +# b4_error([[complain]], [[input.y:2.3]], [[input.y:5.4]], +# [[invalid %s]], [[foo]]) +m4_define([b4_error], +[b4_cat([[@complain][(]$1[@,]$2[@,]$3[@,]$4[]]dnl +[m4_if([$#], [4], [], + [m4_foreach([b4_arg], + m4_dquote(m4_shift4($@)), + [[@,]b4_arg])])[@)]])]) + +# b4_warn(FORMAT, [ARG1], [ARG2], ...) +# ------------------------------------ +# Write @warn(FORMAT@,ARG1@,ARG2@,...@) to stdout. 
+# +# For example: +# +# b4_warn([[invalid value for '%s': %s]], [[foo]], [[3]]) +# +# As a simple test suite, this: +# +# m4_divert(-1) +# m4_define([asdf], [ASDF]) +# m4_define([fsa], [FSA]) +# m4_define([fdsa], [FDSA]) +# b4_warn_at([[[asdf), asdf]]], [[[fsa), fsa]]], [[[fdsa), fdsa]]]) +# b4_warn_at([[asdf), asdf]], [[fsa), fsa]], [[fdsa), fdsa]]) +# b4_warn_at() +# b4_warn_at(1) +# b4_warn_at(1, 2) +# +# Should produce this without newlines: +# +# @warn_at([asdf), asdf]@,@,@,[fsa), fsa]@,[fdsa), fdsa]@) +# @warn(asdf), asdf@,@,@,fsa), fsa@,fdsa), fdsa@) +# @warn(@) +# @warn(1@) +# @warn(1@,2@) +m4_define([b4_warn], +[b4_warn_at([], [], $@)]) + +# b4_warn_at(START, END, FORMAT, [ARG1], [ARG2], ...) +# --------------------------------------------------- +# Write @warn(START@,END@,FORMAT@,ARG1@,ARG2@,...@) to stdout. +# +# For example: +# +# b4_warn_at([[input.y:2.3]], [[input.y:5.4]], [[invalid %s]], [[foo]]) +m4_define([b4_warn_at], +[b4_error([[warn]], $@)]) + +# b4_complain(FORMAT, [ARG1], [ARG2], ...) +# ---------------------------------------- +# Bounce to b4_complain_at. +# +# See b4_warn example. +m4_define([b4_complain], +[b4_complain_at([], [], $@)]) + +# b4_complain_at(START, END, FORMAT, [ARG1], [ARG2], ...) +# ------------------------------------------------------- +# Write @complain(START@,END@,FORMAT@,ARG1@,ARG2@,...@) to stdout. +# +# See b4_warn_at example. +m4_define([b4_complain_at], +[b4_error([[complain]], $@)]) + +# b4_fatal(FORMAT, [ARG1], [ARG2], ...) +# ------------------------------------- +# Bounce to b4_fatal_at. +# +# See b4_warn example. +m4_define([b4_fatal], +[b4_fatal_at([], [], $@)]) + +# b4_fatal_at(START, END, FORMAT, [ARG1], [ARG2], ...) +# ---------------------------------------------------- +# Write @fatal(START@,END@,FORMAT@,ARG1@,ARG2@,...@) to stdout and exit. +# +# See b4_warn_at example. 
+m4_define([b4_fatal_at], +[b4_error([[fatal]], $@)dnl +m4_exit(1)]) + +# b4_canary(MSG) +# -------------- +# Issue a warning on stderr and in the output. Used in the test suite +# to catch spurious m4 evaluations. +m4_define([b4_canary], +[m4_errprintn([dead canary: $1])DEAD CANARY($1)]) + + +## ------------ ## +## Data Types. ## +## ------------ ## + +# b4_ints_in(INT1, INT2, LOW, HIGH) +# --------------------------------- +# Return 1 iff both INT1 and INT2 are in [LOW, HIGH], 0 otherwise. +m4_define([b4_ints_in], +[m4_eval([$3 <= $1 && $1 <= $4 && $3 <= $2 && $2 <= $4])]) + + +# b4_subtract(LHS, RHS) +# --------------------- +# Evaluate LHS - RHS if they are integer literals, otherwise expand +# to (LHS) - (RHS). +m4_define([b4_subtract], +[m4_bmatch([$1$2], [^[0123456789]*$], + [m4_eval([$1 - $2])], + [($1) - ($2)])]) + +# b4_join(ARG1, ...) +# _b4_join(ARG1, ...) +# ------------------- +# Join with comma, skipping empty arguments. +# b4_join calls itself recursively until it sees the first non-empty +# argument, then calls _b4_join (i.e., `_$0`) which prepends each +# non-empty argument with a comma. +m4_define([b4_join], +[m4_if([$#$1], + [1], [], + [m4_ifval([$1], + [$1[]_$0(m4_shift($@))], + [$0(m4_shift($@))])])]) + +# _b4_join(ARGS1, ...) +# -------------------- +m4_define([_b4_join], +[m4_if([$#$1], + [1], [], + [m4_ifval([$1], [, $1])[]$0(m4_shift($@))])]) + + + + +# b4_integral_parser_tables_map(MACRO) +# ------------------------------------- +# Map MACRO on all the integral tables. MACRO is expected to have +# the signature MACRO(TABLE-NAME, CONTENT, COMMENT). +m4_define([b4_integral_parser_tables_map], +[$1([pact], [b4_pact], + [[YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing +STATE-NUM.]]) + +$1([defact], [b4_defact], + [[YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM. +Performed when YYTABLE does not specify something else to do. 
Zero +means the default is an error.]]) + +$1([pgoto], [b4_pgoto], [[YYPGOTO[NTERM-NUM].]]) + +$1([defgoto], [b4_defgoto], [[YYDEFGOTO[NTERM-NUM].]]) + +$1([table], [b4_table], + [[YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If +positive, shift that token. If negative, reduce the rule whose +number is the opposite. If YYTABLE_NINF, syntax error.]]) + +$1([check], [b4_check]) + +$1([stos], [b4_stos], + [[YYSTOS[STATE-NUM] -- The symbol kind of the accessing symbol of +state STATE-NUM.]]) + +$1([r1], [b4_r1], + [[YYR1[RULE-NUM] -- Symbol kind of the left-hand side of rule RULE-NUM.]]) + +$1([r2], [b4_r2], + [[YYR2[RULE-NUM] -- Number of symbols on the right-hand side of rule RULE-NUM.]]) +]) + + +# b4_parser_tables_declare +# b4_parser_tables_define +# ------------------------ +# Define/declare the (deterministic) parser tables. +m4_define([b4_parser_tables_declare], +[b4_integral_parser_tables_map([b4_integral_parser_table_declare])]) + +m4_define([b4_parser_tables_define], +[b4_integral_parser_tables_map([b4_integral_parser_table_define])]) + + + +## ------------------ ## +## Decoding options. ## +## ------------------ ## + +# b4_flag_if(FLAG, IF-TRUE, IF-FALSE) +# ----------------------------------- +# Run IF-TRUE if b4_FLAG_flag is 1, IF-FALSE if FLAG is 0, otherwise fail. +m4_define([b4_flag_if], +[m4_case(b4_$1_flag, + [0], [$3], + [1], [$2], + [m4_fatal([invalid $1 value: ]b4_$1_flag)])]) + + +# b4_define_flag_if(FLAG) +# ----------------------- +# Define "b4_FLAG_if(IF-TRUE, IF-FALSE)" that depends on the +# value of the Boolean FLAG. +m4_define([b4_define_flag_if], +[_b4_define_flag_if($[1], $[2], [$1])]) + +# _b4_define_flag_if($1, $2, FLAG) +# -------------------------------- +# Work around the impossibility to define macros inside macros, +# because issuing '[$1]' is not possible in M4. GNU M4 should provide +# $$1 a la M5/TeX. 
+m4_define([_b4_define_flag_if], +[m4_if([$1$2], $[1]$[2], [], + [m4_fatal([$0: Invalid arguments: $@])])dnl +m4_define([b4_$3_if], + [b4_flag_if([$3], [$1], [$2])])]) + + +# b4_FLAG_if(IF-TRUE, IF-FALSE) +# ----------------------------- +# Expand IF-TRUE, if FLAG is true, IF-FALSE otherwise. +b4_define_flag_if([glr]) # Whether a GLR parser is requested. +b4_define_flag_if([has_translations]) # Whether some tokens are internationalized. +b4_define_flag_if([header]) # Whether a header is requested. +b4_define_flag_if([nondeterministic]) # Whether conflicts should be handled. +b4_define_flag_if([token_table]) # Whether yytoken_table is demanded. +b4_define_flag_if([yacc]) # Whether POSIX Yacc is emulated. + + +# b4_glr_cc_if([IF-TRUE], [IF-FALSE]) +# ----------------------------------- +m4_define([b4_glr_cc_if], + [m4_if(b4_skeleton, ["glr.cc"], $@)]) + +# b4_glr2_cc_if([IF-TRUE], [IF-FALSE]) +# ------------------------------------ +m4_define([b4_glr2_cc_if], + [m4_if(b4_skeleton, ["glr2.cc"], $@)]) + +## --------- ## +## Symbols. ## +## --------- ## + +# For a description of the Symbol handling, see README.md. +# +# The following macros provide access to symbol related values. + +# __b4_symbol(NUM, FIELD) +# ----------------------- +# Fetch FIELD of symbol #NUM. Fail if undefined. +m4_define([__b4_symbol], +[m4_indir([b4_symbol($1, $2)])]) + + +# _b4_symbol(NUM, FIELD) +# ---------------------- +# Fetch FIELD of symbol #NUM (or "orig NUM", see README.md). +# Fail if undefined. +m4_define([_b4_symbol], +[m4_ifdef([b4_symbol($1, number)], + [__b4_symbol(m4_indir([b4_symbol($1, number)]), $2)], + [__b4_symbol([$1], [$2])])]) + + +# b4_symbol_token_kind(NUM) +# ------------------------- +# The token kind of this symbol. +m4_define([b4_symbol_token_kind], +[b4_percent_define_get([api.token.prefix])dnl +_b4_symbol([$1], [id])]) + + +# b4_symbol_kind_base(NUM) +# ------------------------ +# Build the name of the kind of this symbol. 
It must always exist, +# otherwise some symbols might not be represented in the enum, which +# might be compiled into too small a type to contain all the symbol +# numbers. +m4_define([b4_symbol_prefix], [b4_percent_define_get([api.symbol.prefix])]) +m4_define([b4_symbol_kind_base], +[b4_percent_define_get([api.symbol.prefix])dnl +m4_case([$1], + [-2], [[YYEMPTY]], + [0], [[YYEOF]], + [1], [[YYerror]], + [2], [[YYUNDEF]], + [m4_case(b4_symbol([$1], [tag]), + [$accept], [[YYACCEPT]], + [b4_symbol_if([$1], [has_id], _b4_symbol([$1], [id]), + [m4_bpatsubst([$1-][]_b4_symbol([$1], [tag]), [[^a-zA-Z_0-9]+], [_])])])])]) + + +# b4_symbol_kind(NUM) +# ------------------- +# Same as b4_symbol_kind, but possibly with a prefix in some +# languages. E.g., EOF's kind_base and kind are YYSYMBOL_YYEOF in C, +# but are S_YYEMPTY and symbol_kind::S_YYEMPTY in C++. +m4_copy([b4_symbol_kind_base], [b4_symbol_kind]) + + +# b4_symbol_slot(NUM) +# ------------------- +# The name of union member that contains the value of these symbols. +# Currently, we are messy, this should actually be type_tag, but type_tag +# has several meanings. +m4_define([b4_symbol_slot], +[m4_case(b4_percent_define_get([[api.value.type]]), + [union], [b4_symbol([$1], [type_tag])], + [variant], [b4_symbol([$1], [type_tag])], + [b4_symbol([$1], [type])])]) + + +# b4_symbol(NUM, FIELD) +# --------------------- +# Fetch FIELD of symbol #NUM (or "orig NUM", or "empty"). Fail if undefined. +# +# If FIELD = id, prepend the token prefix. 
+m4_define([b4_symbol], +[m4_if([$1], [empty], [b4_symbol([-2], [$2])], + [$1], [eof], [b4_symbol([0], [$2])], + [$1], [error], [b4_symbol([1], [$2])], + [$1], [undef], [b4_symbol([2], [$2])], + [m4_case([$2], + [id], [b4_symbol_token_kind([$1])], + [kind_base], [b4_symbol_kind_base([$1])], + [kind], [b4_symbol_kind([$1])], + [slot], [b4_symbol_slot([$1])], + [_b4_symbol($@)])])]) + + +# b4_symbol_if(NUM, FIELD, IF-TRUE, IF-FALSE) +# ------------------------------------------- +# If FIELD about symbol #NUM is 1 expand IF-TRUE, if is 0, expand IF-FALSE. +# Otherwise an error. +m4_define([b4_symbol_if], +[m4_case(b4_symbol([$1], [$2]), + [1], [$3], + [0], [$4], + [m4_fatal([$0: field $2 of $1 is not a Boolean:] b4_symbol([$1], [$2]))])]) + + +# b4_symbol_tag_comment(SYMBOL-NUM) +# --------------------------------- +# Issue a comment giving the tag of symbol NUM. +m4_define([b4_symbol_tag_comment], +[b4_comment([b4_symbol([$1], [tag])]) +]) + + +# b4_symbol_action(SYMBOL-NUM, ACTION) +# ------------------------------------ +# Run the action ACTION ("destructor" or "printer") for SYMBOL-NUM. +m4_define([b4_symbol_action], +[b4_symbol_if([$1], [has_$2], +[b4_dollar_pushdef([(*yyvaluep)], + [$1], + [], + [(*yylocationp)])dnl + _b4_symbol_case([$1])[]dnl +b4_syncline([b4_symbol([$1], [$2_line])], [b4_symbol([$1], [$2_file])])dnl +b4_symbol([$1], [$2]) +b4_syncline([@oline@], [@ofile@])dnl + break; + +b4_dollar_popdef[]dnl +])]) + + +# b4_symbol_destructor(SYMBOL-NUM) +# b4_symbol_printer(SYMBOL-NUM) +# -------------------------------- +m4_define([b4_symbol_destructor], [b4_symbol_action([$1], [destructor])]) +m4_define([b4_symbol_printer], [b4_symbol_action([$1], [printer])]) + + +# b4_symbol_actions(ACTION, [KIND = yykind]) +# ------------------------------------------ +# Emit the symbol actions for ACTION ("destructor" or "printer"). +# Dispatch on KIND. 
+m4_define([b4_symbol_actions], +[m4_pushdef([b4_actions_], m4_expand([b4_symbol_foreach([b4_symbol_$1])]))dnl +m4_ifval(m4_defn([b4_actions_]), +[switch (m4_default([$2], [yykind])) + { +m4_defn([b4_actions_])[]dnl + default: + break; + }dnl +], +[b4_use(m4_default([$2], [yykind]));])dnl +m4_popdef([b4_actions_])dnl +]) + +# _b4_symbol_case(SYMBOL-NUM) +# --------------------------- +# Issue a "case NUM" for SYMBOL-NUM. Ends with its EOL to make it +# easier to use with m4_map, but then, use []dnl to suppress the last +# one. +m4_define([_b4_symbol_case], +[case b4_symbol([$1], [kind]): b4_symbol_tag_comment([$1])]) +]) + + +# b4_symbol_foreach(MACRO) +# ------------------------ +# Invoke MACRO(SYMBOL-NUM) for each SYMBOL-NUM. +m4_define([b4_symbol_foreach], + [m4_map([$1], m4_defn([b4_symbol_numbers]))]) + +# b4_symbol_map(MACRO) +# -------------------- +# Return a list (possibly empty elements) of MACRO invoked for each +# SYMBOL-NUM. +m4_define([b4_symbol_map], +[m4_map_args_sep([$1(], [)], [,], b4_symbol_numbers)]) + + +# b4_token_visible_if(NUM, IF-TRUE, IF-FALSE) +# ------------------------------------------- +# Whether NUM denotes a token kind that has an exported definition +# (i.e., shows in enum yytokentype). +m4_define([b4_token_visible_if], +[b4_symbol_if([$1], [is_token], + [b4_symbol_if([$1], [has_id], [$2], [$3])], + [$3])]) + + +# b4_token_has_definition(NUM) +# ---------------------------- +# 1 if NUM is visible, nothing otherwise. +m4_define([b4_token_has_definition], +[b4_token_visible_if([$1], [1])]) + +# b4_any_token_visible_if([IF-TRUE], [IF-FALSE]) +# ---------------------------------------------- +# Whether there is a token that needs to be defined. +m4_define([b4_any_token_visible_if], +[m4_ifval(b4_symbol_foreach([b4_token_has_definition]), + [$1], [$2])]) + + +# b4_token_format(FORMAT, NUM) +# ---------------------------- +# If token NUM has a visible ID, format FORMAT with ID, USER_NUMBER. 
+m4_define([b4_token_format], +[b4_token_visible_if([$2], +[m4_format([[$1]], + b4_symbol([$2], [id]), + b4_symbol([$2], b4_api_token_raw_if([[number]], [[code]])))])]) + + +# b4_last_enum_token +# ------------------ +# The code of the last token visible token. +m4_define([_b4_last_enum_token], +[b4_token_visible_if([$1], + [m4_define([b4_last_enum_token], [$1])])]) +b4_symbol_foreach([_b4_last_enum_token]) + +# b4_last_symbol +# -------------- +# The code of the last symbol. +m4_define([b4_last_symbol], m4_eval(b4_tokens_number + b4_nterms_number - 1)) + +## ------- ## +## Types. ## +## ------- ## + +# _b4_type_action(NUMS) +# --------------------- +# Run actions for the symbol NUMS that all have the same type-name. +# Skip NUMS that have no type-name. +# +# To specify the action to run, define b4_dollar_dollar(SYMBOL-NUM, +# TAG, TYPE). +m4_define([_b4_type_action], +[b4_symbol_if([$1], [has_type], +[m4_map([ _b4_symbol_case], [$@])[]dnl + b4_dollar_dollar([b4_symbol([$1], [number])], + [b4_symbol([$1], [tag])], + [b4_symbol([$1], [type])]); + break; + +])]) + +# b4_type_foreach(MACRO, [SEP]) +# ----------------------------- +# Invoke MACRO(SYMBOL-NUMS) for each set of SYMBOL-NUMS for each type set. +m4_define([b4_type_foreach], + [m4_map_sep([$1], [$2], m4_defn([b4_type_names]))]) + + + +## ----------- ## +## Synclines. ## +## ----------- ## + +# b4_basename(NAME) +# ----------------- +# Similar to POSIX basename; the differences don't matter here. +# Beware that NAME is not evaluated. +m4_define([b4_basename], +[m4_bpatsubst([$1], [^.*/\([^/]+\)/*$], [\1])]) + + +# b4_syncline(LINE, FILE)dnl +# -------------------------- +# Should always be following by a dnl. +# +# Emit "#line LINE FILE /* __LINE__ __FILE__ */". +m4_define([b4_syncline], +[b4_flag_if([synclines], +[b4_sync_start([$1], [$2])[]dnl +b4_sync_end([__line__], [b4_basename(m4_quote(__file__))]) +])]) + +# b4_sync_start(LINE, FILE) +# ----------------------- +# Syncline for the new place. 
Typically a directive for the compiler. +m4_define([b4_sync_start], [b4_comment([$2:$1])]) + +# b4_sync_end(LINE, FILE) +# ----------------------- +# Syncline for the current place, which ends. Typically a comment +# left for the reader. +m4_define([b4_sync_end], [ b4_comment([$2:$1])] +) +# This generates dependencies on the Bison skeletons hence lots of +# useless 'git diff'. This location is useless for the regular +# user (who does not care about the skeletons) and is actually not +# useful for Bison developers too (I, Akim, never used this to locate +# the code in skeletons that generated output). So disable it +# completely. If someone thinks this was actually useful, a %define +# variable should be provided to control the level of verbosity of +# '#line', in replacement of --no-lines. +m4_define([b4_sync_end]) + + +# b4_user_code(USER-CODE) +# ----------------------- +# Emit code from the user, ending it with synclines. +m4_define([b4_user_code], +[$1 +b4_syncline([@oline@], [@ofile@])]) + + +# b4_define_user_code(MACRO, COMMENT) +# ----------------------------------- +# From b4_MACRO, if defined, build b4_user_MACRO that includes the synclines. +m4_define([b4_define_user_code], +[m4_define([b4_user_$1], + [m4_ifdef([b4_$1], + [m4_ifval([$2], + [b4_comment([$2]) +])b4_user_code([b4_$1])])])]) + +# b4_user_actions +# b4_user_initial_action +# b4_user_post_prologue +# b4_user_pre_prologue +# b4_user_union_members +# ---------------------- +# Macros that issue user code, ending with synclines. 
+b4_define_user_code([actions]) +b4_define_user_code([initial_action], [User initialization code.]) +b4_define_user_code([post_prologue], [Second part of user prologue.]) +b4_define_user_code([pre_prologue], [First part of user prologue.]) +b4_define_user_code([union_members]) + + +# b4_check_user_names(WHAT, USER-LIST, BISON-NAMESPACE) +# ----------------------------------------------------- +# Complain if any name of type WHAT is used by the user (as recorded in +# USER-LIST) but is not used by Bison (as recorded by macros in the +# namespace BISON-NAMESPACE). +# +# USER-LIST must expand to a list specifying all user occurrences of all names +# of type WHAT. Each item in the list must be a triplet specifying one +# occurrence: name, start boundary, and end boundary. Empty string names are +# fine. An empty list is fine. +# +# For example, to define b4_foo_user_names to be used for USER-LIST with three +# name occurrences and with correct quoting: +# +# m4_define([b4_foo_user_names], +# [[[[[[bar]], [[parser.y:1.7]], [[parser.y:1.16]]]], +# [[[[bar]], [[parser.y:5.7]], [[parser.y:5.16]]]], +# [[[[baz]], [[parser.y:8.7]], [[parser.y:8.16]]]]]]) +# +# The macro BISON-NAMESPACE(bar) must be defined iff the name bar of type WHAT +# is used by Bison (in the front-end or in the skeleton). Empty string names +# are fine, but it would be ugly for Bison to actually use one. 
+# +# For example, to use b4_foo_bison_names for BISON-NAMESPACE and define that +# the names bar and baz are used by Bison: +# +# m4_define([b4_foo_bison_names(bar)]) +# m4_define([b4_foo_bison_names(baz)]) +# +# To invoke b4_check_user_names with TYPE foo, with USER-LIST +# b4_foo_user_names, with BISON-NAMESPACE b4_foo_bison_names, and with correct +# quoting: +# +# b4_check_user_names([[foo]], [b4_foo_user_names], +# [[b4_foo_bison_names]]) +m4_define([b4_check_user_names], +[m4_foreach([b4_occurrence], $2, +[m4_pushdef([b4_occurrence], b4_occurrence)dnl +m4_pushdef([b4_user_name], m4_car(b4_occurrence))dnl +m4_pushdef([b4_start], m4_car(m4_shift(b4_occurrence)))dnl +m4_pushdef([b4_end], m4_shift2(b4_occurrence))dnl +m4_ifndef($3[(]m4_quote(b4_user_name)[)], + [b4_complain_at([b4_start], [b4_end], + [[%s '%s' is not used]], + [$1], [b4_user_name])])[]dnl +m4_popdef([b4_occurrence])dnl +m4_popdef([b4_user_name])dnl +m4_popdef([b4_start])dnl +m4_popdef([b4_end])dnl +])]) + + + +## --------------------- ## +## b4_percent_define_*. ## +## --------------------- ## + + +# b4_percent_define_use(VARIABLE) +# ------------------------------- +# Declare that VARIABLE was used. +m4_define([b4_percent_define_use], +[m4_define([b4_percent_define_bison_variables(]$1[)])dnl +]) + +# b4_percent_define_get(VARIABLE, [DEFAULT]) +# ------------------------------------------ +# Mimic muscle_percent_define_get in ../src/muscle-tab.h. That is, if +# the %define variable VARIABLE is defined, emit its value. Contrary +# to its C counterpart, return DEFAULT otherwise. Also, record +# Bison's usage of VARIABLE by defining +# b4_percent_define_bison_variables(VARIABLE). 
+# +# For example: +# +# b4_percent_define_get([[foo]]) +m4_define([b4_percent_define_get], +[b4_percent_define_use([$1])dnl +_b4_percent_define_ifdef([$1], + [m4_indir([b4_percent_define(]$1[)])], + [$2])]) + +# b4_percent_define_get_loc(VARIABLE) +# ----------------------------------- +# Mimic muscle_percent_define_get_loc in ../src/muscle-tab.h exactly. That is, +# if the %define variable VARIABLE is undefined, complain fatally since that's +# a Bison or skeleton error. Otherwise, return its definition location in a +# form appropriate for the first two arguments of b4_warn_at, b4_complain_at, or +# b4_fatal_at. Don't record this as a Bison usage of VARIABLE as there's no +# reason to suspect that the user-supplied value has yet influenced the output. +# +# For example: +# +# b4_complain_at(b4_percent_define_get_loc([[foo]]), [[invalid foo]]) +m4_define([b4_percent_define_get_loc], +[m4_ifdef([b4_percent_define_loc(]$1[)], + [m4_pushdef([b4_loc], m4_indir([b4_percent_define_loc(]$1[)]))dnl +b4_loc[]dnl +m4_popdef([b4_loc])], + [b4_fatal([[$0: undefined %%define variable '%s']], [$1])])]) + +# b4_percent_define_get_kind(VARIABLE) +# ------------------------------------ +# Get the kind (code, keyword, string) of VARIABLE, i.e., how its +# value was defined (braces, not delimiters, quotes). +# +# If the %define variable VARIABLE is undefined, complain fatally +# since that's a Bison or skeleton error. Don't record this as a +# Bison usage of VARIABLE as there's no reason to suspect that the +# user-supplied value has yet influenced the output. +m4_define([b4_percent_define_get_kind], +[m4_ifdef([b4_percent_define_kind(]$1[)], + [m4_indir([b4_percent_define_kind(]$1[)])], + [b4_fatal([[$0: undefined %%define variable '%s']], [$1])])]) + +# b4_percent_define_get_syncline(VARIABLE)dnl +# ------------------------------------------- +# Should always be following by a dnl. +# +# Mimic muscle_percent_define_get_syncline in ../src/muscle-tab.h exactly. 
+# That is, if the %define variable VARIABLE is undefined, complain fatally +# since that's a Bison or skeleton error. Otherwise, return its definition +# location as a b4_syncline invocation. Don't record this as a Bison usage of +# VARIABLE as there's no reason to suspect that the user-supplied value has yet +# influenced the output. +# +# For example: +# +# b4_percent_define_get_syncline([[foo]]) +m4_define([b4_percent_define_get_syncline], +[m4_ifdef([b4_percent_define_syncline(]$1[)], + [m4_indir([b4_percent_define_syncline(]$1[)])], + [b4_fatal([[$0: undefined %%define variable '%s']], [$1])])]) + +# _b4_percent_define_ifdef(VARIABLE, IF-TRUE, [IF-FALSE]) +# ------------------------------------------------------ +# If the %define variable VARIABLE is defined, expand IF-TRUE, else expand +# IF-FALSE. Don't record usage of VARIABLE. +# +# For example: +# +# _b4_percent_define_ifdef([[foo]], [[it's defined]], [[it's undefined]]) +m4_define([_b4_percent_define_ifdef], +[m4_ifdef([b4_percent_define(]$1[)], + [$2], + [$3])]) + +# b4_percent_define_ifdef(VARIABLE, IF-TRUE, [IF-FALSE]) +# ------------------------------------------------------ +# Mimic muscle_percent_define_ifdef in ../src/muscle-tab.h exactly. That is, +# if the %define variable VARIABLE is defined, expand IF-TRUE, else expand +# IF-FALSE. Also, record Bison's usage of VARIABLE by defining +# b4_percent_define_bison_variables(VARIABLE). +# +# For example: +# +# b4_percent_define_ifdef([[foo]], [[it's defined]], [[it's undefined]]) +m4_define([b4_percent_define_ifdef], +[_b4_percent_define_ifdef([$1], + [b4_percent_define_use([$1])$2], + [$3])]) + + +# b4_percent_define_check_file_complain(VARIABLE) +# ----------------------------------------------- +# Warn about %define variable VARIABLE having an incorrect +# value. 
+m4_define([b4_percent_define_check_file_complain], +[b4_complain_at(b4_percent_define_get_loc([$1]), + [[%%define variable '%s' requires 'none' or '"..."' values]], + [$1])]) + + +# b4_percent_define_check_file(MACRO, VARIABLE, DEFAULT) +# ------------------------------------------------------ +# If the %define variable VARIABLE: +# - is undefined, then if DEFAULT is non-empty, define MACRO to DEFAULT +# - is a string, define MACRO to its value +# - is the keyword 'none', do nothing +# - otherwise, warn about the incorrect value. +m4_define([b4_percent_define_check_file], +[b4_percent_define_ifdef([$2], + [m4_case(b4_percent_define_get_kind([$2]), + [string], + [m4_define([$1], b4_percent_define_get([$2]))], + [keyword], + [m4_if(b4_percent_define_get([$2]), [none], [], + [b4_percent_define_check_file_complain([$2])])], + [b4_percent_define_check_file_complain([$2])]) + ], + [m4_ifval([$3], + [m4_define([$1], [$3])])]) +]) + + + +## --------- ## +## Options. ## +## --------- ## + + +# b4_percent_define_flag_if(VARIABLE, IF-TRUE, [IF-FALSE]) +# -------------------------------------------------------- +# Mimic muscle_percent_define_flag_if in ../src/muscle-tab.h exactly. That is, +# if the %define variable VARIABLE is defined to "" or "true", expand IF-TRUE. +# If it is defined to "false", expand IF-FALSE. Complain if it is undefined +# (a Bison or skeleton error since the default value should have been set +# already) or defined to any other value (possibly a user error). Also, record +# Bison's usage of VARIABLE by defining +# b4_percent_define_bison_variables(VARIABLE). 
+# +# For example: +# +# b4_percent_define_flag_if([[foo]], [[it's true]], [[it's false]]) +m4_define([b4_percent_define_flag_if], +[b4_percent_define_ifdef([$1], + [m4_case(b4_percent_define_get([$1]), + [], [$2], [true], [$2], [false], [$3], + [m4_expand_once([b4_complain_at(b4_percent_define_get_loc([$1]), + [[invalid value for %%define Boolean variable '%s']], + [$1])], + [[b4_percent_define_flag_if($1)]])])], + [b4_fatal([[$0: undefined %%define variable '%s']], [$1])])]) + + +# b4_percent_define_default(VARIABLE, DEFAULT, [KIND = keyword]) +# -------------------------------------------------------------- +# Mimic muscle_percent_define_default in ../src/muscle-tab.h exactly. That is, +# if the %define variable VARIABLE is undefined, set its value to DEFAULT. +# Don't record this as a Bison usage of VARIABLE as there's no reason to +# suspect that the value has yet influenced the output. +# +# For example: +# +# b4_percent_define_default([[foo]], [[default value]]) +m4_define([_b4_percent_define_define], +[m4_define([b4_percent_define(]$1[)], [$2])dnl +m4_define([b4_percent_define_kind(]$1[)], + [m4_default([$3], [keyword])])dnl +m4_define([b4_percent_define_loc(]$1[)], + [[[[:-1.-1]], + [[:-1.-1]]]])dnl +m4_define([b4_percent_define_syncline(]$1[)], [[]])]) + +m4_define([b4_percent_define_default], +[_b4_percent_define_ifdef([$1], [], + [_b4_percent_define_define($@)])]) + + +# b4_percent_define_if_define(NAME, [VARIABLE = NAME]) +# ---------------------------------------------------- +# Define b4_NAME_if that executes its $1 or $2 depending whether +# VARIABLE was %defined. The characters '.' and `-' in VARIABLE are mapped +# to '_'. 
+m4_define([_b4_percent_define_if_define], +[m4_define(m4_bpatsubst([b4_$1_if], [[-.]], [_]), + [b4_percent_define_default([m4_default([$2], [$1])], [[false]])dnl +b4_percent_define_flag_if(m4_default([$2], [$1]), + [$3], [$4])])]) + +m4_define([b4_percent_define_if_define], +[_b4_percent_define_if_define([$1], [$2], $[1], $[2])]) + + +# b4_percent_define_check_kind(VARIABLE, KIND, [DIAGNOSTIC = complain]) +# --------------------------------------------------------------------- +m4_define([b4_percent_define_check_kind], +[_b4_percent_define_ifdef([$1], + [m4_if(b4_percent_define_get_kind([$1]), [$2], [], + [b4_error([m4_default([$3], [complain])], + b4_percent_define_get_loc([$1]), + [m4_case([$2], + [code], [[%%define variable '%s' requires '{...}' values]], + [keyword], [[%%define variable '%s' requires keyword values]], + [string], [[%%define variable '%s' requires '"..."' values]])], + [$1])])])dnl +]) + + +# b4_percent_define_check_values(VALUES) +# -------------------------------------- +# Mimic muscle_percent_define_check_values in ../src/muscle-tab.h exactly +# except that the VALUES structure is more appropriate for M4. That is, VALUES +# is a list of sublists of strings. For each sublist, the first string is the +# name of a %define variable, and all remaining strings in that sublist are the +# valid values for that variable. Complain if such a variable is undefined (a +# Bison error since the default value should have been set already) or defined +# to any other value (possibly a user error). Don't record this as a Bison +# usage of the variable as there's no reason to suspect that the value has yet +# influenced the output. 
+# +# For example: +# +# b4_percent_define_check_values([[[[foo]], [[foo-value1]], [[foo-value2]]]], +# [[[[bar]], [[bar-value1]]]]) +m4_define([b4_percent_define_check_values], +[m4_foreach([b4_sublist], m4_quote($@), + [_b4_percent_define_check_values(b4_sublist)])]) + +m4_define([_b4_percent_define_check_values], +[_b4_percent_define_ifdef([$1], + [b4_percent_define_check_kind(]$1[, [keyword], [deprecated])dnl + m4_pushdef([b4_good_value], [0])dnl + m4_if($#, 1, [], + [m4_foreach([b4_value], m4_dquote(m4_shift($@)), + [m4_if(m4_indir([b4_percent_define(]$1[)]), b4_value, + [m4_define([b4_good_value], [1])])])])dnl + m4_if(b4_good_value, [0], + [b4_complain_at(b4_percent_define_get_loc([$1]), + [[invalid value for %%define variable '%s': '%s']], + [$1], + m4_dquote(m4_indir([b4_percent_define(]$1[)]))) + m4_foreach([b4_value], m4_dquote(m4_shift($@)), + [b4_error([[note]], b4_percent_define_get_loc([$1]), [] + [[accepted value: '%s']], + m4_dquote(b4_value))])])dnl + m4_popdef([b4_good_value])], + [b4_fatal([[$0: undefined %%define variable '%s']], [$1])])]) + +# b4_percent_code_get([QUALIFIER]) +# -------------------------------- +# If any %code blocks for QUALIFIER are defined, emit them beginning with a +# comment and ending with synclines and a newline. If QUALIFIER is not +# specified or empty, do this for the unqualified %code blocks. Also, record +# Bison's usage of QUALIFIER (if specified) by defining +# b4_percent_code_bison_qualifiers(QUALIFIER). 
+# +# For example, to emit any unqualified %code blocks followed by any %code +# blocks for the qualifier foo: +# +# b4_percent_code_get +# b4_percent_code_get([[foo]]) +m4_define([b4_percent_code_get], +[m4_pushdef([b4_macro_name], [[b4_percent_code(]$1[)]])dnl +m4_ifval([$1], [m4_define([b4_percent_code_bison_qualifiers(]$1[)])])dnl +m4_ifdef(b4_macro_name, +[b4_comment(m4_if([$#], [0], [[[Unqualified %code blocks.]]], + [[["%code ]$1[" blocks.]]])) +b4_user_code([m4_indir(b4_macro_name)])])dnl +m4_popdef([b4_macro_name])]) + +# b4_percent_code_ifdef(QUALIFIER, IF-TRUE, [IF-FALSE]) +# ----------------------------------------------------- +# If any %code blocks for QUALIFIER (or unqualified %code blocks if +# QUALIFIER is empty) are defined, expand IF-TRUE, else expand IF-FALSE. +# Also, record Bison's usage of QUALIFIER (if specified) by defining +# b4_percent_code_bison_qualifiers(QUALIFIER). +m4_define([b4_percent_code_ifdef], +[m4_ifdef([b4_percent_code(]$1[)], + [m4_ifval([$1], [m4_define([b4_percent_code_bison_qualifiers(]$1[)])])$2], + [$3])]) + + +## ------------------ ## +## Common variables. ## +## ------------------ ## + + +# b4_parse_assert_if([IF-ASSERTIONS-ARE-USED], [IF-NOT]) +# b4_parse_trace_if([IF-DEBUG-TRACES-ARE-ENABLED], [IF-NOT]) +# b4_token_ctor_if([IF-YYLEX-RETURNS-A-TOKEN], [IF-NOT]) +# ---------------------------------------------------------- +b4_percent_define_if_define([api.token.raw]) +b4_percent_define_if_define([token_ctor], [api.token.constructor]) +b4_percent_define_if_define([locations]) # Whether locations are tracked. +b4_percent_define_if_define([parse.assert]) +b4_percent_define_if_define([parse.trace]) +b4_percent_define_if_define([posix]) + + +# b4_bison_locations_if([IF-TRUE]) +# -------------------------------- +# Expand IF-TRUE if using locations, and using the default location +# type. 
+m4_define([b4_bison_locations_if], +[b4_locations_if([b4_percent_define_ifdef([[api.location.type]], [], [$1])])]) + + + +# %define parse.error "(custom|detailed|simple|verbose)" +# ------------------------------------------------------ +b4_percent_define_default([[parse.error]], [[simple]]) +b4_percent_define_check_values([[[[parse.error]], + [[custom]], [[detailed]], [[simple]], [[verbose]]]]) + +# b4_parse_error_case(CASE1, THEN1, CASE2, THEN2, ..., ELSE) +# ---------------------------------------------------------- +m4_define([b4_parse_error_case], +[m4_case(b4_percent_define_get([[parse.error]]), $@)]) + +# b4_parse_error_bmatch(PATTERN1, THEN1, PATTERN2, THEN2, ..., ELSE) +# ------------------------------------------------------------------ +m4_define([b4_parse_error_bmatch], +[m4_bmatch(b4_percent_define_get([[parse.error]]), $@)]) + + + +# b4_union_if([IF-UNION-ARE-USED], [IF-NOT]) +# b4_variant_if([IF-VARIANT-ARE-USED], [IF-NOT]) +# ---------------------------------------------- +# Depend on whether api.value.type is union, or variant. +m4_define([b4_union_flag], [[0]]) +m4_define([b4_variant_flag], [[0]]) +b4_percent_define_ifdef([[api.value.type]], + [m4_case(b4_percent_define_get_kind([[api.value.type]]), [keyword], + [m4_case(b4_percent_define_get([[api.value.type]]), + [union], [m4_define([b4_union_flag], [[1]])], + [variant], [m4_define([b4_variant_flag], [[1]])])])]) +b4_define_flag_if([union]) +b4_define_flag_if([variant]) + + +## ----------------------------------------------------------- ## +## After processing the skeletons, check that all the user's ## +## %define variables and %code qualifiers were used by Bison. 
## +## ----------------------------------------------------------- ## + +m4_define([b4_check_user_names_wrap], +[m4_ifdef([b4_percent_]$1[_user_]$2[s], + [b4_check_user_names([[%]$1 $2], + [b4_percent_]$1[_user_]$2[s], + [[b4_percent_]$1[_bison_]$2[s]])])]) + +m4_wrap_lifo([ +b4_check_user_names_wrap([[define]], [[variable]]) +b4_check_user_names_wrap([[code]], [[qualifier]]) +]) + + +## ---------------- ## +## Default values. ## +## ---------------- ## + +# m4_define_default([b4_lex_param], []) dnl breaks other skeletons +m4_define_default([b4_epilogue], []) +m4_define_default([b4_parse_param], []) + +# The initial column and line. +m4_define_default([b4_location_initial_column], [1]) +m4_define_default([b4_location_initial_line], [1]) + + +## --------------- ## +## Sanity checks. ## +## --------------- ## + +# api.location.type={...} (C, C++ and Java). +b4_percent_define_check_kind([api.location.type], [code], [deprecated]) + +# api.position.type={...} (Java). +b4_percent_define_check_kind([api.position.type], [code], [deprecated]) + +# api.prefix >< %name-prefix. +b4_percent_define_check_kind([api.prefix], [code], [deprecated]) +b4_percent_define_ifdef([api.prefix], +[m4_ifdef([b4_prefix], +[b4_complain_at(b4_percent_define_get_loc([api.prefix]), + [['%s' and '%s' cannot be used together]], + [%name-prefix], + [%define api.prefix])])]) + +# api.token.prefix={...} +# Make it a warning for those who used betas of Bison 3.0. +b4_percent_define_check_kind([api.token.prefix], [code], [deprecated]) + +# api.value.type >< %union. +b4_percent_define_ifdef([api.value.type], +[m4_ifdef([b4_union_members], +[b4_complain_at(b4_percent_define_get_loc([api.value.type]), + [['%s' and '%s' cannot be used together]], + [%union], + [%define api.value.type])])]) + +# api.value.type=union >< %yacc. 
+b4_percent_define_ifdef([api.value.type], +[m4_if(b4_percent_define_get([api.value.type]), [union], +[b4_yacc_if(dnl +[b4_complain_at(b4_percent_define_get_loc([api.value.type]), + [['%s' and '%s' cannot be used together]], + [%yacc], + [%define api.value.type "union"])])])]) + +# api.value.union.name. +b4_percent_define_check_kind([api.value.union.name], [keyword]) + +# parse.error (custom|detailed) >< token-table. +b4_token_table_if( +[b4_parse_error_bmatch([custom\|detailed], +[b4_complain_at(b4_percent_define_get_loc([parse.error]), + [['%s' and '%s' cannot be used together]], + [%token-table], + [%define parse.error (custom|detailed)])])]) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/c++-skel.m4 b/platform/dbops/binaries/build/share/bison/skeletons/c++-skel.m4 new file mode 100644 index 0000000000000000000000000000000000000000..f22002b9616fa43c8bbb8940d64fd85457ba9798 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/c++-skel.m4 @@ -0,0 +1,27 @@ + -*- Autoconf -*- + +# C++ skeleton dispatching for Bison. + +# Copyright (C) 2006-2007, 2009-2015, 2018-2021 Free Software +# Foundation, Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +b4_glr_if( [m4_define([b4_used_skeleton], [b4_skeletonsdir/[glr.cc]])]) +b4_nondeterministic_if([m4_define([b4_used_skeleton], [b4_skeletonsdir/[glr.cc]])]) + +m4_define_default([b4_used_skeleton], [b4_skeletonsdir/[lalr1.cc]]) +m4_define_default([b4_skeleton], ["b4_basename(b4_used_skeleton)"]) + +m4_include(b4_used_skeleton) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/c++.m4 b/platform/dbops/binaries/build/share/bison/skeletons/c++.m4 new file mode 100644 index 0000000000000000000000000000000000000000..2ae8423ab5d4369b3442a27896ea9c10e3026148 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/c++.m4 @@ -0,0 +1,778 @@ + -*- Autoconf -*- + +# C++ skeleton for Bison + +# Copyright (C) 2002-2021 Free Software Foundation, Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Sanity checks, before defaults installed by c.m4. +b4_percent_define_ifdef([[api.value.union.name]], + [b4_complain_at(b4_percent_define_get_loc([[api.value.union.name]]), + [named %union is invalid in C++])]) + +b4_percent_define_default([[api.symbol.prefix]], [[S_]]) + +m4_include(b4_skeletonsdir/[c.m4]) + +b4_percent_define_check_kind([api.namespace], [code], [deprecated]) +b4_percent_define_check_kind([api.parser.class], [code], [deprecated]) + + +## ----- ## +## C++. ## +## ----- ## + +# b4_comment(TEXT, [PREFIX]) +# -------------------------- +# Put TEXT in comment. 
Prefix all the output lines with PREFIX. +m4_define([b4_comment], +[_b4_comment([$1], [$2// ], [$2// ])]) + + +# b4_inline(hh|cc) +# ---------------- +# Expand to `inline\n ` if $1 is hh. +m4_define([b4_inline], +[m4_case([$1], + [cc], [], + [hh], [[inline + ]], + [m4_fatal([$0: invalid argument: $1])])]) + + +# b4_cxx_portability +# ------------------ +m4_define([b4_cxx_portability], +[#if defined __cplusplus +# define YY_CPLUSPLUS __cplusplus +#else +# define YY_CPLUSPLUS 199711L +#endif + +// Support move semantics when possible. +#if 201103L <= YY_CPLUSPLUS +# define YY_MOVE std::move +# define YY_MOVE_OR_COPY move +# define YY_MOVE_REF(Type) Type&& +# define YY_RVREF(Type) Type&& +# define YY_COPY(Type) Type +#else +# define YY_MOVE +# define YY_MOVE_OR_COPY copy +# define YY_MOVE_REF(Type) Type& +# define YY_RVREF(Type) const Type& +# define YY_COPY(Type) const Type& +#endif + +// Support noexcept when possible. +#if 201103L <= YY_CPLUSPLUS +# define YY_NOEXCEPT noexcept +# define YY_NOTHROW +#else +# define YY_NOEXCEPT +# define YY_NOTHROW throw () +#endif + +// Support constexpr when possible. +#if 201703 <= YY_CPLUSPLUS +# define YY_CONSTEXPR constexpr +#else +# define YY_CONSTEXPR +#endif[]dnl +]) + + +## ---------------- ## +## Default values. ## +## ---------------- ## + +b4_percent_define_default([[api.parser.class]], [[parser]]) + +# Don't do that so that we remember whether we're using a user +# request, or the default value. +# +# b4_percent_define_default([[api.location.type]], [[location]]) + +b4_percent_define_default([[api.filename.type]], [[const std::string]]) +# Make it a warning for those who used betas of Bison 3.0. +b4_percent_define_default([[api.namespace]], m4_defn([b4_prefix])) + +b4_percent_define_default([[define_location_comparison]], + [m4_if(b4_percent_define_get([[filename_type]]), + [std::string], [[true]], [[false]])]) + + + +## ----------- ## +## Namespace. 
## +## ----------- ## + +m4_define([b4_namespace_ref], [b4_percent_define_get([[api.namespace]])]) + + +# Don't permit an empty b4_namespace_ref. Any '::parser::foo' appended to it +# would compile as an absolute reference with 'parser' in the global namespace. +# b4_namespace_open would open an anonymous namespace and thus establish +# internal linkage. This would compile. However, it's cryptic, and internal +# linkage for the parser would be specified in all translation units that +# include the header, which is always generated. If we ever need to permit +# internal linkage somehow, surely we can find a cleaner approach. +m4_if(m4_bregexp(b4_namespace_ref, [^[ ]*$]), [-1], [], +[b4_complain_at(b4_percent_define_get_loc([[api.namespace]]), + [[namespace reference is empty]])]) + +# Instead of assuming the C++ compiler will do it, Bison should reject any +# invalid b4_namespace_ref that would be converted to a valid +# b4_namespace_open. The problem is that Bison doesn't always output +# b4_namespace_ref to uncommented code but should reserve the ability to do so +# in future releases without risking breaking any existing user grammars. +# Specifically, don't allow empty names as b4_namespace_open would just convert +# those into anonymous namespaces, and that might tempt some users. 
+m4_if(m4_bregexp(b4_namespace_ref, [::[ ]*::]), [-1], [], +[b4_complain_at(b4_percent_define_get_loc([[api.namespace]]), + [[namespace reference has consecutive "::"]])]) +m4_if(m4_bregexp(b4_namespace_ref, [::[ ]*$]), [-1], [], +[b4_complain_at(b4_percent_define_get_loc([[api.namespace]]), + [[namespace reference has a trailing "::"]])]) + +m4_define([b4_namespace_open], +[b4_user_code([b4_percent_define_get_syncline([[api.namespace]])dnl +[namespace ]m4_bpatsubst(m4_dquote(m4_bpatsubst(m4_dquote(b4_namespace_ref), + [^\(.\)[ ]*::], [\1])), + [::], [ { namespace ])[ {]])]) + +m4_define([b4_namespace_close], +[b4_user_code([b4_percent_define_get_syncline([[api.namespace]])dnl +m4_bpatsubst(m4_dquote(m4_bpatsubst(m4_dquote(b4_namespace_ref[ ]), + [^\(.\)[ ]*\(::\)?\([^][:]\|:[^:]\)*], + [\1])), + [::\([^][:]\|:[^:]\)*], [} ])[} // ]b4_namespace_ref])]) + + +## ------------- ## +## Token kinds. ## +## ------------- ## + + +# b4_token_enums +# -------------- +# Output the definition of the token kinds. +m4_define([b4_token_enums], +[[enum token_kind_type + { + ]b4_symbol([-2], [id])[ = -2, +]b4_symbol_foreach([b4_token_enum])dnl +[ };]dnl +]) + + + +## -------------- ## +## Symbol kinds. ## +## -------------- ## + +# b4_declare_symbol_enum +# ---------------------- +# The definition of the symbol internal numbers as an enum. +# Defining YYEMPTY here is important: it forces the compiler +# to use a signed type, which matters for yytoken. +m4_define([b4_declare_symbol_enum], +[[enum symbol_kind_type + { + YYNTOKENS = ]b4_tokens_number[, ///< Number of tokens. + ]b4_symbol(empty, kind_base)[ = -2, +]b4_symbol_foreach([ b4_symbol_enum])dnl +[ };]]) + + + +## ----------------- ## +## Semantic Values. ## +## ----------------- ## + + + +# b4_value_type_declare +# --------------------- +# Declare value_type. +m4_define([b4_value_type_declare], +[b4_value_type_setup[]dnl +[ /// Symbol semantic values. 
+]m4_bmatch(b4_percent_define_get_kind([[api.value.type]]), +[code], +[[ typedef ]b4_percent_define_get([[api.value.type]])[ value_type;]], +[m4_bmatch(b4_percent_define_get([[api.value.type]]), +[union\|union-directive], +[[ union value_type + { +]b4_user_union_members[ + };]])])dnl +]) + + +# b4_public_types_declare +# ----------------------- +# Define the public types: token, semantic value, location, and so forth. +# Depending on %define token_lex, may be output in the header or source file. +m4_define([b4_public_types_declare], +[b4_glr2_cc_if( +[b4_value_type_declare], +[[#ifdef ]b4_api_PREFIX[STYPE +# ifdef __GNUC__ +# pragma GCC message "bison: do not #define ]b4_api_PREFIX[STYPE in C++, use %define api.value.type" +# endif + typedef ]b4_api_PREFIX[STYPE value_type; +#else +]b4_value_type_declare[ +#endif + /// Backward compatibility (Bison 3.8). + typedef value_type semantic_type; +]])[]b4_locations_if([ + /// Symbol locations. + typedef b4_percent_define_get([[api.location.type]], + [[location]]) location_type;])[ + + /// Syntax errors thrown from user actions. + struct syntax_error : std::runtime_error + { + syntax_error (]b4_locations_if([const location_type& l, ])[const std::string& m) + : std::runtime_error (m)]b4_locations_if([ + , location (l)])[ + {} + + syntax_error (const syntax_error& s) + : std::runtime_error (s.what ())]b4_locations_if([ + , location (s.location)])[ + {} + + ~syntax_error () YY_NOEXCEPT YY_NOTHROW;]b4_locations_if([ + + location_type location;])[ + }; + + /// Token kinds. + struct token + { + ]b4_token_enums[]b4_glr2_cc_if([], [[ + /// Backward compatibility alias (Bison 3.6). + typedef token_kind_type yytokentype;]])[ + }; + + /// Token kind, as returned by yylex. + typedef token::token_kind_type token_kind_type;]b4_glr2_cc_if([], [[ + + /// Backward compatibility alias (Bison 3.6). + typedef token_kind_type token_type;]])[ + + /// Symbol kinds. 
+ struct symbol_kind + { + ]b4_declare_symbol_enum[ + }; + + /// (Internal) symbol kind. + typedef symbol_kind::symbol_kind_type symbol_kind_type; + + /// The number of tokens. + static const symbol_kind_type YYNTOKENS = symbol_kind::YYNTOKENS; +]]) + + +# b4_symbol_type_define +# --------------------- +# Define symbol_type, the external type for symbols used for symbol +# constructors. +m4_define([b4_symbol_type_define], +[[ /// A complete symbol. + /// + /// Expects its Base type to provide access to the symbol kind + /// via kind (). + /// + /// Provide access to semantic value]b4_locations_if([ and location])[. + template + struct basic_symbol : Base + { + /// Alias to Base. + typedef Base super_type; + + /// Default constructor. + basic_symbol () YY_NOEXCEPT + : value ()]b4_locations_if([ + , location ()])[ + {} + +#if 201103L <= YY_CPLUSPLUS + /// Move constructor. + basic_symbol (basic_symbol&& that) + : Base (std::move (that)) + , value (]b4_variant_if([], [std::move (that.value)]))b4_locations_if([ + , location (std::move (that.location))])[ + {]b4_variant_if([ + b4_symbol_variant([this->kind ()], [value], [move], + [std::move (that.value)]) + ])[} +#endif + + /// Copy constructor. + basic_symbol (const basic_symbol& that);]b4_variant_if([[ + + /// Constructors for typed symbols. +]b4_type_foreach([b4_basic_symbol_constructor_define], [ +])], [[ + /// Constructor for valueless symbols. + basic_symbol (typename Base::kind_type t]b4_locations_if([, + YY_MOVE_REF (location_type) l])[); + + /// Constructor for symbols with semantic value. + basic_symbol (typename Base::kind_type t, + YY_RVREF (value_type) v]b4_locations_if([, + YY_RVREF (location_type) l])[); +]])[ + /// Destroy the symbol. + ~basic_symbol () + { + clear (); + } + +]b4_glr2_cc_if([[ + /// Copy assignment. 
+ basic_symbol& operator= (const basic_symbol& that) + { + Base::operator= (that);]b4_variant_if([[ + ]b4_symbol_variant([this->kind ()], [value], [copy], + [that.value])], [[ + value = that.value]])[;]b4_locations_if([[ + location = that.location;]])[ + return *this; + } + + /// Move assignment. + basic_symbol& operator= (basic_symbol&& that) + { + Base::operator= (std::move (that));]b4_variant_if([[ + ]b4_symbol_variant([this->kind ()], [value], [move], + [std::move (that.value)])], [[ + value = std::move (that.value)]])[;]b4_locations_if([[ + location = std::move (that.location);]])[ + return *this; + } +]])[ + + /// Destroy contents, and record that is empty. + void clear () YY_NOEXCEPT + {]b4_variant_if([[ + // User destructor. + symbol_kind_type yykind = this->kind (); + basic_symbol& yysym = *this; + (void) yysym; + switch (yykind) + { +]b4_symbol_foreach([b4_symbol_destructor])dnl +[ default: + break; + } + + // Value type destructor. +]b4_symbol_variant([[yykind]], [[value]], [[template destroy]])])[ + Base::clear (); + } + +]b4_parse_error_bmatch( +[custom\|detailed], +[[ /// The user-facing name of this symbol. + const char *name () const YY_NOEXCEPT + { + return ]b4_parser_class[::symbol_name (this->kind ()); + }]], +[simple], +[[#if ]b4_api_PREFIX[DEBUG || ]b4_token_table_flag[ + /// The user-facing name of this symbol. + const char *name () const YY_NOEXCEPT + { + return ]b4_parser_class[::symbol_name (this->kind ()); + } +#endif // #if ]b4_api_PREFIX[DEBUG || ]b4_token_table_flag[ +]], +[verbose], +[[ /// The user-facing name of this symbol. + std::string name () const YY_NOEXCEPT + { + return ]b4_parser_class[::symbol_name (this->kind ()); + }]])[]b4_glr2_cc_if([], [[ + + /// Backward compatibility (Bison 3.6). + symbol_kind_type type_get () const YY_NOEXCEPT;]])[ + + /// Whether empty. + bool empty () const YY_NOEXCEPT; + + /// Destructive move, \a s is emptied into this. + void move (basic_symbol& s); + + /// The semantic value. 
+ value_type value;]b4_locations_if([ + + /// The location. + location_type location;])[ + + private: +#if YY_CPLUSPLUS < 201103L + /// Assignment operator. + basic_symbol& operator= (const basic_symbol& that); +#endif + }; + + /// Type access provider for token (enum) based symbols. + struct by_kind + { + /// The symbol kind as needed by the constructor. + typedef token_kind_type kind_type; + + /// Default constructor. + by_kind () YY_NOEXCEPT; + +#if 201103L <= YY_CPLUSPLUS + /// Move constructor. + by_kind (by_kind&& that) YY_NOEXCEPT; +#endif + + /// Copy constructor. + by_kind (const by_kind& that) YY_NOEXCEPT; + + /// Constructor from (external) token numbers. + by_kind (kind_type t) YY_NOEXCEPT; + +]b4_glr2_cc_if([[ + /// Copy assignment. + by_kind& operator= (const by_kind& that); + + /// Move assignment. + by_kind& operator= (by_kind&& that); +]])[ + + /// Record that this symbol is empty. + void clear () YY_NOEXCEPT; + + /// Steal the symbol kind from \a that. + void move (by_kind& that); + + /// The (internal) type number (corresponding to \a type). + /// \a empty when empty. + symbol_kind_type kind () const YY_NOEXCEPT;]b4_glr2_cc_if([], [[ + + /// Backward compatibility (Bison 3.6). + symbol_kind_type type_get () const YY_NOEXCEPT;]])[ + + /// The symbol kind. + /// \a ]b4_symbol_prefix[YYEMPTY when empty. + symbol_kind_type kind_; + };]b4_glr2_cc_if([], [[ + + /// Backward compatibility for a private implementation detail (Bison 3.6). + typedef by_kind by_type;]])[ + + /// "External" symbols: returned by the scanner. + struct symbol_type : basic_symbol + {]b4_variant_if([[ + /// Superclass. + typedef basic_symbol super_type; + + /// Empty symbol. + symbol_type () YY_NOEXCEPT {} + + /// Constructor for valueless symbols, and symbols from each type. +]b4_type_foreach([_b4_symbol_constructor_define])dnl + ])[}; +]]) + + +# b4_public_types_define(hh|cc) +# ----------------------------- +# Provide the implementation needed by the public types. 
+m4_define([b4_public_types_define], +[[ // basic_symbol. + template + ]b4_parser_class[::basic_symbol::basic_symbol (const basic_symbol& that) + : Base (that) + , value (]b4_variant_if([], [that.value]))b4_locations_if([ + , location (that.location)])[ + {]b4_variant_if([ + b4_symbol_variant([this->kind ()], [value], [copy], + [YY_MOVE (that.value)]) + ])[} + +]b4_variant_if([], [[ + /// Constructor for valueless symbols. + template + ]b4_parser_class[::basic_symbol::basic_symbol (]b4_join( + [typename Base::kind_type t], + b4_locations_if([YY_MOVE_REF (location_type) l]))[) + : Base (t) + , value ()]b4_locations_if([ + , location (l)])[ + {} + + template + ]b4_parser_class[::basic_symbol::basic_symbol (]b4_join( + [typename Base::kind_type t], + [YY_RVREF (value_type) v], + b4_locations_if([YY_RVREF (location_type) l]))[) + : Base (t) + , value (]b4_variant_if([], [YY_MOVE (v)])[)]b4_locations_if([ + , location (YY_MOVE (l))])[ + {]b4_variant_if([[ + (void) v; + ]b4_symbol_variant([this->kind ()], [value], [YY_MOVE_OR_COPY], [YY_MOVE (v)])])[}]])[ + +]b4_glr2_cc_if([], [[ + template + ]b4_parser_class[::symbol_kind_type + ]b4_parser_class[::basic_symbol::type_get () const YY_NOEXCEPT + { + return this->kind (); + } +]])[ + + template + bool + ]b4_parser_class[::basic_symbol::empty () const YY_NOEXCEPT + { + return this->kind () == ]b4_symbol(empty, kind)[; + } + + template + void + ]b4_parser_class[::basic_symbol::move (basic_symbol& s) + { + super_type::move (s); + ]b4_variant_if([b4_symbol_variant([this->kind ()], [value], [move], + [YY_MOVE (s.value)])], + [value = YY_MOVE (s.value);])[]b4_locations_if([ + location = YY_MOVE (s.location);])[ + } + + // by_kind. 
+ ]b4_inline([$1])b4_parser_class[::by_kind::by_kind () YY_NOEXCEPT + : kind_ (]b4_symbol(empty, kind)[) + {} + +#if 201103L <= YY_CPLUSPLUS + ]b4_inline([$1])b4_parser_class[::by_kind::by_kind (by_kind&& that) YY_NOEXCEPT + : kind_ (that.kind_) + { + that.clear (); + } +#endif + + ]b4_inline([$1])b4_parser_class[::by_kind::by_kind (const by_kind& that) YY_NOEXCEPT + : kind_ (that.kind_) + {} + + ]b4_inline([$1])b4_parser_class[::by_kind::by_kind (token_kind_type t) YY_NOEXCEPT + : kind_ (yytranslate_ (t)) + {} + +]b4_glr2_cc_if([[ + ]b4_inline([$1])]b4_parser_class[::by_kind& + b4_parser_class[::by_kind::by_kind::operator= (const by_kind& that) + { + kind_ = that.kind_; + return *this; + } + + ]b4_inline([$1])]b4_parser_class[::by_kind& + b4_parser_class[::by_kind::by_kind::operator= (by_kind&& that) + { + kind_ = that.kind_; + that.clear (); + return *this; + } +]])[ + + ]b4_inline([$1])[void + ]b4_parser_class[::by_kind::clear () YY_NOEXCEPT + { + kind_ = ]b4_symbol(empty, kind)[; + } + + ]b4_inline([$1])[void + ]b4_parser_class[::by_kind::move (by_kind& that) + { + kind_ = that.kind_; + that.clear (); + } + + ]b4_inline([$1])[]b4_parser_class[::symbol_kind_type + ]b4_parser_class[::by_kind::kind () const YY_NOEXCEPT + { + return kind_; + } + +]b4_glr2_cc_if([], [[ + ]b4_inline([$1])[]b4_parser_class[::symbol_kind_type + ]b4_parser_class[::by_kind::type_get () const YY_NOEXCEPT + { + return this->kind (); + } +]])[ +]]) + + +# b4_token_constructor_define +# ---------------------------- +# Define make_FOO for all the token kinds. +# Use at class-level. Redefined in variant.hh. +m4_define([b4_token_constructor_define], []) + + +# b4_yytranslate_define(cc|hh) +# ---------------------------- +# Define yytranslate_. Sometimes used in the header file ($1=hh), +# sometimes in the cc file. 
+m4_define([b4_yytranslate_define], +[ b4_inline([$1])b4_parser_class[::symbol_kind_type + ]b4_parser_class[::yytranslate_ (int t) YY_NOEXCEPT + { +]b4_api_token_raw_if( +[[ return static_cast (t);]], +[[ // YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to + // TOKEN-NUM as returned by yylex. + static + const ]b4_int_type_for([b4_translate])[ + translate_table[] = + { + ]b4_translate[ + }; + // Last valid token kind. + const int code_max = ]b4_code_max[; + + if (t <= 0) + return symbol_kind::]b4_symbol_prefix[YYEOF; + else if (t <= code_max) + return static_cast (translate_table[t]); + else + return symbol_kind::]b4_symbol_prefix[YYUNDEF;]])[ + } +]]) + + +# b4_lhs_value([TYPE]) +# -------------------- +m4_define([b4_lhs_value], +[b4_symbol_value([yyval], [$1])]) + + +# b4_rhs_value(RULE-LENGTH, POS, [TYPE]) +# -------------------------------------- +# FIXME: Dead code. +m4_define([b4_rhs_value], +[b4_symbol_value([yysemantic_stack_@{($1) - ($2)@}], [$3])]) + + +# b4_lhs_location() +# ----------------- +# Expansion of @$. +m4_define([b4_lhs_location], +[(yyloc)]) + + +# b4_rhs_location(RULE-LENGTH, POS) +# --------------------------------- +# Expansion of @POS, where the current rule has RULE-LENGTH symbols +# on RHS. +m4_define([b4_rhs_location], +[(yylocation_stack_@{($1) - ($2)@})]) + + +# b4_parse_param_decl +# ------------------- +# Extra formal arguments of the constructor. +# Change the parameter names from "foo" into "foo_yyarg", so that +# there is no collision bw the user chosen attribute name, and the +# argument name in the constructor. +m4_define([b4_parse_param_decl], +[m4_ifset([b4_parse_param], + [m4_map_sep([b4_parse_param_decl_1], [, ], [b4_parse_param])])]) + +m4_define([b4_parse_param_decl_1], +[$1_yyarg]) + + + +# b4_parse_param_cons +# ------------------- +# Extra initialisations of the constructor. 
+m4_define([b4_parse_param_cons], + [m4_ifset([b4_parse_param], + [ + b4_cc_constructor_calls(b4_parse_param)])]) +m4_define([b4_cc_constructor_calls], + [m4_map_sep([b4_cc_constructor_call], [, + ], [$@])]) +m4_define([b4_cc_constructor_call], + [$2 ($2_yyarg)]) + +# b4_parse_param_vars +# ------------------- +# Extra instance variables. +m4_define([b4_parse_param_vars], + [m4_ifset([b4_parse_param], + [ + // User arguments. +b4_cc_var_decls(b4_parse_param)])]) +m4_define([b4_cc_var_decls], + [m4_map_sep([b4_cc_var_decl], [ +], [$@])]) +m4_define([b4_cc_var_decl], + [ $1;]) + + +## ---------## +## Values. ## +## ---------## + +# b4_yylloc_default_define +# ------------------------ +# Define YYLLOC_DEFAULT. +m4_define([b4_yylloc_default_define], +[[/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N]. + If N is 0, then set CURRENT to the empty location which ends + the previous symbol: RHS[0] (always defined). */ + +# ifndef YYLLOC_DEFAULT +# define YYLLOC_DEFAULT(Current, Rhs, N) \ + do \ + if (N) \ + { \ + (Current).begin = YYRHSLOC (Rhs, 1).begin; \ + (Current).end = YYRHSLOC (Rhs, N).end; \ + } \ + else \ + { \ + (Current).begin = (Current).end = YYRHSLOC (Rhs, 0).end; \ + } \ + while (false) +# endif +]]) + +## -------- ## +## Checks. ## +## -------- ## + +b4_token_ctor_if([b4_variant_if([], + [b4_fatal_at(b4_percent_define_get_loc(api.token.constructor), + [cannot use '%s' without '%s'], + [%define api.token.constructor], + [%define api.value.type variant]))])]) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/c-like.m4 b/platform/dbops/binaries/build/share/bison/skeletons/c-like.m4 new file mode 100644 index 0000000000000000000000000000000000000000..a9bbc2e86e9ecb03054a5695e8ba64a1903f7ddb --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/c-like.m4 @@ -0,0 +1,72 @@ + -*- Autoconf -*- + +# Common code for C-like languages (C, C++, Java, etc.) + +# Copyright (C) 2012-2015, 2018-2021 Free Software Foundation, Inc. 
+ +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# _b4_comment(TEXT, OPEN, CONTINUE, END) +# -------------------------------------- +# Put TEXT in comment. Avoid trailing spaces: don't indent empty lines. +# Avoid adding indentation to the first line, as the indentation comes +# from OPEN. That's why we don't patsubst([$1], [^\(.\)], [ \1]). +# Turn "*/" in TEXT into "* /" so that we don't unexpectedly close +# the comments before its end. +# +# Prefix all the output lines with PREFIX. +m4_define([_b4_comment], +[$2[]b4_gsub(m4_expand([$1]), + [[*]/], [*\\/], + [/[*]], [/\\*], + [ +\(.\)], [ +$3\1])$4]) + + +# b4_comment(TEXT, [PREFIX]) +# -------------------------- +# Put TEXT in comment. Prefix all the output lines with PREFIX. +m4_define([b4_comment], +[_b4_comment([$1], [$2/* ], [$2 ], [ */])]) + + + + +# _b4_dollar_dollar(VALUE, SYMBOL-NUM, FIELD, DEFAULT-FIELD) +# ---------------------------------------------------------- +# If FIELD (or DEFAULT-FIELD) is non-null, return "VALUE.FIELD", +# otherwise just VALUE. Be sure to pass "(VALUE)" if VALUE is a +# pointer. 
+m4_define([_b4_dollar_dollar], +[b4_symbol_value([$1], + [$2], + m4_if([$3], [[]], + [[$4]], [[$3]]))]) + +# b4_dollar_pushdef(VALUE-POINTER, SYMBOL-NUM, [TYPE_TAG], LOCATION) +# b4_dollar_popdef +# ------------------------------------------------------------------ +# Define b4_dollar_dollar for VALUE-POINTER and DEFAULT-FIELD, +# and b4_at_dollar for LOCATION. +m4_define([b4_dollar_pushdef], +[m4_pushdef([b4_dollar_dollar], + [_b4_dollar_dollar([$1], [$2], m4_dquote($][1), [$3])])dnl +m4_pushdef([b4_at_dollar], [$4])dnl +]) +m4_define([b4_dollar_popdef], +[m4_popdef([b4_at_dollar])dnl +m4_popdef([b4_dollar_dollar])dnl +]) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/c-skel.m4 b/platform/dbops/binaries/build/share/bison/skeletons/c-skel.m4 new file mode 100644 index 0000000000000000000000000000000000000000..ac6ddd687c82166702a53272a76abd8bd5c9ec35 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/c-skel.m4 @@ -0,0 +1,27 @@ + -*- Autoconf -*- + +# C skeleton dispatching for Bison. + +# Copyright (C) 2006-2007, 2009-2015, 2018-2021 Free Software +# Foundation, Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +b4_glr_if( [m4_define([b4_used_skeleton], [b4_skeletonsdir/[glr.c]])]) +b4_nondeterministic_if([m4_define([b4_used_skeleton], [b4_skeletonsdir/[glr.c]])]) + +m4_define_default([b4_used_skeleton], [b4_skeletonsdir/[yacc.c]]) +m4_define_default([b4_skeleton], ["b4_basename(b4_used_skeleton)"]) + +m4_include(b4_used_skeleton) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/c.m4 b/platform/dbops/binaries/build/share/bison/skeletons/c.m4 new file mode 100644 index 0000000000000000000000000000000000000000..2425b07158f822565d234ecd059eb271db0d9da7 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/c.m4 @@ -0,0 +1,1125 @@ + -*- Autoconf -*- + +# C M4 Macros for Bison. + +# Copyright (C) 2002, 2004-2015, 2018-2021 Free Software Foundation, +# Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +m4_include(b4_skeletonsdir/[c-like.m4]) + +# b4_tocpp(STRING) +# ---------------- +# Convert STRING into a valid C macro name. +m4_define([b4_tocpp], +[m4_toupper(m4_bpatsubst(m4_quote($1), [[^a-zA-Z0-9]+], [_]))]) + + +# b4_cpp_guard(FILE) +# ------------------ +# A valid C macro name to use as a CPP header guard for FILE. +m4_define([b4_cpp_guard], +[[YY_]b4_tocpp(m4_defn([b4_prefix])/[$1])[_INCLUDED]]) + + +# b4_cpp_guard_open(FILE) +# b4_cpp_guard_close(FILE) +# ------------------------ +# If FILE does not expand to nothing, open/close CPP inclusion guards for FILE. 
+m4_define([b4_cpp_guard_open], +[m4_ifval(m4_quote($1), +[#ifndef b4_cpp_guard([$1]) +# define b4_cpp_guard([$1])])]) + +m4_define([b4_cpp_guard_close], +[m4_ifval(m4_quote($1), +[#endif b4_comment([!b4_cpp_guard([$1])])])]) + + +## ---------------- ## +## Identification. ## +## ---------------- ## + +# b4_identification +# ----------------- +# Depends on individual skeletons to define b4_pure_flag, b4_push_flag, or +# b4_pull_flag if they use the values of the %define variables api.pure or +# api.push-pull. +m4_define([b4_identification], +[[/* Identify Bison output, and Bison version. */ +#define YYBISON ]b4_version[ + +/* Bison version string. */ +#define YYBISON_VERSION "]b4_version_string[" + +/* Skeleton name. */ +#define YYSKELETON_NAME ]b4_skeleton[]m4_ifdef([b4_pure_flag], [[ + +/* Pure parsers. */ +#define YYPURE ]b4_pure_flag])[]m4_ifdef([b4_push_flag], [[ + +/* Push parsers. */ +#define YYPUSH ]b4_push_flag])[]m4_ifdef([b4_pull_flag], [[ + +/* Pull parsers. */ +#define YYPULL ]b4_pull_flag])[ +]]) + + +## ---------------- ## +## Default values. ## +## ---------------- ## + +# b4_api_prefix, b4_api_PREFIX +# ---------------------------- +# Corresponds to %define api.prefix +b4_percent_define_default([[api.prefix]], [[yy]]) +m4_define([b4_api_prefix], +[b4_percent_define_get([[api.prefix]])]) +m4_define([b4_api_PREFIX], +[m4_toupper(b4_api_prefix)]) + + +# b4_prefix +# --------- +# If the %name-prefix is not given, it is api.prefix. +m4_define_default([b4_prefix], [b4_api_prefix]) + +# If the %union is not named, its name is YYSTYPE. +b4_percent_define_default([[api.value.union.name]], + [b4_api_PREFIX[][STYPE]]) + +b4_percent_define_default([[api.symbol.prefix]], [[YYSYMBOL_]]) + +## ------------------------ ## +## Pure/impure interfaces. ## +## ------------------------ ## + +# b4_yylex_formals +# ---------------- +# All the yylex formal arguments. +# b4_lex_param arrives quoted twice, but we want to keep only one level. 
+m4_define([b4_yylex_formals], +[b4_pure_if([[[b4_api_PREFIX[STYPE *yylvalp]], [[&yylval]]][]dnl +b4_locations_if([, [b4_api_PREFIX[LTYPE *yyllocp], [&yylloc]]])])dnl +m4_ifdef([b4_lex_param], [, ]b4_lex_param)]) + + +# b4_yylex +# -------- +# Call yylex. +m4_define([b4_yylex], +[b4_function_call([yylex], [int], b4_yylex_formals)]) + + +# b4_user_args +# ------------ +m4_define([b4_user_args], +[m4_ifset([b4_parse_param], [, b4_user_args_no_comma])]) + +# b4_user_args_no_comma +# --------------------- +m4_define([b4_user_args_no_comma], +[m4_ifset([b4_parse_param], [b4_args(b4_parse_param)])]) + + +# b4_user_formals +# --------------- +# The possible parse-params formal arguments preceded by a comma. +m4_define([b4_user_formals], +[m4_ifset([b4_parse_param], [, b4_formals(b4_parse_param)])]) + + +# b4_parse_param +# -------------- +# If defined, b4_parse_param arrives double quoted, but below we prefer +# it to be single quoted. +m4_define([b4_parse_param], +b4_parse_param) + + +# b4_parse_param_for(DECL, FORMAL, BODY) +# --------------------------------------- +# Iterate over the user parameters, binding the declaration to DECL, +# the formal name to FORMAL, and evaluating the BODY. +m4_define([b4_parse_param_for], +[m4_foreach([$1_$2], m4_defn([b4_parse_param]), +[m4_pushdef([$1], m4_unquote(m4_car($1_$2)))dnl +m4_pushdef([$2], m4_shift($1_$2))dnl +$3[]dnl +m4_popdef([$2])dnl +m4_popdef([$1])dnl +])]) + + +# b4_use(EXPR) +# ------------ +# Pacify the compiler about some maybe unused value. +m4_define([b4_use], +[YY_USE ($1)]) + +# b4_parse_param_use([VAL], [LOC]) +# -------------------------------- +# 'YY_USE' VAL, LOC if locations are enabled, and all the parse-params. +m4_define([b4_parse_param_use], +[m4_ifvaln([$1], [ b4_use([$1]);])dnl +b4_locations_if([m4_ifvaln([$2], [ b4_use([$2]);])])dnl +b4_parse_param_for([Decl], [Formal], [ b4_use(Formal); +])dnl +]) + + +## ------------ ## +## Data Types. 
## +## ------------ ## + +# b4_int_type(MIN, MAX) +# --------------------- +# Return a narrow int type able to handle integers ranging from MIN +# to MAX (included) in portable C code. Assume MIN and MAX fall in +# 'int' range. +m4_define([b4_int_type], +[m4_if(b4_ints_in($@, [-127], [127]), [1], [signed char], + b4_ints_in($@, [0], [255]), [1], [unsigned char], + + b4_ints_in($@, [-32767], [32767]), [1], [short], + b4_ints_in($@, [0], [65535]), [1], [unsigned short], + + [int])]) + +# b4_c99_int_type(MIN, MAX) +# ------------------------- +# Like b4_int_type, but for C99. +# b4_c99_int_type_define replaces b4_int_type with this. +m4_define([b4_c99_int_type], +[m4_if(b4_ints_in($@, [-127], [127]), [1], [yytype_int8], + b4_ints_in($@, [0], [255]), [1], [yytype_uint8], + + b4_ints_in($@, [-32767], [32767]), [1], [yytype_int16], + b4_ints_in($@, [0], [65535]), [1], [yytype_uint16], + + [int])]) + +# b4_c99_int_type_define +# ---------------------- +# Define private types suitable for holding small integers in C99 or later. +m4_define([b4_c99_int_type_define], +[m4_copy_force([b4_c99_int_type], [b4_int_type])dnl +[#ifdef short +# undef short +#endif + +/* On compilers that do not define __PTRDIFF_MAX__ etc., make sure + and (if available) are included + so that the code can choose integer types of a good width. */ + +#ifndef __PTRDIFF_MAX__ +# include /* INFRINGES ON USER NAME SPACE */ +# if defined __STDC_VERSION__ && 199901 <= __STDC_VERSION__ +# include /* INFRINGES ON USER NAME SPACE */ +# define YY_STDINT_H +# endif +#endif + +/* Narrow types that promote to a signed type and that can represent a + signed or unsigned integer of at least N bits. In tables they can + save space and decrease cache pressure. Promoting to a signed type + helps avoid bugs in integer arithmetic. 
*/ + +#ifdef __INT_LEAST8_MAX__ +typedef __INT_LEAST8_TYPE__ yytype_int8; +#elif defined YY_STDINT_H +typedef int_least8_t yytype_int8; +#else +typedef signed char yytype_int8; +#endif + +#ifdef __INT_LEAST16_MAX__ +typedef __INT_LEAST16_TYPE__ yytype_int16; +#elif defined YY_STDINT_H +typedef int_least16_t yytype_int16; +#else +typedef short yytype_int16; +#endif + +/* Work around bug in HP-UX 11.23, which defines these macros + incorrectly for preprocessor constants. This workaround can likely + be removed in 2023, as HPE has promised support for HP-UX 11.23 + (aka HP-UX 11i v2) only through the end of 2022; see Table 2 of + . */ +#ifdef __hpux +# undef UINT_LEAST8_MAX +# undef UINT_LEAST16_MAX +# define UINT_LEAST8_MAX 255 +# define UINT_LEAST16_MAX 65535 +#endif + +#if defined __UINT_LEAST8_MAX__ && __UINT_LEAST8_MAX__ <= __INT_MAX__ +typedef __UINT_LEAST8_TYPE__ yytype_uint8; +#elif (!defined __UINT_LEAST8_MAX__ && defined YY_STDINT_H \ + && UINT_LEAST8_MAX <= INT_MAX) +typedef uint_least8_t yytype_uint8; +#elif !defined __UINT_LEAST8_MAX__ && UCHAR_MAX <= INT_MAX +typedef unsigned char yytype_uint8; +#else +typedef short yytype_uint8; +#endif + +#if defined __UINT_LEAST16_MAX__ && __UINT_LEAST16_MAX__ <= __INT_MAX__ +typedef __UINT_LEAST16_TYPE__ yytype_uint16; +#elif (!defined __UINT_LEAST16_MAX__ && defined YY_STDINT_H \ + && UINT_LEAST16_MAX <= INT_MAX) +typedef uint_least16_t yytype_uint16; +#elif !defined __UINT_LEAST16_MAX__ && USHRT_MAX <= INT_MAX +typedef unsigned short yytype_uint16; +#else +typedef int yytype_uint16; +#endif]]) + + +# b4_sizes_types_define +# --------------------- +# Define YYPTRDIFF_T/YYPTRDIFF_MAXIMUM, YYSIZE_T/YYSIZE_MAXIMUM, +# and YYSIZEOF. 
+m4_define([b4_sizes_types_define], +[[#ifndef YYPTRDIFF_T +# if defined __PTRDIFF_TYPE__ && defined __PTRDIFF_MAX__ +# define YYPTRDIFF_T __PTRDIFF_TYPE__ +# define YYPTRDIFF_MAXIMUM __PTRDIFF_MAX__ +# elif defined PTRDIFF_MAX +# ifndef ptrdiff_t +# include /* INFRINGES ON USER NAME SPACE */ +# endif +# define YYPTRDIFF_T ptrdiff_t +# define YYPTRDIFF_MAXIMUM PTRDIFF_MAX +# else +# define YYPTRDIFF_T long +# define YYPTRDIFF_MAXIMUM LONG_MAX +# endif +#endif + +#ifndef YYSIZE_T +# ifdef __SIZE_TYPE__ +# define YYSIZE_T __SIZE_TYPE__ +# elif defined size_t +# define YYSIZE_T size_t +# elif defined __STDC_VERSION__ && 199901 <= __STDC_VERSION__ +# include /* INFRINGES ON USER NAME SPACE */ +# define YYSIZE_T size_t +# else +# define YYSIZE_T unsigned +# endif +#endif + +#define YYSIZE_MAXIMUM \ + YY_CAST (YYPTRDIFF_T, \ + (YYPTRDIFF_MAXIMUM < YY_CAST (YYSIZE_T, -1) \ + ? YYPTRDIFF_MAXIMUM \ + : YY_CAST (YYSIZE_T, -1))) + +#define YYSIZEOF(X) YY_CAST (YYPTRDIFF_T, sizeof (X)) +]]) + + +# b4_int_type_for(NAME) +# --------------------- +# Return a narrow int type able to handle numbers ranging from +# 'NAME_min' to 'NAME_max' (included). +m4_define([b4_int_type_for], +[b4_int_type($1_min, $1_max)]) + + +# b4_table_value_equals(TABLE, VALUE, LITERAL, SYMBOL) +# ---------------------------------------------------- +# Without inducing a comparison warning from the compiler, check if the +# literal value LITERAL equals VALUE from table TABLE, which must have +# TABLE_min and TABLE_max defined. SYMBOL denotes +m4_define([b4_table_value_equals], +[m4_if(m4_eval($3 < m4_indir([b4_]$1[_min]) + || m4_indir([b4_]$1[_max]) < $3), [1], + [[0]], + [(($2) == $4)])]) + + +## ----------------- ## +## Compiler issues. ## +## ----------------- ## + +# b4_attribute_define([noreturn]) +# ------------------------------- +# Provide portable compiler "attributes". If "noreturn" is passed, define +# _Noreturn. 
+m4_define([b4_attribute_define], +[[#ifndef YY_ATTRIBUTE_PURE +# if defined __GNUC__ && 2 < __GNUC__ + (96 <= __GNUC_MINOR__) +# define YY_ATTRIBUTE_PURE __attribute__ ((__pure__)) +# else +# define YY_ATTRIBUTE_PURE +# endif +#endif + +#ifndef YY_ATTRIBUTE_UNUSED +# if defined __GNUC__ && 2 < __GNUC__ + (7 <= __GNUC_MINOR__) +# define YY_ATTRIBUTE_UNUSED __attribute__ ((__unused__)) +# else +# define YY_ATTRIBUTE_UNUSED +# endif +#endif + +]m4_bmatch([$1], [\bnoreturn\b], [[/* The _Noreturn keyword of C11. */ +]dnl This is close to lib/_Noreturn.h, except that we do enable +dnl the use of [[noreturn]], because _Noreturn is used in places +dnl where [[noreturn]] works in C++. We need this in particular +dnl because of glr.cc which compiles code from glr.c in C++. +dnl And the C++ compiler chokes on _Noreturn. Also, we do not +dnl use C' _Noreturn in C++, to avoid -Wc11-extensions warnings. +[#ifndef _Noreturn +# if (defined __cplusplus \ + && ((201103 <= __cplusplus && !(__GNUC__ == 4 && __GNUC_MINOR__ == 7)) \ + || (defined _MSC_VER && 1900 <= _MSC_VER))) +# define _Noreturn [[noreturn]] +# elif ((!defined __cplusplus || defined __clang__) \ + && (201112 <= (defined __STDC_VERSION__ ? __STDC_VERSION__ : 0) \ + || (!defined __STRICT_ANSI__ \ + && (4 < __GNUC__ + (7 <= __GNUC_MINOR__) \ + || (defined __apple_build_version__ \ + ? 6000000 <= __apple_build_version__ \ + : 3 < __clang_major__ + (5 <= __clang_minor__)))))) + /* _Noreturn works as-is. */ +# elif (2 < __GNUC__ + (8 <= __GNUC_MINOR__) || defined __clang__ \ + || 0x5110 <= __SUNPRO_C) +# define _Noreturn __attribute__ ((__noreturn__)) +# elif 1200 <= (defined _MSC_VER ? _MSC_VER : 0) +# define _Noreturn __declspec (noreturn) +# else +# define _Noreturn +# endif +#endif + +]])[/* Suppress unused-variable warnings by "using" E. */ +#if ! 
defined lint || defined __GNUC__ +# define YY_USE(E) ((void) (E)) +#else +# define YY_USE(E) /* empty */ +#endif + +/* Suppress an incorrect diagnostic about yylval being uninitialized. */ +#if defined __GNUC__ && ! defined __ICC && 406 <= __GNUC__ * 100 + __GNUC_MINOR__ +# if __GNUC__ * 100 + __GNUC_MINOR__ < 407 +# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ + _Pragma ("GCC diagnostic push") \ + _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"") +# else +# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ + _Pragma ("GCC diagnostic push") \ + _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"") \ + _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") +# endif +# define YY_IGNORE_MAYBE_UNINITIALIZED_END \ + _Pragma ("GCC diagnostic pop") +#else +# define YY_INITIAL_VALUE(Value) Value +#endif +#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN +# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN +# define YY_IGNORE_MAYBE_UNINITIALIZED_END +#endif +#ifndef YY_INITIAL_VALUE +# define YY_INITIAL_VALUE(Value) /* Nothing. */ +#endif + +#if defined __cplusplus && defined __GNUC__ && ! defined __ICC && 6 <= __GNUC__ +# define YY_IGNORE_USELESS_CAST_BEGIN \ + _Pragma ("GCC diagnostic push") \ + _Pragma ("GCC diagnostic ignored \"-Wuseless-cast\"") +# define YY_IGNORE_USELESS_CAST_END \ + _Pragma ("GCC diagnostic pop") +#endif +#ifndef YY_IGNORE_USELESS_CAST_BEGIN +# define YY_IGNORE_USELESS_CAST_BEGIN +# define YY_IGNORE_USELESS_CAST_END +#endif +]]) + + +# b4_cast_define +# -------------- +m4_define([b4_cast_define], +[# ifndef YY_CAST +# ifdef __cplusplus +# define YY_CAST(Type, Val) static_cast (Val) +# define YY_REINTERPRET_CAST(Type, Val) reinterpret_cast (Val) +# else +# define YY_CAST(Type, Val) ((Type) (Val)) +# define YY_REINTERPRET_CAST(Type, Val) ((Type) (Val)) +# endif +# endif[]dnl +]) + + +# b4_null_define +# -------------- +# Portability issues: define a YY_NULLPTR appropriate for the current +# language (C, C++98, or C++11). 
+# +# In C++ pre C++11 it is standard practice to use 0 (not NULL) for the +# null pointer. In C, prefer ((void*)0) to avoid having to include stdlib.h. +m4_define([b4_null_define], +[# ifndef YY_NULLPTR +# if defined __cplusplus +# if 201103L <= __cplusplus +# define YY_NULLPTR nullptr +# else +# define YY_NULLPTR 0 +# endif +# else +# define YY_NULLPTR ((void*)0) +# endif +# endif[]dnl +]) + + +# b4_null +# ------- +# Return a null pointer constant. +m4_define([b4_null], [YY_NULLPTR]) + + + +## ---------## +## Values. ## +## ---------## + +# b4_integral_parser_table_define(TABLE-NAME, CONTENT, COMMENT) +# ------------------------------------------------------------- +# Define "yy" whose contents is CONTENT. +m4_define([b4_integral_parser_table_define], +[m4_ifvaln([$3], [b4_comment([$3])])dnl +static const b4_int_type_for([$2]) yy$1[[]] = +{ + $2 +};dnl +]) + + +## ------------- ## +## Token kinds. ## +## ------------- ## + +# Because C enums are not scoped, because tokens are exposed in the +# header, and because these tokens are common to all the parsers, we +# need to make sure their names don't collide: use the api.prefix. +# YYEOF is special, since the user may give it a different name. +m4_define([b4_symbol(-2, id)], [b4_api_PREFIX[][EMPTY]]) +m4_define([b4_symbol(-2, tag)], [[No symbol.]]) + +m4_if(b4_symbol(eof, id), [YYEOF], + [m4_define([b4_symbol(0, id)], [b4_api_PREFIX[][EOF]])]) +m4_define([b4_symbol(1, id)], [b4_api_PREFIX[][error]]) +m4_define([b4_symbol(2, id)], [b4_api_PREFIX[][UNDEF]]) + + +# b4_token_define(TOKEN-NUM) +# -------------------------- +# Output the definition of this token as #define. +m4_define([b4_token_define], +[b4_token_format([#define %s %s], [$1])]) + +# b4_token_defines +# ---------------- +# Output the definition of the tokens. +m4_define([b4_token_defines], +[[/* Token kinds. 
*/ +#define ]b4_symbol(empty, [id])[ -2 +]m4_join([ +], b4_symbol_map([b4_token_define])) +]) + + +# b4_token_enum(TOKEN-NUM) +# ------------------------ +# Output the definition of this token as an enum. +m4_define([b4_token_enum], +[b4_token_visible_if([$1], + [m4_format([ %-30s %s], + m4_format([[%s = %s%s%s]], + b4_symbol([$1], [id]), + b4_symbol([$1], b4_api_token_raw_if([[number]], [[code]])), + m4_if([$1], b4_last_enum_token, [], [[,]])), + [b4_symbol_tag_comment([$1])])])]) + + +# b4_token_enums +# -------------- +# The definition of the token kinds. +m4_define([b4_token_enums], +[b4_any_token_visible_if([[/* Token kinds. */ +#ifndef ]b4_api_PREFIX[TOKENTYPE +# define ]b4_api_PREFIX[TOKENTYPE + enum ]b4_api_prefix[tokentype + { + ]b4_symbol(empty, [id])[ = -2, +]b4_symbol_foreach([b4_token_enum])dnl +[ }; + typedef enum ]b4_api_prefix[tokentype ]b4_api_prefix[token_kind_t; +#endif +]])]) + + +# b4_token_enums_defines +# ---------------------- +# The definition of the tokens (if there are any) as enums and, +# if POSIX Yacc is enabled, as #defines. +m4_define([b4_token_enums_defines], +[b4_token_enums[]b4_yacc_if([b4_token_defines])]) + + +# b4_symbol_translate(STRING) +# --------------------------- +# Used by "bison" in the array of symbol names to mark those that +# require translation. +m4_define([b4_symbol_translate], +[[N_($1)]]) + + + +## -------------- ## +## Symbol kinds. ## +## -------------- ## + +# b4_symbol_enum(SYMBOL-NUM) +# -------------------------- +# Output the definition of this symbol as an enum. +m4_define([b4_symbol_enum], +[m4_format([ %-40s %s], + m4_format([[%s = %s%s%s]], + b4_symbol([$1], [kind_base]), + [$1], + m4_if([$1], b4_last_symbol, [], [[,]])), + [b4_symbol_tag_comment([$1])])]) + + +# b4_declare_symbol_enum +# ---------------------- +# The definition of the symbol internal numbers as an enum. +# Defining YYEMPTY here is important: it forces the compiler +# to use a signed type, which matters for yytoken. 
+m4_define([b4_declare_symbol_enum], +[[/* Symbol kind. */ +enum yysymbol_kind_t +{ + ]b4_symbol(empty, [kind_base])[ = -2, +]b4_symbol_foreach([b4_symbol_enum])dnl +[}; +typedef enum yysymbol_kind_t yysymbol_kind_t; +]])]) + + +## ----------------- ## +## Semantic Values. ## +## ----------------- ## + + +# b4_symbol_value(VAL, [SYMBOL-NUM], [TYPE-TAG]) +# ---------------------------------------------- +# See README. +m4_define([b4_symbol_value], +[m4_ifval([$3], + [($1.$3)], + [m4_ifval([$2], + [b4_symbol_if([$2], [has_type], + [($1.b4_symbol([$2], [type]))], + [$1])], + [$1])])]) + + +## ---------------------- ## +## Defining C functions. ## +## ---------------------- ## + + +# b4_formals([DECL1, NAME1], ...) +# ------------------------------- +# The formal arguments of a C function definition. +m4_define([b4_formals], +[m4_if([$#], [0], [void], + [$#$1], [1], [void], + [m4_map_sep([b4_formal], [, ], [$@])])]) + +m4_define([b4_formal], +[$1]) + + +# b4_function_declare(NAME, RETURN-VALUE, [DECL1, NAME1], ...) +# ------------------------------------------------------------ +# Declare the function NAME. +m4_define([b4_function_declare], +[$2 $1 (b4_formals(m4_shift2($@)));[]dnl +]) + + + +## --------------------- ## +## Calling C functions. ## +## --------------------- ## + + +# b4_function_call(NAME, RETURN-VALUE, [DECL1, NAME1], ...) +# ----------------------------------------------------------- +# Call the function NAME with arguments NAME1, NAME2 etc. +m4_define([b4_function_call], +[$1 (b4_args(m4_shift2($@)))[]dnl +]) + + +# b4_args([DECL1, NAME1], ...) +# ---------------------------- +# Output the arguments NAME1, NAME2... +m4_define([b4_args], +[m4_map_sep([b4_arg], [, ], [$@])]) + +m4_define([b4_arg], +[$2]) + + +## ----------- ## +## Synclines. ## +## ----------- ## + +# b4_sync_start(LINE, FILE) +# ------------------------- +m4_define([b4_sync_start], [[#]line $1 $2]) + + +## -------------- ## +## User actions. 
## +## -------------- ## + +# b4_case(LABEL, STATEMENTS, [COMMENTS]) +# -------------------------------------- +m4_define([b4_case], +[ case $1:m4_ifval([$3], [ b4_comment([$3])]) +$2 +b4_syncline([@oline@], [@ofile@])dnl + break;]) + + +# b4_predicate_case(LABEL, CONDITIONS) +# ------------------------------------ +m4_define([b4_predicate_case], +[ case $1: + if (! ( +$2)) YYERROR; +b4_syncline([@oline@], [@ofile@])dnl + break;]) + + +# b4_yydestruct_define +# -------------------- +# Define the "yydestruct" function. +m4_define_default([b4_yydestruct_define], +[[/*-----------------------------------------------. +| Release the memory associated to this symbol. | +`-----------------------------------------------*/ + +static void +yydestruct (const char *yymsg, + yysymbol_kind_t yykind, YYSTYPE *yyvaluep]b4_locations_if(dnl +[[, YYLTYPE *yylocationp]])[]b4_user_formals[) +{ +]b4_parse_param_use([yyvaluep], [yylocationp])dnl +[ if (!yymsg) + yymsg = "Deleting"; + YY_SYMBOL_PRINT (yymsg, yykind, yyvaluep, yylocationp); + + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + ]b4_symbol_actions([destructor])[ + YY_IGNORE_MAYBE_UNINITIALIZED_END +}]dnl +]) + + +# b4_yy_symbol_print_define +# ------------------------- +# Define the "yy_symbol_print" function. +m4_define_default([b4_yy_symbol_print_define], +[[ +/*-----------------------------------. +| Print this symbol's value on YYO. | +`-----------------------------------*/ + +static void +yy_symbol_value_print (FILE *yyo, + yysymbol_kind_t yykind, YYSTYPE const * const yyvaluep]b4_locations_if(dnl +[[, YYLTYPE const * const yylocationp]])[]b4_user_formals[) +{ + FILE *yyoutput = yyo; +]b4_parse_param_use([yyoutput], [yylocationp])dnl +[ if (!yyvaluep) + return;] +b4_percent_code_get([[pre-printer]])dnl + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + b4_symbol_actions([printer]) + YY_IGNORE_MAYBE_UNINITIALIZED_END +b4_percent_code_get([[post-printer]])dnl +[} + + +/*---------------------------. +| Print this symbol on YYO. 
| +`---------------------------*/ + +static void +yy_symbol_print (FILE *yyo, + yysymbol_kind_t yykind, YYSTYPE const * const yyvaluep]b4_locations_if(dnl +[[, YYLTYPE const * const yylocationp]])[]b4_user_formals[) +{ + YYFPRINTF (yyo, "%s %s (", + yykind < YYNTOKENS ? "token" : "nterm", yysymbol_name (yykind)); + +]b4_locations_if([ YYLOCATION_PRINT (yyo, yylocationp); + YYFPRINTF (yyo, ": "); +])dnl +[ yy_symbol_value_print (yyo, yykind, yyvaluep]dnl +b4_locations_if([, yylocationp])[]b4_user_args[); + YYFPRINTF (yyo, ")"); +}]dnl +]) + + +## ---------------- ## +## api.value.type. ## +## ---------------- ## + + +# ---------------------- # +# api.value.type=union. # +# ---------------------- # + +# b4_symbol_type_register(SYMBOL-NUM) +# ----------------------------------- +# Symbol SYMBOL-NUM has a type (for variant) instead of a type-tag. +# Extend the definition of %union's body (b4_union_members) with a +# field of that type, and extend the symbol's "type" field to point to +# the field name, instead of the type name. +m4_define([b4_symbol_type_register], +[m4_define([b4_symbol($1, type_tag)], + [b4_symbol_if([$1], [has_id], + [b4_symbol([$1], [id])], + [yykind_[]b4_symbol([$1], [number])])])dnl +m4_append([b4_union_members], +m4_expand([m4_format([ %-40s %s], + m4_expand([b4_symbol([$1], [type]) b4_symbol([$1], [type_tag]);]), + [b4_symbol_tag_comment([$1])])])) +]) + + +# b4_type_define_tag(SYMBOL1-NUM, ...) +# ------------------------------------ +# For the batch of symbols SYMBOL1-NUM... (which all have the same +# type), enhance the %union definition for each of them, and set +# there "type" field to the field tag name, instead of the type name. +m4_define([b4_type_define_tag], +[b4_symbol_if([$1], [has_type], + [m4_map([b4_symbol_type_register], [$@])]) +]) + + +# b4_symbol_value_union(VAL, SYMBOL-NUM, [TYPE]) +# ---------------------------------------------- +# Same of b4_symbol_value, but when api.value.type=union. 
+m4_define([b4_symbol_value_union], +[m4_ifval([$3], + [(*($3*)(&$1))], + [m4_ifval([$2], + [b4_symbol_if([$2], [has_type], + [($1.b4_symbol([$2], [type_tag]))], + [$1])], + [$1])])]) + + +# b4_value_type_setup_union +# ------------------------- +# Setup support for api.value.type=union. Symbols are defined with a +# type instead of a union member name: build the corresponding union, +# and give the symbols their tag. +m4_define([b4_value_type_setup_union], +[m4_define([b4_union_members]) +b4_type_foreach([b4_type_define_tag]) +m4_copy_force([b4_symbol_value_union], [b4_symbol_value]) +]) + + +# -------------------------- # +# api.value.type = variant. # +# -------------------------- # + +# b4_value_type_setup_variant +# --------------------------- +# Setup support for api.value.type=variant. By default, fail, specialized +# by other skeletons. +m4_define([b4_value_type_setup_variant], +[b4_complain_at(b4_percent_define_get_loc([[api.value.type]]), + [['%s' does not support '%s']], + [b4_skeleton], + [%define api.value.type variant])]) + + +# _b4_value_type_setup_keyword +# ---------------------------- +# api.value.type is defined with a keyword/string syntax. Check if +# that is properly defined, and prepare its use. +m4_define([_b4_value_type_setup_keyword], +[b4_percent_define_check_values([[[[api.value.type]], + [[none]], + [[union]], + [[union-directive]], + [[variant]], + [[yystype]]]])dnl +m4_case(b4_percent_define_get([[api.value.type]]), + [union], [b4_value_type_setup_union], + [variant], [b4_value_type_setup_variant])]) + + +# b4_value_type_setup +# ------------------- +# Check if api.value.type is properly defined, and possibly prepare +# its use. +b4_define_silent([b4_value_type_setup], +[# Define default value. 
+b4_percent_define_ifdef([[api.value.type]], [], +[# %union => api.value.type=union-directive +m4_ifdef([b4_union_members], +[m4_define([b4_percent_define_kind(api.value.type)], [keyword]) +m4_define([b4_percent_define(api.value.type)], [union-directive])], +[# no tag seen => api.value.type={int} +m4_if(b4_tag_seen_flag, 0, +[m4_define([b4_percent_define_kind(api.value.type)], [code]) +m4_define([b4_percent_define(api.value.type)], [int])], +[# otherwise api.value.type=yystype +m4_define([b4_percent_define_kind(api.value.type)], [keyword]) +m4_define([b4_percent_define(api.value.type)], [yystype])])])]) + +# Set up. +m4_bmatch(b4_percent_define_get_kind([[api.value.type]]), + [keyword\|string], [_b4_value_type_setup_keyword]) +]) + + +## -------------- ## +## Declarations. ## +## -------------- ## + + +# b4_value_type_define +# -------------------- +m4_define([b4_value_type_define], +[b4_value_type_setup[]dnl +/* Value type. */ +m4_bmatch(b4_percent_define_get_kind([[api.value.type]]), +[code], +[[#if ! defined ]b4_api_PREFIX[STYPE && ! defined ]b4_api_PREFIX[STYPE_IS_DECLARED +typedef ]b4_percent_define_get([[api.value.type]])[ ]b4_api_PREFIX[STYPE; +# define ]b4_api_PREFIX[STYPE_IS_TRIVIAL 1 +# define ]b4_api_PREFIX[STYPE_IS_DECLARED 1 +#endif +]], +[m4_bmatch(b4_percent_define_get([[api.value.type]]), +[union\|union-directive], +[[#if ! defined ]b4_api_PREFIX[STYPE && ! defined ]b4_api_PREFIX[STYPE_IS_DECLARED +]b4_percent_define_get_syncline([[api.value.union.name]])dnl +[union ]b4_percent_define_get([[api.value.union.name]])[ +{ +]b4_user_union_members[ +}; +]b4_percent_define_get_syncline([[api.value.union.name]])dnl +[typedef union ]b4_percent_define_get([[api.value.union.name]])[ ]b4_api_PREFIX[STYPE; +# define ]b4_api_PREFIX[STYPE_IS_TRIVIAL 1 +# define ]b4_api_PREFIX[STYPE_IS_DECLARED 1 +#endif +]])])]) + + +# b4_location_type_define +# ----------------------- +m4_define([b4_location_type_define], +[[/* Location type. 
*/ +]b4_percent_define_ifdef([[api.location.type]], +[[typedef ]b4_percent_define_get([[api.location.type]])[ ]b4_api_PREFIX[LTYPE; +]], +[[#if ! defined ]b4_api_PREFIX[LTYPE && ! defined ]b4_api_PREFIX[LTYPE_IS_DECLARED +typedef struct ]b4_api_PREFIX[LTYPE ]b4_api_PREFIX[LTYPE; +struct ]b4_api_PREFIX[LTYPE +{ + int first_line; + int first_column; + int last_line; + int last_column; +}; +# define ]b4_api_PREFIX[LTYPE_IS_DECLARED 1 +# define ]b4_api_PREFIX[LTYPE_IS_TRIVIAL 1 +#endif +]])]) + + +# b4_declare_yylstype +# ------------------- +# Declarations that might either go into the header (if --header) or +# in the parser body. Declare YYSTYPE/YYLTYPE, and yylval/yylloc. +m4_define([b4_declare_yylstype], +[b4_value_type_define[]b4_locations_if([ +b4_location_type_define]) + +b4_pure_if([], [[extern ]b4_api_PREFIX[STYPE ]b4_prefix[lval; +]b4_locations_if([[extern ]b4_api_PREFIX[LTYPE ]b4_prefix[lloc;]])])[]dnl +]) + + +# b4_YYDEBUG_define +# ----------------- +m4_define([b4_YYDEBUG_define], +[[/* Debug traces. */ +]m4_if(b4_api_prefix, [yy], +[[#ifndef YYDEBUG +# define YYDEBUG ]b4_parse_trace_if([1], [0])[ +#endif]], +[[#ifndef ]b4_api_PREFIX[DEBUG +# if defined YYDEBUG +#if YYDEBUG +# define ]b4_api_PREFIX[DEBUG 1 +# else +# define ]b4_api_PREFIX[DEBUG 0 +# endif +# else /* ! defined YYDEBUG */ +# define ]b4_api_PREFIX[DEBUG ]b4_parse_trace_if([1], [0])[ +# endif /* ! defined YYDEBUG */ +#endif /* ! defined ]b4_api_PREFIX[DEBUG */]])[]dnl +]) + +# b4_declare_yydebug +# ------------------ +m4_define([b4_declare_yydebug], +[b4_YYDEBUG_define[ +#if ]b4_api_PREFIX[DEBUG +extern int ]b4_prefix[debug; +#endif][]dnl +]) + +# b4_yylloc_default_define +# ------------------------ +# Define YYLLOC_DEFAULT. +m4_define([b4_yylloc_default_define], +[[/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N]. + If N is 0, then set CURRENT to the empty location which ends + the previous symbol: RHS[0] (always defined). 
*/ + +#ifndef YYLLOC_DEFAULT +# define YYLLOC_DEFAULT(Current, Rhs, N) \ + do \ + if (N) \ + { \ + (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \ + (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \ + (Current).last_line = YYRHSLOC (Rhs, N).last_line; \ + (Current).last_column = YYRHSLOC (Rhs, N).last_column; \ + } \ + else \ + { \ + (Current).first_line = (Current).last_line = \ + YYRHSLOC (Rhs, 0).last_line; \ + (Current).first_column = (Current).last_column = \ + YYRHSLOC (Rhs, 0).last_column; \ + } \ + while (0) +#endif +]]) + +# b4_yylocation_print_define +# -------------------------- +# Define YYLOCATION_PRINT. +m4_define([b4_yylocation_print_define], +[b4_locations_if([[ +/* YYLOCATION_PRINT -- Print the location on the stream. + This macro was not mandated originally: define only if we know + we won't break user code: when these are the locations we know. */ + +# ifndef YYLOCATION_PRINT + +# if defined YY_LOCATION_PRINT + + /* Temporary convenience wrapper in case some people defined the + undocumented and private YY_LOCATION_PRINT macros. */ +# define YYLOCATION_PRINT(File, Loc) YY_LOCATION_PRINT(File, *(Loc)) + +# elif defined ]b4_api_PREFIX[LTYPE_IS_TRIVIAL && ]b4_api_PREFIX[LTYPE_IS_TRIVIAL + +/* Print *YYLOCP on YYO. Private, do not rely on its existence. */ + +YY_ATTRIBUTE_UNUSED +static int +yy_location_print_ (FILE *yyo, YYLTYPE const * const yylocp) +{ + int res = 0; + int end_col = 0 != yylocp->last_column ? 
yylocp->last_column - 1 : 0; + if (0 <= yylocp->first_line) + { + res += YYFPRINTF (yyo, "%d", yylocp->first_line); + if (0 <= yylocp->first_column) + res += YYFPRINTF (yyo, ".%d", yylocp->first_column); + } + if (0 <= yylocp->last_line) + { + if (yylocp->first_line < yylocp->last_line) + { + res += YYFPRINTF (yyo, "-%d", yylocp->last_line); + if (0 <= end_col) + res += YYFPRINTF (yyo, ".%d", end_col); + } + else if (0 <= end_col && yylocp->first_column < end_col) + res += YYFPRINTF (yyo, "-%d", end_col); + } + return res; +} + +# define YYLOCATION_PRINT yy_location_print_ + + /* Temporary convenience wrapper in case some people defined the + undocumented and private YY_LOCATION_PRINT macros. */ +# define YY_LOCATION_PRINT(File, Loc) YYLOCATION_PRINT(File, &(Loc)) + +# else + +# define YYLOCATION_PRINT(File, Loc) ((void) 0) + /* Temporary convenience wrapper in case some people defined the + undocumented and private YY_LOCATION_PRINT macros. */ +# define YY_LOCATION_PRINT YYLOCATION_PRINT + +# endif +# endif /* !defined YYLOCATION_PRINT */]]) +]) + +# b4_yyloc_default +# ---------------- +# Expand to a possible default value for yylloc. +m4_define([b4_yyloc_default], +[[ +# if defined ]b4_api_PREFIX[LTYPE_IS_TRIVIAL && ]b4_api_PREFIX[LTYPE_IS_TRIVIAL + = { ]m4_join([, ], + m4_defn([b4_location_initial_line]), + m4_defn([b4_location_initial_column]), + m4_defn([b4_location_initial_line]), + m4_defn([b4_location_initial_column]))[ } +# endif +]]) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/d-skel.m4 b/platform/dbops/binaries/build/share/bison/skeletons/d-skel.m4 new file mode 100644 index 0000000000000000000000000000000000000000..2a38f02f2a7b8f6ef324e6a4359c9d0812ecf650 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/d-skel.m4 @@ -0,0 +1,26 @@ + -*- Autoconf -*- + +# D skeleton dispatching for Bison. + +# Copyright (C) 2018-2021 Free Software Foundation, Inc. 
+ +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +b4_glr_if( [b4_complain([%%glr-parser not supported for D])]) +b4_nondeterministic_if([b4_complain([%%nondeterministic-parser not supported for D])]) + +m4_define_default([b4_used_skeleton], [b4_skeletonsdir/[lalr1.d]]) +m4_define_default([b4_skeleton], ["b4_basename(b4_used_skeleton)"]) + +m4_include(b4_used_skeleton) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/d.m4 b/platform/dbops/binaries/build/share/bison/skeletons/d.m4 new file mode 100644 index 0000000000000000000000000000000000000000..c0632e4728bf4b4cf56754224a656afe7602a489 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/d.m4 @@ -0,0 +1,628 @@ + -*- Autoconf -*- + +# D language support for Bison + +# Copyright (C) 2018-2021 Free Software Foundation, Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. 
If not, see . + + +m4_include(b4_skeletonsdir/[c-like.m4]) + + +# b4_symbol_action(SYMBOL-NUM, ACTION) +# ------------------------------------ +# Run the action ACTION ("destructor" or "printer") for SYMBOL-NUM. +m4_define([b4_symbol_action], +[b4_symbol_if([$1], [has_$2], +[b4_dollar_pushdef([yyval], + [$1], + [], + [yyloc])dnl + _b4_symbol_case([$1])[]dnl +b4_syncline([b4_symbol([$1], [$2_line])], [b4_symbol([$1], [$2_file])])dnl +b4_symbol([$1], [$2]) +b4_syncline([@oline@], [@ofile@])dnl + break; + +b4_dollar_popdef[]dnl +])]) + + +# b4_use(EXPR) +# ------------ +# Pacify the compiler about some maybe unused value. +m4_define([b4_use], +[]) + + +# b4_sync_start(LINE, FILE) +# ------------------------- +m4_define([b4_sync_start], [[#]line $1 $2]) + + +# b4_list2(LIST1, LIST2) +# ---------------------- +# Join two lists with a comma if necessary. +m4_define([b4_list2], + [$1[]m4_ifval(m4_quote($1), [m4_ifval(m4_quote($2), [[, ]])])[]$2]) + + +# b4_percent_define_get3(DEF, PRE, POST, NOT) +# ------------------------------------------- +# Expand to the value of DEF surrounded by PRE and POST if it's %define'ed, +# otherwise NOT. +m4_define([b4_percent_define_get3], + [m4_ifval(m4_quote(b4_percent_define_get([$1])), + [$2[]b4_percent_define_get([$1])[]$3], [$4])]) + +# b4_percent_define_if_get2(ARG1, ARG2, DEF, NOT) +# ----------------------------------------------- +# Expand to the value of DEF if ARG1 or ARG2 are %define'ed, +# otherwise NOT. 
+m4_define([b4_percent_define_if_get2], + [m4_ifval(m4_quote(b4_percent_define_get([$1])), + [$3], [m4_ifval(m4_quote(b4_percent_define_get([$2])), + [$3], [$4])])]) + +# b4_percent_define_class_before_interface(CLASS, INTERFACE) +# ---------------------------------------------------------- +# Expand to a ', ' if both a class and an interface have been %define'ed +m4_define([b4_percent_define_class_before_interface], + [m4_ifval(m4_quote(b4_percent_define_get([$1])), + [m4_ifval(m4_quote(b4_percent_define_get([$2])), + [, ])])]) + + +# b4_flag_value(BOOLEAN-FLAG) +# --------------------------- +m4_define([b4_flag_value], [b4_flag_if([$1], [true], [false])]) + + +# b4_parser_class_declaration +# --------------------------- +# The declaration of the parser class ("class YYParser"), with all its +# qualifiers/annotations. +b4_percent_define_default([[api.parser.abstract]], [[false]]) +b4_percent_define_default([[api.parser.final]], [[false]]) +b4_percent_define_default([[api.parser.public]], [[false]]) + +m4_define([b4_parser_class_declaration], +[b4_percent_define_get3([api.parser.annotations], [], [ ])dnl +b4_percent_define_flag_if([api.parser.public], [public ])dnl +b4_percent_define_flag_if([api.parser.abstract], [abstract ])dnl +b4_percent_define_flag_if([api.parser.final], [final ])dnl +[class ]b4_parser_class[]dnl +b4_percent_define_if_get2([api.parser.extends], [api.parser.implements], [ : ])dnl +b4_percent_define_get([api.parser.extends])dnl +b4_percent_define_class_before_interface([api.parser.extends], [api.parser.implements])dnl +b4_percent_define_get([api.parser.implements])]) + + +# b4_lexer_if(TRUE, FALSE) +# ------------------------ +m4_define([b4_lexer_if], +[b4_percent_code_ifdef([[lexer]], [$1], [$2])]) + + +# b4_position_type_if(TRUE, FALSE) +# -------------------------------- +m4_define([b4_position_type_if], +[b4_percent_define_ifdef([[position_type]], [$1], [$2])]) + + +# b4_location_type_if(TRUE, FALSE) +# -------------------------------- 
+m4_define([b4_location_type_if], +[b4_percent_define_ifdef([[location_type]], [$1], [$2])]) + + +# b4_identification +# ----------------- +m4_define([b4_identification], +[[/** Version number for the Bison executable that generated this parser. */ + public static immutable string yy_bison_version = "]b4_version_string["; + + /** Name of the skeleton that generated this parser. */ + public static immutable string yy_bison_skeleton = ]b4_skeleton[; +]]) + + +## ------------ ## +## Data types. ## +## ------------ ## + +# b4_int_type(MIN, MAX) +# --------------------- +# Return the smallest int type able to handle numbers ranging from +# MIN to MAX (included). +m4_define([b4_int_type], +[m4_if(b4_ints_in($@, [-128], [127]), [1], [byte], + b4_ints_in($@, [-32768], [32767]), [1], [short], + [int])]) + +# b4_int_type_for(NAME) +# --------------------- +# Return the smallest int type able to handle numbers ranging from +# `NAME_min' to `NAME_max' (included). +m4_define([b4_int_type_for], +[b4_int_type($1_min, $1_max)]) + +# b4_null +# ------- +m4_define([b4_null], [null]) + + +# b4_integral_parser_table_define(NAME, DATA, COMMENT) +#----------------------------------------------------- +# Define "yy" whose contents is CONTENT. +m4_define([b4_integral_parser_table_define], +[m4_ifvaln([$3], [b4_comment([$3], [ ])])dnl +private static immutable b4_int_type_for([$2])[[]] yy$1_ = +@{ + $2 +@};dnl +]) + + +## ------------- ## +## Token kinds. ## +## ------------- ## + +m4_define([b4_symbol(-2, id)], [[YYEMPTY]]) +b4_percent_define_default([[api.token.raw]], [[true]]) + +# b4_token_enum(TOKEN-NAME, TOKEN-NUMBER) +# --------------------------------------- +# Output the definition of this token as an enum. +m4_define([b4_token_enum], +[b4_token_format([ %s = %s, +], [$1])]) + +# b4_token_enums +# -------------- +# Output the definition of the tokens as enums. +m4_define([b4_token_enums], +[/* Token kinds. 
*/ +public enum TokenKind { + ]b4_symbol(empty, id)[ = -2, +b4_symbol_foreach([b4_token_enum])dnl +} +]) + +# b4_symbol_translate(STRING) +# --------------------------- +# Used by "bison" in the array of symbol names to mark those that +# require translation. +m4_define([b4_symbol_translate], +[[_($1)]]) + + +# _b4_token_constructor_define(SYMBOL-NUM) +# ---------------------------------------- +# Define Symbol.FOO for SYMBOL-NUM. +m4_define([_b4_token_constructor_define], +[b4_token_visible_if([$1], +[[ + static auto ]b4_symbol([$1], [id])[(]b4_symbol_if([$1], [has_type], +[b4_union_if([b4_symbol([$1], [type]], +[[typeof(YYSemanticType.]b4_symbol([$1], [type])[]])) [val]])dnl +[]b4_locations_if([b4_symbol_if([$1], [has_type], [[, ]])[Location l]])[) + { + return Symbol(TokenKind.]b4_symbol([$1], [id])[]b4_symbol_if([$1], [has_type], + [[, val]])[]b4_locations_if([[, l]])[); + }]])]) + +# b4_token_constructor_define +# --------------------------- +# Define Symbol.FOO for each token kind FOO. +m4_define([b4_token_constructor_define], +[[ + /* Implementation of token constructors for each symbol type visible to + * the user. The code generates static methods that have the same names + * as the TokenKinds. + */]b4_symbol_foreach([_b4_token_constructor_define])dnl +]) + +## -------------- ## +## Symbol kinds. ## +## -------------- ## + +# b4_symbol_kind(NUM) +# ------------------- +m4_define([b4_symbol_kind], +[SymbolKind.b4_symbol_kind_base($@)]) + + +# b4_symbol_enum(SYMBOL-NUM) +# -------------------------- +# Output the definition of this symbol as an enum. +m4_define([b4_symbol_enum], +[m4_format([ %-30s %s], + m4_format([[%s = %s,]], + b4_symbol([$1], [kind_base]), + [$1]), + [b4_symbol_tag_comment([$1])])]) + + +# b4_declare_symbol_enum +# ---------------------- +# The definition of the symbol internal numbers as an enum. +# Defining YYEMPTY here is important: it forces the compiler +# to use a signed type, which matters for yytoken. 
+m4_define([b4_declare_symbol_enum], +[[ /* Symbol kinds. */ + struct SymbolKind + { + enum + { + ]b4_symbol(empty, kind_base)[ = -2, /* No symbol. */ +]b4_symbol_foreach([b4_symbol_enum])dnl +[ } + + private int yycode_; + alias yycode_ this; + + this(int code) + { + yycode_ = code; + } + + /* Return YYSTR after stripping away unnecessary quotes and + backslashes, so that it's suitable for yyerror. The heuristic is + that double-quoting is unnecessary unless the string contains an + apostrophe, a comma, or backslash (other than backslash-backslash). + YYSTR is taken from yytname. */ + final void toString(W)(W sink) const + if (isOutputRange!(W, char)) + { + immutable string[] yy_sname = @{ + ]b4_symbol_names[ + @};]b4_has_translations_if([[ + /* YYTRANSLATABLE[SYMBOL-NUM] -- Whether YY_SNAME[SYMBOL-NUM] is + internationalizable. */ + immutable ]b4_int_type_for([b4_translatable])[[] yytranslatable = @{ + ]b4_translatable[ + @};]])[ + + put(sink, yy_sname[yycode_]); + } + } +]]) + + +# b4_case(ID, CODE, [COMMENTS]) +# ----------------------------- +m4_define([b4_case], [ case $1:m4_ifval([$3], [ b4_comment([$3])]) +$2 + break;]) + + +## ---------------- ## +## Default values. ## +## ---------------- ## + +m4_define([b4_yystype], [b4_percent_define_get([[stype]])]) +b4_percent_define_default([[stype]], [[YYSemanticType]])]) + +# %name-prefix +m4_define_default([b4_prefix], [[YY]]) + +b4_percent_define_default([[api.parser.class]], [b4_prefix[]Parser])]) +m4_define([b4_parser_class], [b4_percent_define_get([[api.parser.class]])]) + +#b4_percent_define_default([[location_type]], [Location])]) +m4_define([b4_location_type], b4_percent_define_ifdef([[location_type]],[b4_percent_define_get([[location_type]])],[YYLocation])) + +#b4_percent_define_default([[position_type]], [Position])]) +m4_define([b4_position_type], b4_percent_define_ifdef([[position_type]],[b4_percent_define_get([[position_type]])],[YYPosition])) + + +## ---------------- ## +## api.value.type. 
## +## ---------------- ## + + +# ---------------------- # +# api.value.type=union. # +# ---------------------- # + +# b4_symbol_type_register(SYMBOL-NUM) +# ----------------------------------- +# Symbol SYMBOL-NUM has a type (for union) instead of a type-tag. +# Extend the definition of %union's body (b4_union_members) with a +# field of that type, and extend the symbol's "type" field to point to +# the field name, instead of the type name. +m4_define([b4_symbol_type_register], +[m4_define([b4_symbol($1, type_tag)], + [b4_symbol_if([$1], [has_id], + [b4_symbol([$1], [id])], + [yykind_[]b4_symbol([$1], [number])])])dnl +m4_append([b4_union_members], +m4_expand([m4_format([ %-40s %s], + m4_expand([b4_symbol([$1], [type]) b4_symbol([$1], [type_tag]);]), + [b4_symbol_tag_comment([$1])])])) +]) + + +# b4_type_define_tag(SYMBOL1-NUM, ...) +# ------------------------------------ +# For the batch of symbols SYMBOL1-NUM... (which all have the same +# type), enhance the %union definition for each of them, and set +# there "type" field to the field tag name, instead of the type name. +m4_define([b4_type_define_tag], +[b4_symbol_if([$1], [has_type], + [m4_map([b4_symbol_type_register], [$@])]) +]) + + +# b4_symbol_value_union(VAL, SYMBOL-NUM, [TYPE]) +# ---------------------------------------------- +# Same of b4_symbol_value, but when api.value.type=union. +m4_define([b4_symbol_value_union], +[m4_ifval([$3], + [(*($3*)(&$1))], + [m4_ifval([$2], + [b4_symbol_if([$2], [has_type], + [($1.b4_symbol([$2], [type_tag]))], + [$1])], + [$1])])]) + + +# b4_value_type_setup_union +# ------------------------- +# Setup support for api.value.type=union. Symbols are defined with a +# type instead of a union member name: build the corresponding union, +# and give the symbols their tag. 
+m4_define([b4_value_type_setup_union], +[m4_define([b4_union_members]) +b4_type_foreach([b4_type_define_tag]) +m4_copy_force([b4_symbol_value_union], [b4_symbol_value]) +]) + + +# _b4_value_type_setup_keyword +# ---------------------------- +# api.value.type is defined with a keyword/string syntax. Check if +# that is properly defined, and prepare its use. +m4_define([_b4_value_type_setup_keyword], +[b4_percent_define_check_values([[[[api.value.type]], + [[none]], + [[union]], + [[union-directive]], + [[yystype]]]])dnl +m4_case(b4_percent_define_get([[api.value.type]]), + [union], [b4_value_type_setup_union])]) + + +# b4_value_type_setup +# ------------------- +# Check if api.value.type is properly defined, and possibly prepare +# its use. +b4_define_silent([b4_value_type_setup], +[ +# Define default value. +b4_percent_define_ifdef([[api.value.type]], [], +[# %union => api.value.type=union-directive +m4_ifdef([b4_union_members], +[m4_define([b4_percent_define_kind(api.value.type)], [keyword]) +m4_define([b4_percent_define(api.value.type)], [union-directive])], +[# no tag seen => api.value.type={int} +m4_if(b4_tag_seen_flag, 0, +[m4_define([b4_percent_define_kind(api.value.type)], [code]) +m4_define([b4_percent_define(api.value.type)], [int])], +[# otherwise api.value.type=yystype +m4_define([b4_percent_define_kind(api.value.type)], [keyword]) +m4_define([b4_percent_define(api.value.type)], [yystype])])])]) + +# Set up. +m4_bmatch(b4_percent_define_get_kind([[api.value.type]]), + [keyword], [_b4_value_type_setup_keyword]) +]) + + +## ----------------- ## +## Semantic Values. ## +## ----------------- ## + + +# b4_symbol_value(VAL, [SYMBOL-NUM], [TYPE-TAG]) +# ---------------------------------------------- +# See README. FIXME: factor in c-like? 
+m4_define([b4_symbol_value], +[m4_ifval([$3], + [($1.$3)], + [m4_ifval([$2], + [b4_symbol_if([$2], [has_type], + [($1.b4_symbol([$2], [type]))], + [$1])], + [$1])])]) + +# b4_lhs_value(SYMBOL-NUM, [TYPE]) +# -------------------------------- +# See README. +m4_define([b4_lhs_value], +[b4_symbol_value([yyval], [$1], [$2])]) + + +# b4_rhs_value(RULE-LENGTH, POS, SYMBOL-NUM, [TYPE]) +# -------------------------------------------------- +# See README. +# +# In this simple implementation, %token and %type have class names +# between the angle brackets. +m4_define([b4_rhs_value], +[b4_symbol_value([(yystack.valueAt (b4_subtract([$1], [$2])))], [$3], [$4])]) + + +# b4_lhs_location() +# ----------------- +# Expansion of @$. +m4_define([b4_lhs_location], +[(yyloc)]) + + +# b4_rhs_location(RULE-LENGTH, POS) +# --------------------------------- +# Expansion of @POS, where the current rule has RULE-LENGTH symbols +# on RHS. +m4_define([b4_rhs_location], +[yystack.locationAt (b4_subtract($@))]) + + +# b4_lex_param +# b4_parse_param +# -------------- +# If defined, b4_lex_param arrives double quoted, but below we prefer +# it to be single quoted. Same for b4_parse_param. + +# TODO: should be in bison.m4 +m4_define_default([b4_lex_param], [[]])) +m4_define([b4_lex_param], b4_lex_param)) +m4_define([b4_parse_param], b4_parse_param)) + +# b4_lex_param_decl +# ------------------- +# Extra formal arguments of the constructor. +m4_define([b4_lex_param_decl], +[m4_ifset([b4_lex_param], + [b4_remove_comma([$1], + b4_param_decls(b4_lex_param))], + [$1])]) + +m4_define([b4_param_decls], + [m4_map([b4_param_decl], [$@])]) +m4_define([b4_param_decl], [, $1]) + +m4_define([b4_remove_comma], [m4_ifval(m4_quote($1), [$1, ], [])m4_shift2($@)]) + + + +# b4_parse_param_decl +# ------------------- +# Extra formal arguments of the constructor. 
+m4_define([b4_parse_param_decl], +[m4_ifset([b4_parse_param], + [b4_remove_comma([$1], + b4_param_decls(b4_parse_param))], + [$1])]) + + + +# b4_lex_param_call +# ------------------- +# Delegating the lexer parameters to the lexer constructor. +m4_define([b4_lex_param_call], + [m4_ifset([b4_lex_param], + [b4_remove_comma([$1], + b4_param_calls(b4_lex_param))], + [$1])]) +m4_define([b4_param_calls], + [m4_map([b4_param_call], [$@])]) +m4_define([b4_param_call], [, $2]) + + + +# b4_parse_param_cons +# ------------------- +# Extra initialisations of the constructor. +m4_define([b4_parse_param_cons], + [m4_ifset([b4_parse_param], + [b4_constructor_calls(b4_parse_param)])]) + +m4_define([b4_constructor_calls], + [m4_map([b4_constructor_call], [$@])]) +m4_define([b4_constructor_call], + [this.$2 = $2; + ]) + + + +# b4_parse_param_vars +# ------------------- +# Extra instance variables. +m4_define([b4_parse_param_vars], + [m4_ifset([b4_parse_param], + [ + /* User arguments. */ +b4_var_decls(b4_parse_param)])]) + +m4_define([b4_var_decls], + [m4_map_sep([b4_var_decl], [ +], [$@])]) +m4_define([b4_var_decl], + [ protected $1;]) + + +# b4_public_types_declare +# ----------------------- +# Define the public types: token, semantic value, location, and so forth. +# Depending on %define token_lex, may be output in the header or source file. +m4_define([b4_public_types_declare], +[[ +alias Symbol = ]b4_parser_class[.Symbol; +alias Value = ]b4_yystype[;]b4_locations_if([[ +alias Location = ]b4_location_type[; +alias Position = ]b4_position_type[;]b4_push_if([[ +alias PUSH_MORE = ]b4_parser_class[.YYPUSH_MORE; +alias ABORT = ]b4_parser_class[.YYABORT; +alias ACCEPT = ]b4_parser_class[.YYACCEPT;]])[]])[ +]]) + + +# b4_basic_symbol_constructor_define +# ---------------------------------- +# Create Symbol struct constructors for all the visible types. 
+m4_define([b4_basic_symbol_constructor_define], +[b4_token_visible_if([$1], +[[ this(TokenKind token]b4_symbol_if([$1], [has_type], +[[, ]b4_union_if([], [[typeof(YYSemanticType.]])b4_symbol([$1], [type])dnl +[]b4_union_if([], [[) ]])[ val]])[]b4_locations_if([[, Location loc]])[) + { + kind = yytranslate_(token);]b4_union_if([b4_symbol_if([$1], [has_type], [[ + static foreach (member; __traits(allMembers, YYSemanticType)) + { + static if (is(typeof(mixin("value_." ~ member)) == ]b4_symbol([$1], [type])[)) + { + mixin("value_." ~ member ~ " = val;"); + } + }]])], [b4_symbol_if([$1], [has_type], [[ + value_.]b4_symbol([$1], [type])[ = val;]])])[]b4_locations_if([ + location_ = loc;])[ + } +]])]) + + +# b4_symbol_type_define +# --------------------- +# Define symbol_type, the external type for symbols used for symbol +# constructors. +m4_define([b4_symbol_type_define], +[[ + /** + * A complete symbol + */ + struct Symbol + { + private SymbolKind kind; + private Value value_;]b4_locations_if([[ + private Location location_;]])[ + +]b4_type_foreach([b4_basic_symbol_constructor_define])[ + SymbolKind token() { return kind; } + Value value() { return value_; }]b4_locations_if([[ + Location location() { return location_; }]])[ +]b4_token_ctor_if([b4_token_constructor_define])[ + } +]]) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/glr.c b/platform/dbops/binaries/build/share/bison/skeletons/glr.c new file mode 100644 index 0000000000000000000000000000000000000000..fab3733f1fdf2cc2f1ca6878171ab40fa0f227c4 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/glr.c @@ -0,0 +1,2763 @@ +# -*- C -*- + +# GLR skeleton for Bison + +# Copyright (C) 2002-2015, 2018-2021 Free Software Foundation, Inc. 
+ +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# If we are loaded by glr.cc, do not override c++.m4 definitions by +# those of c.m4. +m4_if(b4_skeleton, ["glr.c"], + [m4_include(b4_skeletonsdir/[c.m4])]) + + +## ---------------- ## +## Default values. ## +## ---------------- ## + +# Stack parameters. +m4_define_default([b4_stack_depth_max], [10000]) +m4_define_default([b4_stack_depth_init], [200]) + +# Included header. +b4_percent_define_default([[api.header.include]], + [["@basename(]b4_spec_header_file[@)"]]) + +## ------------------------ ## +## Pure/impure interfaces. ## +## ------------------------ ## + +b4_define_flag_if([pure]) +# If glr.cc is including this file and thus has already set b4_pure_flag, +# do not change the value of b4_pure_flag, and do not record a use of api.pure. +m4_ifndef([b4_pure_flag], +[b4_percent_define_default([[api.pure]], [[false]]) + m4_define([b4_pure_flag], + [b4_percent_define_flag_if([[api.pure]], [[1]], [[0]])])]) + +# b4_yyerror_args +# --------------- +# Optional effective arguments passed to yyerror: user args plus yylloc, and +# a trailing comma. +m4_define([b4_yyerror_args], +[b4_pure_if([b4_locations_if([yylocp, ])])dnl +m4_ifset([b4_parse_param], [b4_args(b4_parse_param), ])]) + + +# b4_lyyerror_args +# ---------------- +# Same as above, but on the lookahead, hence &yylloc instead of yylocp. 
+m4_define([b4_lyyerror_args], +[b4_pure_if([b4_locations_if([&yylloc, ])])dnl +m4_ifset([b4_parse_param], [b4_args(b4_parse_param), ])]) + + +# b4_pure_args +# ------------ +# Same as b4_yyerror_args, but with a leading comma. +m4_define([b4_pure_args], +[b4_pure_if([b4_locations_if([, yylocp])])[]b4_user_args]) + + +# b4_lpure_args +# ------------- +# Same as above, but on the lookahead, hence &yylloc instead of yylocp. +m4_define([b4_lpure_args], +[b4_pure_if([b4_locations_if([, &yylloc])])[]b4_user_args]) + + + +# b4_pure_formals +# --------------- +# Arguments passed to yyerror: user formals plus yylocp with leading comma. +m4_define([b4_pure_formals], +[b4_pure_if([b4_locations_if([, YYLTYPE *yylocp])])[]b4_user_formals]) + + +# b4_locuser_formals(LOC = yylocp) +# -------------------------------- +# User formal arguments, possibly preceded by location argument. +m4_define([b4_locuser_formals], +[b4_locations_if([, YYLTYPE *m4_default([$1], [yylocp])])[]b4_user_formals]) + + +# b4_locuser_args(LOC = yylocp) +# ----------------------------- +m4_define([b4_locuser_args], +[b4_locations_if([, m4_default([$1], [yylocp])])[]b4_user_args]) + + + +## ----------------- ## +## Semantic Values. ## +## ----------------- ## + + +# b4_lhs_value(SYMBOL-NUM, [TYPE]) +# -------------------------------- +# See README. +m4_define([b4_lhs_value], +[b4_symbol_value([(*yyvalp)], [$1], [$2])]) + + +# b4_rhs_data(RULE-LENGTH, POS) +# ----------------------------- +# See README. +m4_define([b4_rhs_data], +[YY_CAST (yyGLRStackItem const *, yyvsp)@{YYFILL (b4_subtract([$2], [$1]))@}.yystate]) + + +# b4_rhs_value(RULE-LENGTH, POS, SYMBOL-NUM, [TYPE]) +# -------------------------------------------------- +# Expansion of $$ or $$, for symbol SYMBOL-NUM. +m4_define([b4_rhs_value], +[b4_symbol_value([b4_rhs_data([$1], [$2]).yysemantics.yyval], [$3], [$4])]) + + + +## ----------- ## +## Locations. ## +## ----------- ## + +# b4_lhs_location() +# ----------------- +# Expansion of @$. 
+m4_define([b4_lhs_location], +[(*yylocp)]) + + +# b4_rhs_location(RULE-LENGTH, NUM) +# --------------------------------- +# Expansion of @NUM, where the current rule has RULE-LENGTH symbols +# on RHS. +m4_define([b4_rhs_location], +[(b4_rhs_data([$1], [$2]).yyloc)]) + + +# b4_call_merger(MERGER-NUM, MERGER-NAME, SYMBOL-SUM) +# --------------------------------------------------- +m4_define([b4_call_merger], +[b4_case([$1], + [ b4_symbol_if([$3], [has_type], + [yy0->b4_symbol($3, slot) = $2 (*yy0, *yy1);], + [*yy0 = $2 (*yy0, *yy1);])])]) + + +## -------------- ## +## Declarations. ## +## -------------- ## + +# b4_shared_declarations +# ---------------------- +# Declaration that might either go into the header (if --header) +# or open coded in the parser body. glr.cc has its own definition. +m4_if(b4_skeleton, ["glr.c"], +[m4_define([b4_shared_declarations], +[b4_declare_yydebug[ +]b4_percent_code_get([[requires]])[ +]b4_token_enums[ +]b4_declare_yylstype[ +int ]b4_prefix[parse (]m4_ifset([b4_parse_param], [b4_formals(b4_parse_param)], [void])[); +]b4_percent_code_get([[provides]])[]dnl +]) +]) + +## -------------- ## +## Output files. ## +## -------------- ## + +# Unfortunately the order of generation between the header and the +# implementation file matters (for glr.c) because of the current +# implementation of api.value.type=union. In that case we still use a +# union for YYSTYPE, but we generate the contents of this union when +# setting up YYSTYPE. This is needed for other aspects, such as +# defining yy_symbol_value_print, since we need to now the name of the +# members of this union. +# +# To avoid this issue, just generate the header before the +# implementation file. But we should also make them more independent. + +# ----------------- # +# The header file. # +# ----------------- # + +# glr.cc produces its own header. 
+b4_glr_cc_if([], +[b4_header_if( +[b4_output_begin([b4_spec_header_file]) +b4_copyright([Skeleton interface for Bison GLR parsers in C], + [2002-2015, 2018-2021])[ +]b4_cpp_guard_open([b4_spec_mapped_header_file])[ +]b4_shared_declarations[ +]b4_cpp_guard_close([b4_spec_mapped_header_file])[ +]b4_output_end[ +]])]) + + +# ------------------------- # +# The implementation file. # +# ------------------------- # + +b4_output_begin([b4_parser_file_name]) +b4_copyright([Skeleton implementation for Bison GLR parsers in C], + [2002-2015, 2018-2021])[ +/* C GLR parser skeleton written by Paul Hilfinger. */ + +]b4_disclaimer[ +]b4_identification[ + +]b4_percent_code_get([[top]])[ +]m4_if(b4_api_prefix, [yy], [], +[[/* Substitute the type names. */ +#define YYSTYPE ]b4_api_PREFIX[STYPE]b4_locations_if([[ +#define YYLTYPE ]b4_api_PREFIX[LTYPE]])])[ +]m4_if(b4_prefix, [yy], [], +[[/* Substitute the variable and function names. */ +#define ]b4_glr_cc_if([yy_parse_impl], [yyparse])[ ]b4_prefix[]b4_glr_cc_if([_parse_impl], [parse])[ +#define yylex ]b4_prefix[lex +#define yyerror ]b4_prefix[error +#define yydebug ]b4_prefix[debug]]b4_pure_if([], [[ +#define yylval ]b4_prefix[lval +#define yychar ]b4_prefix[char +#define yynerrs ]b4_prefix[nerrs]b4_locations_if([[ +#define yylloc ]b4_prefix[lloc]])]))[ + +]b4_user_pre_prologue[ +]b4_cast_define[ +]b4_null_define[ + +]b4_header_if([[#include ]b4_percent_define_get([[api.header.include]])], + [b4_shared_declarations])[ + +]b4_glr_cc_if([b4_glr_cc_setup], + [b4_declare_symbol_enum])[ + +/* Default (constant) value used for initialization for null + right-hand sides. Unlike the standard yacc.c template, here we set + the default value of $$ to a zeroed-out value. Since the default + value is undefined, this behavior is technically correct. 
*/ +static YYSTYPE yyval_default;]b4_locations_if([[ +static YYLTYPE yyloc_default][]b4_yyloc_default;])[ + +]b4_user_post_prologue[ +]b4_percent_code_get[]dnl + +[#include +#include +#include +#include +#include + +]b4_c99_int_type_define[ +]b4_sizes_types_define[ + +#ifndef YY_ +# if defined YYENABLE_NLS && YYENABLE_NLS +# if ENABLE_NLS +# include /* INFRINGES ON USER NAME SPACE */ +# define YY_(Msgid) dgettext ("bison-runtime", Msgid) +# endif +# endif +# ifndef YY_ +# define YY_(Msgid) Msgid +# endif +#endif +]b4_has_translations_if([ +#ifndef N_ +# define N_(Msgid) Msgid +#endif +])[ + +#ifndef YYFREE +# define YYFREE free +#endif +#ifndef YYMALLOC +# define YYMALLOC malloc +#endif +#ifndef YYREALLOC +# define YYREALLOC realloc +#endif + +#ifdef __cplusplus + typedef bool yybool; +# define yytrue true +# define yyfalse false +#else + /* When we move to stdbool, get rid of the various casts to yybool. */ + typedef signed char yybool; +# define yytrue 1 +# define yyfalse 0 +#endif + +#ifndef YYSETJMP +# include +# define YYJMP_BUF jmp_buf +# define YYSETJMP(Env) setjmp (Env) +/* Pacify Clang and ICC. */ +# define YYLONGJMP(Env, Val) \ + do { \ + longjmp (Env, Val); \ + YY_ASSERT (0); \ + } while (yyfalse) +#endif + +]b4_attribute_define([noreturn])[ + +]b4_parse_assert_if([[#ifdef NDEBUG +# define YY_ASSERT(E) ((void) (0 && (E))) +#else +# include /* INFRINGES ON USER NAME SPACE */ +# define YY_ASSERT(E) assert (E) +#endif +]], +[[#define YY_ASSERT(E) ((void) (0 && (E)))]])[ + +/* YYFINAL -- State number of the termination state. */ +#define YYFINAL ]b4_final_state_number[ +/* YYLAST -- Last index in YYTABLE. */ +#define YYLAST ]b4_last[ + +/* YYNTOKENS -- Number of terminals. */ +#define YYNTOKENS ]b4_tokens_number[ +/* YYNNTS -- Number of nonterminals. */ +#define YYNNTS ]b4_nterms_number[ +/* YYNRULES -- Number of rules. */ +#define YYNRULES ]b4_rules_number[ +/* YYNSTATES -- Number of states. 
*/ +#define YYNSTATES ]b4_states_number[ +/* YYMAXRHS -- Maximum number of symbols on right-hand side of rule. */ +#define YYMAXRHS ]b4_r2_max[ +/* YYMAXLEFT -- Maximum number of symbols to the left of a handle + accessed by $0, $-1, etc., in any rule. */ +#define YYMAXLEFT ]b4_max_left_semantic_context[ + +/* YYMAXUTOK -- Last valid token kind. */ +#define YYMAXUTOK ]b4_code_max[ + +/* YYTRANSLATE(TOKEN-NUM) -- Symbol number corresponding to TOKEN-NUM + as returned by yylex, with out-of-bounds checking. */ +]b4_api_token_raw_if(dnl +[[#define YYTRANSLATE(YYX) YY_CAST (yysymbol_kind_t, YYX)]], +[[#define YYTRANSLATE(YYX) \ + (0 <= (YYX) && (YYX) <= YYMAXUTOK \ + ? YY_CAST (yysymbol_kind_t, yytranslate[YYX]) \ + : ]b4_symbol_prefix[YYUNDEF) + +/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM + as returned by yylex. */ +static const ]b4_int_type_for([b4_translate])[ yytranslate[] = +{ + ]b4_translate[ +};]])[ + +#if ]b4_api_PREFIX[DEBUG +/* YYRLINE[YYN] -- source line where rule number YYN was defined. */ +static const ]b4_int_type_for([b4_rline])[ yyrline[] = +{ + ]b4_rline[ +}; +#endif + +#define YYPACT_NINF (]b4_pact_ninf[) +#define YYTABLE_NINF (]b4_table_ninf[) + +]b4_parser_tables_define[ + +/* YYDPREC[RULE-NUM] -- Dynamic precedence of rule #RULE-NUM (0 if none). */ +static const ]b4_int_type_for([b4_dprec])[ yydprec[] = +{ + ]b4_dprec[ +}; + +/* YYMERGER[RULE-NUM] -- Index of merging function for rule #RULE-NUM. */ +static const ]b4_int_type_for([b4_merger])[ yymerger[] = +{ + ]b4_merger[ +}; + +/* YYIMMEDIATE[RULE-NUM] -- True iff rule #RULE-NUM is not to be deferred, as + in the case of predicates. */ +static const yybool yyimmediate[] = +{ + ]b4_immediate[ +}; + +/* YYCONFLP[YYPACT[STATE-NUM]] -- Pointer into YYCONFL of start of + list of conflicting reductions corresponding to action entry for + state STATE-NUM in yytable. 0 means no conflicts. The list in + yyconfl is terminated by a rule number of 0. 
*/ +static const ]b4_int_type_for([b4_conflict_list_heads])[ yyconflp[] = +{ + ]b4_conflict_list_heads[ +}; + +/* YYCONFL[I] -- lists of conflicting rule numbers, each terminated by + 0, pointed into by YYCONFLP. */ +]dnl Do not use b4_int_type_for here, since there are places where +dnl pointers onto yyconfl are taken, whose type is "short*". +dnl We probably ought to introduce a type for confl. +[static const short yyconfl[] = +{ + ]b4_conflicting_rules[ +}; + +]b4_locations_if([[ +]b4_yylloc_default_define[ +# define YYRHSLOC(Rhs, K) ((Rhs)[K].yystate.yyloc) +]])[ + +]b4_pure_if( +[ +#undef yynerrs +#define yynerrs (yystackp->yyerrcnt) +#undef yychar +#define yychar (yystackp->yyrawchar) +#undef yylval +#define yylval (yystackp->yyval) +#undef yylloc +#define yylloc (yystackp->yyloc) +m4_if(b4_prefix[], [yy], [], +[#define b4_prefix[]nerrs yynerrs +#define b4_prefix[]char yychar +#define b4_prefix[]lval yylval +#define b4_prefix[]lloc yylloc])], +[YYSTYPE yylval;]b4_locations_if([[ +YYLTYPE yylloc;]])[ + +int yynerrs; +int yychar;])[ + +enum { YYENOMEM = -2 }; + +typedef enum { yyok, yyaccept, yyabort, yyerr, yynomem } YYRESULTTAG; + +#define YYCHK(YYE) \ + do { \ + YYRESULTTAG yychk_flag = YYE; \ + if (yychk_flag != yyok) \ + return yychk_flag; \ + } while (0) + +/* YYINITDEPTH -- initial size of the parser's stacks. */ +#ifndef YYINITDEPTH +# define YYINITDEPTH ]b4_stack_depth_init[ +#endif + +/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only + if the built-in stack extension method is used). + + Do not make this value too large; the results are undefined if + SIZE_MAX < YYMAXDEPTH * sizeof (GLRStackItem) + evaluated with infinite-precision integer arithmetic. */ + +#ifndef YYMAXDEPTH +# define YYMAXDEPTH ]b4_stack_depth_max[ +#endif + +/* Minimum number of free items on the stack allowed after an + allocation. 
This is to allow allocation and initialization + to be completed by functions that call yyexpandGLRStack before the + stack is expanded, thus insuring that all necessary pointers get + properly redirected to new data. */ +#define YYHEADROOM 2 + +#ifndef YYSTACKEXPANDABLE +# define YYSTACKEXPANDABLE 1 +#endif + +#if YYSTACKEXPANDABLE +# define YY_RESERVE_GLRSTACK(Yystack) \ + do { \ + if (Yystack->yyspaceLeft < YYHEADROOM) \ + yyexpandGLRStack (Yystack); \ + } while (0) +#else +# define YY_RESERVE_GLRSTACK(Yystack) \ + do { \ + if (Yystack->yyspaceLeft < YYHEADROOM) \ + yyMemoryExhausted (Yystack); \ + } while (0) +#endif + +/** State numbers. */ +typedef int yy_state_t; + +/** Rule numbers. */ +typedef int yyRuleNum; + +/** Item references. */ +typedef short yyItemNum; + +typedef struct yyGLRState yyGLRState; +typedef struct yyGLRStateSet yyGLRStateSet; +typedef struct yySemanticOption yySemanticOption; +typedef union yyGLRStackItem yyGLRStackItem; +typedef struct yyGLRStack yyGLRStack; + +struct yyGLRState +{ + /** Type tag: always true. */ + yybool yyisState; + /** Type tag for yysemantics. If true, yyval applies, otherwise + * yyfirstVal applies. */ + yybool yyresolved; + /** Number of corresponding LALR(1) machine state. */ + yy_state_t yylrState; + /** Preceding state in this stack */ + yyGLRState* yypred; + /** Source position of the last token produced by my symbol */ + YYPTRDIFF_T yyposn; + union { + /** First in a chain of alternative reductions producing the + * nonterminal corresponding to this state, threaded through + * yynext. */ + yySemanticOption* yyfirstVal; + /** Semantic value for this state. */ + YYSTYPE yyval; + } yysemantics;]b4_locations_if([[ + /** Source location for this state. */ + YYLTYPE yyloc;]])[ +}; + +struct yyGLRStateSet +{ + yyGLRState** yystates; + /** During nondeterministic operation, yylookaheadNeeds tracks which + * stacks have actually needed the current lookahead. 
During deterministic + * operation, yylookaheadNeeds[0] is not maintained since it would merely + * duplicate yychar != ]b4_symbol(empty, id)[. */ + yybool* yylookaheadNeeds; + YYPTRDIFF_T yysize; + YYPTRDIFF_T yycapacity; +}; + +struct yySemanticOption +{ + /** Type tag: always false. */ + yybool yyisState; + /** Rule number for this reduction */ + yyRuleNum yyrule; + /** The last RHS state in the list of states to be reduced. */ + yyGLRState* yystate; + /** The lookahead for this reduction. */ + int yyrawchar; + YYSTYPE yyval;]b4_locations_if([[ + YYLTYPE yyloc;]])[ + /** Next sibling in chain of options. To facilitate merging, + * options are chained in decreasing order by address. */ + yySemanticOption* yynext; +}; + +/** Type of the items in the GLR stack. The yyisState field + * indicates which item of the union is valid. */ +union yyGLRStackItem { + yyGLRState yystate; + yySemanticOption yyoption; +}; + +struct yyGLRStack { + int yyerrState; +]b4_locations_if([[ /* To compute the location of the error token. */ + yyGLRStackItem yyerror_range[3];]])[ +]b4_pure_if( +[ + int yyerrcnt; + int yyrawchar; + YYSTYPE yyval;]b4_locations_if([[ + YYLTYPE yyloc;]])[ +])[ + YYJMP_BUF yyexception_buffer; + yyGLRStackItem* yyitems; + yyGLRStackItem* yynextFree; + YYPTRDIFF_T yyspaceLeft; + yyGLRState* yysplitPoint; + yyGLRState* yylastDeleted; + yyGLRStateSet yytops; +}; + +#if YYSTACKEXPANDABLE +static void yyexpandGLRStack (yyGLRStack* yystackp); +#endif + +_Noreturn static void +yyFail (yyGLRStack* yystackp]b4_pure_formals[, const char* yymsg) +{ + if (yymsg != YY_NULLPTR) + yyerror (]b4_yyerror_args[yymsg); + YYLONGJMP (yystackp->yyexception_buffer, 1); +} + +_Noreturn static void +yyMemoryExhausted (yyGLRStack* yystackp) +{ + YYLONGJMP (yystackp->yyexception_buffer, 2); +} + +/** Accessing symbol of state YYSTATE. 
*/ +static inline yysymbol_kind_t +yy_accessing_symbol (yy_state_t yystate) +{ + return YY_CAST (yysymbol_kind_t, yystos[yystate]); +} + +#if ]b4_parse_error_case([simple], [b4_api_PREFIX[DEBUG || ]b4_token_table_flag], [[1]])[ +/* The user-facing name of the symbol whose (internal) number is + YYSYMBOL. No bounds checking. */ +static const char *yysymbol_name (yysymbol_kind_t yysymbol) YY_ATTRIBUTE_UNUSED; + +]b4_parse_error_bmatch([simple\|verbose], +[[/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. + First, the terminals, then, starting at YYNTOKENS, nonterminals. */ +static const char *const yytname[] = +{ + ]b4_tname[ +}; + +static const char * +yysymbol_name (yysymbol_kind_t yysymbol) +{ + return yytname[yysymbol]; +}]], +[[static const char * +yysymbol_name (yysymbol_kind_t yysymbol) +{ + static const char *const yy_sname[] = + { + ]b4_symbol_names[ + };]b4_has_translations_if([[ + /* YYTRANSLATABLE[SYMBOL-NUM] -- Whether YY_SNAME[SYMBOL-NUM] is + internationalizable. */ + static ]b4_int_type_for([b4_translatable])[ yytranslatable[] = + { + ]b4_translatable[ + }; + return (yysymbol < YYNTOKENS && yytranslatable[yysymbol] + ? _(yy_sname[yysymbol]) + : yy_sname[yysymbol]);]], [[ + return yy_sname[yysymbol];]])[ +}]])[ +#endif + +/** Left-hand-side symbol for rule #YYRULE. 
*/ +static inline yysymbol_kind_t +yylhsNonterm (yyRuleNum yyrule) +{ + return YY_CAST (yysymbol_kind_t, yyr1[yyrule]); +} + +#if ]b4_api_PREFIX[DEBUG + +# ifndef YYFPRINTF +# define YYFPRINTF fprintf +# endif + +# define YY_FPRINTF \ + YY_IGNORE_USELESS_CAST_BEGIN YY_FPRINTF_ + +# define YY_FPRINTF_(Args) \ + do { \ + YYFPRINTF Args; \ + YY_IGNORE_USELESS_CAST_END \ + } while (0) + +# define YY_DPRINTF \ + YY_IGNORE_USELESS_CAST_BEGIN YY_DPRINTF_ + +# define YY_DPRINTF_(Args) \ + do { \ + if (yydebug) \ + YYFPRINTF Args; \ + YY_IGNORE_USELESS_CAST_END \ + } while (0) + +]b4_yylocation_print_define[ + +]b4_yy_symbol_print_define[ + +# define YY_SYMBOL_PRINT(Title, Kind, Value, Location) \ + do { \ + if (yydebug) \ + { \ + YY_FPRINTF ((stderr, "%s ", Title)); \ + yy_symbol_print (stderr, Kind, Value]b4_locuser_args([Location])[); \ + YY_FPRINTF ((stderr, "\n")); \ + } \ + } while (0) + +static inline void +yy_reduce_print (yybool yynormal, yyGLRStackItem* yyvsp, YYPTRDIFF_T yyk, + yyRuleNum yyrule]b4_user_formals[); + +# define YY_REDUCE_PRINT(Args) \ + do { \ + if (yydebug) \ + yy_reduce_print Args; \ + } while (0) + +/* Nonzero means print parse trace. It is left uninitialized so that + multiple parsers can coexist. 
*/ +int yydebug; + +static void yypstack (yyGLRStack* yystackp, YYPTRDIFF_T yyk) + YY_ATTRIBUTE_UNUSED; +static void yypdumpstack (yyGLRStack* yystackp) + YY_ATTRIBUTE_UNUSED; + +#else /* !]b4_api_PREFIX[DEBUG */ + +# define YY_DPRINTF(Args) do {} while (yyfalse) +# define YY_SYMBOL_PRINT(Title, Kind, Value, Location) +# define YY_REDUCE_PRINT(Args) + +#endif /* !]b4_api_PREFIX[DEBUG */ + +]b4_parse_error_case( + [simple], +[[]], +[[#ifndef yystrlen +# define yystrlen(S) (YY_CAST (YYPTRDIFF_T, strlen (S))) +#endif + +]b4_parse_error_bmatch( + [detailed\|verbose], +[[#ifndef yystpcpy +# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE +# define yystpcpy stpcpy +# else +/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in + YYDEST. */ +static char * +yystpcpy (char *yydest, const char *yysrc) +{ + char *yyd = yydest; + const char *yys = yysrc; + + while ((*yyd++ = *yys++) != '\0') + continue; + + return yyd - 1; +} +# endif +#endif]])[ + +]b4_parse_error_case( + [verbose], +[[#ifndef yytnamerr +/* Copy to YYRES the contents of YYSTR after stripping away unnecessary + quotes and backslashes, so that it's suitable for yyerror. The + heuristic is that double-quoting is unnecessary unless the string + contains an apostrophe, a comma, or backslash (other than + backslash-backslash). YYSTR is taken from yytname. If YYRES is + null, do not copy; instead, return the length of what the result + would have been. 
*/ +static YYPTRDIFF_T +yytnamerr (char *yyres, const char *yystr) +{ + if (*yystr == '"') + { + YYPTRDIFF_T yyn = 0; + char const *yyp = yystr; + + for (;;) + switch (*++yyp) + { + case '\'': + case ',': + goto do_not_strip_quotes; + + case '\\': + if (*++yyp != '\\') + goto do_not_strip_quotes; + else + goto append; + + append: + default: + if (yyres) + yyres[yyn] = *yyp; + yyn++; + break; + + case '"': + if (yyres) + yyres[yyn] = '\0'; + return yyn; + } + do_not_strip_quotes: ; + } + + if (yyres) + return yystpcpy (yyres, yystr) - yyres; + else + return yystrlen (yystr); +} +#endif +]])])[ + +/** Fill in YYVSP[YYLOW1 .. YYLOW0-1] from the chain of states starting + * at YYVSP[YYLOW0].yystate.yypred. Leaves YYVSP[YYLOW1].yystate.yypred + * containing the pointer to the next state in the chain. */ +static void yyfillin (yyGLRStackItem *, int, int) YY_ATTRIBUTE_UNUSED; +static void +yyfillin (yyGLRStackItem *yyvsp, int yylow0, int yylow1) +{ + int i; + yyGLRState *s = yyvsp[yylow0].yystate.yypred; + for (i = yylow0-1; i >= yylow1; i -= 1) + { +#if ]b4_api_PREFIX[DEBUG + yyvsp[i].yystate.yylrState = s->yylrState; +#endif + yyvsp[i].yystate.yyresolved = s->yyresolved; + if (s->yyresolved) + yyvsp[i].yystate.yysemantics.yyval = s->yysemantics.yyval; + else + /* The effect of using yyval or yyloc (in an immediate rule) is + * undefined. */ + yyvsp[i].yystate.yysemantics.yyfirstVal = YY_NULLPTR;]b4_locations_if([[ + yyvsp[i].yystate.yyloc = s->yyloc;]])[ + s = yyvsp[i].yystate.yypred = s->yypred; + } +} + +]m4_define([b4_yygetToken_call], + [[yygetToken (&yychar][]b4_pure_if([, yystackp])[]b4_user_args[)]])[ +/** If yychar is empty, fetch the next token. 
*/ +static inline yysymbol_kind_t +yygetToken (int *yycharp][]b4_pure_if([, yyGLRStack* yystackp])[]b4_user_formals[) +{ + yysymbol_kind_t yytoken; +]b4_parse_param_use()dnl +[ if (*yycharp == ]b4_symbol(empty, id)[) + { + YY_DPRINTF ((stderr, "Reading a token\n"));]b4_glr_cc_if([[ +#if YY_EXCEPTIONS + try + { +#endif // YY_EXCEPTIONS + *yycharp = ]b4_yylex[; +#if YY_EXCEPTIONS + } + catch (const ]b4_namespace_ref[::]b4_parser_class[::syntax_error& yyexc) + { + YY_DPRINTF ((stderr, "Caught exception: %s\n", yyexc.what()));]b4_locations_if([ + yylloc = yyexc.location;])[ + yyerror (]b4_lyyerror_args[yyexc.what ()); + // Map errors caught in the scanner to the undefined token, + // so that error handling is started. However, record this + // with this special value of yychar. + *yycharp = ]b4_symbol(error, id)[; + } +#endif // YY_EXCEPTIONS]], [[ + *yycharp = ]b4_yylex[;]])[ + } + if (*yycharp <= ]b4_symbol(eof, [id])[) + { + *yycharp = ]b4_symbol(eof, [id])[; + yytoken = ]b4_symbol_prefix[YYEOF; + YY_DPRINTF ((stderr, "Now at end of input.\n")); + } + else + { + yytoken = YYTRANSLATE (*yycharp); + YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); + } + return yytoken; +} + +/* Do nothing if YYNORMAL or if *YYLOW <= YYLOW1. Otherwise, fill in + * YYVSP[YYLOW1 .. *YYLOW-1] as in yyfillin and set *YYLOW = YYLOW1. + * For convenience, always return YYLOW1. */ +static inline int yyfill (yyGLRStackItem *, int *, int, yybool) + YY_ATTRIBUTE_UNUSED; +static inline int +yyfill (yyGLRStackItem *yyvsp, int *yylow, int yylow1, yybool yynormal) +{ + if (!yynormal && yylow1 < *yylow) + { + yyfillin (yyvsp, *yylow, yylow1); + *yylow = yylow1; + } + return yylow1; +} + +/** Perform user action for rule number YYN, with RHS length YYRHSLEN, + * and top stack item YYVSP. YYLVALP points to place to put semantic + * value ($$), and yylocp points to place for location information + * (@@$). 
Returns yyok for normal return, yyaccept for YYACCEPT, + * yyerr for YYERROR, yyabort for YYABORT, yynomem for YYNOMEM. */ +static YYRESULTTAG +yyuserAction (yyRuleNum yyrule, int yyrhslen, yyGLRStackItem* yyvsp, + yyGLRStack* yystackp, YYPTRDIFF_T yyk, + YYSTYPE* yyvalp]b4_locuser_formals[) +{ + const yybool yynormal YY_ATTRIBUTE_UNUSED = yystackp->yysplitPoint == YY_NULLPTR; + int yylow = 1; +]b4_parse_param_use([yyvalp], [yylocp])dnl +[ YY_USE (yyk); + YY_USE (yyrhslen); +# undef yyerrok +# define yyerrok (yystackp->yyerrState = 0) +# undef YYACCEPT +# define YYACCEPT return yyaccept +# undef YYABORT +# define YYABORT return yyabort +# undef YYNOMEM +# define YYNOMEM return yynomem +# undef YYERROR +# define YYERROR return yyerrok, yyerr +# undef YYRECOVERING +# define YYRECOVERING() (yystackp->yyerrState != 0) +# undef yyclearin +# define yyclearin (yychar = ]b4_symbol(empty, id)[) +# undef YYFILL +# define YYFILL(N) yyfill (yyvsp, &yylow, (N), yynormal) +# undef YYBACKUP +# define YYBACKUP(Token, Value) \ + return yyerror (]b4_yyerror_args[YY_("syntax error: cannot back up")), \ + yyerrok, yyerr + + if (yyrhslen == 0) + *yyvalp = yyval_default; + else + *yyvalp = yyvsp[YYFILL (1-yyrhslen)].yystate.yysemantics.yyval;]b4_locations_if([[ + /* Default location. */ + YYLLOC_DEFAULT ((*yylocp), (yyvsp - yyrhslen), yyrhslen); + yystackp->yyerror_range[1].yystate.yyloc = *yylocp;]])[ + /* If yyk == -1, we are running a deferred action on a temporary + stack. In that case, YY_REDUCE_PRINT must not play with YYFILL, + so pretend the stack is "normal". 
*/ + YY_REDUCE_PRINT ((yynormal || yyk == -1, yyvsp, yyk, yyrule]b4_user_args[));]b4_glr_cc_if([[ +#if YY_EXCEPTIONS + typedef ]b4_namespace_ref[::]b4_parser_class[::syntax_error syntax_error; + try + { +#endif // YY_EXCEPTIONS]])[ + switch (yyrule) + { +]b4_user_actions[ + default: break; + }]b4_glr_cc_if([[ +#if YY_EXCEPTIONS + } + catch (const syntax_error& yyexc) + { + YY_DPRINTF ((stderr, "Caught exception: %s\n", yyexc.what()));]b4_locations_if([ + *yylocp = yyexc.location;])[ + yyerror (]b4_yyerror_args[yyexc.what ()); + YYERROR; + } +#endif // YY_EXCEPTIONS]])[ + YY_SYMBOL_PRINT ("-> $$ =", yylhsNonterm (yyrule), yyvalp, yylocp); + + return yyok; +# undef yyerrok +# undef YYABORT +# undef YYACCEPT +# undef YYNOMEM +# undef YYERROR +# undef YYBACKUP +# undef yyclearin +# undef YYRECOVERING +} + + +static void +yyuserMerge (int yyn, YYSTYPE* yy0, YYSTYPE* yy1) +{ + YY_USE (yy0); + YY_USE (yy1); + + switch (yyn) + { +]b4_mergers[ + default: break; + } +} + + /* Bison grammar-table manipulation. */ + +]b4_yydestruct_define[ + +/** Number of symbols composing the right hand side of rule #RULE. 
*/ +static inline int +yyrhsLength (yyRuleNum yyrule) +{ + return yyr2[yyrule]; +} + +static void +yydestroyGLRState (char const *yymsg, yyGLRState *yys]b4_user_formals[) +{ + if (yys->yyresolved) + yydestruct (yymsg, yy_accessing_symbol (yys->yylrState), + &yys->yysemantics.yyval]b4_locuser_args([&yys->yyloc])[); + else + { +#if ]b4_api_PREFIX[DEBUG + if (yydebug) + { + if (yys->yysemantics.yyfirstVal) + YY_FPRINTF ((stderr, "%s unresolved", yymsg)); + else + YY_FPRINTF ((stderr, "%s incomplete", yymsg)); + YY_SYMBOL_PRINT ("", yy_accessing_symbol (yys->yylrState), YY_NULLPTR, &yys->yyloc); + } +#endif + + if (yys->yysemantics.yyfirstVal) + { + yySemanticOption *yyoption = yys->yysemantics.yyfirstVal; + yyGLRState *yyrh; + int yyn; + for (yyrh = yyoption->yystate, yyn = yyrhsLength (yyoption->yyrule); + yyn > 0; + yyrh = yyrh->yypred, yyn -= 1) + yydestroyGLRState (yymsg, yyrh]b4_user_args[); + } + } +} + +#define yypact_value_is_default(Yyn) \ + ]b4_table_value_equals([[pact]], [[Yyn]], [b4_pact_ninf], [YYPACT_NINF])[ + +/** True iff LR state YYSTATE has only a default reduction (regardless + * of token). */ +static inline yybool +yyisDefaultedState (yy_state_t yystate) +{ + return yypact_value_is_default (yypact[yystate]); +} + +/** The default reduction for YYSTATE, assuming it has one. */ +static inline yyRuleNum +yydefaultAction (yy_state_t yystate) +{ + return yydefact[yystate]; +} + +#define yytable_value_is_error(Yyn) \ + ]b4_table_value_equals([[table]], [[Yyn]], [b4_table_ninf], [YYTABLE_NINF])[ + +/** The action to take in YYSTATE on seeing YYTOKEN. + * Result R means + * R < 0: Reduce on rule -R. + * R = 0: Error. + * R > 0: Shift to state R. + * Set *YYCONFLICTS to a pointer into yyconfl to a 0-terminated list + * of conflicting reductions. 
+ */ +static inline int +yygetLRActions (yy_state_t yystate, yysymbol_kind_t yytoken, const short** yyconflicts) +{ + int yyindex = yypact[yystate] + yytoken; + if (yytoken == ]b4_symbol(error, kind)[) + { + // This is the error token. + *yyconflicts = yyconfl; + return 0; + } + else if (yyisDefaultedState (yystate) + || yyindex < 0 || YYLAST < yyindex || yycheck[yyindex] != yytoken) + { + *yyconflicts = yyconfl; + return -yydefact[yystate]; + } + else if (! yytable_value_is_error (yytable[yyindex])) + { + *yyconflicts = yyconfl + yyconflp[yyindex]; + return yytable[yyindex]; + } + else + { + *yyconflicts = yyconfl + yyconflp[yyindex]; + return 0; + } +} + +/** Compute post-reduction state. + * \param yystate the current state + * \param yysym the nonterminal to push on the stack + */ +static inline yy_state_t +yyLRgotoState (yy_state_t yystate, yysymbol_kind_t yysym) +{ + int yyr = yypgoto[yysym - YYNTOKENS] + yystate; + if (0 <= yyr && yyr <= YYLAST && yycheck[yyr] == yystate) + return yytable[yyr]; + else + return yydefgoto[yysym - YYNTOKENS]; +} + +static inline yybool +yyisShiftAction (int yyaction) +{ + return 0 < yyaction; +} + +static inline yybool +yyisErrorAction (int yyaction) +{ + return yyaction == 0; +} + + /* GLRStates */ + +/** Return a fresh GLRStackItem in YYSTACKP. The item is an LR state + * if YYISSTATE, and otherwise a semantic option. Callers should call + * YY_RESERVE_GLRSTACK afterwards to make sure there is sufficient + * headroom. */ + +static inline yyGLRStackItem* +yynewGLRStackItem (yyGLRStack* yystackp, yybool yyisState) +{ + yyGLRStackItem* yynewItem = yystackp->yynextFree; + yystackp->yyspaceLeft -= 1; + yystackp->yynextFree += 1; + yynewItem->yystate.yyisState = yyisState; + return yynewItem; +} + +/** Add a new semantic action that will execute the action for rule + * YYRULE on the semantic values in YYRHS to the list of + * alternative actions for YYSTATE. Assumes that YYRHS comes from + * stack #YYK of *YYSTACKP. 
*/ +static void +yyaddDeferredAction (yyGLRStack* yystackp, YYPTRDIFF_T yyk, yyGLRState* yystate, + yyGLRState* yyrhs, yyRuleNum yyrule) +{ + yySemanticOption* yynewOption = + &yynewGLRStackItem (yystackp, yyfalse)->yyoption; + YY_ASSERT (!yynewOption->yyisState); + yynewOption->yystate = yyrhs; + yynewOption->yyrule = yyrule; + if (yystackp->yytops.yylookaheadNeeds[yyk]) + { + yynewOption->yyrawchar = yychar; + yynewOption->yyval = yylval;]b4_locations_if([ + yynewOption->yyloc = yylloc;])[ + } + else + yynewOption->yyrawchar = ]b4_symbol(empty, id)[; + yynewOption->yynext = yystate->yysemantics.yyfirstVal; + yystate->yysemantics.yyfirstVal = yynewOption; + + YY_RESERVE_GLRSTACK (yystackp); +} + + /* GLRStacks */ + +/** Initialize YYSET to a singleton set containing an empty stack. */ +static yybool +yyinitStateSet (yyGLRStateSet* yyset) +{ + yyset->yysize = 1; + yyset->yycapacity = 16; + yyset->yystates + = YY_CAST (yyGLRState**, + YYMALLOC (YY_CAST (YYSIZE_T, yyset->yycapacity) + * sizeof yyset->yystates[0])); + if (! yyset->yystates) + return yyfalse; + yyset->yystates[0] = YY_NULLPTR; + yyset->yylookaheadNeeds + = YY_CAST (yybool*, + YYMALLOC (YY_CAST (YYSIZE_T, yyset->yycapacity) + * sizeof yyset->yylookaheadNeeds[0])); + if (! yyset->yylookaheadNeeds) + { + YYFREE (yyset->yystates); + return yyfalse; + } + memset (yyset->yylookaheadNeeds, + 0, + YY_CAST (YYSIZE_T, yyset->yycapacity) * sizeof yyset->yylookaheadNeeds[0]); + return yytrue; +} + +static void yyfreeStateSet (yyGLRStateSet* yyset) +{ + YYFREE (yyset->yystates); + YYFREE (yyset->yylookaheadNeeds); +} + +/** Initialize *YYSTACKP to a single empty stack, with total maximum + * capacity for all stacks of YYSIZE. 
*/ +static yybool +yyinitGLRStack (yyGLRStack* yystackp, YYPTRDIFF_T yysize) +{ + yystackp->yyerrState = 0; + yynerrs = 0; + yystackp->yyspaceLeft = yysize; + yystackp->yyitems + = YY_CAST (yyGLRStackItem*, + YYMALLOC (YY_CAST (YYSIZE_T, yysize) + * sizeof yystackp->yynextFree[0])); + if (!yystackp->yyitems) + return yyfalse; + yystackp->yynextFree = yystackp->yyitems; + yystackp->yysplitPoint = YY_NULLPTR; + yystackp->yylastDeleted = YY_NULLPTR; + return yyinitStateSet (&yystackp->yytops); +} + + +#if YYSTACKEXPANDABLE +# define YYRELOC(YYFROMITEMS, YYTOITEMS, YYX, YYTYPE) \ + &((YYTOITEMS) \ + - ((YYFROMITEMS) - YY_REINTERPRET_CAST (yyGLRStackItem*, (YYX))))->YYTYPE + +/** If *YYSTACKP is expandable, extend it. WARNING: Pointers into the + stack from outside should be considered invalid after this call. + We always expand when there are 1 or fewer items left AFTER an + allocation, so that we can avoid having external pointers exist + across an allocation. */ +static void +yyexpandGLRStack (yyGLRStack* yystackp) +{ + yyGLRStackItem* yynewItems; + yyGLRStackItem* yyp0, *yyp1; + YYPTRDIFF_T yynewSize; + YYPTRDIFF_T yyn; + YYPTRDIFF_T yysize = yystackp->yynextFree - yystackp->yyitems; + if (YYMAXDEPTH - YYHEADROOM < yysize) + yyMemoryExhausted (yystackp); + yynewSize = 2*yysize; + if (YYMAXDEPTH < yynewSize) + yynewSize = YYMAXDEPTH; + yynewItems + = YY_CAST (yyGLRStackItem*, + YYMALLOC (YY_CAST (YYSIZE_T, yynewSize) + * sizeof yynewItems[0])); + if (! yynewItems) + yyMemoryExhausted (yystackp); + for (yyp0 = yystackp->yyitems, yyp1 = yynewItems, yyn = yysize; + 0 < yyn; + yyn -= 1, yyp0 += 1, yyp1 += 1) + { + *yyp1 = *yyp0; + if (*YY_REINTERPRET_CAST (yybool *, yyp0)) + { + yyGLRState* yys0 = &yyp0->yystate; + yyGLRState* yys1 = &yyp1->yystate; + if (yys0->yypred != YY_NULLPTR) + yys1->yypred = + YYRELOC (yyp0, yyp1, yys0->yypred, yystate); + if (! 
yys0->yyresolved && yys0->yysemantics.yyfirstVal != YY_NULLPTR) + yys1->yysemantics.yyfirstVal = + YYRELOC (yyp0, yyp1, yys0->yysemantics.yyfirstVal, yyoption); + } + else + { + yySemanticOption* yyv0 = &yyp0->yyoption; + yySemanticOption* yyv1 = &yyp1->yyoption; + if (yyv0->yystate != YY_NULLPTR) + yyv1->yystate = YYRELOC (yyp0, yyp1, yyv0->yystate, yystate); + if (yyv0->yynext != YY_NULLPTR) + yyv1->yynext = YYRELOC (yyp0, yyp1, yyv0->yynext, yyoption); + } + } + if (yystackp->yysplitPoint != YY_NULLPTR) + yystackp->yysplitPoint = YYRELOC (yystackp->yyitems, yynewItems, + yystackp->yysplitPoint, yystate); + + for (yyn = 0; yyn < yystackp->yytops.yysize; yyn += 1) + if (yystackp->yytops.yystates[yyn] != YY_NULLPTR) + yystackp->yytops.yystates[yyn] = + YYRELOC (yystackp->yyitems, yynewItems, + yystackp->yytops.yystates[yyn], yystate); + YYFREE (yystackp->yyitems); + yystackp->yyitems = yynewItems; + yystackp->yynextFree = yynewItems + yysize; + yystackp->yyspaceLeft = yynewSize - yysize; +} +#endif + +static void +yyfreeGLRStack (yyGLRStack* yystackp) +{ + YYFREE (yystackp->yyitems); + yyfreeStateSet (&yystackp->yytops); +} + +/** Assuming that YYS is a GLRState somewhere on *YYSTACKP, update the + * splitpoint of *YYSTACKP, if needed, so that it is at least as deep as + * YYS. */ +static inline void +yyupdateSplit (yyGLRStack* yystackp, yyGLRState* yys) +{ + if (yystackp->yysplitPoint != YY_NULLPTR && yystackp->yysplitPoint > yys) + yystackp->yysplitPoint = yys; +} + +/** Invalidate stack #YYK in *YYSTACKP. */ +static inline void +yymarkStackDeleted (yyGLRStack* yystackp, YYPTRDIFF_T yyk) +{ + if (yystackp->yytops.yystates[yyk] != YY_NULLPTR) + yystackp->yylastDeleted = yystackp->yytops.yystates[yyk]; + yystackp->yytops.yystates[yyk] = YY_NULLPTR; +} + +/** Undelete the last stack in *YYSTACKP that was marked as deleted. Can + only be done once after a deletion, and only when all other stacks have + been deleted. 
*/ +static void +yyundeleteLastStack (yyGLRStack* yystackp) +{ + if (yystackp->yylastDeleted == YY_NULLPTR || yystackp->yytops.yysize != 0) + return; + yystackp->yytops.yystates[0] = yystackp->yylastDeleted; + yystackp->yytops.yysize = 1; + YY_DPRINTF ((stderr, "Restoring last deleted stack as stack #0.\n")); + yystackp->yylastDeleted = YY_NULLPTR; +} + +static inline void +yyremoveDeletes (yyGLRStack* yystackp) +{ + YYPTRDIFF_T yyi, yyj; + yyi = yyj = 0; + while (yyj < yystackp->yytops.yysize) + { + if (yystackp->yytops.yystates[yyi] == YY_NULLPTR) + { + if (yyi == yyj) + YY_DPRINTF ((stderr, "Removing dead stacks.\n")); + yystackp->yytops.yysize -= 1; + } + else + { + yystackp->yytops.yystates[yyj] = yystackp->yytops.yystates[yyi]; + /* In the current implementation, it's unnecessary to copy + yystackp->yytops.yylookaheadNeeds[yyi] since, after + yyremoveDeletes returns, the parser immediately either enters + deterministic operation or shifts a token. However, it doesn't + hurt, and the code might evolve to need it. */ + yystackp->yytops.yylookaheadNeeds[yyj] = + yystackp->yytops.yylookaheadNeeds[yyi]; + if (yyj != yyi) + YY_DPRINTF ((stderr, "Rename stack %ld -> %ld.\n", + YY_CAST (long, yyi), YY_CAST (long, yyj))); + yyj += 1; + } + yyi += 1; + } +} + +/** Shift to a new state on stack #YYK of *YYSTACKP, corresponding to LR + * state YYLRSTATE, at input position YYPOSN, with (resolved) semantic + * value *YYVALP and source location *YYLOCP. 
*/ +static inline void +yyglrShift (yyGLRStack* yystackp, YYPTRDIFF_T yyk, yy_state_t yylrState, + YYPTRDIFF_T yyposn, + YYSTYPE* yyvalp]b4_locations_if([, YYLTYPE* yylocp])[) +{ + yyGLRState* yynewState = &yynewGLRStackItem (yystackp, yytrue)->yystate; + + yynewState->yylrState = yylrState; + yynewState->yyposn = yyposn; + yynewState->yyresolved = yytrue; + yynewState->yypred = yystackp->yytops.yystates[yyk]; + yynewState->yysemantics.yyval = *yyvalp;]b4_locations_if([ + yynewState->yyloc = *yylocp;])[ + yystackp->yytops.yystates[yyk] = yynewState; + + YY_RESERVE_GLRSTACK (yystackp); +} + +/** Shift stack #YYK of *YYSTACKP, to a new state corresponding to LR + * state YYLRSTATE, at input position YYPOSN, with the (unresolved) + * semantic value of YYRHS under the action for YYRULE. */ +static inline void +yyglrShiftDefer (yyGLRStack* yystackp, YYPTRDIFF_T yyk, yy_state_t yylrState, + YYPTRDIFF_T yyposn, yyGLRState* yyrhs, yyRuleNum yyrule) +{ + yyGLRState* yynewState = &yynewGLRStackItem (yystackp, yytrue)->yystate; + YY_ASSERT (yynewState->yyisState); + + yynewState->yylrState = yylrState; + yynewState->yyposn = yyposn; + yynewState->yyresolved = yyfalse; + yynewState->yypred = yystackp->yytops.yystates[yyk]; + yynewState->yysemantics.yyfirstVal = YY_NULLPTR; + yystackp->yytops.yystates[yyk] = yynewState; + + /* Invokes YY_RESERVE_GLRSTACK. */ + yyaddDeferredAction (yystackp, yyk, yynewState, yyrhs, yyrule); +} + +#if ]b4_api_PREFIX[DEBUG + +/*----------------------------------------------------------------------. +| Report that stack #YYK of *YYSTACKP is going to be reduced by YYRULE. 
| +`----------------------------------------------------------------------*/ + +static inline void +yy_reduce_print (yybool yynormal, yyGLRStackItem* yyvsp, YYPTRDIFF_T yyk, + yyRuleNum yyrule]b4_user_formals[) +{ + int yynrhs = yyrhsLength (yyrule);]b4_locations_if([ + int yylow = 1;])[ + int yyi; + YY_FPRINTF ((stderr, "Reducing stack %ld by rule %d (line %d):\n", + YY_CAST (long, yyk), yyrule - 1, yyrline[yyrule])); + if (! yynormal) + yyfillin (yyvsp, 1, -yynrhs); + /* The symbols being reduced. */ + for (yyi = 0; yyi < yynrhs; yyi++) + { + YY_FPRINTF ((stderr, " $%d = ", yyi + 1)); + yy_symbol_print (stderr, + yy_accessing_symbol (yyvsp[yyi - yynrhs + 1].yystate.yylrState), + &yyvsp[yyi - yynrhs + 1].yystate.yysemantics.yyval]b4_locations_if([, + &]b4_rhs_location(yynrhs, yyi + 1))[]dnl + b4_user_args[); + if (!yyvsp[yyi - yynrhs + 1].yystate.yyresolved) + YY_FPRINTF ((stderr, " (unresolved)")); + YY_FPRINTF ((stderr, "\n")); + } +} +#endif + +/** Pop the symbols consumed by reduction #YYRULE from the top of stack + * #YYK of *YYSTACKP, and perform the appropriate semantic action on their + * semantic values. Assumes that all ambiguities in semantic values + * have been previously resolved. Set *YYVALP to the resulting value, + * and *YYLOCP to the computed location (if any). Return value is as + * for userAction. */ +static inline YYRESULTTAG +yydoAction (yyGLRStack* yystackp, YYPTRDIFF_T yyk, yyRuleNum yyrule, + YYSTYPE* yyvalp]b4_locuser_formals[) +{ + int yynrhs = yyrhsLength (yyrule); + + if (yystackp->yysplitPoint == YY_NULLPTR) + { + /* Standard special case: single stack. 
*/ + yyGLRStackItem* yyrhs + = YY_REINTERPRET_CAST (yyGLRStackItem*, yystackp->yytops.yystates[yyk]); + YY_ASSERT (yyk == 0); + yystackp->yynextFree -= yynrhs; + yystackp->yyspaceLeft += yynrhs; + yystackp->yytops.yystates[0] = & yystackp->yynextFree[-1].yystate; + return yyuserAction (yyrule, yynrhs, yyrhs, yystackp, yyk, + yyvalp]b4_locuser_args[); + } + else + { + yyGLRStackItem yyrhsVals[YYMAXRHS + YYMAXLEFT + 1]; + yyGLRState* yys = yyrhsVals[YYMAXRHS + YYMAXLEFT].yystate.yypred + = yystackp->yytops.yystates[yyk]; + int yyi;]b4_locations_if([[ + if (yynrhs == 0) + /* Set default location. */ + yyrhsVals[YYMAXRHS + YYMAXLEFT - 1].yystate.yyloc = yys->yyloc;]])[ + for (yyi = 0; yyi < yynrhs; yyi += 1) + { + yys = yys->yypred; + YY_ASSERT (yys); + } + yyupdateSplit (yystackp, yys); + yystackp->yytops.yystates[yyk] = yys; + return yyuserAction (yyrule, yynrhs, yyrhsVals + YYMAXRHS + YYMAXLEFT - 1, + yystackp, yyk, yyvalp]b4_locuser_args[); + } +} + +/** Pop items off stack #YYK of *YYSTACKP according to grammar rule YYRULE, + * and push back on the resulting nonterminal symbol. Perform the + * semantic action associated with YYRULE and store its value with the + * newly pushed state, if YYFORCEEVAL or if *YYSTACKP is currently + * unambiguous. Otherwise, store the deferred semantic action with + * the new state. If the new state would have an identical input + * position, LR state, and predecessor to an existing state on the stack, + * it is identified with that existing state, eliminating stack #YYK from + * *YYSTACKP. In this case, the semantic value is + * added to the options for the existing state's semantic value. 
+ */ +static inline YYRESULTTAG +yyglrReduce (yyGLRStack* yystackp, YYPTRDIFF_T yyk, yyRuleNum yyrule, + yybool yyforceEval]b4_user_formals[) +{ + YYPTRDIFF_T yyposn = yystackp->yytops.yystates[yyk]->yyposn; + + if (yyforceEval || yystackp->yysplitPoint == YY_NULLPTR) + { + YYSTYPE yyval;]b4_locations_if([[ + YYLTYPE yyloc;]])[ + + YYRESULTTAG yyflag = yydoAction (yystackp, yyk, yyrule, &yyval]b4_locuser_args([&yyloc])[); + if (yyflag == yyerr && yystackp->yysplitPoint != YY_NULLPTR) + YY_DPRINTF ((stderr, + "Parse on stack %ld rejected by rule %d (line %d).\n", + YY_CAST (long, yyk), yyrule - 1, yyrline[yyrule])); + if (yyflag != yyok) + return yyflag; + yyglrShift (yystackp, yyk, + yyLRgotoState (yystackp->yytops.yystates[yyk]->yylrState, + yylhsNonterm (yyrule)), + yyposn, &yyval]b4_locations_if([, &yyloc])[); + } + else + { + YYPTRDIFF_T yyi; + int yyn; + yyGLRState* yys, *yys0 = yystackp->yytops.yystates[yyk]; + yy_state_t yynewLRState; + + for (yys = yystackp->yytops.yystates[yyk], yyn = yyrhsLength (yyrule); + 0 < yyn; yyn -= 1) + { + yys = yys->yypred; + YY_ASSERT (yys); + } + yyupdateSplit (yystackp, yys); + yynewLRState = yyLRgotoState (yys->yylrState, yylhsNonterm (yyrule)); + YY_DPRINTF ((stderr, + "Reduced stack %ld by rule %d (line %d); action deferred. 
" + "Now in state %d.\n", + YY_CAST (long, yyk), yyrule - 1, yyrline[yyrule], + yynewLRState)); + for (yyi = 0; yyi < yystackp->yytops.yysize; yyi += 1) + if (yyi != yyk && yystackp->yytops.yystates[yyi] != YY_NULLPTR) + { + yyGLRState *yysplit = yystackp->yysplitPoint; + yyGLRState *yyp = yystackp->yytops.yystates[yyi]; + while (yyp != yys && yyp != yysplit && yyp->yyposn >= yyposn) + { + if (yyp->yylrState == yynewLRState && yyp->yypred == yys) + { + yyaddDeferredAction (yystackp, yyk, yyp, yys0, yyrule); + yymarkStackDeleted (yystackp, yyk); + YY_DPRINTF ((stderr, "Merging stack %ld into stack %ld.\n", + YY_CAST (long, yyk), YY_CAST (long, yyi))); + return yyok; + } + yyp = yyp->yypred; + } + } + yystackp->yytops.yystates[yyk] = yys; + yyglrShiftDefer (yystackp, yyk, yynewLRState, yyposn, yys0, yyrule); + } + return yyok; +} + +static YYPTRDIFF_T +yysplitStack (yyGLRStack* yystackp, YYPTRDIFF_T yyk) +{ + if (yystackp->yysplitPoint == YY_NULLPTR) + { + YY_ASSERT (yyk == 0); + yystackp->yysplitPoint = yystackp->yytops.yystates[yyk]; + } + if (yystackp->yytops.yycapacity <= yystackp->yytops.yysize) + { + YYPTRDIFF_T state_size = YYSIZEOF (yystackp->yytops.yystates[0]); + YYPTRDIFF_T half_max_capacity = YYSIZE_MAXIMUM / 2 / state_size; + if (half_max_capacity < yystackp->yytops.yycapacity) + yyMemoryExhausted (yystackp); + yystackp->yytops.yycapacity *= 2; + + { + yyGLRState** yynewStates + = YY_CAST (yyGLRState**, + YYREALLOC (yystackp->yytops.yystates, + (YY_CAST (YYSIZE_T, yystackp->yytops.yycapacity) + * sizeof yynewStates[0]))); + if (yynewStates == YY_NULLPTR) + yyMemoryExhausted (yystackp); + yystackp->yytops.yystates = yynewStates; + } + + { + yybool* yynewLookaheadNeeds + = YY_CAST (yybool*, + YYREALLOC (yystackp->yytops.yylookaheadNeeds, + (YY_CAST (YYSIZE_T, yystackp->yytops.yycapacity) + * sizeof yynewLookaheadNeeds[0]))); + if (yynewLookaheadNeeds == YY_NULLPTR) + yyMemoryExhausted (yystackp); + yystackp->yytops.yylookaheadNeeds = yynewLookaheadNeeds; + 
} + } + yystackp->yytops.yystates[yystackp->yytops.yysize] + = yystackp->yytops.yystates[yyk]; + yystackp->yytops.yylookaheadNeeds[yystackp->yytops.yysize] + = yystackp->yytops.yylookaheadNeeds[yyk]; + yystackp->yytops.yysize += 1; + return yystackp->yytops.yysize - 1; +} + +/** True iff YYY0 and YYY1 represent identical options at the top level. + * That is, they represent the same rule applied to RHS symbols + * that produce the same terminal symbols. */ +static yybool +yyidenticalOptions (yySemanticOption* yyy0, yySemanticOption* yyy1) +{ + if (yyy0->yyrule == yyy1->yyrule) + { + yyGLRState *yys0, *yys1; + int yyn; + for (yys0 = yyy0->yystate, yys1 = yyy1->yystate, + yyn = yyrhsLength (yyy0->yyrule); + yyn > 0; + yys0 = yys0->yypred, yys1 = yys1->yypred, yyn -= 1) + if (yys0->yyposn != yys1->yyposn) + return yyfalse; + return yytrue; + } + else + return yyfalse; +} + +/** Assuming identicalOptions (YYY0,YYY1), destructively merge the + * alternative semantic values for the RHS-symbols of YYY1 and YYY0. 
*/ +static void +yymergeOptionSets (yySemanticOption* yyy0, yySemanticOption* yyy1) +{ + yyGLRState *yys0, *yys1; + int yyn; + for (yys0 = yyy0->yystate, yys1 = yyy1->yystate, + yyn = yyrhsLength (yyy0->yyrule); + 0 < yyn; + yys0 = yys0->yypred, yys1 = yys1->yypred, yyn -= 1) + { + if (yys0 == yys1) + break; + else if (yys0->yyresolved) + { + yys1->yyresolved = yytrue; + yys1->yysemantics.yyval = yys0->yysemantics.yyval; + } + else if (yys1->yyresolved) + { + yys0->yyresolved = yytrue; + yys0->yysemantics.yyval = yys1->yysemantics.yyval; + } + else + { + yySemanticOption** yyz0p = &yys0->yysemantics.yyfirstVal; + yySemanticOption* yyz1 = yys1->yysemantics.yyfirstVal; + while (yytrue) + { + if (yyz1 == *yyz0p || yyz1 == YY_NULLPTR) + break; + else if (*yyz0p == YY_NULLPTR) + { + *yyz0p = yyz1; + break; + } + else if (*yyz0p < yyz1) + { + yySemanticOption* yyz = *yyz0p; + *yyz0p = yyz1; + yyz1 = yyz1->yynext; + (*yyz0p)->yynext = yyz; + } + yyz0p = &(*yyz0p)->yynext; + } + yys1->yysemantics.yyfirstVal = yys0->yysemantics.yyfirstVal; + } + } +} + +/** Y0 and Y1 represent two possible actions to take in a given + * parsing state; return 0 if no combination is possible, + * 1 if user-mergeable, 2 if Y0 is preferred, 3 if Y1 is preferred. */ +static int +yypreference (yySemanticOption* y0, yySemanticOption* y1) +{ + yyRuleNum r0 = y0->yyrule, r1 = y1->yyrule; + int p0 = yydprec[r0], p1 = yydprec[r1]; + + if (p0 == p1) + { + if (yymerger[r0] == 0 || yymerger[r0] != yymerger[r1]) + return 0; + else + return 1; + } + if (p0 == 0 || p1 == 0) + return 0; + if (p0 < p1) + return 3; + if (p1 < p0) + return 2; + return 0; +} + +static YYRESULTTAG +yyresolveValue (yyGLRState* yys, yyGLRStack* yystackp]b4_user_formals[); + + +/** Resolve the previous YYN states starting at and including state YYS + * on *YYSTACKP. If result != yyok, some states may have been left + * unresolved possibly with empty semantic option chains. 
Regardless + * of whether result = yyok, each state has been left with consistent + * data so that yydestroyGLRState can be invoked if necessary. */ +static YYRESULTTAG +yyresolveStates (yyGLRState* yys, int yyn, + yyGLRStack* yystackp]b4_user_formals[) +{ + if (0 < yyn) + { + YY_ASSERT (yys->yypred); + YYCHK (yyresolveStates (yys->yypred, yyn-1, yystackp]b4_user_args[)); + if (! yys->yyresolved) + YYCHK (yyresolveValue (yys, yystackp]b4_user_args[)); + } + return yyok; +} + +/** Resolve the states for the RHS of YYOPT on *YYSTACKP, perform its + * user action, and return the semantic value and location in *YYVALP + * and *YYLOCP. Regardless of whether result = yyok, all RHS states + * have been destroyed (assuming the user action destroys all RHS + * semantic values if invoked). */ +static YYRESULTTAG +yyresolveAction (yySemanticOption* yyopt, yyGLRStack* yystackp, + YYSTYPE* yyvalp]b4_locuser_formals[) +{ + yyGLRStackItem yyrhsVals[YYMAXRHS + YYMAXLEFT + 1]; + int yynrhs = yyrhsLength (yyopt->yyrule); + YYRESULTTAG yyflag = + yyresolveStates (yyopt->yystate, yynrhs, yystackp]b4_user_args[); + if (yyflag != yyok) + { + yyGLRState *yys; + for (yys = yyopt->yystate; yynrhs > 0; yys = yys->yypred, yynrhs -= 1) + yydestroyGLRState ("Cleanup: popping", yys]b4_user_args[); + return yyflag; + } + + yyrhsVals[YYMAXRHS + YYMAXLEFT].yystate.yypred = yyopt->yystate;]b4_locations_if([[ + if (yynrhs == 0) + /* Set default location. 
*/ + yyrhsVals[YYMAXRHS + YYMAXLEFT - 1].yystate.yyloc = yyopt->yystate->yyloc;]])[ + { + int yychar_current = yychar; + YYSTYPE yylval_current = yylval;]b4_locations_if([ + YYLTYPE yylloc_current = yylloc;])[ + yychar = yyopt->yyrawchar; + yylval = yyopt->yyval;]b4_locations_if([ + yylloc = yyopt->yyloc;])[ + yyflag = yyuserAction (yyopt->yyrule, yynrhs, + yyrhsVals + YYMAXRHS + YYMAXLEFT - 1, + yystackp, -1, yyvalp]b4_locuser_args[); + yychar = yychar_current; + yylval = yylval_current;]b4_locations_if([ + yylloc = yylloc_current;])[ + } + return yyflag; +} + +#if ]b4_api_PREFIX[DEBUG +static void +yyreportTree (yySemanticOption* yyx, int yyindent) +{ + int yynrhs = yyrhsLength (yyx->yyrule); + int yyi; + yyGLRState* yys; + yyGLRState* yystates[1 + YYMAXRHS]; + yyGLRState yyleftmost_state; + + for (yyi = yynrhs, yys = yyx->yystate; 0 < yyi; yyi -= 1, yys = yys->yypred) + yystates[yyi] = yys; + if (yys == YY_NULLPTR) + { + yyleftmost_state.yyposn = 0; + yystates[0] = &yyleftmost_state; + } + else + yystates[0] = yys; + + if (yyx->yystate->yyposn < yys->yyposn + 1) + YY_FPRINTF ((stderr, "%*s%s -> \n", + yyindent, "", yysymbol_name (yylhsNonterm (yyx->yyrule)), + yyx->yyrule - 1)); + else + YY_FPRINTF ((stderr, "%*s%s -> \n", + yyindent, "", yysymbol_name (yylhsNonterm (yyx->yyrule)), + yyx->yyrule - 1, YY_CAST (long, yys->yyposn + 1), + YY_CAST (long, yyx->yystate->yyposn))); + for (yyi = 1; yyi <= yynrhs; yyi += 1) + { + if (yystates[yyi]->yyresolved) + { + if (yystates[yyi-1]->yyposn+1 > yystates[yyi]->yyposn) + YY_FPRINTF ((stderr, "%*s%s \n", yyindent+2, "", + yysymbol_name (yy_accessing_symbol (yystates[yyi]->yylrState)))); + else + YY_FPRINTF ((stderr, "%*s%s \n", yyindent+2, "", + yysymbol_name (yy_accessing_symbol (yystates[yyi]->yylrState)), + YY_CAST (long, yystates[yyi-1]->yyposn + 1), + YY_CAST (long, yystates[yyi]->yyposn))); + } + else + yyreportTree (yystates[yyi]->yysemantics.yyfirstVal, yyindent+2); + } +} +#endif + +static YYRESULTTAG 
+yyreportAmbiguity (yySemanticOption* yyx0, + yySemanticOption* yyx1]b4_pure_formals[) +{ + YY_USE (yyx0); + YY_USE (yyx1); + +#if ]b4_api_PREFIX[DEBUG + YY_FPRINTF ((stderr, "Ambiguity detected.\n")); + YY_FPRINTF ((stderr, "Option 1,\n")); + yyreportTree (yyx0, 2); + YY_FPRINTF ((stderr, "\nOption 2,\n")); + yyreportTree (yyx1, 2); + YY_FPRINTF ((stderr, "\n")); +#endif + + yyerror (]b4_yyerror_args[YY_("syntax is ambiguous")); + return yyabort; +}]b4_locations_if([[ + +/** Resolve the locations for each of the YYN1 states in *YYSTACKP, + * ending at YYS1. Has no effect on previously resolved states. + * The first semantic option of a state is always chosen. */ +static void +yyresolveLocations (yyGLRState *yys1, int yyn1, + yyGLRStack *yystackp]b4_user_formals[) +{ + if (0 < yyn1) + { + yyresolveLocations (yys1->yypred, yyn1 - 1, yystackp]b4_user_args[); + if (!yys1->yyresolved) + { + yyGLRStackItem yyrhsloc[1 + YYMAXRHS]; + int yynrhs; + yySemanticOption *yyoption = yys1->yysemantics.yyfirstVal; + YY_ASSERT (yyoption); + yynrhs = yyrhsLength (yyoption->yyrule); + if (0 < yynrhs) + { + yyGLRState *yys; + int yyn; + yyresolveLocations (yyoption->yystate, yynrhs, + yystackp]b4_user_args[); + for (yys = yyoption->yystate, yyn = yynrhs; + yyn > 0; + yys = yys->yypred, yyn -= 1) + yyrhsloc[yyn].yystate.yyloc = yys->yyloc; + } + else + { + /* Both yyresolveAction and yyresolveLocations traverse the GSS + in reverse rightmost order. It is only necessary to invoke + yyresolveLocations on a subforest for which yyresolveAction + would have been invoked next had an ambiguity not been + detected. Thus the location of the previous state (but not + necessarily the previous state itself) is guaranteed to be + resolved already. 
*/ + yyGLRState *yyprevious = yyoption->yystate; + yyrhsloc[0].yystate.yyloc = yyprevious->yyloc; + } + YYLLOC_DEFAULT ((yys1->yyloc), yyrhsloc, yynrhs); + } + } +}]])[ + +/** Resolve the ambiguity represented in state YYS in *YYSTACKP, + * perform the indicated actions, and set the semantic value of YYS. + * If result != yyok, the chain of semantic options in YYS has been + * cleared instead or it has been left unmodified except that + * redundant options may have been removed. Regardless of whether + * result = yyok, YYS has been left with consistent data so that + * yydestroyGLRState can be invoked if necessary. */ +static YYRESULTTAG +yyresolveValue (yyGLRState* yys, yyGLRStack* yystackp]b4_user_formals[) +{ + yySemanticOption* yyoptionList = yys->yysemantics.yyfirstVal; + yySemanticOption* yybest = yyoptionList; + yySemanticOption** yypp; + yybool yymerge = yyfalse; + YYSTYPE yyval; + YYRESULTTAG yyflag;]b4_locations_if([ + YYLTYPE *yylocp = &yys->yyloc;])[ + + for (yypp = &yyoptionList->yynext; *yypp != YY_NULLPTR; ) + { + yySemanticOption* yyp = *yypp; + + if (yyidenticalOptions (yybest, yyp)) + { + yymergeOptionSets (yybest, yyp); + *yypp = yyp->yynext; + } + else + { + switch (yypreference (yybest, yyp)) + { + case 0:]b4_locations_if([[ + yyresolveLocations (yys, 1, yystackp]b4_user_args[);]])[ + return yyreportAmbiguity (yybest, yyp]b4_pure_args[); + break; + case 1: + yymerge = yytrue; + break; + case 2: + break; + case 3: + yybest = yyp; + yymerge = yyfalse; + break; + default: + /* This cannot happen so it is not worth a YY_ASSERT (yyfalse), + but some compilers complain if the default case is + omitted. 
*/ + break; + } + yypp = &yyp->yynext; + } + } + + if (yymerge) + { + yySemanticOption* yyp; + int yyprec = yydprec[yybest->yyrule]; + yyflag = yyresolveAction (yybest, yystackp, &yyval]b4_locuser_args[); + if (yyflag == yyok) + for (yyp = yybest->yynext; yyp != YY_NULLPTR; yyp = yyp->yynext) + { + if (yyprec == yydprec[yyp->yyrule]) + { + YYSTYPE yyval_other;]b4_locations_if([ + YYLTYPE yydummy;])[ + yyflag = yyresolveAction (yyp, yystackp, &yyval_other]b4_locuser_args([&yydummy])[); + if (yyflag != yyok) + { + yydestruct ("Cleanup: discarding incompletely merged value for", + yy_accessing_symbol (yys->yylrState), + &yyval]b4_locuser_args[); + break; + } + yyuserMerge (yymerger[yyp->yyrule], &yyval, &yyval_other); + } + } + } + else + yyflag = yyresolveAction (yybest, yystackp, &yyval]b4_locuser_args([yylocp])[); + + if (yyflag == yyok) + { + yys->yyresolved = yytrue; + yys->yysemantics.yyval = yyval; + } + else + yys->yysemantics.yyfirstVal = YY_NULLPTR; + return yyflag; +} + +static YYRESULTTAG +yyresolveStack (yyGLRStack* yystackp]b4_user_formals[) +{ + if (yystackp->yysplitPoint != YY_NULLPTR) + { + yyGLRState* yys; + int yyn; + + for (yyn = 0, yys = yystackp->yytops.yystates[0]; + yys != yystackp->yysplitPoint; + yys = yys->yypred, yyn += 1) + continue; + YYCHK (yyresolveStates (yystackp->yytops.yystates[0], yyn, yystackp + ]b4_user_args[)); + } + return yyok; +} + +/** Called when returning to deterministic operation to clean up the extra + * stacks. */ +static void +yycompressStack (yyGLRStack* yystackp) +{ + /* yyr is the state after the split point. 
*/ + yyGLRState *yyr; + + if (yystackp->yytops.yysize != 1 || yystackp->yysplitPoint == YY_NULLPTR) + return; + + { + yyGLRState *yyp, *yyq; + for (yyp = yystackp->yytops.yystates[0], yyq = yyp->yypred, yyr = YY_NULLPTR; + yyp != yystackp->yysplitPoint; + yyr = yyp, yyp = yyq, yyq = yyp->yypred) + yyp->yypred = yyr; + } + + yystackp->yyspaceLeft += yystackp->yynextFree - yystackp->yyitems; + yystackp->yynextFree = YY_REINTERPRET_CAST (yyGLRStackItem*, yystackp->yysplitPoint) + 1; + yystackp->yyspaceLeft -= yystackp->yynextFree - yystackp->yyitems; + yystackp->yysplitPoint = YY_NULLPTR; + yystackp->yylastDeleted = YY_NULLPTR; + + while (yyr != YY_NULLPTR) + { + yystackp->yynextFree->yystate = *yyr; + yyr = yyr->yypred; + yystackp->yynextFree->yystate.yypred = &yystackp->yynextFree[-1].yystate; + yystackp->yytops.yystates[0] = &yystackp->yynextFree->yystate; + yystackp->yynextFree += 1; + yystackp->yyspaceLeft -= 1; + } +} + +static YYRESULTTAG +yyprocessOneStack (yyGLRStack* yystackp, YYPTRDIFF_T yyk, + YYPTRDIFF_T yyposn]b4_pure_formals[) +{ + while (yystackp->yytops.yystates[yyk] != YY_NULLPTR) + { + yy_state_t yystate = yystackp->yytops.yystates[yyk]->yylrState; + YY_DPRINTF ((stderr, "Stack %ld Entering state %d\n", + YY_CAST (long, yyk), yystate)); + + YY_ASSERT (yystate != YYFINAL); + + if (yyisDefaultedState (yystate)) + { + YYRESULTTAG yyflag; + yyRuleNum yyrule = yydefaultAction (yystate); + if (yyrule == 0) + { + YY_DPRINTF ((stderr, "Stack %ld dies.\n", YY_CAST (long, yyk))); + yymarkStackDeleted (yystackp, yyk); + return yyok; + } + yyflag = yyglrReduce (yystackp, yyk, yyrule, yyimmediate[yyrule]]b4_user_args[); + if (yyflag == yyerr) + { + YY_DPRINTF ((stderr, + "Stack %ld dies " + "(predicate failure or explicit user error).\n", + YY_CAST (long, yyk))); + yymarkStackDeleted (yystackp, yyk); + return yyok; + } + if (yyflag != yyok) + return yyflag; + } + else + { + yysymbol_kind_t yytoken = ]b4_yygetToken_call[; + const short* yyconflicts; + const int 
yyaction = yygetLRActions (yystate, yytoken, &yyconflicts); + yystackp->yytops.yylookaheadNeeds[yyk] = yytrue; + + for (/* nothing */; *yyconflicts; yyconflicts += 1) + { + YYRESULTTAG yyflag; + YYPTRDIFF_T yynewStack = yysplitStack (yystackp, yyk); + YY_DPRINTF ((stderr, "Splitting off stack %ld from %ld.\n", + YY_CAST (long, yynewStack), YY_CAST (long, yyk))); + yyflag = yyglrReduce (yystackp, yynewStack, + *yyconflicts, + yyimmediate[*yyconflicts]]b4_user_args[); + if (yyflag == yyok) + YYCHK (yyprocessOneStack (yystackp, yynewStack, + yyposn]b4_pure_args[)); + else if (yyflag == yyerr) + { + YY_DPRINTF ((stderr, "Stack %ld dies.\n", YY_CAST (long, yynewStack))); + yymarkStackDeleted (yystackp, yynewStack); + } + else + return yyflag; + } + + if (yyisShiftAction (yyaction)) + break; + else if (yyisErrorAction (yyaction)) + { + YY_DPRINTF ((stderr, "Stack %ld dies.\n", YY_CAST (long, yyk))); + yymarkStackDeleted (yystackp, yyk); + break; + } + else + { + YYRESULTTAG yyflag = yyglrReduce (yystackp, yyk, -yyaction, + yyimmediate[-yyaction]]b4_user_args[); + if (yyflag == yyerr) + { + YY_DPRINTF ((stderr, + "Stack %ld dies " + "(predicate failure or explicit user error).\n", + YY_CAST (long, yyk))); + yymarkStackDeleted (yystackp, yyk); + break; + } + else if (yyflag != yyok) + return yyflag; + } + } + } + return yyok; +} + +]b4_parse_error_case([simple], [], +[[/* Put in YYARG at most YYARGN of the expected tokens given the + current YYSTACKP, and return the number of tokens stored in YYARG. If + YYARG is null, return the number of expected tokens (guaranteed to + be less than YYNTOKENS). */ +static int +yypcontext_expected_tokens (const yyGLRStack* yystackp, + yysymbol_kind_t yyarg[], int yyargn) +{ + /* Actual size of YYARG. */ + int yycount = 0; + int yyn = yypact[yystackp->yytops.yystates[0]->yylrState]; + if (!yypact_value_is_default (yyn)) + { + /* Start YYX at -YYN if negative to avoid negative indexes in + YYCHECK. 
In other words, skip the first -YYN actions for + this state because they are default actions. */ + int yyxbegin = yyn < 0 ? -yyn : 0; + /* Stay within bounds of both yycheck and yytname. */ + int yychecklim = YYLAST - yyn + 1; + int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; + int yyx; + for (yyx = yyxbegin; yyx < yyxend; ++yyx) + if (yycheck[yyx + yyn] == yyx && yyx != ]b4_symbol(error, kind)[ + && !yytable_value_is_error (yytable[yyx + yyn])) + { + if (!yyarg) + ++yycount; + else if (yycount == yyargn) + return 0; + else + yyarg[yycount++] = YY_CAST (yysymbol_kind_t, yyx); + } + } + if (yyarg && yycount == 0 && 0 < yyargn) + yyarg[0] = ]b4_symbol(empty, kind)[; + return yycount; +}]])[ + +]b4_parse_error_bmatch( + [custom], +[[/* User defined function to report a syntax error. */ +typedef yyGLRStack yypcontext_t; +static int +yyreport_syntax_error (const yyGLRStack* yystackp]b4_user_formals[); + +/* The kind of the lookahead of this context. */ +static yysymbol_kind_t +yypcontext_token (const yyGLRStack *yystackp) YY_ATTRIBUTE_UNUSED; + +static yysymbol_kind_t +yypcontext_token (const yyGLRStack *yystackp) +{ + YY_USE (yystackp); + yysymbol_kind_t yytoken = yychar == ]b4_symbol(empty, id)[ ? ]b4_symbol(empty, kind)[ : YYTRANSLATE (yychar); + return yytoken; +} + +]b4_locations_if([[/* The location of the lookahead of this context. */ +static const YYLTYPE * +yypcontext_location (const yyGLRStack *yystackp) YY_ATTRIBUTE_UNUSED; + +static const YYLTYPE * +yypcontext_location (const yyGLRStack *yystackp) +{ + YY_USE (yystackp); + return &yylloc; +}]])], + [detailed\|verbose], +[[static int +yy_syntax_error_arguments (const yyGLRStack* yystackp, + yysymbol_kind_t yyarg[], int yyargn) +{ + yysymbol_kind_t yytoken = yychar == ]b4_symbol(empty, id)[ ? ]b4_symbol(empty, kind)[ : YYTRANSLATE (yychar); + /* Actual size of YYARG. 
*/ + int yycount = 0; + /* There are many possibilities here to consider: + - If this state is a consistent state with a default action, then + the only way this function was invoked is if the default action + is an error action. In that case, don't check for expected + tokens because there are none. + - The only way there can be no lookahead present (in yychar) is if + this state is a consistent state with a default action. Thus, + detecting the absence of a lookahead is sufficient to determine + that there is no unexpected or expected token to report. In that + case, just report a simple "syntax error". + - Don't assume there isn't a lookahead just because this state is a + consistent state with a default action. There might have been a + previous inconsistent state, consistent state with a non-default + action, or user semantic action that manipulated yychar. + - Of course, the expected token list depends on states to have + correct lookahead information, and it depends on the parser not + to perform extra reductions after fetching a lookahead from the + scanner and before detecting a syntax error. Thus, state merging + (from LALR or IELR) and default reductions corrupt the expected + token list. However, the list is correct for canonical LR with + one exception: it will still contain any token that will not be + accepted due to an error action in a later state. + */ + if (yytoken != ]b4_symbol(empty, kind)[) + { + int yyn; + if (yyarg) + yyarg[yycount] = yytoken; + ++yycount; + yyn = yypcontext_expected_tokens (yystackp, + yyarg ? 
yyarg + 1 : yyarg, yyargn - 1); + if (yyn == YYENOMEM) + return YYENOMEM; + else + yycount += yyn; + } + return yycount; +} +]])[ + + +static void +yyreportSyntaxError (yyGLRStack* yystackp]b4_user_formals[) +{ + if (yystackp->yyerrState != 0) + return; +]b4_parse_error_case( + [custom], +[[ if (yyreport_syntax_error (yystackp]b4_user_args[)) + yyMemoryExhausted (yystackp);]], + [simple], +[[ yyerror (]b4_lyyerror_args[YY_("syntax error"));]], +[[ { + yybool yysize_overflow = yyfalse; + char* yymsg = YY_NULLPTR; + enum { YYARGS_MAX = 5 }; + /* Internationalized format string. */ + const char *yyformat = YY_NULLPTR; + /* Arguments of yyformat: reported tokens (one for the "unexpected", + one per "expected"). */ + yysymbol_kind_t yyarg[YYARGS_MAX]; + /* Cumulated lengths of YYARG. */ + YYPTRDIFF_T yysize = 0; + + /* Actual size of YYARG. */ + int yycount + = yy_syntax_error_arguments (yystackp, yyarg, YYARGS_MAX); + if (yycount == YYENOMEM) + yyMemoryExhausted (yystackp); + + switch (yycount) + { +#define YYCASE_(N, S) \ + case N: \ + yyformat = S; \ + break + default: /* Avoid compiler warnings. */ + YYCASE_(0, YY_("syntax error")); + YYCASE_(1, YY_("syntax error, unexpected %s")); + YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s")); + YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s")); + YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); + YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); +#undef YYCASE_ + } + + /* Compute error message size. Don't count the "%s"s, but reserve + room for the terminator. 
*/ + yysize = yystrlen (yyformat) - 2 * yycount + 1; + { + int yyi; + for (yyi = 0; yyi < yycount; ++yyi) + { + YYPTRDIFF_T yysz + = ]b4_parse_error_case( + [verbose], [[yytnamerr (YY_NULLPTR, yytname[yyarg[yyi]])]], + [[yystrlen (yysymbol_name (yyarg[yyi]))]]);[ + if (YYSIZE_MAXIMUM - yysize < yysz) + yysize_overflow = yytrue; + else + yysize += yysz; + } + } + + if (!yysize_overflow) + yymsg = YY_CAST (char *, YYMALLOC (YY_CAST (YYSIZE_T, yysize))); + + if (yymsg) + { + char *yyp = yymsg; + int yyi = 0; + while ((*yyp = *yyformat)) + { + if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount) + {]b4_parse_error_case([verbose], [[ + yyp += yytnamerr (yyp, yytname[yyarg[yyi++]]);]], [[ + yyp = yystpcpy (yyp, yysymbol_name (yyarg[yyi++]));]])[ + yyformat += 2; + } + else + { + ++yyp; + ++yyformat; + } + } + yyerror (]b4_lyyerror_args[yymsg); + YYFREE (yymsg); + } + else + { + yyerror (]b4_lyyerror_args[YY_("syntax error")); + yyMemoryExhausted (yystackp); + } + }]])[ + yynerrs += 1; +} + +/* Recover from a syntax error on *YYSTACKP, assuming that *YYSTACKP->YYTOKENP, + yylval, and yylloc are the syntactic category, semantic value, and location + of the lookahead. */ +static void +yyrecoverSyntaxError (yyGLRStack* yystackp]b4_user_formals[) +{ + if (yystackp->yyerrState == 3) + /* We just shifted the error token and (perhaps) took some + reductions. Skip tokens until we can proceed. */ + while (yytrue) + { + yysymbol_kind_t yytoken; + int yyj; + if (yychar == ]b4_symbol(eof, [id])[) + yyFail (yystackp][]b4_lpure_args[, YY_NULLPTR); + if (yychar != ]b4_symbol(empty, id)[) + {]b4_locations_if([[ + /* We throw away the lookahead, but the error range + of the shifted error token must take it into account. 
*/ + yyGLRState *yys = yystackp->yytops.yystates[0]; + yyGLRStackItem yyerror_range[3]; + yyerror_range[1].yystate.yyloc = yys->yyloc; + yyerror_range[2].yystate.yyloc = yylloc; + YYLLOC_DEFAULT ((yys->yyloc), yyerror_range, 2);]])[ + yytoken = YYTRANSLATE (yychar); + yydestruct ("Error: discarding", + yytoken, &yylval]b4_locuser_args([&yylloc])[); + yychar = ]b4_symbol(empty, id)[; + } + yytoken = ]b4_yygetToken_call[; + yyj = yypact[yystackp->yytops.yystates[0]->yylrState]; + if (yypact_value_is_default (yyj)) + return; + yyj += yytoken; + if (yyj < 0 || YYLAST < yyj || yycheck[yyj] != yytoken) + { + if (yydefact[yystackp->yytops.yystates[0]->yylrState] != 0) + return; + } + else if (! yytable_value_is_error (yytable[yyj])) + return; + } + + /* Reduce to one stack. */ + { + YYPTRDIFF_T yyk; + for (yyk = 0; yyk < yystackp->yytops.yysize; yyk += 1) + if (yystackp->yytops.yystates[yyk] != YY_NULLPTR) + break; + if (yyk >= yystackp->yytops.yysize) + yyFail (yystackp][]b4_lpure_args[, YY_NULLPTR); + for (yyk += 1; yyk < yystackp->yytops.yysize; yyk += 1) + yymarkStackDeleted (yystackp, yyk); + yyremoveDeletes (yystackp); + yycompressStack (yystackp); + } + + /* Pop stack until we find a state that shifts the error token. */ + yystackp->yyerrState = 3; + while (yystackp->yytops.yystates[0] != YY_NULLPTR) + { + yyGLRState *yys = yystackp->yytops.yystates[0]; + int yyj = yypact[yys->yylrState]; + if (! yypact_value_is_default (yyj)) + { + yyj += ]b4_symbol(error, kind)[; + if (0 <= yyj && yyj <= YYLAST && yycheck[yyj] == ]b4_symbol(error, kind)[ + && yyisShiftAction (yytable[yyj])) + { + /* Shift the error token. 
*/ + int yyaction = yytable[yyj];]b4_locations_if([[ + /* First adjust its location.*/ + YYLTYPE yyerrloc; + yystackp->yyerror_range[2].yystate.yyloc = yylloc; + YYLLOC_DEFAULT (yyerrloc, (yystackp->yyerror_range), 2);]])[ + YY_SYMBOL_PRINT ("Shifting", yy_accessing_symbol (yyaction), + &yylval, &yyerrloc); + yyglrShift (yystackp, 0, yyaction, + yys->yyposn, &yylval]b4_locations_if([, &yyerrloc])[); + yys = yystackp->yytops.yystates[0]; + break; + } + }]b4_locations_if([[ + yystackp->yyerror_range[1].yystate.yyloc = yys->yyloc;]])[ + if (yys->yypred != YY_NULLPTR) + yydestroyGLRState ("Error: popping", yys]b4_user_args[); + yystackp->yytops.yystates[0] = yys->yypred; + yystackp->yynextFree -= 1; + yystackp->yyspaceLeft += 1; + } + if (yystackp->yytops.yystates[0] == YY_NULLPTR) + yyFail (yystackp][]b4_lpure_args[, YY_NULLPTR); +} + +#define YYCHK1(YYE) \ + do { \ + switch (YYE) { \ + case yyok: break; \ + case yyabort: goto yyabortlab; \ + case yyaccept: goto yyacceptlab; \ + case yyerr: goto yyuser_error; \ + case yynomem: goto yyexhaustedlab; \ + default: goto yybuglab; \ + } \ + } while (0) + +/*----------. +| yyparse. | +`----------*/ + +int +]b4_glr_cc_if([yy_parse_impl], [yyparse])[ (]m4_ifset([b4_parse_param], [b4_formals(b4_parse_param)], [void])[) +{ + int yyresult; + yyGLRStack yystack; + yyGLRStack* const yystackp = &yystack; + YYPTRDIFF_T yyposn; + + YY_DPRINTF ((stderr, "Starting parse\n")); + + yychar = ]b4_symbol(empty, id)[; + yylval = yyval_default;]b4_locations_if([ + yylloc = yyloc_default;])[ +]m4_ifdef([b4_initial_action], [ +b4_dollar_pushdef([yylval], [], [], [yylloc])dnl + b4_user_initial_action +b4_dollar_popdef])[]dnl +[ + if (! 
yyinitGLRStack (yystackp, YYINITDEPTH)) + goto yyexhaustedlab; + switch (YYSETJMP (yystack.yyexception_buffer)) + { + case 0: break; + case 1: goto yyabortlab; + case 2: goto yyexhaustedlab; + default: goto yybuglab; + } + yyglrShift (&yystack, 0, 0, 0, &yylval]b4_locations_if([, &yylloc])[); + yyposn = 0; + + while (yytrue) + { + /* For efficiency, we have two loops, the first of which is + specialized to deterministic operation (single stack, no + potential ambiguity). */ + /* Standard mode. */ + while (yytrue) + { + yy_state_t yystate = yystack.yytops.yystates[0]->yylrState; + YY_DPRINTF ((stderr, "Entering state %d\n", yystate)); + if (yystate == YYFINAL) + goto yyacceptlab; + if (yyisDefaultedState (yystate)) + { + yyRuleNum yyrule = yydefaultAction (yystate); + if (yyrule == 0) + {]b4_locations_if([[ + yystack.yyerror_range[1].yystate.yyloc = yylloc;]])[ + yyreportSyntaxError (&yystack]b4_user_args[); + goto yyuser_error; + } + YYCHK1 (yyglrReduce (&yystack, 0, yyrule, yytrue]b4_user_args[)); + } + else + { + yysymbol_kind_t yytoken = ]b4_yygetToken_call;[ + const short* yyconflicts; + int yyaction = yygetLRActions (yystate, yytoken, &yyconflicts); + if (*yyconflicts) + /* Enter nondeterministic mode. */ + break; + if (yyisShiftAction (yyaction)) + { + YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); + yychar = ]b4_symbol(empty, id)[; + yyposn += 1; + yyglrShift (&yystack, 0, yyaction, yyposn, &yylval]b4_locations_if([, &yylloc])[); + if (0 < yystack.yyerrState) + yystack.yyerrState -= 1; + } + else if (yyisErrorAction (yyaction)) + {]b4_locations_if([[ + yystack.yyerror_range[1].yystate.yyloc = yylloc;]])[ + /* Issue an error message unless the scanner already + did. */ + if (yychar != ]b4_symbol(error, id)[) + yyreportSyntaxError (&yystack]b4_user_args[); + goto yyuser_error; + } + else + YYCHK1 (yyglrReduce (&yystack, 0, -yyaction, yytrue]b4_user_args[)); + } + } + + /* Nondeterministic mode. 
*/ + while (yytrue) + { + yysymbol_kind_t yytoken_to_shift; + YYPTRDIFF_T yys; + + for (yys = 0; yys < yystack.yytops.yysize; yys += 1) + yystackp->yytops.yylookaheadNeeds[yys] = yychar != ]b4_symbol(empty, id)[; + + /* yyprocessOneStack returns one of three things: + + - An error flag. If the caller is yyprocessOneStack, it + immediately returns as well. When the caller is finally + yyparse, it jumps to an error label via YYCHK1. + + - yyok, but yyprocessOneStack has invoked yymarkStackDeleted + (&yystack, yys), which sets the top state of yys to NULL. Thus, + yyparse's following invocation of yyremoveDeletes will remove + the stack. + + - yyok, when ready to shift a token. + + Except in the first case, yyparse will invoke yyremoveDeletes and + then shift the next token onto all remaining stacks. This + synchronization of the shift (that is, after all preceding + reductions on all stacks) helps prevent double destructor calls + on yylval in the event of memory exhaustion. */ + + for (yys = 0; yys < yystack.yytops.yysize; yys += 1) + YYCHK1 (yyprocessOneStack (&yystack, yys, yyposn]b4_lpure_args[)); + yyremoveDeletes (&yystack); + if (yystack.yytops.yysize == 0) + { + yyundeleteLastStack (&yystack); + if (yystack.yytops.yysize == 0) + yyFail (&yystack][]b4_lpure_args[, YY_("syntax error")); + YYCHK1 (yyresolveStack (&yystack]b4_user_args[)); + YY_DPRINTF ((stderr, "Returning to deterministic operation.\n"));]b4_locations_if([[ + yystack.yyerror_range[1].yystate.yyloc = yylloc;]])[ + yyreportSyntaxError (&yystack]b4_user_args[); + goto yyuser_error; + } + + /* If any yyglrShift call fails, it will fail after shifting. Thus, + a copy of yylval will already be on stack 0 in the event of a + failure in the following loop. Thus, yychar is set to ]b4_symbol(empty, id)[ + before the loop to make sure the user destructor for yylval isn't + called twice. 
*/ + yytoken_to_shift = YYTRANSLATE (yychar); + yychar = ]b4_symbol(empty, id)[; + yyposn += 1; + for (yys = 0; yys < yystack.yytops.yysize; yys += 1) + { + yy_state_t yystate = yystack.yytops.yystates[yys]->yylrState; + const short* yyconflicts; + int yyaction = yygetLRActions (yystate, yytoken_to_shift, + &yyconflicts); + /* Note that yyconflicts were handled by yyprocessOneStack. */ + YY_DPRINTF ((stderr, "On stack %ld, ", YY_CAST (long, yys))); + YY_SYMBOL_PRINT ("shifting", yytoken_to_shift, &yylval, &yylloc); + yyglrShift (&yystack, yys, yyaction, yyposn, + &yylval]b4_locations_if([, &yylloc])[); + YY_DPRINTF ((stderr, "Stack %ld now in state %d\n", + YY_CAST (long, yys), + yystack.yytops.yystates[yys]->yylrState)); + } + + if (yystack.yytops.yysize == 1) + { + YYCHK1 (yyresolveStack (&yystack]b4_user_args[)); + YY_DPRINTF ((stderr, "Returning to deterministic operation.\n")); + yycompressStack (&yystack); + break; + } + } + continue; + yyuser_error: + yyrecoverSyntaxError (&yystack]b4_user_args[); + yyposn = yystack.yytops.yystates[0]->yyposn; + } + + yyacceptlab: + yyresult = 0; + goto yyreturnlab; + + yybuglab: + YY_ASSERT (yyfalse); + goto yyabortlab; + + yyabortlab: + yyresult = 1; + goto yyreturnlab; + + yyexhaustedlab: + yyerror (]b4_lyyerror_args[YY_("memory exhausted")); + yyresult = 2; + goto yyreturnlab; + + yyreturnlab: + if (yychar != ]b4_symbol(empty, id)[) + yydestruct ("Cleanup: discarding lookahead", + YYTRANSLATE (yychar), &yylval]b4_locuser_args([&yylloc])[); + + /* If the stack is well-formed, pop the stack until it is empty, + destroying its entries as we go. But free the stack regardless + of whether it is well-formed. 
*/ + if (yystack.yyitems) + { + yyGLRState** yystates = yystack.yytops.yystates; + if (yystates) + { + YYPTRDIFF_T yysize = yystack.yytops.yysize; + YYPTRDIFF_T yyk; + for (yyk = 0; yyk < yysize; yyk += 1) + if (yystates[yyk]) + { + while (yystates[yyk]) + { + yyGLRState *yys = yystates[yyk];]b4_locations_if([[ + yystack.yyerror_range[1].yystate.yyloc = yys->yyloc;]])[ + if (yys->yypred != YY_NULLPTR) + yydestroyGLRState ("Cleanup: popping", yys]b4_user_args[); + yystates[yyk] = yys->yypred; + yystack.yynextFree -= 1; + yystack.yyspaceLeft += 1; + } + break; + } + } + yyfreeGLRStack (&yystack); + } + + return yyresult; +} + +/* DEBUGGING ONLY */ +#if ]b4_api_PREFIX[DEBUG +/* Print *YYS and its predecessors. */ +static void +yy_yypstack (yyGLRState* yys) +{ + if (yys->yypred) + { + yy_yypstack (yys->yypred); + YY_FPRINTF ((stderr, " -> ")); + } + YY_FPRINTF ((stderr, "%d@@%ld", yys->yylrState, YY_CAST (long, yys->yyposn))); +} + +/* Print YYS (possibly NULL) and its predecessors. */ +static void +yypstates (yyGLRState* yys) +{ + if (yys == YY_NULLPTR) + YY_FPRINTF ((stderr, "")); + else + yy_yypstack (yys); + YY_FPRINTF ((stderr, "\n")); +} + +/* Print the stack #YYK. */ +static void +yypstack (yyGLRStack* yystackp, YYPTRDIFF_T yyk) +{ + yypstates (yystackp->yytops.yystates[yyk]); +} + +/* Print all the stacks. */ +static void +yypdumpstack (yyGLRStack* yystackp) +{ +#define YYINDEX(YYX) \ + YY_CAST (long, \ + ((YYX) \ + ? YY_REINTERPRET_CAST (yyGLRStackItem*, (YYX)) - yystackp->yyitems \ + : -1)) + + yyGLRStackItem* yyp; + for (yyp = yystackp->yyitems; yyp < yystackp->yynextFree; yyp += 1) + { + YY_FPRINTF ((stderr, "%3ld. 
", + YY_CAST (long, yyp - yystackp->yyitems))); + if (*YY_REINTERPRET_CAST (yybool *, yyp)) + { + YY_ASSERT (yyp->yystate.yyisState); + YY_ASSERT (yyp->yyoption.yyisState); + YY_FPRINTF ((stderr, "Res: %d, LR State: %d, posn: %ld, pred: %ld", + yyp->yystate.yyresolved, yyp->yystate.yylrState, + YY_CAST (long, yyp->yystate.yyposn), + YYINDEX (yyp->yystate.yypred))); + if (! yyp->yystate.yyresolved) + YY_FPRINTF ((stderr, ", firstVal: %ld", + YYINDEX (yyp->yystate.yysemantics.yyfirstVal))); + } + else + { + YY_ASSERT (!yyp->yystate.yyisState); + YY_ASSERT (!yyp->yyoption.yyisState); + YY_FPRINTF ((stderr, "Option. rule: %d, state: %ld, next: %ld", + yyp->yyoption.yyrule - 1, + YYINDEX (yyp->yyoption.yystate), + YYINDEX (yyp->yyoption.yynext))); + } + YY_FPRINTF ((stderr, "\n")); + } + + YY_FPRINTF ((stderr, "Tops:")); + { + YYPTRDIFF_T yyi; + for (yyi = 0; yyi < yystackp->yytops.yysize; yyi += 1) + YY_FPRINTF ((stderr, "%ld: %ld; ", YY_CAST (long, yyi), + YYINDEX (yystackp->yytops.yystates[yyi]))); + YY_FPRINTF ((stderr, "\n")); + } +#undef YYINDEX +} +#endif + +#undef yylval +#undef yychar +#undef yynerrs]b4_locations_if([ +#undef yylloc]) + +m4_if(b4_prefix, [yy], [], +[[/* Substitute the variable and function names. 
*/ +#define yyparse ]b4_prefix[parse +#define yylex ]b4_prefix[lex +#define yyerror ]b4_prefix[error +#define yylval ]b4_prefix[lval +#define yychar ]b4_prefix[char +#define yydebug ]b4_prefix[debug +#define yynerrs ]b4_prefix[nerrs]b4_locations_if([[ +#define yylloc ]b4_prefix[lloc]])])[ + +]b4_glr_cc_if([b4_glr_cc_pre_epilogue +b4_glr_cc_cleanup])[ +]b4_percent_code_get([[epilogue]])[]dnl +b4_epilogue[]dnl +b4_output_end diff --git a/platform/dbops/binaries/build/share/bison/skeletons/glr.cc b/platform/dbops/binaries/build/share/bison/skeletons/glr.cc new file mode 100644 index 0000000000000000000000000000000000000000..7181402f3dbe443d348f4e538bcca930765406df --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/glr.cc @@ -0,0 +1,397 @@ +# C++ GLR skeleton for Bison + +# Copyright (C) 2002-2015, 2018-2021 Free Software Foundation, Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# This skeleton produces a C++ class that encapsulates a C glr parser. +# This is in order to reduce the maintenance burden. The glr.c +# skeleton is clean and pure enough so that there are no real +# problems. The C++ interface is the same as that of lalr1.cc. In +# fact, glr.c can replace yacc.c without the user noticing any +# difference, and similarly for glr.cc replacing lalr1.cc. +# +# The passing of parse-params +# +# The additional arguments are stored as members of the parser +# object, yyparser. 
The C routines need to carry yyparser +# throughout the C parser; that's easy: make yyparser an +# additional parse-param. But because the C++ skeleton needs to +# know the "real" original parse-param, we save them +# (b4_parse_param_orig). Note that b4_parse_param is overquoted +# (and c.m4 strips one level of quotes). This is a PITA, and +# explains why there are so many levels of quotes. +# +# The locations +# +# We use location.cc just like lalr1.cc, but because glr.c stores +# the locations in a union, the position and location classes +# must not have a constructor. Therefore, contrary to lalr1.cc, we +# must not define "b4_location_constructors". As a consequence the +# user must initialize the first positions (in particular the +# filename member). + +# We require a pure interface. +m4_define([b4_pure_flag], [1]) + +m4_include(b4_skeletonsdir/[c++.m4]) +b4_bison_locations_if([m4_include(b4_skeletonsdir/[location.cc])]) + +m4_define([b4_parser_class], + [b4_percent_define_get([[api.parser.class]])]) + +# Save the parse parameters. +m4_define([b4_parse_param_orig], m4_defn([b4_parse_param])) + +# b4_parse_param_wrap +# ------------------- +# New ones. +m4_ifset([b4_parse_param], +[m4_define([b4_parse_param_wrap], + [[b4_namespace_ref::b4_parser_class[& yyparser], [[yyparser]]],] +m4_defn([b4_parse_param]))], +[m4_define([b4_parse_param_wrap], + [[b4_namespace_ref::b4_parser_class[& yyparser], [[yyparser]]]]) +]) + + +# b4_yy_symbol_print_define +# ------------------------- +# Bypass the default implementation to generate the "yy_symbol_print" +# and "yy_symbol_value_print" functions. +m4_define([b4_yy_symbol_print_define], +[[/*--------------------. +| Print this symbol. 
| +`--------------------*/ + +static void +yy_symbol_print (FILE *, ]b4_namespace_ref::b4_parser_class[::symbol_kind_type yytoken, + const ]b4_namespace_ref::b4_parser_class[::value_type *yyvaluep]b4_locations_if([[, + const ]b4_namespace_ref::b4_parser_class[::location_type *yylocationp]])[]b4_user_formals[) +{ +]b4_parse_param_use[]dnl +[ yyparser.yy_symbol_print_ (yytoken, yyvaluep]b4_locations_if([, yylocationp])[); +} +]])[ + +# Hijack the initial action to initialize the locations. +]b4_bison_locations_if([m4_define([b4_initial_action], +[yylloc.initialize ();]m4_ifdef([b4_initial_action], [ +m4_defn([b4_initial_action])]))])[ + +# Hijack the post prologue to declare yyerror. +]m4_append([b4_post_prologue], +[b4_syncline([@oline@], [@ofile@])dnl +[static void +yyerror (]b4_locations_if([[const ]b4_namespace_ref::b4_parser_class[::location_type *yylocationp, + ]])[]m4_ifset([b4_parse_param], [b4_formals(b4_parse_param), + ])[const char* msg);]])[ + +# Inserted before the epilogue to define implementations (yyerror, parser member +# functions etc.). +]m4_define([b4_glr_cc_pre_epilogue], +[b4_syncline([@oline@], [@ofile@])dnl +[ +/*------------------. +| Report an error. | +`------------------*/ + +static void +yyerror (]b4_locations_if([[const ]b4_namespace_ref::b4_parser_class[::location_type *yylocationp, + ]])[]m4_ifset([b4_parse_param], [b4_formals(b4_parse_param), + ])[const char* msg) +{ +]b4_parse_param_use[]dnl +[ yyparser.error (]b4_locations_if([[*yylocationp, ]])[msg); +} + + +]b4_namespace_open[ +]dnl In this section, the parse params are the original parse_params. +m4_pushdef([b4_parse_param], m4_defn([b4_parse_param_orig]))dnl +[ /// Build a parser object. 
+ ]b4_parser_class::b4_parser_class[ (]b4_parse_param_decl[)]m4_ifset([b4_parse_param], [ + :])[ +#if ]b4_api_PREFIX[DEBUG + ]m4_ifset([b4_parse_param], [ ], [ :])[yycdebug_ (&std::cerr)]m4_ifset([b4_parse_param], [,])[ +#endif]b4_parse_param_cons[ + {} + + ]b4_parser_class::~b4_parser_class[ () + {} + + ]b4_parser_class[::syntax_error::~syntax_error () YY_NOEXCEPT YY_NOTHROW + {} + + int + ]b4_parser_class[::operator() () + { + return parse (); + } + + int + ]b4_parser_class[::parse () + { + return ::yy_parse_impl (*this]b4_user_args[); + } + +#if ]b4_api_PREFIX[DEBUG + /*--------------------. + | Print this symbol. | + `--------------------*/ + + void + ]b4_parser_class[::yy_symbol_value_print_ (symbol_kind_type yykind, + const value_type* yyvaluep]b4_locations_if([[, + const location_type* yylocationp]])[) const + {]b4_locations_if([[ + YY_USE (yylocationp);]])[ + YY_USE (yyvaluep); + std::ostream& yyo = debug_stream (); + std::ostream& yyoutput = yyo; + YY_USE (yyoutput); + ]b4_symbol_actions([printer])[ + } + + + void + ]b4_parser_class[::yy_symbol_print_ (symbol_kind_type yykind, + const value_type* yyvaluep]b4_locations_if([[, + const location_type* yylocationp]])[) const + { + *yycdebug_ << (yykind < YYNTOKENS ? "token" : "nterm") + << ' ' << yysymbol_name (yykind) << " ("]b4_locations_if([[ + << *yylocationp << ": "]])[; + yy_symbol_value_print_ (yykind, yyvaluep]b4_locations_if([[, yylocationp]])[); + *yycdebug_ << ')'; + } + + std::ostream& + ]b4_parser_class[::debug_stream () const + { + return *yycdebug_; + } + + void + ]b4_parser_class[::set_debug_stream (std::ostream& o) + { + yycdebug_ = &o; + } + + + ]b4_parser_class[::debug_level_type + ]b4_parser_class[::debug_level () const + { + return yydebug; + } + + void + ]b4_parser_class[::set_debug_level (debug_level_type l) + { + // Actually, it is yydebug which is really used. 
+ yydebug = l; + } + +#endif +]m4_popdef([b4_parse_param])dnl +b4_namespace_close[]dnl +]) + + +m4_define([b4_define_symbol_kind], +[m4_format([#define %-15s %s], + b4_symbol($][1, kind_base), + b4_namespace_ref[::]b4_parser_class[::symbol_kind::]b4_symbol($1, kind_base)) +]) + +# b4_glr_cc_setup +# --------------- +# Setup redirections for glr.c: Map the names used in c.m4 to the ones used +# in c++.m4. +m4_define([b4_glr_cc_setup], +[[]b4_attribute_define[ +]b4_null_define[ + +// This skeleton is based on C, yet compiles it as C++. +// So expect warnings about C style casts. +#if defined __clang__ && 306 <= __clang_major__ * 100 + __clang_minor__ +# pragma clang diagnostic ignored "-Wold-style-cast" +#elif defined __GNUC__ && 406 <= __GNUC__ * 100 + __GNUC_MINOR__ +# pragma GCC diagnostic ignored "-Wold-style-cast" +#endif + +// On MacOS, PTRDIFF_MAX is defined as long long, which Clang's +// -pedantic reports as being a C++11 extension. +#if defined __APPLE__ && YY_CPLUSPLUS < 201103L \ + && defined __clang__ && 4 <= __clang_major__ +# pragma clang diagnostic ignored "-Wc++11-long-long" +#endif + +#undef ]b4_symbol(empty, [id])[ +#define ]b4_symbol(empty, [id])[ ]b4_namespace_ref[::]b4_parser_class[::token::]b4_symbol(empty, [id])[ +#undef ]b4_symbol(eof, [id])[ +#define ]b4_symbol(eof, [id])[ ]b4_namespace_ref[::]b4_parser_class[::token::]b4_symbol(eof, [id])[ +#undef ]b4_symbol(error, [id])[ +#define ]b4_symbol(error, [id])[ ]b4_namespace_ref[::]b4_parser_class[::token::]b4_symbol(error, [id])[ + +#ifndef ]b4_api_PREFIX[STYPE +# define ]b4_api_PREFIX[STYPE ]b4_namespace_ref[::]b4_parser_class[::value_type +#endif +#ifndef ]b4_api_PREFIX[LTYPE +# define ]b4_api_PREFIX[LTYPE ]b4_namespace_ref[::]b4_parser_class[::location_type +#endif + +typedef ]b4_namespace_ref[::]b4_parser_class[::symbol_kind_type yysymbol_kind_t; + +// Expose C++ symbol kinds to C. 
+]b4_define_symbol_kind(-2)dnl +b4_symbol_foreach([b4_define_symbol_kind])])[ +]]) + + +m4_define([b4_undef_symbol_kind], +[[#undef ]b4_symbol($1, kind_base)[ +]]) + + +# b4_glr_cc_cleanup +# ----------------- +# Remove redirections for glr.c. +m4_define([b4_glr_cc_cleanup], +[[#undef ]b4_symbol(empty, [id])[ +#undef ]b4_symbol(eof, [id])[ +#undef ]b4_symbol(error, [id])[ + +]b4_undef_symbol_kind(-2)dnl +b4_symbol_foreach([b4_undef_symbol_kind])dnl +]) + + +# b4_shared_declarations(hh|cc) +# ----------------------------- +# Declaration that might either go into the header (if --header, $1 = hh) +# or in the implementation file. +m4_define([b4_shared_declarations], +[m4_pushdef([b4_parse_param], m4_defn([b4_parse_param_orig]))dnl +b4_percent_code_get([[requires]])[ +#include +#include +#include + +]b4_cxx_portability[ +]m4_ifdef([b4_location_include], + [[# include ]b4_location_include])[ +]b4_variant_if([b4_variant_includes])[ + +// Whether we are compiled with exception support. +#ifndef YY_EXCEPTIONS +# if defined __GNUC__ && !defined __EXCEPTIONS +# define YY_EXCEPTIONS 0 +# else +# define YY_EXCEPTIONS 1 +# endif +#endif + +]b4_YYDEBUG_define[ + +]b4_namespace_open[ + +]b4_bison_locations_if([m4_ifndef([b4_location_file], + [b4_location_define])])[ + + /// A Bison parser. + class ]b4_parser_class[ + { + public: +]b4_public_types_declare[ + + /// Build a parser object. + ]b4_parser_class[ (]b4_parse_param_decl[); + virtual ~]b4_parser_class[ (); + + /// Parse. An alias for parse (). + /// \returns 0 iff parsing succeeded. + int operator() (); + + /// Parse. + /// \returns 0 iff parsing succeeded. + virtual int parse (); + +#if ]b4_api_PREFIX[DEBUG + /// The current debugging stream. + std::ostream& debug_stream () const; + /// Set the current debugging stream. + void set_debug_stream (std::ostream &); + + /// Type for debugging levels. + typedef int debug_level_type; + /// The current debugging level. 
+ debug_level_type debug_level () const; + /// Set the current debugging level. + void set_debug_level (debug_level_type l); +#endif + + /// Report a syntax error.]b4_locations_if([[ + /// \param loc where the syntax error is found.]])[ + /// \param msg a description of the syntax error. + virtual void error (]b4_locations_if([[const location_type& loc, ]])[const std::string& msg); + +# if ]b4_api_PREFIX[DEBUG + public: + /// \brief Report a symbol value on the debug stream. + /// \param yykind The symbol kind. + /// \param yyvaluep Its semantic value.]b4_locations_if([[ + /// \param yylocationp Its location.]])[ + virtual void yy_symbol_value_print_ (symbol_kind_type yykind, + const value_type* yyvaluep]b4_locations_if([[, + const location_type* yylocationp]])[) const; + /// \brief Report a symbol on the debug stream. + /// \param yykind The symbol kind. + /// \param yyvaluep Its semantic value.]b4_locations_if([[ + /// \param yylocationp Its location.]])[ + virtual void yy_symbol_print_ (symbol_kind_type yykind, + const value_type* yyvaluep]b4_locations_if([[, + const location_type* yylocationp]])[) const; + private: + /// Debug stream. + std::ostream* yycdebug_; +#endif + +]b4_parse_param_vars[ + }; + +]b4_namespace_close[ + +]b4_percent_code_get([[provides]])[ +]m4_popdef([b4_parse_param])dnl +])[ + +]b4_header_if( +[b4_output_begin([b4_spec_header_file]) +b4_copyright([Skeleton interface for Bison GLR parsers in C++], + [2002-2015, 2018-2021])[ +// C++ GLR parser skeleton written by Akim Demaille. + +]b4_disclaimer[ +]b4_cpp_guard_open([b4_spec_mapped_header_file])[ +]b4_shared_declarations[ +]b4_cpp_guard_close([b4_spec_mapped_header_file])[ +]b4_output_end]) + +# Let glr.c (and b4_shared_declarations) believe that the user +# arguments include the parser itself. 
+m4_pushdef([b4_parse_param], m4_defn([b4_parse_param_wrap])) +m4_include(b4_skeletonsdir/[glr.c]) +m4_popdef([b4_parse_param]) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/glr2.cc b/platform/dbops/binaries/build/share/bison/skeletons/glr2.cc new file mode 100644 index 0000000000000000000000000000000000000000..757d68d4970984ade94ceed25fa92c7f73058b5a --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/glr2.cc @@ -0,0 +1,3533 @@ +# C++ GLR skeleton for Bison + +# Copyright (C) 2002-2015, 2018-2021 Free Software Foundation, Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +m4_include(b4_skeletonsdir/[c++.m4]) + +# api.value.type=variant is valid. 
+m4_define([b4_value_type_setup_variant]) + +# b4_tname_if(TNAME-NEEDED, TNAME-NOT-NEEDED) +# ------------------------------------------- +m4_define([b4_tname_if], +[m4_case(b4_percent_define_get([[parse.error]]), + [verbose], [$1], + [b4_token_table_if([$1], + [$2])])]) + +b4_bison_locations_if([ + m4_define([b4_location_constructors]) + m4_include(b4_skeletonsdir/[location.cc])]) +b4_variant_if([m4_include(b4_skeletonsdir/[variant.hh])]) + +m4_define([b4_parser_class], + [b4_percent_define_get([[api.parser.class]])]) + +]m4_define([b4_define_symbol_kind], +[m4_format([#define %-15s %s], + b4_symbol($][1, kind_base), + b4_namespace_ref[::]b4_parser_class[::symbol_kind::]b4_symbol($1, kind_base)) +]) + + +# b4_integral_parser_table_define(TABLE-NAME, CONTENT, COMMENT) +# ------------------------------------------------------------- +# Define "yy" whose contents is CONTENT. Does not use "static", +# should be in unnamed namespace. +m4_define([b4_integral_parser_table_define], +[m4_ifvaln([$3], [ b4_comment([$3])])dnl + const b4_int_type_for([$2]) yy$1[[]] = + { + $2 + };dnl +]) + + +## ---------------- ## +## Default values. ## +## ---------------- ## + +# Stack parameters. +m4_define_default([b4_stack_depth_max], [10000]) +m4_define_default([b4_stack_depth_init], [200]) + + + +## ------------ ## +## Interfaces. ## +## ------------ ## + +# b4_user_formals +# --------------- +# The possible parse-params formal arguments preceded by a comma. +# +# This is not shared with yacc.c in c.m4 because GLR relies on ISO C +# formal argument declarations. +m4_define([b4_user_formals], +[m4_ifset([b4_parse_param], [, b4_formals(b4_parse_param)])]) + + +# b4_symbol_kind(NUM) +# ------------------- +m4_define([b4_symbol_kind], +[symbol_kind::b4_symbol_kind_base($@)]) + + +## ----------------- ## +## Semantic Values. ## +## ----------------- ## + + +# b4_lhs_value(SYMBOL-NUM, [TYPE]) +# -------------------------------- +# See README. 
+m4_define([b4_lhs_value], +[b4_symbol_value([(*yyvalp)], [$1], [$2])]) + + +# b4_rhs_data(RULE-LENGTH, POS) +# ----------------------------- +# See README. +m4_define([b4_rhs_data], +[(static_cast(yyvsp))@{YYFILL (b4_subtract([$2], [$1]))@}.getState()]) + + +# b4_rhs_value(RULE-LENGTH, POS, SYMBOL-NUM, [TYPE]) +# -------------------------------------------------- +# Expansion of $$ or $$, for symbol SYMBOL-NUM. +m4_define([b4_rhs_value], +[b4_symbol_value([b4_rhs_data([$1], [$2]).value ()], [$3], [$4])]) + + + +## ----------- ## +## Locations. ## +## ----------- ## + +# b4_lhs_location() +# ----------------- +# Expansion of @$. +m4_define([b4_lhs_location], +[(*yylocp)]) + + +# b4_rhs_location(RULE-LENGTH, NUM) +# --------------------------------- +# Expansion of @NUM, where the current rule has RULE-LENGTH symbols +# on RHS. +m4_define([b4_rhs_location], +[(b4_rhs_data([$1], [$2]).yyloc)]) + + +# b4_symbol_action(SYMBOL-NUM, KIND) +# ---------------------------------- +# Run the action KIND (destructor or printer) for SYMBOL-NUM. +# Same as in C, but using references instead of pointers. +# +# Currently we need two different b4_symbol_action: once for the +# self-contained symbols, and another time for yy_destroy_ and +# yy_symbol_value_print_, which don't use genuine symbols yet. +m4_define([b4_symbol_action], +[b4_symbol_if([$1], [has_$2], +[m4_pushdef([b4_symbol_value], m4_defn([b4_symbol_value_template]))[]dnl +b4_dollar_pushdef([yysym.value], + [$1], + [], + [yysym.location])dnl + _b4_symbol_case([$1])[]dnl +b4_syncline([b4_symbol([$1], [$2_line])], [b4_symbol([$1], [$2_file])])dnl + b4_symbol([$1], [$2]) +b4_syncline([@oline@], [@ofile@])dnl + break; + +m4_popdef([b4_symbol_value])[]dnl +b4_dollar_popdef[]dnl +])]) + + +# b4_symbol_action_for_yyval(SYMBOL-NUM, KIND) +# -------------------------------------------- +# Run the action KIND (destructor or printer) for SYMBOL-NUM. +# Same as in C, but using references instead of pointers. 
+m4_define([b4_symbol_action_for_yyval], +[b4_symbol_if([$1], [has_$2], +[b4_dollar_pushdef([yyval], + [$1], + [], + [yyloc])dnl + _b4_symbol_case([$1])[]dnl +b4_syncline([b4_symbol([$1], [$2_line])], [b4_symbol([$1], [$2_file])])dnl + b4_symbol([$1], [$2]) +b4_syncline([@oline@], [@ofile@])dnl + break; + +b4_dollar_popdef[]dnl +])]) + + +# b4_call_merger(MERGER-NUM, MERGER-NAME, SYMBOL-SUM) +# --------------------------------------------------- +m4_define([b4_call_merger], +[b4_case([$1], + [ b4_symbol_if([$3], [has_type], + [b4_variant_if([yy0.as< b4_symbol($3, type) > () = $2 (yy0.as< b4_symbol($3, type) >(), yy1.as< b4_symbol($3, type) >());], + [yy0.b4_symbol($3, slot) = $2 (yy0, yy1);])], + [yy0 = $2 (yy0, yy1);])])]) + +# b4_yylex +# -------- +# Call yylex. +m4_define([b4_yylex], +[b4_token_ctor_if( +[b4_function_call([yylex], + [symbol_type], m4_ifdef([b4_lex_param], b4_lex_param))], +[b4_function_call([yylex], [int], + [[value_type *], [&this->yyla.value]][]dnl +b4_locations_if([, [[location_type *], [&this->yyla.location]]])dnl +m4_ifdef([b4_lex_param], [, ]b4_lex_param))])]) + + +# b4_shared_declarations(hh|cc) +# ----------------------------- +# Declaration that might either go into the header (if --header, $1 = hh) +# or in the implementation file. +m4_define([b4_shared_declarations], +[b4_percent_code_get([[requires]])[ +#include +#include // ptrdiff_t +#include // memcpy +#include +#include +#include +#include +#include +#include +#include + +]b4_cxx_portability[ +]m4_ifdef([b4_location_include], + [[# include ]b4_location_include])[ +]b4_variant_if([b4_variant_includes])[ + +]b4_YYDEBUG_define[ + +]b4_namespace_open[ + +]b4_bison_locations_if([m4_ifndef([b4_location_file], + [b4_location_define])])[ + + /// A Bison parser. + class ]b4_parser_class[ + { + public: +]b4_public_types_declare[ +]b4_symbol_type_define[ + + // FIXME: should be private eventually. + class glr_stack; + class glr_state; + + /// Build a parser object. 
+ ]b4_parser_class[ (]b4_parse_param_decl[); + ~]b4_parser_class[ (); + + /// Parse. An alias for parse (). + /// \returns 0 iff parsing succeeded. + int operator() (); + + /// Parse. + /// \returns 0 iff parsing succeeded. + int parse (); + +#if ]b4_api_PREFIX[DEBUG + /// The current debugging stream. + std::ostream& debug_stream () const; + /// Set the current debugging stream. + void set_debug_stream (std::ostream &); + + /// Type for debugging levels. + using debug_level_type = int; + /// The current debugging level. + debug_level_type debug_level () const; + /// Set the current debugging level. + void set_debug_level (debug_level_type l); +#endif + + /// Report a syntax error.]b4_locations_if([[ + /// \param loc where the syntax error is found.]])[ + /// \param msg a description of the syntax error. + void error (]b4_locations_if([[const location_type& loc, ]])[const std::string& msg); + +]b4_parse_error_bmatch( +[custom\|detailed], +[[ /// The user-facing name of the symbol whose (internal) number is + /// YYSYMBOL. No bounds checking. + static const char *symbol_name (symbol_kind_type yysymbol);]], +[simple], +[[#if ]b4_api_PREFIX[DEBUG || ]b4_token_table_flag[ + /// The user-facing name of the symbol whose (internal) number is + /// YYSYMBOL. No bounds checking. + static const char *symbol_name (symbol_kind_type yysymbol); +#endif // #if ]b4_api_PREFIX[DEBUG || ]b4_token_table_flag[ +]], +[verbose], +[[ /// The user-facing name of the symbol whose (internal) number is + /// YYSYMBOL. No bounds checking. 
+ static std::string symbol_name (symbol_kind_type yysymbol);]])[ + +]b4_token_constructor_define[ +]b4_parse_error_bmatch([custom\|detailed\|verbose], [[ + class context + { + public: + context (glr_stack& yystack, const symbol_type& yyla); + const symbol_type& lookahead () const YY_NOEXCEPT { return yyla_; } + symbol_kind_type token () const YY_NOEXCEPT { return yyla_.kind (); }]b4_locations_if([[ + const location_type& location () const YY_NOEXCEPT { return yyla_.location; } +]])[ + /// Put in YYARG at most YYARGN of the expected tokens, and return the + /// number of tokens stored in YYARG. If YYARG is null, return the + /// number of expected tokens (guaranteed to be less than YYNTOKENS). + int expected_tokens (symbol_kind_type yyarg[], int yyargn) const; + + private: + glr_stack& yystack_; + const symbol_type& yyla_; + }; +]])[ +# if ]b4_api_PREFIX[DEBUG + public: + /// \brief Report a symbol value on the debug stream. + /// \param yykind The symbol kind. + /// \param yyval Its semantic value.]b4_locations_if([[ + /// \param yyloc Its location.]])[ + void yy_symbol_value_print_ (symbol_kind_type yykind, + const value_type& yyval]b4_locations_if([[, + const location_type& yyloc]])[) const; + /// \brief Report a symbol on the debug stream. + /// \param yykind The symbol kind. + /// \param yyval Its semantic value.]b4_locations_if([[ + /// \param yyloc Its location.]])[ + void yy_symbol_print_ (symbol_kind_type yykind, + const value_type& yyval]b4_locations_if([[, + const location_type& yyloc]])[) const; + private: + /// Debug stream. + std::ostream* yycdebug_; +#endif + +]b4_parse_error_bmatch( +[custom], [[ + private: + /// Report a syntax error + /// \param yyctx the context in which the error occurred. + void report_syntax_error (const context& yyctx) const;]], +[detailed\|verbose], [[ + private: + /// The arguments of the error message. 
+ int yy_syntax_error_arguments_ (const context& yyctx, + symbol_kind_type yyarg[], int yyargn) const; + + /// Generate an error message. + /// \param yyctx the context in which the error occurred. + virtual std::string yysyntax_error_ (const context& yyctx) const;]])[ + + /// Convert a scanner token kind \a t to a symbol kind. + /// In theory \a t should be a token_kind_type, but character literals + /// are valid, yet not members of the token_kind_type enum. + static symbol_kind_type yytranslate_ (int t) YY_NOEXCEPT; + +]b4_parse_error_bmatch( +[simple], +[[#if ]b4_api_PREFIX[DEBUG || ]b4_token_table_flag[ + /// For a symbol, its name in clear. + static const char* const yytname_[]; +#endif // #if ]b4_api_PREFIX[DEBUG || ]b4_token_table_flag[ +]], +[verbose], +[[ /// Convert the symbol name \a n to a form suitable for a diagnostic. + static std::string yytnamerr_ (const char *yystr); + + /// For a symbol, its name in clear. + static const char* const yytname_[]; +]])[ + + /// \brief Reclaim the memory associated to a symbol. + /// \param yymsg Why this token is reclaimed. + /// If null, print nothing. + /// \param yykind The symbol kind. + void yy_destroy_ (const char* yymsg, symbol_kind_type yykind, + value_type& yyval]b4_locations_if([[, + location_type& yyloc]])[); + +]b4_parse_param_vars[ + // Needs access to yy_destroy_, report_syntax_error, etc. + friend glr_stack; + }; + +]b4_token_ctor_if([b4_yytranslate_define([$1])[ +]b4_public_types_define([$1])])[ +]b4_namespace_close[ + +]b4_percent_code_get([[provides]])[ +]])[ + + +## -------------- ## +## Output files. ## +## -------------- ## + + +# ------------- # +# Header file. # +# ------------- # + +]b4_header_if([[ +]b4_output_begin([b4_spec_header_file])[ +]b4_copyright([Skeleton interface for Bison GLR parsers in C++], + [2002-2015, 2018-2021])[ +// C++ GLR parser skeleton written by Valentin Tolmer. 
+ +]b4_disclaimer[ +]b4_cpp_guard_open([b4_spec_mapped_header_file])[ +]b4_shared_declarations([hh])[ +]b4_cpp_guard_close([b4_spec_mapped_header_file])[ +]b4_output_end])[ + + +# --------------------- # +# Implementation file. # +# --------------------- # + +]b4_output_begin([b4_parser_file_name])[ +]b4_copyright([Skeleton implementation for Bison GLR parsers in C], + [2002-2015, 2018-2021])[ +// C++ GLR parser skeleton written by Valentin Tolmer. + +]b4_disclaimer[ +]b4_identification[ + +]b4_percent_code_get([[top]])[ +]m4_if(b4_prefix, [yy], [], +[[/* Substitute the variable and function names. */ +#define yyparse ]b4_prefix[parse +#define yylex ]b4_prefix[lex +#define yyerror ]b4_prefix[error +#define yydebug ]b4_prefix[debug]])[ + +]b4_user_pre_prologue[ + +]b4_null_define[ + +]b4_header_if([[#include "@basename(]b4_spec_header_file[@)"]], + [b4_shared_declarations([cc])])[ + +namespace +{ + /* Default (constant) value used for initialization for null + right-hand sides. Unlike the standard yacc.c template, here we set + the default value of $$ to a zeroed-out value. Since the default + value is undefined, this behavior is technically correct. */ + ]b4_namespace_ref[::]b4_parser_class[::value_type yyval_default; +} + +]b4_user_post_prologue[ +]b4_percent_code_get[ + +#include +#include + +#ifndef YY_ +# if defined YYENABLE_NLS && YYENABLE_NLS +# if ENABLE_NLS +# include /* INFRINGES ON USER NAME SPACE */ +# define YY_(Msgid) dgettext ("bison-runtime", Msgid) +# endif +# endif +# ifndef YY_ +# define YY_(Msgid) Msgid +# endif +#endif + +// Whether we are compiled with exception support. +#ifndef YY_EXCEPTIONS +# if defined __GNUC__ && !defined __EXCEPTIONS +# define YY_EXCEPTIONS 0 +# else +# define YY_EXCEPTIONS 1 +# endif +#endif + +#ifndef YYFREE +# define YYFREE free +#endif +#ifndef YYMALLOC +# define YYMALLOC malloc +#endif + +#ifndef YYSETJMP +# include +# define YYJMP_BUF jmp_buf +# define YYSETJMP(Env) setjmp (Env) +/* Pacify Clang and ICC. 
*/ +# define YYLONGJMP(Env, Val) \ + do { \ + longjmp (Env, Val); \ + YYASSERT (0); \ + } while (false) +#endif + +]b4_attribute_define([noreturn])[ + +#if defined __GNUC__ && ! defined __ICC && 6 <= __GNUC__ +# define YY_IGNORE_NULL_DEREFERENCE_BEGIN \ + _Pragma ("GCC diagnostic push") \ + _Pragma ("GCC diagnostic ignored \"-Wnull-dereference\"") +# define YY_IGNORE_NULL_DEREFERENCE_END \ + _Pragma ("GCC diagnostic pop") +#else +# define YY_IGNORE_NULL_DEREFERENCE_BEGIN +# define YY_IGNORE_NULL_DEREFERENCE_END +#endif + +]b4_null_define[ +]b4_cast_define[ + +// FIXME: Use the same conventions as lalr1.cc. +]b4_parse_assert_if[ +#ifndef YYASSERT +# define YYASSERT(Condition) ((void) ((Condition) || (abort (), 0))) +#endif + +#ifdef YYDEBUG +# define YYDASSERT(Condition) YYASSERT(Condition) +#else +# define YYDASSERT(Condition) +#endif + +/* YYFINAL -- State number of the termination state. */ +#define YYFINAL ]b4_final_state_number[ +/* YYLAST -- Last index in YYTABLE. */ +#define YYLAST ]b4_last[ + +/* YYNTOKENS -- Number of terminals. */ +#define YYNTOKENS ]b4_tokens_number[ +/* YYNNTS -- Number of nonterminals. */ +#define YYNNTS ]b4_nterms_number[ +/* YYNRULES -- Number of rules. */ +#define YYNRULES ]b4_rules_number[ +/* YYNSTATES -- Number of states. */ +#define YYNSTATES ]b4_states_number[ +/* YYMAXRHS -- Maximum number of symbols on right-hand side of rule. */ +#define YYMAXRHS ]b4_r2_max[ +/* YYMAXLEFT -- Maximum number of symbols to the left of a handle + accessed by $0, $-1, etc., in any rule. */ +#define YYMAXLEFT ]b4_max_left_semantic_context[ + +namespace +{ +#if ]b4_api_PREFIX[DEBUG + /* YYRLINE[YYN] -- source line where rule number YYN was defined. */ + const ]b4_int_type_for([b4_rline])[ yyrline[] = + { + ]b4_rline[ + }; +#endif + +#define YYPACT_NINF ]b4_pact_ninf[ +#define YYTABLE_NINF ]b4_table_ninf[ + +]b4_parser_tables_define[ + + /* YYDPREC[RULE-NUM] -- Dynamic precedence of rule #RULE-NUM (0 if none). 
*/ + const ]b4_int_type_for([b4_dprec])[ yydprec[] = + { + ]b4_dprec[ + }; + + /* YYMERGER[RULE-NUM] -- Index of merging function for rule #RULE-NUM. */ + const ]b4_int_type_for([b4_merger])[ yymerger[] = + { + ]b4_merger[ + }; + + /* YYIMMEDIATE[RULE-NUM] -- True iff rule #RULE-NUM is not to be deferred, as + in the case of predicates. */ + const bool yyimmediate[] = + { + ]b4_immediate[ + }; + + /* YYCONFLP[YYPACT[STATE-NUM]] -- Pointer into YYCONFL of start of + list of conflicting reductions corresponding to action entry for + state STATE-NUM in yytable. 0 means no conflicts. The list in + yyconfl is terminated by a rule number of 0. */ + const ]b4_int_type_for([b4_conflict_list_heads])[ yyconflp[] = + { + ]b4_conflict_list_heads[ + }; + + /* YYCONFL[I] -- lists of conflicting rule numbers, each terminated by + 0, pointed into by YYCONFLP. */ + ]dnl Do not use b4_int_type_for here, since there are places where + dnl pointers onto yyconfl are taken, whose type is "short*". + dnl We probably ought to introduce a type for confl. + [const short yyconfl[] = + { + ]b4_conflicting_rules[ + }; +} // namespace + + +/* Error token number */ +#define YYTERROR 1 + +]b4_locations_if([[ +]b4_yylloc_default_define[ +# define YYRHSLOC(Rhs, K) ((Rhs)[K].getState().yyloc) +]])[ + +enum YYRESULTTAG { yyok, yyaccept, yyabort, yyerr }; + +#define YYCHK(YYE) \ + do { \ + YYRESULTTAG yychk_flag = YYE; \ + if (yychk_flag != yyok) \ + return yychk_flag; \ + } while (false) + +#if ]b4_api_PREFIX[DEBUG + +#define YYCDEBUG if (!yydebug) {} else std::cerr + +# define YY_SYMBOL_PRINT(Title, Kind, Value, Location) \ + do { \ + if (yydebug) \ + { \ + std::cerr << Title << ' '; \ + yyparser.yy_symbol_print_ (Kind, Value]b4_locations_if([, Location])[); \ + std::cerr << '\n'; \ + } \ + } while (false) + +# define YY_REDUCE_PRINT(Args) \ + do { \ + if (yydebug) \ + yystateStack.yy_reduce_print Args; \ + } while (false) + +/* Nonzero means print parse trace. 
It is left uninitialized so that + multiple parsers can coexist. */ +int yydebug; + +namespace +{ + using glr_stack = ]b4_namespace_ref[::]b4_parser_class[::glr_stack; + using glr_state = ]b4_namespace_ref[::]b4_parser_class[::glr_state; + + void yypstack (const glr_stack& yystack, size_t yyk) + YY_ATTRIBUTE_UNUSED; + void yypdumpstack (const glr_stack& yystack) + YY_ATTRIBUTE_UNUSED; +} + +#else /* !]b4_api_PREFIX[DEBUG */ + +# define YYCDEBUG if (true) {} else std::cerr +# define YY_SYMBOL_PRINT(Title, Kind, Value, Location) {} +# define YY_REDUCE_PRINT(Args) {} + +#endif /* !]b4_api_PREFIX[DEBUG */ + +/* YYINITDEPTH -- initial size of the parser's stacks. */ +#ifndef YYINITDEPTH +# define YYINITDEPTH ]b4_stack_depth_init[ +#endif + +/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only + if the built-in stack extension method is used). + + Do not make this value too large; the results are undefined if + SIZE_MAX < YYMAXDEPTH * sizeof (GLRStackItem) + evaluated with infinite-precision integer arithmetic. */ + +#ifndef YYMAXDEPTH +# define YYMAXDEPTH ]b4_stack_depth_max[ +#endif + +/* Minimum number of free items on the stack allowed after an + allocation. This is to allow allocation and initialization + to be completed by functions that call yyexpandGLRStack before the + stack is expanded, thus insuring that all necessary pointers get + properly redirected to new data. 
*/ +#define YYHEADROOM 2 + +#ifndef YYSTACKEXPANDABLE +# define YYSTACKEXPANDABLE 1 +#endif + +namespace +{ + template + class strong_index_alias + { + public: + static strong_index_alias create (std::ptrdiff_t value) + { + strong_index_alias result; + result.value_ = value; + return result; + } + + std::ptrdiff_t const& get () const { return value_; } + + size_t uget () const { return static_cast (value_); } + + strong_index_alias operator+ (std::ptrdiff_t other) const + { + return strong_index_alias (get () + other); + } + + void operator+= (std::ptrdiff_t other) + { + value_ += other; + } + + strong_index_alias operator- (std::ptrdiff_t other) + { + return strong_index_alias (get () - other); + } + + void operator-= (std::ptrdiff_t other) + { + value_ -= other; + } + + size_t operator- (strong_index_alias other) + { + return strong_index_alias (get () - other.get ()); + } + + strong_index_alias& operator++ () + { + ++value_; + return *this; + } + + bool isValid () const + { + return value_ != INVALID_INDEX; + } + + void setInvalid() + { + value_ = INVALID_INDEX; + } + + bool operator== (strong_index_alias other) + { + return get () == other.get (); + } + + bool operator!= (strong_index_alias other) + { + return get () != other.get (); + } + + bool operator< (strong_index_alias other) + { + return get () < other.get (); + } + + private: + static const std::ptrdiff_t INVALID_INDEX; + + // WARNING: 0-initialized. 
+ std::ptrdiff_t value_; + }; // class strong_index_alias + + template + const std::ptrdiff_t strong_index_alias::INVALID_INDEX = + std::numeric_limits::max (); + + using state_set_index = strong_index_alias; + + state_set_index create_state_set_index (std::ptrdiff_t value) + { + return state_set_index::create (value); + } + + /** State numbers, as in LALR(1) machine */ + using state_num = int; + + /** Rule numbers, as in LALR(1) machine */ + using rule_num = int; + + using parser_type = ]b4_namespace_ref[::]b4_parser_class[; + using glr_state = parser_type::glr_state; + using symbol_kind = parser_type::symbol_kind; + using symbol_kind_type = parser_type::symbol_kind_type; + using symbol_type = parser_type::symbol_type; + using value_type = parser_type::value_type;]b4_locations_if([[ + using location_type = parser_type::location_type;]])[ + + // Forward declarations. + class glr_stack_item; + class semantic_option; +} // namespace + +namespace +{ + /** Accessing symbol of state YYSTATE. */ + inline symbol_kind_type + yy_accessing_symbol (state_num yystate) + { + return YY_CAST (symbol_kind_type, yystos[yystate]); + } + + /** Left-hand-side symbol for rule #YYRULE. */ + inline symbol_kind_type + yylhsNonterm (rule_num yyrule) + { + return static_cast(yyr1[yyrule]); + } + + /** Number of symbols composing the right hand side of rule #RULE. */ + inline int + yyrhsLength (rule_num yyrule) + { + return yyr2[yyrule]; + } +} + +namespace ]b4_namespace_ref[ +{ + class ]b4_parser_class[::glr_state + { + public: + glr_state () + : yyresolved (false) + , yylrState (0) + , yyposn (0) + , yypred (0) + , yyfirstVal (0)]b4_locations_if([[ + , yyloc ()]])[]b4_parse_assert_if([[ + , magic_ (MAGIC)]])[ + {} + + /// Build with a semantic value. 
+ glr_state (state_num lrState, size_t posn, const value_type& val]b4_locations_if([[, const location_type& loc]])[) + : yyresolved (true) + , yylrState (lrState) + , yyposn (posn) + , yypred (0) + , yyval (]b4_variant_if([], [[val]])[)]b4_locations_if([[ + , yyloc (loc)]])[]b4_parse_assert_if([[ + , magic_ (MAGIC)]])[ + {]b4_variant_if([[ + ]b4_symbol_variant([yy_accessing_symbol (lrState)], + [yyval], [copy], [val])])[} + + /// Build with a semantic option. + glr_state (state_num lrState, size_t posn) + : yyresolved (false) + , yylrState (lrState) + , yyposn (posn) + , yypred (0) + , yyfirstVal (0)]b4_locations_if([[ + , yyloc ()]])[]b4_parse_assert_if([[ + , magic_ (MAGIC)]])[ + {} + + glr_state (const glr_state& other) + : yyresolved (other.yyresolved) + , yylrState (other.yylrState) + , yyposn (other.yyposn) + , yypred (0)]b4_locations_if([[ + , yyloc (other.yyloc)]])[]b4_parse_assert_if([[ + , magic_ (MAGIC)]])[ + { + setPred (other.pred ()); + if (other.yyresolved)]b4_variant_if([[ + { + new (&yyval) value_type (); + ]b4_symbol_variant([yy_accessing_symbol (other.yylrState)], + [yyval], [copy], [other.value ()])[ + }]], [[ + new (&yyval) value_type (other.value ());]])[ + else + { + yyfirstVal = 0; + setFirstVal (other.firstVal ()); + }]b4_parse_assert_if([[ + check_();]])[ + } + + ~glr_state () + {]b4_parse_assert_if([[ + check_ ();]])[ + if (yyresolved) + {]b4_variant_if([[ + symbol_kind_type yykind = yy_accessing_symbol (yylrState); + // FIXME: User destructors. + // Value type destructor. 
+ ]b4_symbol_variant([[yykind]], [[yyval]], [[template destroy]])])[ + yyval.~value_type (); + }]b4_parse_assert_if([[ + magic_ = 0;]])[ + } + + glr_state& operator= (const glr_state& other) + {]b4_parse_assert_if([[ + check_ (); + other.check_ ();]])[ + if (!yyresolved && other.yyresolved) + new (&yyval) value_type; + yyresolved = other.yyresolved; + yylrState = other.yylrState; + yyposn = other.yyposn; + setPred (other.pred ()); + if (other.yyresolved)]b4_variant_if([[ + ]b4_symbol_variant([yy_accessing_symbol (other.yylrState)], + [yyval], [copy], [other.value ()])], [[ + value () = other.value ();]])[ + else + setFirstVal (other.firstVal ());]b4_locations_if([[ + yyloc = other.yyloc;]])[ + return *this; + } + + /** Type tag for the semantic value. If true, yyval applies, otherwise + * yyfirstVal applies. */ + bool yyresolved; + /** Number of corresponding LALR(1) machine state. */ + state_num yylrState; + /** Source position of the last token produced by my symbol */ + size_t yyposn; + + /// Only call pred() and setPred() on objects in yyitems, not temporaries. + glr_state* pred (); + const glr_state* pred () const; + void setPred (const glr_state* state); + + /// Only call firstVal() and setFirstVal() on objects in yyitems, not + /// temporaries. 
+ semantic_option* firstVal (); + const semantic_option* firstVal () const; + void setFirstVal (const semantic_option* option); + + value_type& value () + {]b4_parse_assert_if([[ + check_ ();]])[ + return yyval; + } + + const value_type& value () const + {]b4_parse_assert_if([[ + check_ ();]])[ + return yyval; + } + + void + destroy (char const *yymsg, ]b4_namespace_ref[::]b4_parser_class[& yyparser); + + /* DEBUGGING ONLY */ + #if ]b4_api_PREFIX[DEBUG + void yy_yypstack () const + {]b4_parse_assert_if([[ + check_ ();]])[ + if (pred () != YY_NULLPTR) + { + pred ()->yy_yypstack (); + std::cerr << " -> "; + } + std::cerr << yylrState << "@@" << yyposn; + } + #endif + + std::ptrdiff_t indexIn (const glr_stack_item* array) const YY_ATTRIBUTE_UNUSED; + + glr_stack_item* asItem () + {]b4_parse_assert_if([[ + check_ ();]])[ + return asItem(this); + } + + const glr_stack_item* asItem () const + {]b4_parse_assert_if([[ + check_ ();]])[ + return asItem (this); + } + + private: + template + static const glr_stack_item* asItem (const T* state) + { + return reinterpret_cast(state); + } + template + static glr_stack_item* asItem (T* state) + { + return reinterpret_cast (state); + } + static const char *as_pointer_ (const glr_state *state) + { + return reinterpret_cast (state); + } + static char *as_pointer_ (glr_state *state) + { + return reinterpret_cast (state); + } + /** Preceding state in this stack */ + std::ptrdiff_t yypred; + union { + /** First in a chain of alternative reductions producing the + * nonterminal corresponding to this state, threaded through + * yyfirstVal. Value "0" means empty. */ + std::ptrdiff_t yyfirstVal; + /** Semantic value for this state. */ + value_type yyval; + };]b4_locations_if([[ + // FIXME: Why public? + public: + /** Source location for this state. */ + location_type yyloc;]])[ + +]b4_parse_assert_if([[ + public: + // Check invariants. 
+ void check_ () const + { + YY_IGNORE_NULL_DEREFERENCE_BEGIN + YYASSERT (this->magic_ == MAGIC); + YY_IGNORE_NULL_DEREFERENCE_END + } + + // A magic number to check our pointer arithmetic is sane. + enum { MAGIC = 713705 }; + unsigned int magic_;]])[ + }; // class ]b4_parser_class[::glr_state +} // namespace ]b4_namespace_ref[ + + +namespace +{ + /** A stack of GLRState representing the different heads during + * nondeterministic evaluation. */ + class glr_state_set + { + public: + /** Initialize YYSET to a singleton set containing an empty stack. */ + glr_state_set () + : yylastDeleted (YY_NULLPTR) + { + yystates.push_back (YY_NULLPTR); + yylookaheadNeeds.push_back (false); + } + + // Behave like a vector of states. + glr_state*& operator[] (state_set_index index) + { + return yystates[index.uget()]; + } + + glr_state* operator[] (state_set_index index) const + { + return yystates[index.uget()]; + } + + size_t size () const + { + return yystates.size (); + } + + std::vector::iterator begin () + { + return yystates.begin (); + } + + std::vector::iterator end () + { + return yystates.end (); + } + + bool lookaheadNeeds (state_set_index index) const + { + return yylookaheadNeeds[index.uget ()]; + } + + bool setLookaheadNeeds (state_set_index index, bool value) + { + return yylookaheadNeeds[index.uget ()] = value; + } + + /** Invalidate stack #YYK. */ + void + yymarkStackDeleted (state_set_index yyk) + { + size_t k = yyk.uget (); + if (yystates[k] != YY_NULLPTR) + yylastDeleted = yystates[k]; + yystates[k] = YY_NULLPTR; + } + + /** Undelete the last stack in *this that was marked as deleted. Can + only be done once after a deletion, and only when all other stacks have + been deleted. 
*/ + void + yyundeleteLastStack () + { + if (yylastDeleted == YY_NULLPTR || !yystates.empty ()) + return; + yystates.push_back (yylastDeleted); + YYCDEBUG << "Restoring last deleted stack as stack #0.\n"; + clearLastDeleted (); + } + + /** Remove the dead stacks (yystates[i] == YY_NULLPTR) and shift the later + * ones. */ + void + yyremoveDeletes () + { + size_t newsize = yystates.size (); + /* j is the number of live stacks we have seen. */ + for (size_t i = 0, j = 0; j < newsize; ++i) + { + if (yystates[i] == YY_NULLPTR) + { + if (i == j) + { + YYCDEBUG << "Removing dead stacks.\n"; + } + newsize -= 1; + } + else + { + yystates[j] = yystates[i]; + /* In the current implementation, it's unnecessary to copy + yylookaheadNeeds[i] since, after + yyremoveDeletes returns, the parser immediately either enters + deterministic operation or shifts a token. However, it doesn't + hurt, and the code might evolve to need it. */ + yylookaheadNeeds[j] = yylookaheadNeeds[i]; + if (j != i) + { + YYCDEBUG << "Rename stack " << i << " -> " << j << ".\n"; + } + j += 1; + } + } + yystates.resize (newsize); + yylookaheadNeeds.resize (newsize); + } + + + state_set_index + yysplitStack (state_set_index yyk) + { + const size_t k = yyk.uget (); + yystates.push_back (yystates[k]); + yylookaheadNeeds.push_back (yylookaheadNeeds[k]); + return create_state_set_index (static_cast (yystates.size () - 1)); + } + + void clearLastDeleted () + { + yylastDeleted = YY_NULLPTR; + } + + private: + + std::vector yystates; + /** During nondeterministic operation, yylookaheadNeeds tracks which + * stacks have actually needed the current lookahead. During deterministic + * operation, yylookaheadNeeds[0] is not maintained since it would merely + * duplicate !yyla.empty (). */ + std::vector yylookaheadNeeds; + + /** The last stack we invalidated. 
*/ + glr_state* yylastDeleted; + }; // class glr_state_set +} // namespace + +namespace +{ + class semantic_option + { + public: + semantic_option () + : yyrule (0) + , yystate (0) + , yynext (0) + , yyla ()]b4_parse_assert_if([[ + , magic_ (MAGIC)]])[ + {} + + semantic_option (rule_num rule) + : yyrule (rule) + , yystate (0) + , yynext (0) + , yyla ()]b4_parse_assert_if([[ + , magic_ (MAGIC)]])[ + {} + + semantic_option (const semantic_option& that) + : yyrule (that.yyrule) + , yystate (that.yystate) + , yynext (that.yynext) + , yyla (that.yyla)]b4_parse_assert_if([[ + , magic_ (MAGIC)]])[ + {]b4_parse_assert_if([[ + that.check_ ();]])[ + } + + // Needed for the assignment in yynewSemanticOption. + semantic_option& operator= (const semantic_option& that) + {]b4_parse_assert_if([[ + check_ (); + that.check_ ();]])[ + yyrule = that.yyrule; + yystate = that.yystate; + yynext = that.yynext; + yyla = that.yyla; + return *this; + } + + /// Only call state() and setState() on objects in yyitems, not temporaries. + glr_state* state(); + const glr_state* state() const; + void setState(const glr_state* s); + + const semantic_option* next () const YY_ATTRIBUTE_UNUSED; + semantic_option* next (); + void setNext (const semantic_option* s); + + std::ptrdiff_t indexIn (const glr_stack_item* array) const YY_ATTRIBUTE_UNUSED; + + /** True iff YYY0 and YYY1 represent identical options at the top level. + * That is, they represent the same rule applied to RHS symbols + * that produce the same terminal symbols. 
*/ + bool + isIdenticalTo (const semantic_option& yyy1) const + {]b4_parse_assert_if([[ + check_ (); + yyy1.check_ ();]])[ + if (this->yyrule == yyy1.yyrule) + { + const glr_state *yys0, *yys1; + int yyn; + for (yys0 = this->state(), + yys1 = yyy1.state(), + yyn = yyrhsLength (this->yyrule); + yyn > 0; + yys0 = yys0->pred(), + yys1 = yys1->pred(), yyn -= 1) + if (yys0->yyposn != yys1->yyposn) + return false; + return true; + } + else + return false; + } + + /** Assuming identicalOptions (YYY0,YYY1), destructively merge the + * alternative semantic values for the RHS-symbols of YYY1 and YYY0. */ + void + mergeWith (semantic_option& yyy1) + {]b4_parse_assert_if([[ + check_ (); + yyy1.check_ ();]])[ + glr_state *yys0 = this->state (); + glr_state *yys1 = yyy1.state (); + for (int yyn = yyrhsLength (this->yyrule); + yyn > 0; + yyn -= 1, yys0 = yys0->pred (), yys1 = yys1->pred ()) + { + if (yys0 == yys1) + break; + else if (yys0->yyresolved) + { + yys1->yyresolved = true;]b4_variant_if([[ + YYASSERT (yys1->yylrState == yys0->yylrState); + ]b4_symbol_variant([yy_accessing_symbol (yys0->yylrState)], + [yys1->value ()], [copy], [yys0->value ()])], [[ + yys1->value () = yys0->value ();]])[ + } + else if (yys1->yyresolved) + { + yys0->yyresolved = true;]b4_variant_if([[ + YYASSERT (yys0->yylrState == yys1->yylrState); + ]b4_symbol_variant([yy_accessing_symbol (yys1->yylrState)], + [yys0->value ()], [copy], [yys1->value ()])], [[ + yys0->value () = yys1->value ();]])[ + } + else + { + semantic_option* yyz0prev = YY_NULLPTR; + semantic_option* yyz0 = yys0->firstVal(); + semantic_option* yyz1 = yys1->firstVal(); + while (true) + { + if (yyz1 == yyz0 || yyz1 == YY_NULLPTR) + break; + else if (yyz0 == YY_NULLPTR) + { + if (yyz0prev != YY_NULLPTR) + yyz0prev->setNext (yyz1); + else + yys0->setFirstVal (yyz1); + break; + } + else if (yyz0 < yyz1) + { + semantic_option* yyz = yyz0; + if (yyz0prev != YY_NULLPTR) + yyz0prev->setNext(yyz1); + else + yys0->setFirstVal(yyz1); + yyz1 = 
yyz1->next(); + yyz0->setNext(yyz); + } + yyz0prev = yyz0; + yyz0 = yyz0->next(); + } + yys1->setFirstVal(yys0->firstVal()); + } + } + } + +#if ]b4_api_PREFIX[DEBUG + void yyreportTree (size_t yyindent = 2) const + {]b4_parse_assert_if([[ + check_ ();]])[ + int yynrhs = yyrhsLength (this->yyrule); + const glr_state* yystates[1 + YYMAXRHS]; + glr_state yyleftmost_state; + + { + const glr_state* yys = this->state(); + for (int yyi = yynrhs; 0 < yyi; yyi -= 1) + { + yystates[yyi] = yys; + yys = yys->pred(); + } + if (yys == YY_NULLPTR) + { + yyleftmost_state.yyposn = 0; + yystates[0] = &yyleftmost_state; + } + else + yystates[0] = yys; + } + + std::string yylhs = ]b4_namespace_ref[::]b4_parser_class[::symbol_name (yylhsNonterm (this->yyrule)); + YYASSERT(this->state()); + if (this->state()->yyposn < yystates[0]->yyposn + 1) + std::cerr << std::string(yyindent, ' ') << yylhs << " -> yyrule - 1 << ", empty>\n"; + else + std::cerr << std::string(yyindent, ' ') << yylhs << " -> yyrule - 1 << ", tokens " + << yystates[0]->yyposn + 1 << " .. " + << this->state()->yyposn << ">\n"; + for (int yyi = 1; yyi <= yynrhs; yyi += 1) + { + if (yystates[yyi]->yyresolved) + { + std::string yysym = ]b4_namespace_ref[::]b4_parser_class[::symbol_name (yy_accessing_symbol (yystates[yyi]->yylrState)); + if (yystates[yyi-1]->yyposn+1 > yystates[yyi]->yyposn) + std::cerr << std::string(yyindent + 2, ' ') << yysym + << " \n"; + else + std::cerr << std::string(yyindent + 2, ' ') << yysym + << " yyposn + 1 + << " .. " << yystates[yyi]->yyposn << ">\n"; + } + else + yystates[yyi]->firstVal ()->yyreportTree (yyindent+2); + } + } +#endif + + /** Rule number for this reduction */ + rule_num yyrule; + + private: + template + static const glr_stack_item* asItem(const T* state) + { + return reinterpret_cast(state); + } + template + static glr_stack_item* asItem(T* state) + { + return reinterpret_cast(state); + } + /** The last RHS state in the list of states to be reduced. 
*/ + std::ptrdiff_t yystate; + /** Next sibling in chain of options. To facilitate merging, + * options are chained in decreasing order by address. */ + std::ptrdiff_t yynext; + + public: + /** The lookahead for this reduction. */ + symbol_type yyla; + +]b4_parse_assert_if([[ + public: + // Check invariants. + void check_ () const + { + YY_IGNORE_NULL_DEREFERENCE_BEGIN + YYASSERT (this->magic_ == MAGIC); + YY_IGNORE_NULL_DEREFERENCE_END + } + + // A magic number to check our pointer arithmetic is sane. + enum { MAGIC = 0xeff1cace }; + unsigned int magic_;]])[ + }; // class semantic_option +} // namespace + +namespace +{ + /** Type of the items in the GLR stack. + * It can be either a glr_state or a semantic_option. The is_state_ field + * indicates which item of the union is valid. */ + class glr_stack_item + { + public: + glr_stack_item (bool state = true) + : is_state_ (state)]b4_parse_assert_if([[ + , magic_ (MAGIC)]])[ + { + if (is_state_) + new (&raw_) glr_state; + else + new (&raw_) semantic_option; + } + + glr_stack_item (const glr_stack_item& other) YY_NOEXCEPT YY_NOTHROW + : is_state_ (other.is_state_)]b4_parse_assert_if([[ + , magic_ (MAGIC)]])[ + {]b4_parse_assert_if([[ + other.check_ ();]])[ + std::memcpy (raw_, other.raw_, union_size); + } + + glr_stack_item& operator= (glr_stack_item other) + {]b4_parse_assert_if([[ + check_ (); + other.check_ ();]])[ + std::swap (is_state_, other.is_state_); + std::swap (raw_, other.raw_); + return *this; + } + + ~glr_stack_item () + {]b4_parse_assert_if([[ + check_ ();]])[ + if (is_state ()) + getState ().~glr_state (); + else + getOption ().~semantic_option (); + } + + void setState (const glr_state &state) + {]b4_parse_assert_if([[ + check_ (); + state.check_ ();]])[ + if (this != state.asItem ()) + { + if (is_state_) + getState ().~glr_state (); + else + getOption ().~semantic_option (); + new (&raw_) glr_state (state); + is_state_ = true; + } + } + + glr_state& getState () + {]b4_parse_assert_if([[ + check_ 
();]])[ + YYDASSERT (is_state ()); + void *yyp = raw_; + glr_state& res = *static_cast (yyp);]b4_parse_assert_if([[ + res.check_ ();]])[ + return res; + } + + const glr_state& getState () const + {]b4_parse_assert_if([[ + check_ ();]])[ + YYDASSERT (is_state ()); + const void *yyp = raw_; + const glr_state& res = *static_cast (yyp);]b4_parse_assert_if([[ + res.check_ ();]])[ + return res; + } + + semantic_option& getOption () + {]b4_parse_assert_if([[ + check_ ();]])[ + YYDASSERT (!is_state ()); + void *yyp = raw_; + return *static_cast (yyp); + } + const semantic_option& getOption () const + {]b4_parse_assert_if([[ + check_ ();]])[ + YYDASSERT (!is_state ()); + const void *yyp = raw_; + return *static_cast (yyp); + } + bool is_state () const + {]b4_parse_assert_if([[ + check_ ();]])[ + return is_state_; + } + + private: + /// The possible contents of raw_. Since they have constructors, they cannot + /// be directly included in the union. + union contents + { + char yystate[sizeof (glr_state)]; + char yyoption[sizeof (semantic_option)]; + }; + enum { union_size = sizeof (contents) }; + union { + /// Strongest alignment constraints. + long double yyalign_me; + /// A buffer large enough to store the contents. + char raw_[union_size]; + }; + /** Type tag for the union. */ + bool is_state_; +]b4_parse_assert_if([[ + public: + // Check invariants. + void check_ () const + { + YYASSERT (this->magic_ == MAGIC); + YYASSERT (this->is_state_ == false || this->is_state_ == true); + } + // A magic number to check our pointer arithmetic is sane. + enum { MAGIC = 0xDEAD1ACC }; // 3735886540. + const unsigned int magic_;]])[ + }; // class glr_stack_item +} // namespace + +glr_state* glr_state::pred () +{]b4_parse_assert_if([[ + check_ ();]])[ + YY_IGNORE_NULL_DEREFERENCE_BEGIN + return yypred ? 
&asItem (as_pointer_ (this) - yypred)->getState () : YY_NULLPTR; + YY_IGNORE_NULL_DEREFERENCE_END +} + +const glr_state* glr_state::pred () const +{]b4_parse_assert_if([[ + check_ ();]])[ + YY_IGNORE_NULL_DEREFERENCE_BEGIN + return yypred ? &asItem (as_pointer_ (this) - yypred)->getState () : YY_NULLPTR; + YY_IGNORE_NULL_DEREFERENCE_END +} + +void glr_state::setPred (const glr_state* state) +{]b4_parse_assert_if([[ + check_ (); + if (state) + state->check_ ();]])[ + yypred = state ? as_pointer_ (this) - as_pointer_ (state) : 0; +} + +semantic_option* glr_state::firstVal () +{]b4_parse_assert_if([[ + check_ ();]])[ + return yyfirstVal ? &(asItem(this) - yyfirstVal)->getOption() : YY_NULLPTR; +} + +const semantic_option* glr_state::firstVal () const +{]b4_parse_assert_if([[ + check_ ();]])[ + return yyfirstVal ? &(asItem(this) - yyfirstVal)->getOption() : YY_NULLPTR; +} + +void glr_state::setFirstVal (const semantic_option* option) +{]b4_parse_assert_if([[ + check_ ();]])[ + yyfirstVal = option ? asItem(this) - asItem(option) : 0; +} + +std::ptrdiff_t glr_state::indexIn (const glr_stack_item* array) const +{]b4_parse_assert_if([[ + check_ ();]])[ + return asItem(this) - array; +} + +std::ptrdiff_t semantic_option::indexIn (const glr_stack_item* array) const +{ + return asItem(this) - array; +} + +glr_state* semantic_option::state () +{ + YY_IGNORE_NULL_DEREFERENCE_BEGIN + return yystate ? &(asItem(this) - yystate)->getState() : YY_NULLPTR; + YY_IGNORE_NULL_DEREFERENCE_END +} + +const glr_state* semantic_option::state () const +{ + return yystate ? &(asItem(this) - yystate)->getState() : YY_NULLPTR; +} + +void semantic_option::setState (const glr_state* s) +{ + yystate = s ? asItem(this) - asItem(s) : 0; +} + +const semantic_option* semantic_option::next () const +{ + return yynext ? &(asItem(this) - yynext)->getOption() : YY_NULLPTR; +} + +semantic_option* semantic_option::next () +{ + return yynext ? 
&(asItem(this) - yynext)->getOption() : YY_NULLPTR; +} + +void semantic_option::setNext (const semantic_option* s) +{ + yynext = s ? asItem(this) - asItem(s) : 0; +} + +void glr_state::destroy (char const* yymsg, ]b4_namespace_ref[::]b4_parser_class[& yyparser) +{]b4_parse_assert_if([[ + check_ ();]])[ + if (yyresolved) + yyparser.yy_destroy_ (yymsg, yy_accessing_symbol(yylrState), + value ()]b4_locations_if([, yyloc])[); + else + { +#if ]b4_api_PREFIX[DEBUG + YYCDEBUG << yymsg + << (firstVal() ? " unresolved " : " incomplete ") + << (yy_accessing_symbol (yylrState) < YYNTOKENS ? "token" : "nterm") + << ' ' << yyparser.symbol_name (yy_accessing_symbol (yylrState)) + << " ("]b4_locations_if([[ + << yyloc << ": "]])[ + << ")\n"; +#endif + if (firstVal() != YY_NULLPTR) + { + semantic_option& yyoption = *firstVal (); + glr_state *yyrh = yyoption.state (); + for (int yyn = yyrhsLength (yyoption.yyrule); yyn > 0; yyn -= 1) + { + yyrh->destroy (yymsg, yyparser); + yyrh = yyrh->pred(); + } + } + } +} + + +#undef YYFILL +#define YYFILL(N) yyfill (yyvsp, yylow, (N), yynormal) + +namespace +{ + class state_stack + { + public: + using parser_type = ]b4_namespace_ref[::]b4_parser_class[; + using symbol_kind = parser_type::symbol_kind; + using value_type = parser_type::value_type;]b4_locations_if([[ + using location_type = parser_type::location_type;]])[ + + /** Initialize to a single empty stack, with total maximum + * capacity for all stacks of YYSIZE. */ + state_stack (size_t yysize) + : yysplitPoint (YY_NULLPTR) + { + yyitems.reserve (yysize); + } + +#if YYSTACKEXPANDABLE + /** Returns false if it tried to expand but could not. */ + bool + yyexpandGLRStackIfNeeded () + { + return YYHEADROOM <= spaceLeft () || yyexpandGLRStack (); + } + + private: + /** If *this is expandable, extend it. WARNING: Pointers into the + stack from outside should be considered invalid after this call. 
+ We always expand when there are 1 or fewer items left AFTER an + allocation, so that we can avoid having external pointers exist + across an allocation. */ + bool + yyexpandGLRStack () + { + const size_t oldsize = yyitems.size(); + if (YYMAXDEPTH - YYHEADROOM < oldsize) + return false; + const size_t yynewSize = YYMAXDEPTH < 2 * oldsize ? YYMAXDEPTH : 2 * oldsize; + const glr_stack_item *oldbase = &yyitems[0]; + + yyitems.reserve (yynewSize); + const glr_stack_item *newbase = &yyitems[0]; + + // Adjust the pointers. Perform raw pointer arithmetic, as there + // is no reason for objects to be aligned on their size. + const ptrdiff_t disp + = reinterpret_cast (newbase) - reinterpret_cast (oldbase); + if (yysplitPoint) + const_cast (yysplitPoint) + = reinterpret_cast (reinterpret_cast (const_cast (yysplitPoint)) + disp); + + for (std::vector::iterator + i = yytops.begin (), + yyend = yytops.end (); + i != yyend; ++i) + if (glr_state_not_null (*i)) + *i = reinterpret_cast(reinterpret_cast(*i) + disp); + + return true; + } + + public: +#else + bool yyexpandGLRStackIfNeeded () + { + return YYHEADROOM <= spaceLeft (); + } +#endif +#undef YYSTACKEXPANDABLE + + static bool glr_state_not_null (glr_state* s) + { + return s != YY_NULLPTR; + } + + bool + reduceToOneStack () + { + using iterator = std::vector::iterator; + const iterator yybegin = yytops.begin(); + const iterator yyend = yytops.end(); + const iterator yyit = std::find_if(yybegin, yyend, glr_state_not_null); + if (yyit == yyend) + return false; + for (state_set_index yyk = create_state_set_index(yyit + 1 - yybegin); + yyk.uget() != numTops(); ++yyk) + yytops.yymarkStackDeleted (yyk); + yytops.yyremoveDeletes (); + yycompressStack (); + return true; + } + + /** Called when returning to deterministic operation to clean up the extra + * stacks. */ + void + yycompressStack () + { + if (yytops.size() != 1 || !isSplit()) + return; + + // yyr is the state after the split point. 
+ glr_state* yyr = YY_NULLPTR; + for (glr_state *yyp = firstTop(), *yyq = yyp->pred(); + yyp != yysplitPoint; + yyr = yyp, yyp = yyq, yyq = yyp->pred()) + yyp->setPred(yyr); + + // This const_cast is okay, since anyway we have access to the mutable + // yyitems into which yysplitPoint points. + glr_stack_item* nextFreeItem + = const_cast (yysplitPoint)->asItem () + 1; + yysplitPoint = YY_NULLPTR; + yytops.clearLastDeleted (); + + while (yyr != YY_NULLPTR) + { + nextFreeItem->setState (*yyr); + glr_state& nextFreeState = nextFreeItem->getState(); + yyr = yyr->pred(); + nextFreeState.setPred(&(nextFreeItem - 1)->getState()); + setFirstTop (&nextFreeState); + ++nextFreeItem; + } + yyitems.resize(static_cast(nextFreeItem - yyitems.data())); + } + + bool isSplit() const { + return yysplitPoint != YY_NULLPTR; + } + + // Present the interface of a vector of glr_stack_item. + std::vector::const_iterator begin () const + { + return yyitems.begin (); + } + + std::vector::const_iterator end () const + { + return yyitems.end (); + } + + size_t size() const + { + return yyitems.size (); + } + + glr_stack_item& operator[] (size_t i) + { + return yyitems[i]; + } + + glr_stack_item& stackItemAt (size_t index) + { + return yyitems[index]; + } + + size_t numTops () const + { + return yytops.size (); + } + + glr_state* firstTop () const + { + return yytops[create_state_set_index (0)]; + } + + glr_state* topAt (state_set_index i) const + { + return yytops[i]; + } + + void setFirstTop (glr_state* value) + { + yytops[create_state_set_index (0)] = value; + } + + void setTopAt (state_set_index i, glr_state* value) + { + yytops[i] = value; + } + + void pop_back () + { + yyitems.pop_back (); + } + + void pop_back (size_t n) + { + yyitems.resize (yyitems.size () - n); + } + + state_set_index + yysplitStack (state_set_index yyk) + { + if (!isSplit ()) + { + YYASSERT (yyk.get () == 0); + yysplitPoint = topAt (yyk); + } + return yytops.yysplitStack (yyk); + } + + /** Assuming that YYS is a 
GLRState somewhere on *this, update the + * splitpoint of *this, if needed, so that it is at least as deep as + * YYS. */ + void + yyupdateSplit (glr_state& yys) + { + if (isSplit() && &yys < yysplitPoint) + yysplitPoint = &yys; + } + + /** Return a fresh GLRState. + * Callers should call yyreserveStack afterwards to make sure there is + * sufficient headroom. */ + glr_state& yynewGLRState (const glr_state& newState) + { + glr_state& state = yyitems[yynewGLRStackItem (true)].getState (); +#if false && 201103L <= YY_CPLUSPLUS + state = std::move (newState); +#else + state = newState; +#endif + return state; + } + + /** Return a fresh SemanticOption. + * Callers should call yyreserveStack afterwards to make sure there is + * sufficient headroom. */ + semantic_option& yynewSemanticOption (semantic_option newOption) + { + semantic_option& option = yyitems[yynewGLRStackItem (false)].getOption (); + option = std::move (newOption); + return option; + } + + /* Do nothing if YYNORMAL or if *YYLOW <= YYLOW1. Otherwise, fill in + * YYVSP[YYLOW1 .. *YYLOW-1] as in yyfillin and set *YYLOW = YYLOW1. + * For convenience, always return YYLOW1. */ + int + yyfill (glr_stack_item *yyvsp, int &yylow, int yylow1, bool yynormal) + { + if (!yynormal && yylow1 < yylow) + { + yyfillin (yyvsp, yylow, yylow1); + yylow = yylow1; + } + return yylow1; + } + + /** Fill in YYVSP[YYLOW1 .. YYLOW0-1] from the chain of states starting + * at YYVSP[YYLOW0].getState().pred(). Leaves YYVSP[YYLOW1].getState().pred() + * containing the pointer to the next state in the chain. 
*/ + void + yyfillin (glr_stack_item *yyvsp, int yylow0, int yylow1) + { + glr_state* s = yyvsp[yylow0].getState().pred(); + YYASSERT(s != YY_NULLPTR); + for (int i = yylow0-1; i >= yylow1; i -= 1, s = s->pred()) + { + glr_state& yys = yyvsp[i].getState(); +#if ]b4_api_PREFIX[DEBUG + yys.yylrState = s->yylrState; +#endif + yys.yyresolved = s->yyresolved; + if (s->yyresolved) + {]b4_variant_if([[ + new (&yys.value ()) value_type (); + ]b4_symbol_variant([yy_accessing_symbol (s->yylrState)], + [yys.value ()], [copy], [s->value ()])], [[ + new (&yys.value ()) value_type (s->value ());]])[ + } + else + /* The effect of using yyval or yyloc (in an immediate + * rule) is undefined. */ + yys.setFirstVal (YY_NULLPTR);]b4_locations_if([[ + yys.yyloc = s->yyloc;]])[ + yys.setPred(s->pred()); + } + } + +#if ]b4_api_PREFIX[DEBUG + + /*----------------------------------------------------------------------. + | Report that stack #YYK of *YYSTACKP is going to be reduced by YYRULE. | + `----------------------------------------------------------------------*/ + + void + yy_reduce_print (bool yynormal, glr_stack_item* yyvsp, state_set_index yyk, + rule_num yyrule, parser_type& yyparser) + { + int yynrhs = yyrhsLength (yyrule);]b4_locations_if([ + int yylow = 1;])[ + int yyi; + std::cerr << "Reducing stack " << yyk.get() << " by rule " << yyrule - 1 + << " (line " << int (yyrline[yyrule]) << "):\n"; + if (! yynormal) + yyfillin (yyvsp, 1, -yynrhs); + /* The symbols being reduced. */ + for (yyi = 0; yyi < yynrhs; yyi++) + { + std::cerr << " $" << yyi + 1 << " = "; + yyparser.yy_symbol_print_ + (yy_accessing_symbol (yyvsp[yyi - yynrhs + 1].getState().yylrState), + yyvsp[yyi - yynrhs + 1].getState().value ()]b4_locations_if([[, + ]b4_rhs_location(yynrhs, yyi + 1)])[); + if (!yyvsp[yyi - yynrhs + 1].getState().yyresolved) + std::cerr << " (unresolved)"; + std::cerr << '\n'; + } + } + + +#define YYINDEX(YYX) \ + ((YYX) == YY_NULLPTR ? 
-1 : (YYX)->indexIn (yyitems.data ())) + + void + dumpStack () const + { + for (size_t yyi = 0; yyi < size(); ++yyi) + { + const glr_stack_item& item = yyitems[yyi]; + std::cerr << std::setw(3) << yyi << ". "; + if (item.is_state()) + { + std::cerr << "Res: " << item.getState().yyresolved + << ", LR State: " << item.getState().yylrState + << ", posn: " << item.getState().yyposn + << ", pred: " << YYINDEX(item.getState().pred()); + if (! item.getState().yyresolved) + std::cerr << ", firstVal: " + << YYINDEX(item.getState().firstVal()); + } + else + { + std::cerr << "Option. rule: " << item.getOption().yyrule - 1 + << ", state: " << YYINDEX(item.getOption().state()) + << ", next: " << YYINDEX(item.getOption().next()); + } + std::cerr << '\n'; + } + std::cerr << "Tops:"; + for (state_set_index yyi = create_state_set_index(0); yyi.uget() < numTops(); ++yyi) { + std::cerr << yyi.get() << ": " << YYINDEX(topAt(yyi)) << "; "; + } + std::cerr << '\n'; + } + +#undef YYINDEX +#endif + + YYRESULTTAG + yyreportAmbiguity (const semantic_option& yyx0, + const semantic_option& yyx1, parser_type& yyparser]b4_locations_if([, const location_type& yyloc])[) + { + YY_USE (yyx0); + YY_USE (yyx1); + +#if ]b4_api_PREFIX[DEBUG + std::cerr << "Ambiguity detected.\n" + "Option 1,\n"; + yyx0.yyreportTree (); + std::cerr << "\nOption 2,\n"; + yyx1.yyreportTree (); + std::cerr << '\n'; +#endif + + yyparser.error (]b4_locations_if([yyloc, ])[YY_("syntax is ambiguous")); + return yyabort; + } + +#if ]b4_api_PREFIX[DEBUG + /* Print YYS (possibly NULL) and its predecessors. */ + void + yypstates (const glr_state* yys) const + { + if (yys != YY_NULLPTR) + yys->yy_yypstack(); + else + std::cerr << ""; + std::cerr << '\n'; + } +#endif + + private: + size_t spaceLeft() const + { + return yyitems.capacity() - yyitems.size(); + } + + /** Return a fresh GLRStackItem in this. The item is an LR state + * if YYIS_STATE, and otherwise a semantic option. 
Callers should call + * yyreserveStack afterwards to make sure there is sufficient + * headroom. */ + size_t + yynewGLRStackItem (bool yyis_state) + { + YYDASSERT(yyitems.size() < yyitems.capacity()); + yyitems.push_back(glr_stack_item(yyis_state)); + return yyitems.size() - 1; + } + + + public: + std::vector yyitems; + // Where the stack splits. Anything below this address is deterministic. + const glr_state* yysplitPoint; + glr_state_set yytops; + }; // class state_stack +} // namespace + +#undef YYFILL +#define YYFILL(N) yystateStack.yyfill (yyvsp, yylow, (N), yynormal) + +namespace ]b4_namespace_ref[ +{ + class ]b4_parser_class[::glr_stack + { + public: +]b4_parse_error_bmatch([custom\|detailed\|verbose], [[ + // Needs access to yypact_value_is_default, etc. + friend context; +]])[ + + glr_stack (size_t yysize, parser_type& yyparser_yyarg]m4_ifset([b4_parse_param], [, b4_parse_param_decl])[) + : yyerrState (0) + , yystateStack (yysize) + , yyerrcnt (0) + , yyla () + , yyparser (yyparser_yyarg)]m4_ifset([b4_parse_param], [,b4_parse_param_cons])[ + {} + + ~glr_stack () + { + if (!this->yyla.empty ()) + yyparser.yy_destroy_ ("Cleanup: discarding lookahead", + this->yyla.kind (), this->yyla.value]b4_locations_if([, this->yyla.location])[); + popall_ (); + } + + int yyerrState; +]b4_locations_if([[ /* To compute the location of the error token. 
*/ + glr_stack_item yyerror_range[3];]])[ + state_stack yystateStack; + int yyerrcnt; + symbol_type yyla; + YYJMP_BUF yyexception_buffer; + parser_type& yyparser; + + #define YYCHK1(YYE) \ + do { \ + switch (YYE) { \ + case yyok: \ + break; \ + case yyabort: \ + goto yyabortlab; \ + case yyaccept: \ + goto yyacceptlab; \ + case yyerr: \ + goto yyuser_error; \ + default: \ + goto yybuglab; \ + } \ + } while (false) + + int + parse () + { + int yyresult; + size_t yyposn; + + YYCDEBUG << "Starting parse\n"; + + this->yyla.clear (); +]m4_ifdef([b4_initial_action], [ +b4_dollar_pushdef([yyla.value], [], [], [yyla.location])dnl + b4_user_initial_action +b4_dollar_popdef])[]dnl +[ + switch (YYSETJMP (this->yyexception_buffer)) + { + case 0: break; + case 1: goto yyabortlab; + case 2: goto yyexhaustedlab; + default: goto yybuglab; + } + this->yyglrShift (create_state_set_index(0), 0, 0, this->yyla.value]b4_locations_if([, this->yyla.location])[); + yyposn = 0; + + while (true) + { + /* For efficiency, we have two loops, the first of which is + specialized to deterministic operation (single stack, no + potential ambiguity). 
*/ + /* Standard mode */ + while (true) + { + const state_num yystate = this->firstTopState()->yylrState; + YYCDEBUG << "Entering state " << yystate << '\n'; + if (yystate == YYFINAL) + goto yyacceptlab; + if (yy_is_defaulted_state (yystate)) + { + const rule_num yyrule = yy_default_action (yystate); + if (yyrule == 0) + {]b4_locations_if([[ + this->yyerror_range[1].getState().yyloc = this->yyla.location;]])[ + this->yyreportSyntaxError (); + goto yyuser_error; + } + YYCHK1 (this->yyglrReduce (create_state_set_index(0), yyrule, true)); + } + else + { + yyget_token (); + const short* yyconflicts; + const int yyaction = yygetLRActions (yystate, this->yyla.kind (), yyconflicts); + if (*yyconflicts != 0) + break; + if (yy_is_shift_action (yyaction)) + { + YY_SYMBOL_PRINT ("Shifting", this->yyla.kind (), this->yyla.value, this->yyla.location); + yyposn += 1; + // FIXME: we should move yylval. + this->yyglrShift (create_state_set_index(0), yyaction, yyposn, this->yyla.value]b4_locations_if([, this->yyla.location])[); + yyla.clear (); + if (0 < this->yyerrState) + this->yyerrState -= 1; + } + else if (yy_is_error_action (yyaction)) + {]b4_locations_if([[ + this->yyerror_range[1].getState().yyloc = this->yyla.location;]])[ + /* Don't issue an error message again for exceptions + thrown from the scanner. */ + if (this->yyla.kind () != ]b4_symbol(error, kind)[) + this->yyreportSyntaxError (); + goto yyuser_error; + } + else + YYCHK1 (this->yyglrReduce (create_state_set_index(0), -yyaction, true)); + } + } + + while (true) + { + for (state_set_index yys = create_state_set_index(0); yys.uget() < this->yystateStack.numTops(); ++yys) + this->yystateStack.yytops.setLookaheadNeeds(yys, !this->yyla.empty ()); + + /* yyprocessOneStack returns one of three things: + + - An error flag. If the caller is yyprocessOneStack, it + immediately returns as well. When the caller is finally + yyparse, it jumps to an error label via YYCHK1. 
+ + - yyok, but yyprocessOneStack has invoked yymarkStackDeleted + (yys), which sets the top state of yys to NULL. Thus, + yyparse's following invocation of yyremoveDeletes will remove + the stack. + + - yyok, when ready to shift a token. + + Except in the first case, yyparse will invoke yyremoveDeletes and + then shift the next token onto all remaining stacks. This + synchronization of the shift (that is, after all preceding + reductions on all stacks) helps prevent double destructor calls + on yylval in the event of memory exhaustion. */ + + for (state_set_index yys = create_state_set_index (0); yys.uget () < this->yystateStack.numTops (); ++yys) + YYCHK1 (this->yyprocessOneStack (yys, yyposn]b4_locations_if([, &this->yyla.location])[)); + this->yystateStack.yytops.yyremoveDeletes (); + if (this->yystateStack.yytops.size() == 0) + { + this->yystateStack.yytops.yyundeleteLastStack (); + if (this->yystateStack.yytops.size() == 0) + this->yyFail (]b4_locations_if([&this->yyla.location, ])[YY_("syntax error")); + YYCHK1 (this->yyresolveStack ()); + YYCDEBUG << "Returning to deterministic operation.\n";]b4_locations_if([[ + this->yyerror_range[1].getState ().yyloc = this->yyla.location;]])[ + this->yyreportSyntaxError (); + goto yyuser_error; + } + + /* If any yyglrShift call fails, it will fail after shifting. Thus, + a copy of yylval will already be on stack 0 in the event of a + failure in the following loop. Thus, yyla is emptied + before the loop to make sure the user destructor for yylval isn't + called twice. 
*/ + symbol_kind_type yytoken_to_shift = this->yyla.kind (); + this->yyla.kind_ = ]b4_symbol(empty, kind)[; + yyposn += 1; + for (state_set_index yys = create_state_set_index (0); yys.uget () < this->yystateStack.numTops (); ++yys) + { + const state_num yystate = this->topState (yys)->yylrState; + const short* yyconflicts; + const int yyaction = yygetLRActions (yystate, yytoken_to_shift, yyconflicts); + /* Note that yyconflicts were handled by yyprocessOneStack. */ + YYCDEBUG << "On stack " << yys.get() << ", "; + YY_SYMBOL_PRINT ("shifting", yytoken_to_shift, this->yyla.value, this->yyla.location); + this->yyglrShift (yys, yyaction, yyposn, this->yyla.value]b4_locations_if([, this->yyla.location])[); + YYCDEBUG << "Stack " << yys.get() << " now in state " + << this->topState(yys)->yylrState << '\n'; + } +]b4_variant_if([[ + // FIXME: User destructors. + // Value type destructor. + ]b4_symbol_variant([[yytoken_to_shift]], [[this->yyla.value]], [[template destroy]])])[ + + if (this->yystateStack.yytops.size () == 1) + { + YYCHK1 (this->yyresolveStack ()); + YYCDEBUG << "Returning to deterministic operation.\n"; + this->yystateStack.yycompressStack (); + break; + } + } + continue; + yyuser_error: + this->yyrecoverSyntaxError (]b4_locations_if([&this->yyla.location])[); + yyposn = this->firstTopState()->yyposn; + } + + yyacceptlab: + yyresult = 0; + goto yyreturn; + + yybuglab: + YYASSERT (false); + goto yyabortlab; + + yyabortlab: + yyresult = 1; + goto yyreturn; + + yyexhaustedlab: + yyparser.error (]b4_locations_if([this->yyla.location, ])[YY_("memory exhausted")); + yyresult = 2; + goto yyreturn; + + yyreturn: + return yyresult; + } + #undef YYCHK1 + + void yyreserveGlrStack () + { + if (!yystateStack.yyexpandGLRStackIfNeeded ()) + yyMemoryExhausted (); + } + + _Noreturn void + yyMemoryExhausted () + { + YYLONGJMP (yyexception_buffer, 2); + } + + _Noreturn void + yyFail (]b4_locations_if([location_type* yylocp, ])[const char* yymsg) + { + if (yymsg != YY_NULLPTR) 
+ yyparser.error (]b4_locations_if([*yylocp, ])[yymsg); + YYLONGJMP (yyexception_buffer, 1); + } + + /* GLRStates */ + + + /** Add a new semantic action that will execute the action for rule + * YYRULE on the semantic values in YYRHS to the list of + * alternative actions for YYSTATE. Assumes that YYRHS comes from + * stack #YYK of *this. */ + void + yyaddDeferredAction (state_set_index yyk, glr_state* yystate, + glr_state* yyrhs, rule_num yyrule) + { + semantic_option& yyopt = yystateStack.yynewSemanticOption (semantic_option (yyrule)); + yyopt.setState (yyrhs); + yyopt.setNext (yystate->firstVal ()); + if (yystateStack.yytops.lookaheadNeeds (yyk)) + yyopt.yyla = this->yyla; + yystate->setFirstVal (&yyopt); + + yyreserveGlrStack (); + } + + #if ]b4_api_PREFIX[DEBUG + void yypdumpstack () const + { + yystateStack.dumpStack(); + } + #endif + + void + yyreportSyntaxError () + { + if (yyerrState != 0) + return; +]b4_parse_error_case( +[simple], [[ + std::string msg = YY_("syntax error"); + yyparser.error (]b4_join(b4_locations_if([yyla.location]), [[YY_MOVE (msg)]])[);]], +[custom], [[ + context yyctx (*this, yyla); + yyparser.report_syntax_error (yyctx);]], +[[ + context yyctx (*this, yyla); + std::string msg = yyparser.yysyntax_error_ (yyctx); + yyparser.error (]b4_join(b4_locations_if([yyla.location]), [[YY_MOVE (msg)]])[);]])[ + yyerrcnt += 1; + } + + /* Recover from a syntax error on this, assuming that yytoken, + yylval, and yylloc are the syntactic category, semantic value, and location + of the lookahead. */ + void + yyrecoverSyntaxError (]b4_locations_if([location_type* yylocp])[) + { + if (yyerrState == 3) + /* We just shifted the error token and (perhaps) took some + reductions. Skip tokens until we can proceed. 
*/ + while (true) + { + if (this->yyla.kind () == ]b4_symbol(eof, kind)[) + yyFail (]b4_locations_if([yylocp, ])[YY_NULLPTR); + if (this->yyla.kind () != ]b4_symbol(empty, kind)[) + {]b4_locations_if([[ + /* We throw away the lookahead, but the error range + of the shifted error token must take it into account. */ + glr_state *yys = firstTopState(); + yyerror_range[1].getState().yyloc = yys->yyloc; + yyerror_range[2].getState().yyloc = this->yyla.location; + YYLLOC_DEFAULT ((yys->yyloc), yyerror_range, 2);]])[ + yyparser.yy_destroy_ ("Error: discarding", + this->yyla.kind (), this->yyla.value]b4_locations_if([, this->yyla.location])[);]b4_variant_if([[ + // Value type destructor. + ]b4_symbol_variant([[this->yyla.kind ()]], [[this->yyla.value]], [[template destroy]])])[ + this->yyla.kind_ = ]b4_symbol(empty, kind)[; + } + yyget_token (); + int yyj = yypact[firstTopState()->yylrState]; + if (yypact_value_is_default (yyj)) + return; + yyj += this->yyla.kind (); + if (yyj < 0 || YYLAST < yyj || yycheck[yyj] != this->yyla.kind ()) + { + if (yydefact[firstTopState()->yylrState] != 0) + return; + } + else if (! yytable_value_is_error (yytable[yyj])) + return; + } + + if (!yystateStack.reduceToOneStack()) + yyFail (]b4_locations_if([yylocp, ])[YY_NULLPTR); + + /* Now pop stack until we find a state that shifts the error token. */ + yyerrState = 3; + while (firstTopState () != YY_NULLPTR) + { + glr_state *yys = firstTopState (); + int yyj = yypact[yys->yylrState]; + if (! yypact_value_is_default (yyj)) + { + yyj += YYTERROR; + if (0 <= yyj && yyj <= YYLAST && yycheck[yyj] == YYTERROR + && yy_is_shift_action (yytable[yyj])) + { + /* Shift the error token. 
*/]b4_locations_if([[ + /* First adjust its location.*/ + location_type yyerrloc; + yyerror_range[2].getState().yyloc = this->yyla.location; + YYLLOC_DEFAULT (yyerrloc, (yyerror_range), 2);]])[ + YY_SYMBOL_PRINT ("Shifting", yy_accessing_symbol (yytable[yyj]), + this->yyla.value, yyerrloc); + yyglrShift (create_state_set_index(0), yytable[yyj], + yys->yyposn, yyla.value]b4_locations_if([, yyerrloc])[); + yys = firstTopState(); + break; + } + }]b4_locations_if([[ + yyerror_range[1].getState().yyloc = yys->yyloc;]])[ + if (yys->pred() != YY_NULLPTR) + yys->destroy ("Error: popping", yyparser); + yystateStack.setFirstTop(yys->pred()); + yystateStack.pop_back(); + } + if (firstTopState() == YY_NULLPTR) + yyFail (]b4_locations_if([yylocp, ])[YY_NULLPTR); + } + + YYRESULTTAG + yyprocessOneStack (state_set_index yyk, + size_t yyposn]b4_locations_if([, location_type* yylocp])[) + { + while (yystateStack.topAt(yyk) != YY_NULLPTR) + { + const state_num yystate = topState(yyk)->yylrState; + YYCDEBUG << "Stack " << yyk.get() + << " Entering state " << yystate << '\n'; + + YYASSERT (yystate != YYFINAL); + + if (yy_is_defaulted_state (yystate)) + { + const rule_num yyrule = yy_default_action (yystate); + if (yyrule == 0) + { + YYCDEBUG << "Stack " << yyk.get() << " dies.\n"; + yystateStack.yytops.yymarkStackDeleted (yyk); + return yyok; + } + const YYRESULTTAG yyflag + = yyglrReduce (yyk, yyrule, yyimmediate[yyrule]); + if (yyflag == yyerr) + { + YYCDEBUG << "Stack " << yyk.get() << " dies" + " (predicate failure or explicit user error).\n"; + yystateStack.yytops.yymarkStackDeleted (yyk); + return yyok; + } + if (yyflag != yyok) + return yyflag; + } + else + { + yystateStack.yytops.setLookaheadNeeds(yyk, true); + yyget_token (); + const short* yyconflicts; + const int yyaction = yygetLRActions (yystate, this->yyla.kind (), yyconflicts); + + for (; *yyconflicts != 0; ++yyconflicts) + { + state_set_index yynewStack = yystateStack.yysplitStack (yyk); + YYCDEBUG << "Splitting off 
stack " << yynewStack.get() + << " from " << yyk.get() << ".\n"; + YYRESULTTAG yyflag = + yyglrReduce (yynewStack, *yyconflicts, yyimmediate[*yyconflicts]); + if (yyflag == yyok) + YYCHK (yyprocessOneStack (yynewStack, + yyposn]b4_locations_if([, yylocp])[)); + else if (yyflag == yyerr) + { + YYCDEBUG << "Stack " << yynewStack.get() << " dies.\n"; + yystateStack.yytops.yymarkStackDeleted (yynewStack); + } + else + return yyflag; + } + + if (yy_is_shift_action (yyaction)) + break; + else if (yy_is_error_action (yyaction)) + { + YYCDEBUG << "Stack " << yyk.get() << " dies.\n"; + yystateStack.yytops.yymarkStackDeleted (yyk); + break; + } + else + { + YYRESULTTAG yyflag + = yyglrReduce (yyk, -yyaction, yyimmediate[-yyaction]); + if (yyflag == yyerr) + { + YYCDEBUG << "Stack " << yyk.get() << " dies" + " (predicate failure or explicit user error).\n"; + yystateStack.yytops.yymarkStackDeleted (yyk); + break; + } + else if (yyflag != yyok) + return yyflag; + } + } + } + return yyok; + } + + /** Perform user action for rule number YYN, with RHS length YYRHSLEN, + * and top stack item YYVSP. YYVALP points to place to put semantic + * value ($$), and yylocp points to place for location information + * (@@$). Returns yyok for normal return, yyaccept for YYACCEPT, + * yyerr for YYERROR, yyabort for YYABORT. 
*/ + YYRESULTTAG + yyuserAction (rule_num yyrule, int yyrhslen, glr_stack_item* yyvsp, state_set_index yyk, + value_type* yyvalp]b4_locations_if([, location_type* yylocp])[) + { + bool yynormal YY_ATTRIBUTE_UNUSED = !yystateStack.isSplit(); + int yylow = 1; +]b4_parse_param_use([yyvalp], [yylocp])dnl +[ YY_USE (yyk); + YY_USE (yyrhslen); + # undef yyerrok + # define yyerrok (yyerrState = 0) + # undef YYACCEPT + # define YYACCEPT return yyaccept + # undef YYABORT + # define YYABORT return yyabort + # undef YYERROR + # define YYERROR return yyerrok, yyerr + # undef YYRECOVERING + # define YYRECOVERING() (yyerrState != 0) + # undef yytoken + # define yytoken this->yyla.kind_ + # undef yyclearin + # define yyclearin (yytoken = ]b4_symbol(empty, kind)[) + # undef YYBACKUP + # define YYBACKUP(Token, Value) \ + return yyparser.error (]b4_locations_if([*yylocp, ])[YY_("syntax error: cannot back up")), \ + yyerrok, yyerr + +]b4_variant_if([[ + /* Variants are always initialized to an empty instance of the + correct type. The default '$$ = $1' action is NOT applied + when using variants. */ + // However we really need to prepare yyvsp now if we want to get + // correct locations, so invoke YYFILL for $1 anyway. + (void) YYFILL (1-yyrhslen); + ]b4_symbol_variant([[yylhsNonterm (yyrule)]], [(*yyvalp)], [emplace])], [[ + if (yyrhslen == 0) + *yyvalp = yyval_default; + else + *yyvalp = yyvsp[YYFILL (1-yyrhslen)].getState().value ();]])[]b4_locations_if([[ + /* Default location. */ + YYLLOC_DEFAULT ((*yylocp), (yyvsp - yyrhslen), yyrhslen); + yyerror_range[1].getState().yyloc = *yylocp; +]])[ + /* If yyk == -1, we are running a deferred action on a temporary + stack. In that case, YY_REDUCE_PRINT must not play with YYFILL, + so pretend the stack is "normal". 
*/ + YY_REDUCE_PRINT ((yynormal || yyk == create_state_set_index (-1), yyvsp, yyk, yyrule, yyparser)); + #if YY_EXCEPTIONS + try + { + #endif // YY_EXCEPTIONS + switch (yyrule) + { + ]b4_user_actions[ + default: break; + } + #if YY_EXCEPTIONS + } + catch (const syntax_error& yyexc) + { + YYCDEBUG << "Caught exception: " << yyexc.what() << '\n';]b4_locations_if([ + *yylocp = yyexc.location;])[ + yyparser.error (]b4_locations_if([*yylocp, ])[yyexc.what ()); + YYERROR; + } + #endif // YY_EXCEPTIONS + YY_SYMBOL_PRINT ("-> $$ =", yylhsNonterm (yyrule), *yyvalp, *yylocp); + + return yyok; + # undef yyerrok + # undef YYABORT + # undef YYACCEPT + # undef YYERROR + # undef YYBACKUP + # undef yytoken + # undef yyclearin + # undef YYRECOVERING + } + + YYRESULTTAG + yyresolveStack () + { + if (yystateStack.isSplit ()) + { + int yyn = 0; + for (glr_state* yys = firstTopState (); + yys != yystateStack.yysplitPoint; + yys = yys->pred ()) + yyn += 1; + YYCHK (yyresolveStates (*firstTopState (), yyn)); + } + return yyok; + } + + /** Pop the symbols consumed by reduction #YYRULE from the top of stack + * #YYK of *YYSTACKP, and perform the appropriate semantic action on their + * semantic values. Assumes that all ambiguities in semantic values + * have been previously resolved. Set *YYVALP to the resulting value, + * and *YYLOCP to the computed location (if any). Return value is as + * for userAction. */ + YYRESULTTAG + yydoAction (state_set_index yyk, rule_num yyrule, + value_type* yyvalp]b4_locations_if([, location_type* yylocp])[) + { + const int yynrhs = yyrhsLength (yyrule); + + if (!yystateStack.isSplit()) + { + /* Standard special case: single stack. 
*/ + YYASSERT (yyk.get() == 0); + glr_stack_item* yyrhs = yystateStack.firstTop()->asItem(); + const YYRESULTTAG res + = yyuserAction (yyrule, yynrhs, yyrhs, yyk, yyvalp]b4_locations_if([, yylocp])[); + yystateStack.pop_back(static_cast(yynrhs)); + yystateStack.setFirstTop(&yystateStack[yystateStack.size() - 1].getState()); + return res; + } + else + { + glr_stack_item yyrhsVals[YYMAXRHS + YYMAXLEFT + 1]; + glr_state* yys = yystateStack.topAt(yyk); + yyrhsVals[YYMAXRHS + YYMAXLEFT].getState().setPred(yys);]b4_locations_if([[ + if (yynrhs == 0) + /* Set default location. */ + yyrhsVals[YYMAXRHS + YYMAXLEFT - 1].getState().yyloc = yys->yyloc;]])[ + for (int yyi = 0; yyi < yynrhs; yyi += 1) + { + yys = yys->pred(); + YYASSERT (yys != YY_NULLPTR); + } + yystateStack.yyupdateSplit (*yys); + yystateStack.setTopAt(yyk, yys); + return yyuserAction (yyrule, yynrhs, yyrhsVals + YYMAXRHS + YYMAXLEFT - 1, + yyk, + yyvalp]b4_locations_if([, yylocp])[); + } + } + + /** Pop items off stack #YYK of *YYSTACKP according to grammar rule YYRULE, + * and push back on the resulting nonterminal symbol. Perform the + * semantic action associated with YYRULE and store its value with the + * newly pushed state, if YYFORCEEVAL or if *YYSTACKP is currently + * unambiguous. Otherwise, store the deferred semantic action with + * the new state. If the new state would have an identical input + * position, LR state, and predecessor to an existing state on the stack, + * it is identified with that existing state, eliminating stack #YYK from + * *YYSTACKP. In this case, the semantic value is + * added to the options for the existing state's semantic value. 
+ */ + YYRESULTTAG + yyglrReduce (state_set_index yyk, rule_num yyrule, bool yyforceEval) + { + size_t yyposn = topState(yyk)->yyposn; + + if (yyforceEval || !yystateStack.isSplit()) + { + value_type val;]b4_locations_if([[ + location_type loc;]])[ + + YYRESULTTAG yyflag = yydoAction (yyk, yyrule, &val]b4_locations_if([, &loc])[); + if (yyflag == yyerr && yystateStack.isSplit()) + {]b4_parse_trace_if([[ + YYCDEBUG << "Parse on stack " << yyk.get () + << " rejected by rule " << yyrule - 1 + << " (line " << int (yyrline[yyrule]) << ").\n"; + ]])[} + if (yyflag != yyok) + return yyflag; + yyglrShift (yyk, + yyLRgotoState (topState(yyk)->yylrState, + yylhsNonterm (yyrule)), + yyposn, val]b4_locations_if([, loc])[);]b4_variant_if([[ + // FIXME: User destructors. + // Value type destructor. + ]b4_symbol_variant([[yylhsNonterm (yyrule)]], [[val]], [[template destroy]])])[ + } + else + { + glr_state *yys = yystateStack.topAt(yyk); + glr_state *yys0 = yys; + for (int yyn = yyrhsLength (yyrule); 0 < yyn; yyn -= 1) + { + yys = yys->pred(); + YYASSERT (yys != YY_NULLPTR); + } + yystateStack.yyupdateSplit (*yys); + state_num yynewLRState = yyLRgotoState (yys->yylrState, yylhsNonterm (yyrule));]b4_parse_trace_if([[ + YYCDEBUG << "Reduced stack " << yyk.get () + << " by rule " << yyrule - 1 << " (line " << int (yyrline[yyrule]) + << "); action deferred. 
Now in state " << yynewLRState + << ".\n";]])[ + for (state_set_index yyi = create_state_set_index(0); yyi.uget() < yystateStack.numTops(); ++yyi) + if (yyi != yyk && yystateStack.topAt(yyi) != YY_NULLPTR) + { + const glr_state* yysplit = yystateStack.yysplitPoint; + glr_state* yyp = yystateStack.topAt(yyi); + while (yyp != yys && yyp != yysplit + && yyp->yyposn >= yyposn) + { + if (yyp->yylrState == yynewLRState + && yyp->pred() == yys) + { + yyaddDeferredAction (yyk, yyp, yys0, yyrule); + yystateStack.yytops.yymarkStackDeleted (yyk); + YYCDEBUG << "Merging stack " << yyk.get () + << " into stack " << yyi.get () << ".\n"; + return yyok; + } + yyp = yyp->pred(); + } + } + yystateStack.setTopAt(yyk, yys); + yyglrShiftDefer (yyk, yynewLRState, yyposn, yys0, yyrule); + } + return yyok; + } + + /** Shift stack #YYK of *YYSTACKP, to a new state corresponding to LR + * state YYLRSTATE, at input position YYPOSN, with the (unresolved) + * semantic value of YYRHS under the action for YYRULE. */ + void + yyglrShiftDefer (state_set_index yyk, state_num yylrState, + size_t yyposn, glr_state* yyrhs, rule_num yyrule) + { + glr_state& yynewState = yystateStack.yynewGLRState ( + glr_state (yylrState, yyposn)); + yynewState.setPred (yystateStack.topAt (yyk)); + yystateStack.setTopAt (yyk, &yynewState); + + /* Invokes yyreserveStack. */ + yyaddDeferredAction (yyk, &yynewState, yyrhs, yyrule); + } + + /** Shift to a new state on stack #YYK of *YYSTACKP, corresponding to LR + * state YYLRSTATE, at input position YYPOSN, with (resolved) semantic + * value YYVAL_ARG and source location YYLOC_ARG. 
*/ + void + yyglrShift (state_set_index yyk, state_num yylrState, + size_t yyposn, + const value_type& yyval_arg]b4_locations_if([, const location_type& yyloc_arg])[) + { + glr_state& yynewState = yystateStack.yynewGLRState ( + glr_state (yylrState, yyposn, yyval_arg]b4_locations_if([, yyloc_arg])[)); + yynewState.setPred (yystateStack.topAt(yyk)); + yystateStack.setTopAt (yyk, &yynewState); + yyreserveGlrStack (); + } + +#if ]b4_api_PREFIX[DEBUG + void + yypstack (state_set_index yyk) const + { + yystateStack.yypstates (yystateStack.topAt (yyk)); + } +#endif + + glr_state* topState(state_set_index i) { + return yystateStack.topAt(i); + } + + glr_state* firstTopState() { + return yystateStack.firstTop(); + } + + private: + + void popall_ () + { + /* If the stack is well-formed, pop the stack until it is empty, + destroying its entries as we go. But free the stack regardless + of whether it is well-formed. */ + for (state_set_index k = create_state_set_index(0); k.uget() < yystateStack.numTops(); k += 1) + if (yystateStack.topAt(k) != YY_NULLPTR) + { + while (yystateStack.topAt(k) != YY_NULLPTR) + { + glr_state* state = topState(k);]b4_locations_if([[ + yyerror_range[1].getState().yyloc = state->yyloc;]])[ + if (state->pred() != YY_NULLPTR) + state->destroy ("Cleanup: popping", yyparser); + yystateStack.setTopAt(k, state->pred()); + yystateStack.pop_back(); + } + break; + } + } + + /** Resolve the previous YYN states starting at and including state YYS + * on *YYSTACKP. If result != yyok, some states may have been left + * unresolved possibly with empty semantic option chains. Regardless + * of whether result = yyok, each state has been left with consistent + * data so that destroy can be invoked if necessary. */ + YYRESULTTAG + yyresolveStates (glr_state& yys, int yyn) + { + if (0 < yyn) + { + YYASSERT (yys.pred() != YY_NULLPTR); + YYCHK (yyresolveStates (*yys.pred(), yyn-1)); + if (! 
yys.yyresolved) + YYCHK (yyresolveValue (yys)); + } + return yyok; + } + + static void + yyuserMerge (int yyn, value_type& yy0, value_type& yy1) + { + YY_USE (yy0); + YY_USE (yy1); + + switch (yyn) + { +]b4_mergers[ + default: break; + } + } + + /** Resolve the ambiguity represented in state YYS in *YYSTACKP, + * perform the indicated actions, and set the semantic value of YYS. + * If result != yyok, the chain of semantic options in YYS has been + * cleared instead or it has been left unmodified except that + * redundant options may have been removed. Regardless of whether + * result = yyok, YYS has been left with consistent data so that + * destroy can be invoked if necessary. */ + YYRESULTTAG + yyresolveValue (glr_state& yys) + { + semantic_option* yybest = yys.firstVal(); + YYASSERT(yybest != YY_NULLPTR); + bool yymerge = false; + YYRESULTTAG yyflag;]b4_locations_if([ + location_type *yylocp = &yys.yyloc;])[ + + semantic_option* yypPrev = yybest; + for (semantic_option* yyp = yybest->next(); + yyp != YY_NULLPTR; ) + { + if (yybest->isIdenticalTo (*yyp)) + { + yybest->mergeWith (*yyp); + yypPrev->setNext(yyp->next()); + yyp = yypPrev->next(); + } + else + { + switch (yypreference (*yybest, *yyp)) + { + case 0:]b4_locations_if([[ + yyresolveLocations (yys, 1);]])[ + return yystateStack.yyreportAmbiguity (*yybest, *yyp, yyparser]b4_locations_if([, *yylocp])[); + break; + case 1: + yymerge = true; + break; + case 2: + break; + case 3: + yybest = yyp; + yymerge = false; + break; + default: + /* This cannot happen so it is not worth a YYASSERT (false), + but some compilers complain if the default case is + omitted. 
*/ + break; + } + yypPrev = yyp; + yyp = yyp->next(); + } + } + + value_type val; + if (yymerge) + { + int yyprec = yydprec[yybest->yyrule]; + yyflag = yyresolveAction (*yybest, &val]b4_locations_if([, yylocp])[); + if (yyflag == yyok) + for (semantic_option* yyp = yybest->next(); + yyp != YY_NULLPTR; + yyp = yyp->next()) + { + if (yyprec == yydprec[yyp->yyrule]) + { + value_type yyval_other;]b4_locations_if([ + location_type yydummy;])[ + yyflag = yyresolveAction (*yyp, &yyval_other]b4_locations_if([, &yydummy])[); + if (yyflag != yyok) + { + yyparser.yy_destroy_ ("Cleanup: discarding incompletely merged value for", + yy_accessing_symbol (yys.yylrState), + this->yyla.value]b4_locations_if([, *yylocp])[); + break; + } + yyuserMerge (yymerger[yyp->yyrule], val, yyval_other);]b4_variant_if([[ + // FIXME: User destructors. + // Value type destructor. + ]b4_symbol_variant([[yy_accessing_symbol (yys.yylrState)]], [[yyval_other]], [[template destroy]])])[ + } + } + } + else + yyflag = yyresolveAction (*yybest, &val]b4_locations_if([, yylocp])[); + + if (yyflag == yyok) + { + yys.yyresolved = true; + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN]b4_variant_if([[ + new (&yys.value ()) value_type (); + ]b4_symbol_variant([yy_accessing_symbol (yys.yylrState)], + [yys.value ()], [copy], [val])], [[ + new (&yys.value ()) value_type (val);]])[ + + YY_IGNORE_MAYBE_UNINITIALIZED_END + } + else + yys.setFirstVal(YY_NULLPTR); +]b4_variant_if([[ + // FIXME: User destructors. + // Value type destructor. + ]b4_symbol_variant([[yy_accessing_symbol (yys.yylrState)]], [[val]], [[template destroy]])])[ + return yyflag; + } + + /** Resolve the states for the RHS of YYOPT on *YYSTACKP, perform its + * user action, and return the semantic value and location in *YYVALP + * and *YYLOCP. Regardless of whether result = yyok, all RHS states + * have been destroyed (assuming the user action destroys all RHS + * semantic values if invoked). 
*/ + YYRESULTTAG + yyresolveAction (semantic_option& yyopt, value_type* yyvalp]b4_locations_if([, location_type* yylocp])[) + { + glr_state* yyoptState = yyopt.state(); + YYASSERT(yyoptState != YY_NULLPTR); + int yynrhs = yyrhsLength (yyopt.yyrule); + YYRESULTTAG yyflag = yyresolveStates (*yyoptState, yynrhs); + if (yyflag != yyok) + { + for (glr_state *yys = yyoptState; yynrhs > 0; yys = yys->pred(), yynrhs -= 1) + yys->destroy ("Cleanup: popping", yyparser); + return yyflag; + } + + glr_stack_item yyrhsVals[YYMAXRHS + YYMAXLEFT + 1]; + yyrhsVals[YYMAXRHS + YYMAXLEFT].getState().setPred(yyopt.state());]b4_locations_if([[ + if (yynrhs == 0) + /* Set default location. */ + yyrhsVals[YYMAXRHS + YYMAXLEFT - 1].getState().yyloc = yyoptState->yyloc;]])[ + { + symbol_type yyla_current = std::move (this->yyla); + this->yyla = std::move (yyopt.yyla); + yyflag = yyuserAction (yyopt.yyrule, yynrhs, + yyrhsVals + YYMAXRHS + YYMAXLEFT - 1, + create_state_set_index (-1), + yyvalp]b4_locations_if([, yylocp])[); + this->yyla = std::move (yyla_current); + } + return yyflag; + }]b4_locations_if([[ + + /** Resolve the locations for each of the YYN1 states in *YYSTACKP, + * ending at YYS1. Has no effect on previously resolved states. + * The first semantic option of a state is always chosen. */ + void + yyresolveLocations (glr_state &yys1, int yyn1) + { + if (0 < yyn1) + { + yyresolveLocations (*yys1.pred(), yyn1 - 1); + if (!yys1.yyresolved) + { + glr_stack_item yyrhsloc[1 + YYMAXRHS]; + YYASSERT (yys1.firstVal() != YY_NULLPTR); + semantic_option& yyoption = *yys1.firstVal(); + const int yynrhs = yyrhsLength (yyoption.yyrule); + if (0 < yynrhs) + { + yyresolveLocations (*yyoption.state(), yynrhs); + const glr_state *yys = yyoption.state(); + for (int yyn = yynrhs; yyn > 0; yyn -= 1) + { + yyrhsloc[yyn].getState().yyloc = yys->yyloc; + yys = yys->pred(); + } + } + else + { + /* Both yyresolveAction and yyresolveLocations traverse the GSS + in reverse rightmost order. 
It is only necessary to invoke + yyresolveLocations on a subforest for which yyresolveAction + would have been invoked next had an ambiguity not been + detected. Thus the location of the previous state (but not + necessarily the previous state itself) is guaranteed to be + resolved already. */ + YY_IGNORE_NULL_DEREFERENCE_BEGIN + yyrhsloc[0].getState().yyloc = yyoption.state()->yyloc; + YY_IGNORE_NULL_DEREFERENCE_END + } + YYLLOC_DEFAULT ((yys1.yyloc), yyrhsloc, yynrhs); + } + } + }]])[ + + /** If yytoken is empty, fetch the next token. */ + void + yyget_token () + { +]b4_parse_param_use()dnl +[ if (this->yyla.empty ()) + { + YYCDEBUG << "Reading a token\n"; +#if YY_EXCEPTIONS + try +#endif // YY_EXCEPTIONS + {]b4_token_ctor_if([[ + symbol_type yylookahead (]b4_yylex[); + yyla.move (yylookahead);]], [[ + yyla.kind_ = yyparser.yytranslate_ (]b4_yylex[);]])[ + } +#if YY_EXCEPTIONS + catch (const parser_type::syntax_error& yyexc) + { + YYCDEBUG << "Caught exception: " << yyexc.what () << '\n';]b4_locations_if([ + this->yyla.location = yyexc.location;])[ + yyparser.error (]b4_locations_if([this->yyla.location, ])[yyexc.what ()); + // Map errors caught in the scanner to the error token, so that error + // handling is started. + this->yyla.kind_ = ]b4_symbol(error, kind)[; + } + } +#endif // YY_EXCEPTIONS + if (this->yyla.kind () == ]b4_symbol(eof, kind)[) + YYCDEBUG << "Now at end of input.\n"; + else + YY_SYMBOL_PRINT ("Next token is", this->yyla.kind (), this->yyla.value, this->yyla.location); + } + + + /* Bison grammar-table manipulation. */ + + /** The action to take in YYSTATE on seeing YYTOKEN. + * Result R means + * R < 0: Reduce on rule -R. + * R = 0: Error. + * R > 0: Shift to state R. + * Set *YYCONFLICTS to a pointer into yyconfl to a 0-terminated list + * of conflicting reductions. 
+ */ + static int + yygetLRActions (state_num yystate, symbol_kind_type yytoken, const short*& yyconflicts) + { + int yyindex = yypact[yystate] + yytoken; + if (yytoken == ]b4_symbol(error, kind)[) + { + // This is the error token. + yyconflicts = yyconfl; + return 0; + } + else if (yy_is_defaulted_state (yystate) + || yyindex < 0 || YYLAST < yyindex || yycheck[yyindex] != yytoken) + { + yyconflicts = yyconfl; + return -yydefact[yystate]; + } + else if (! yytable_value_is_error (yytable[yyindex])) + { + yyconflicts = yyconfl + yyconflp[yyindex]; + return yytable[yyindex]; + } + else + { + yyconflicts = yyconfl + yyconflp[yyindex]; + return 0; + } + } + + /** Compute post-reduction state. + * \param yystate the current state + * \param yysym the nonterminal to push on the stack + */ + static state_num + yyLRgotoState (state_num yystate, symbol_kind_type yysym) + { + const int yyr = yypgoto[yysym - YYNTOKENS] + yystate; + if (0 <= yyr && yyr <= YYLAST && yycheck[yyr] == yystate) + return yytable[yyr]; + else + return yydefgoto[yysym - YYNTOKENS]; + } + + static bool + yypact_value_is_default (state_num yystate) + { + return ]b4_table_value_equals([[pact]], [[yystate]], [b4_pact_ninf], [YYPACT_NINF])[; + } + + static bool + yytable_value_is_error (int yytable_value YY_ATTRIBUTE_UNUSED) + { + return ]b4_table_value_equals([[table]], [[yytable_value]], [b4_table_ninf], [YYTABLE_NINF])[; + } + + static bool + yy_is_shift_action (int yyaction) YY_NOEXCEPT + { + return 0 < yyaction; + } + + static bool + yy_is_error_action (int yyaction) YY_NOEXCEPT + { + return yyaction == 0; + } + + /** Whether LR state YYSTATE has only a default reduction + * (regardless of token). */ + static bool + yy_is_defaulted_state (state_num yystate) + { + return yypact_value_is_default (yypact[yystate]); + } + + /** The default reduction for YYSTATE, assuming it has one. 
*/ + static rule_num + yy_default_action (state_num yystate) + { + return yydefact[yystate]; + } + + /* GLRStacks */ + + /** Y0 and Y1 represent two possible actions to take in a given + * parsing state; return 0 if no combination is possible, + * 1 if user-mergeable, 2 if Y0 is preferred, 3 if Y1 is preferred. */ + static int + yypreference (const semantic_option& y0, const semantic_option& y1) + { + const rule_num r0 = y0.yyrule, r1 = y1.yyrule; + const int p0 = yydprec[r0], p1 = yydprec[r1]; + + if (p0 == p1) + { + if (yymerger[r0] == 0 || yymerger[r0] != yymerger[r1]) + return 0; + else + return 1; + } + else if (p0 == 0 || p1 == 0) + return 0; + else if (p0 < p1) + return 3; + else if (p1 < p0) + return 2; + else + return 0; + } + +]b4_parse_param_vars[ + }; // class ]b4_parser_class[::glr_stack +} // namespace ]b4_namespace_ref[ + + +#if ]b4_api_PREFIX[DEBUG +namespace +{ + void + yypstack (const glr_stack& yystack, size_t yyk) + { + yystack.yypstack (create_state_set_index (static_cast (yyk))); + } + + void + yypdumpstack (const glr_stack& yystack) + { + yystack.yypdumpstack (); + } +} +#endif + +]b4_namespace_open[ + /// Build a parser object. 
+ ]b4_parser_class::b4_parser_class[ (]b4_parse_param_decl[)]m4_ifset([b4_parse_param], [ + :])[ +#if ]b4_api_PREFIX[DEBUG + ]m4_ifset([b4_parse_param], [ ], [ :])[yycdebug_ (&std::cerr)]m4_ifset([b4_parse_param], [,])[ +#endif]b4_parse_param_cons[ + {} + + ]b4_parser_class::~b4_parser_class[ () + {} + + ]b4_parser_class[::syntax_error::~syntax_error () YY_NOEXCEPT YY_NOTHROW + {} + + int + ]b4_parser_class[::operator() () + { + return parse (); + } + + int + ]b4_parser_class[::parse () + { + glr_stack yystack(YYINITDEPTH, *this]b4_user_args[); + return yystack.parse (); + } + +]b4_parse_error_bmatch([custom\|detailed], +[[ const char * + ]b4_parser_class[::symbol_name (symbol_kind_type yysymbol) + { + static const char *const yy_sname[] = + { + ]b4_symbol_names[ + };]b4_has_translations_if([[ + /* YYTRANSLATABLE[SYMBOL-NUM] -- Whether YY_SNAME[SYMBOL-NUM] is + internationalizable. */ + static ]b4_int_type_for([b4_translatable])[ yytranslatable[] = + { + ]b4_translatable[ + }; + return (yysymbol < YYNTOKENS && yytranslatable[yysymbol] + ? _(yy_sname[yysymbol]) + : yy_sname[yysymbol]);]], [[ + return yy_sname[yysymbol];]])[ + } +]], +[simple], +[[#if ]b4_api_PREFIX[DEBUG || ]b4_token_table_flag[ + const char * + ]b4_parser_class[::symbol_name (symbol_kind_type yysymbol) + { + return yytname_[yysymbol]; + } +#endif // #if ]b4_api_PREFIX[DEBUG || ]b4_token_table_flag[ +]], +[verbose], +[[ /* Return YYSTR after stripping away unnecessary quotes and + backslashes, so that it's suitable for yyerror. The heuristic is + that double-quoting is unnecessary unless the string contains an + apostrophe, a comma, or backslash (other than backslash-backslash). + YYSTR is taken from yytname. 
*/ + std::string + ]b4_parser_class[::yytnamerr_ (const char *yystr) + { + if (*yystr == '"') + { + std::string yyr; + char const *yyp = yystr; + + for (;;) + switch (*++yyp) + { + case '\'': + case ',': + goto do_not_strip_quotes; + + case '\\': + if (*++yyp != '\\') + goto do_not_strip_quotes; + else + goto append; + + append: + default: + yyr += *yyp; + break; + + case '"': + return yyr; + } + do_not_strip_quotes: ; + } + + return yystr; + } + + std::string + ]b4_parser_class[::symbol_name (symbol_kind_type yysymbol) + { + return yytnamerr_ (yytname_[yysymbol]); + } +]])[ + +]b4_parse_error_bmatch([simple\|verbose], +[[#if ]b4_api_PREFIX[DEBUG]b4_tname_if([[ || 1]])[ + // YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. + // First, the terminals, then, starting at \a YYNTOKENS, nonterminals. + const char* + const ]b4_parser_class[::yytname_[] = + { + ]b4_tname[ + }; +#endif +]])[ + +]b4_parse_error_bmatch([custom\|detailed\|verbose], [[ + // ]b4_parser_class[::context. + ]b4_parser_class[::context::context (glr_stack& yystack, const symbol_type& yyla) + : yystack_ (yystack) + , yyla_ (yyla) + {} + + int + ]b4_parser_class[::context::expected_tokens (symbol_kind_type yyarg[], int yyargn) const + { + // Actual number of expected tokens + int yycount = 0; + const int yyn = yypact[yystack_.firstTopState()->yylrState]; + if (!yystack_.yypact_value_is_default (yyn)) + { + /* Start YYX at -YYN if negative to avoid negative indexes in + YYCHECK. In other words, skip the first -YYN actions for this + state because they are default actions. */ + const int yyxbegin = yyn < 0 ? -yyn : 0; + /* Stay within bounds of both yycheck and yytname. */ + const int yychecklim = YYLAST - yyn + 1; + const int yyxend = yychecklim < YYNTOKENS ? 
yychecklim : YYNTOKENS; + for (int yyx = yyxbegin; yyx < yyxend; ++yyx) + if (yycheck[yyx + yyn] == yyx && yyx != ]b4_symbol(error, kind)[ + && !yystack_.yytable_value_is_error (yytable[yyx + yyn])) + { + if (!yyarg) + ++yycount; + else if (yycount == yyargn) + return 0; + else + yyarg[yycount++] = YY_CAST (symbol_kind_type, yyx); + } + } + if (yyarg && yycount == 0 && 0 < yyargn) + yyarg[0] = ]b4_symbol(empty, kind)[; + return yycount; + } + +]])[ + +]b4_parse_error_bmatch([detailed\|verbose], [[ + int + ]b4_parser_class[::yy_syntax_error_arguments_ (const context& yyctx, + symbol_kind_type yyarg[], int yyargn) const + { + /* There are many possibilities here to consider: + - If this state is a consistent state with a default action, then + the only way this function was invoked is if the default action + is an error action. In that case, don't check for expected + tokens because there are none. + - The only way there can be no lookahead present (in yyla) is + if this state is a consistent state with a default action. + Thus, detecting the absence of a lookahead is sufficient to + determine that there is no unexpected or expected token to + report. In that case, just report a simple "syntax error". + - Don't assume there isn't a lookahead just because this state is + a consistent state with a default action. There might have + been a previous inconsistent state, consistent state with a + non-default action, or user semantic action that manipulated + yyla. (However, yyla is currently not documented for users.) + */ + + if (!yyctx.lookahead ().empty ()) + { + if (yyarg) + yyarg[0] = yyctx.token (); + int yyn = yyctx.expected_tokens (yyarg ? yyarg + 1 : yyarg, yyargn - 1); + return yyn + 1; + } + return 0; + } + + // Generate an error message. + std::string + ]b4_parser_class[::yysyntax_error_ (const context& yyctx) const + { + // Its maximum. + enum { YYARGS_MAX = 5 }; + // Arguments of yyformat. 
+ symbol_kind_type yyarg[YYARGS_MAX]; + int yycount = yy_syntax_error_arguments_ (yyctx, yyarg, YYARGS_MAX); + + char const* yyformat = YY_NULLPTR; + switch (yycount) + { +#define YYCASE_(N, S) \ + case N: \ + yyformat = S; \ + break + default: // Avoid compiler warnings. + YYCASE_ (0, YY_("syntax error")); + YYCASE_ (1, YY_("syntax error, unexpected %s")); + YYCASE_ (2, YY_("syntax error, unexpected %s, expecting %s")); + YYCASE_ (3, YY_("syntax error, unexpected %s, expecting %s or %s")); + YYCASE_ (4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); + YYCASE_ (5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); +#undef YYCASE_ + } + + std::string yyres; + // Argument number. + std::ptrdiff_t yyi = 0; + for (char const* yyp = yyformat; *yyp; ++yyp) + if (yyp[0] == '%' && yyp[1] == 's' && yyi < yycount) + { + yyres += symbol_name (yyarg[yyi++]); + ++yyp; + } + else + yyres += *yyp; + return yyres; + }]])[ + + void + ]b4_parser_class[::yy_destroy_ (const char* yymsg, symbol_kind_type yykind, + value_type& yyval]b4_locations_if([[, + location_type& yyloc]])[) + { + YY_USE (yyval);]b4_locations_if([[ + YY_USE (yyloc);]])[ + if (!yymsg) + yymsg = "Deleting"; + ]b4_parser_class[& yyparser = *this; + YY_USE (yyparser); + YY_SYMBOL_PRINT (yymsg, yykind, yyval, yyloc); + + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + ]m4_do([m4_pushdef([b4_symbol_action], m4_defn([b4_symbol_action_for_yyval]))], + [b4_symbol_actions([destructor])], + [m4_popdef([b4_symbol_action])])[ + YY_IGNORE_MAYBE_UNINITIALIZED_END + } + +#if ]b4_api_PREFIX[DEBUG + /*--------------------. + | Print this symbol. 
| + `--------------------*/ + + void + ]b4_parser_class[::yy_symbol_value_print_ (symbol_kind_type yykind, + const value_type& yyval]b4_locations_if([[, + const location_type& yyloc]])[) const + {]b4_locations_if([[ + YY_USE (yyloc);]])[ + YY_USE (yyval); + std::ostream& yyo = debug_stream (); + YY_USE (yyo); + ]m4_do([m4_pushdef([b4_symbol_action], m4_defn([b4_symbol_action_for_yyval]))], + [b4_symbol_actions([printer])], + [m4_popdef([b4_symbol_action])])[ + } + + void + ]b4_parser_class[::yy_symbol_print_ (symbol_kind_type yykind, + const value_type& yyval]b4_locations_if([[, + const location_type& yyloc]])[) const + { + *yycdebug_ << (yykind < YYNTOKENS ? "token" : "nterm") + << ' ' << symbol_name (yykind) << " ("]b4_locations_if([[ + << yyloc << ": "]])[; + yy_symbol_value_print_ (yykind, yyval]b4_locations_if([[, yyloc]])[); + *yycdebug_ << ')'; + } + + std::ostream& + ]b4_parser_class[::debug_stream () const + { + return *yycdebug_; + } + + void + ]b4_parser_class[::set_debug_stream (std::ostream& o) + { + yycdebug_ = &o; + } + + + ]b4_parser_class[::debug_level_type + ]b4_parser_class[::debug_level () const + { + return yydebug; + } + + void + ]b4_parser_class[::set_debug_level (debug_level_type l) + { + // Actually, it is yydebug which is really used. + yydebug = l; + } +#endif // ]b4_api_PREFIX[DEBUG + +]b4_token_ctor_if([], [b4_yytranslate_define([cc])])[ + +]b4_token_ctor_if([], [[ + /*---------. + | symbol. | + `---------*/ +]b4_public_types_define([cc])])[ +]b4_namespace_close[]dnl +b4_epilogue[]dnl +b4_output_end diff --git a/platform/dbops/binaries/build/share/bison/skeletons/java-skel.m4 b/platform/dbops/binaries/build/share/bison/skeletons/java-skel.m4 new file mode 100644 index 0000000000000000000000000000000000000000..11cbc49992c46b1ea6ad7c7946a73cd706dd91f7 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/java-skel.m4 @@ -0,0 +1,27 @@ + -*- Autoconf -*- + +# Java skeleton dispatching for Bison. 
+ +# Copyright (C) 2007, 2009-2015, 2018-2021 Free Software Foundation, +# Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +b4_glr_if( [b4_complain([%%glr-parser not supported for Java])]) +b4_nondeterministic_if([b4_complain([%%nondeterministic-parser not supported for Java])]) + +m4_define_default([b4_used_skeleton], [b4_skeletonsdir/[lalr1.java]]) +m4_define_default([b4_skeleton], ["b4_basename(b4_used_skeleton)"]) + +m4_include(b4_used_skeleton) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/java.m4 b/platform/dbops/binaries/build/share/bison/skeletons/java.m4 new file mode 100644 index 0000000000000000000000000000000000000000..8b0828b0bfdb1a14069f0269333bb6ff74163a54 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/java.m4 @@ -0,0 +1,502 @@ + -*- Autoconf -*- + +# Java language support for Bison + +# Copyright (C) 2007-2015, 2018-2021 Free Software Foundation, Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +m4_include(b4_skeletonsdir/[c-like.m4]) + + +# b4_list2(LIST1, LIST2) +# ---------------------- +# Join two lists with a comma if necessary. +m4_define([b4_list2], + [$1[]m4_ifval(m4_quote($1), [m4_ifval(m4_quote($2), [[, ]])])[]$2]) + + +# b4_percent_define_get3(DEF, PRE, POST, NOT) +# ------------------------------------------- +# Expand to the value of DEF surrounded by PRE and POST if it's %define'ed, +# otherwise NOT. +m4_define([b4_percent_define_get3], + [m4_ifval(m4_quote(b4_percent_define_get([$1])), + [$2[]b4_percent_define_get([$1])[]$3], [$4])]) + + + +# b4_flag_value(BOOLEAN-FLAG) +# --------------------------- +m4_define([b4_flag_value], [b4_flag_if([$1], [true], [false])]) + + +# b4_parser_class_declaration +# --------------------------- +# The declaration of the parser class ("class YYParser"), with all its +# qualifiers/annotations. 
+b4_percent_define_default([[api.parser.abstract]], [[false]]) +b4_percent_define_default([[api.parser.final]], [[false]]) +b4_percent_define_default([[api.parser.public]], [[false]]) +b4_percent_define_default([[api.parser.strictfp]], [[false]]) + +m4_define([b4_parser_class_declaration], +[b4_percent_define_get3([api.parser.annotations], [], [ ])dnl +b4_percent_define_flag_if([api.parser.public], [public ])dnl +b4_percent_define_flag_if([api.parser.abstract], [abstract ])dnl +b4_percent_define_flag_if([api.parser.final], [final ])dnl +b4_percent_define_flag_if([api.parser.strictfp], [strictfp ])dnl +[class ]b4_parser_class[]dnl +b4_percent_define_get3([api.parser.extends], [ extends ])dnl +b4_percent_define_get3([api.parser.implements], [ implements ])]) + + +# b4_lexer_if(TRUE, FALSE) +# ------------------------ +m4_define([b4_lexer_if], +[b4_percent_code_ifdef([[lexer]], [$1], [$2])]) + + +# b4_identification +# ----------------- +m4_define([b4_identification], +[[ /** Version number for the Bison executable that generated this parser. */ + public static final String bisonVersion = "]b4_version_string["; + + /** Name of the skeleton that generated this parser. */ + public static final String bisonSkeleton = ]b4_skeleton[; +]]) + + +## ------------ ## +## Data types. ## +## ------------ ## + +# b4_int_type(MIN, MAX) +# --------------------- +# Return the smallest int type able to handle numbers ranging from +# MIN to MAX (included). +m4_define([b4_int_type], +[m4_if(b4_ints_in($@, [-128], [127]), [1], [byte], + b4_ints_in($@, [-32768], [32767]), [1], [short], + [int])]) + +# b4_int_type_for(NAME) +# --------------------- +# Return the smallest int type able to handle numbers ranging from +# 'NAME_min' to 'NAME_max' (included). 
+m4_define([b4_int_type_for], +[b4_int_type($1_min, $1_max)]) + +# b4_null +# ------- +m4_define([b4_null], [null]) + + +# b4_typed_parser_table_define(TYPE, NAME, DATA, COMMENT) +# ------------------------------------------------------- +# We use intermediate functions (e.g., yypact_init) to work around the +# 64KB limit for JVM methods. See +# https://lists.gnu.org/r/help-bison/2008-11/msg00004.html. +m4_define([b4_typed_parser_table_define], +[m4_ifval([$4], [b4_comment([$4]) + ])dnl +[private static final ]$1[[] yy$2_ = yy$2_init(); + private static final ]$1[[] yy$2_init() + { + return new ]$1[[] + { + ]$3[ + }; + }]]) + + +# b4_integral_parser_table_define(NAME, DATA, COMMENT) +#----------------------------------------------------- +m4_define([b4_integral_parser_table_define], +[b4_typed_parser_table_define([b4_int_type_for([$2])], [$1], [$2], [$3])]) + + +## ------------- ## +## Token kinds. ## +## ------------- ## + + +# b4_token_enum(TOKEN-NUM) +# ------------------------ +# Output the definition of this token as an enum. +m4_define([b4_token_enum], +[b4_token_visible_if([$1], + [m4_format([[ /** Token %s, to be returned by the scanner. */ + static final int %s = %s%s; +]], + b4_symbol([$1], [tag]), + b4_symbol([$1], [id]), + b4_symbol([$1], b4_api_token_raw_if([[number]], [[code]])))])]) + + +# b4_token_enums +# -------------- +# Output the definition of the tokens (if there are) as enums. +m4_define([b4_token_enums], +[b4_any_token_visible_if([ /* Token kinds. */ +b4_symbol_foreach([b4_token_enum])])]) + + + +## -------------- ## +## Symbol kinds. ## +## -------------- ## + + +# b4_symbol_kind(NUM) +# ------------------- +m4_define([b4_symbol_kind], +[SymbolKind.b4_symbol_kind_base($@)]) + + +# b4_symbol_enum(SYMBOL-NUM) +# -------------------------- +# Output the definition of this symbol as an enum. 
+m4_define([b4_symbol_enum], +[m4_format([ %-30s %s], + m4_format([[%s(%s)%s]], + b4_symbol([$1], [kind_base]), + [$1], + m4_if([$1], b4_last_symbol, [[;]], [[,]])), + [b4_symbol_tag_comment([$1])])]) + + +# b4_declare_symbol_enum +# ---------------------- +# The definition of the symbol internal numbers as an enum. +m4_define([b4_declare_symbol_enum], +[[ public enum SymbolKind + { +]b4_symbol_foreach([b4_symbol_enum])[ + + private final int yycode_; + + SymbolKind (int n) { + this.yycode_ = n; + } + + private static final SymbolKind[] values_ = { + ]m4_map_args_sep([b4_symbol_kind(], [)], [, + ], b4_symbol_numbers)[ + }; + + static final SymbolKind get(int code) { + return values_[code]; + } + + public final int getCode() { + return this.yycode_; + } + +]b4_parse_error_bmatch( +[simple\|verbose], +[[ /* Return YYSTR after stripping away unnecessary quotes and + backslashes, so that it's suitable for yyerror. The heuristic is + that double-quoting is unnecessary unless the string contains an + apostrophe, a comma, or backslash (other than backslash-backslash). + YYSTR is taken from yytname. */ + private static String yytnamerr_(String yystr) + { + if (yystr.charAt (0) == '"') + { + StringBuffer yyr = new StringBuffer(); + strip_quotes: for (int i = 1; i < yystr.length(); i++) + switch (yystr.charAt(i)) + { + case '\'': + case ',': + break strip_quotes; + + case '\\': + if (yystr.charAt(++i) != '\\') + break strip_quotes; + /* Fall through. */ + default: + yyr.append(yystr.charAt(i)); + break; + + case '"': + return yyr.toString(); + } + } + return yystr; + } + + /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. + First, the terminals, then, starting at \a YYNTOKENS_, nonterminals. */ + ]b4_typed_parser_table_define([String], [tname], [b4_tname])[ + + /* The user-facing name of this symbol. 
*/ + public final String getName() { + return yytnamerr_(yytname_[yycode_]); + } +]], +[custom\|detailed], +[[ /* YYNAMES_[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. + First, the terminals, then, starting at \a YYNTOKENS_, nonterminals. */ + ]b4_typed_parser_table_define([String], [names], [b4_symbol_names])[ + + /* The user-facing name of this symbol. */ + public final String getName() { + return yynames_[yycode_]; + }]])[ + }; +]])]) + + + +# b4_case(ID, CODE, [COMMENTS]) +# ----------------------------- +# We need to fool Java's stupid unreachable code detection. +m4_define([b4_case], +[ case $1:m4_ifval([$3], [ b4_comment([$3])]) + if (yyn == $1) + $2; + break; +]) + + +# b4_predicate_case(LABEL, CONDITIONS) +# ------------------------------------ +m4_define([b4_predicate_case], +[ case $1: + if (! ($2)) YYERROR; + break; +]) + + +## -------- ## +## Checks. ## +## -------- ## + +b4_percent_define_check_kind([[api.value.type]], [code], [deprecated]) + +b4_percent_define_check_kind([[annotations]], [code], [deprecated]) +b4_percent_define_check_kind([[extends]], [code], [deprecated]) +b4_percent_define_check_kind([[implements]], [code], [deprecated]) +b4_percent_define_check_kind([[init_throws]], [code], [deprecated]) +b4_percent_define_check_kind([[lex_throws]], [code], [deprecated]) +b4_percent_define_check_kind([[api.parser.class]], [code], [deprecated]) +b4_percent_define_check_kind([[throws]], [code], [deprecated]) + + + +## ---------------- ## +## Default values. 
## +## ---------------- ## + +m4_define([b4_yystype], [b4_percent_define_get([[api.value.type]])]) +b4_percent_define_default([[api.value.type]], [[Object]]) +b4_percent_define_default([[api.symbol.prefix]], [[S_]]) + +# b4_api_prefix, b4_api_PREFIX +# ---------------------------- +# Corresponds to %define api.prefix +b4_percent_define_default([[api.prefix]], [[YY]]) +m4_define([b4_api_prefix], +[b4_percent_define_get([[api.prefix]])]) +m4_define([b4_api_PREFIX], +[m4_toupper(b4_api_prefix)]) + +# b4_prefix +# --------- +# If the %name-prefix is not given, it is api.prefix. +m4_define_default([b4_prefix], [b4_api_prefix]) + +b4_percent_define_default([[api.parser.class]], [b4_prefix[]Parser]) +m4_define([b4_parser_class], [b4_percent_define_get([[api.parser.class]])]) + +b4_percent_define_default([[lex_throws]], [[java.io.IOException]]) +m4_define([b4_lex_throws], [b4_percent_define_get([[lex_throws]])]) + +b4_percent_define_default([[throws]], []) +m4_define([b4_throws], [b4_percent_define_get([[throws]])]) + +b4_percent_define_default([[init_throws]], []) +m4_define([b4_init_throws], [b4_percent_define_get([[init_throws]])]) + +b4_percent_define_default([[api.location.type]], [Location]) +m4_define([b4_location_type], [b4_percent_define_get([[api.location.type]])]) + +b4_percent_define_default([[api.position.type]], [Position]) +m4_define([b4_position_type], [b4_percent_define_get([[api.position.type]])]) + + +## ----------------- ## +## Semantic Values. ## +## ----------------- ## + + +# b4_symbol_translate(STRING) +# --------------------------- +# Used by "bison" in the array of symbol names to mark those that +# require translation. +m4_define([b4_symbol_translate], +[[i18n($1)]]) + + +# b4_trans(STRING) +# ---------------- +# Translate a string if i18n is enabled. Avoid collision with b4_translate. 
+m4_define([b4_trans], +[b4_has_translations_if([i18n($1)], [$1])]) + + + +# b4_symbol_value(VAL, [SYMBOL-NUM], [TYPE-TAG]) +# ---------------------------------------------- +# See README. +m4_define([b4_symbol_value], +[m4_ifval([$3], + [(($3)($1))], + [m4_ifval([$2], + [b4_symbol_if([$2], [has_type], + [((b4_symbol([$2], [type]))($1))], + [$1])], + [$1])])]) + + +# b4_lhs_value([SYMBOL-NUM], [TYPE]) +# ---------------------------------- +# See README. +m4_define([b4_lhs_value], [yyval]) + + +# b4_rhs_data(RULE-LENGTH, POS) +# ----------------------------- +# See README. +m4_define([b4_rhs_data], +[yystack.valueAt (b4_subtract($@))]) + +# b4_rhs_value(RULE-LENGTH, POS, SYMBOL-NUM, [TYPE]) +# -------------------------------------------------- +# See README. +# +# In this simple implementation, %token and %type have class names +# between the angle brackets. +m4_define([b4_rhs_value], +[b4_symbol_value([b4_rhs_data([$1], [$2])], [$3], [$4])]) + + +# b4_lhs_location() +# ----------------- +# Expansion of @$. +m4_define([b4_lhs_location], +[(yyloc)]) + + +# b4_rhs_location(RULE-LENGTH, POS) +# --------------------------------- +# Expansion of @POS, where the current rule has RULE-LENGTH symbols +# on RHS. +m4_define([b4_rhs_location], +[yystack.locationAt (b4_subtract($@))]) + + +# b4_lex_param +# b4_parse_param +# -------------- +# If defined, b4_lex_param arrives double quoted, but below we prefer +# it to be single quoted. Same for b4_parse_param. + +# TODO: should be in bison.m4 +m4_define_default([b4_lex_param], [[]]) +m4_define([b4_lex_param], b4_lex_param) +m4_define([b4_parse_param], b4_parse_param) + +# b4_lex_param_decl +# ----------------- +# Extra formal arguments of the constructor. 
+m4_define([b4_lex_param_decl], +[m4_ifset([b4_lex_param], + [b4_remove_comma([$1], + b4_param_decls(b4_lex_param))], + [$1])]) + +m4_define([b4_param_decls], + [m4_map([b4_param_decl], [$@])]) +m4_define([b4_param_decl], [, $1]) + +m4_define([b4_remove_comma], [m4_ifval(m4_quote($1), [$1, ], [])m4_shift2($@)]) + + + +# b4_parse_param_decl +# ------------------- +# Extra formal arguments of the constructor. +m4_define([b4_parse_param_decl], +[m4_ifset([b4_parse_param], + [b4_remove_comma([$1], + b4_param_decls(b4_parse_param))], + [$1])]) + + + +# b4_lex_param_call +# ----------------- +# Delegating the lexer parameters to the lexer constructor. +m4_define([b4_lex_param_call], + [m4_ifset([b4_lex_param], + [b4_remove_comma([$1], + b4_param_calls(b4_lex_param))], + [$1])]) +m4_define([b4_param_calls], + [m4_map([b4_param_call], [$@])]) +m4_define([b4_param_call], [, $2]) + + + +# b4_parse_param_cons +# ------------------- +# Extra initialisations of the constructor. +m4_define([b4_parse_param_cons], + [m4_ifset([b4_parse_param], + [b4_constructor_calls(b4_parse_param)])]) + +m4_define([b4_constructor_calls], + [m4_map([b4_constructor_call], [$@])]) +m4_define([b4_constructor_call], + [this.$2 = $2; + ]) + + + +# b4_parse_param_vars +# ------------------- +# Extra instance variables. +m4_define([b4_parse_param_vars], + [m4_ifset([b4_parse_param], + [ + /* User arguments. */ +b4_var_decls(b4_parse_param)])]) + +m4_define([b4_var_decls], + [m4_map_sep([b4_var_decl], [ +], [$@])]) +m4_define([b4_var_decl], + [ protected final $1;]) + + + +# b4_maybe_throws(THROWS) +# ----------------------- +# Expand to either an empty string or "throws THROWS". 
+m4_define([b4_maybe_throws], + [m4_ifval($1, [ throws $1])]) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/lalr1.cc b/platform/dbops/binaries/build/share/bison/skeletons/lalr1.cc new file mode 100644 index 0000000000000000000000000000000000000000..7cb69d3db973020b59563eec6e77d693c7f4f3d8 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/lalr1.cc @@ -0,0 +1,1633 @@ +# C++ skeleton for Bison + +# Copyright (C) 2002-2015, 2018-2021 Free Software Foundation, Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +m4_include(b4_skeletonsdir/[c++.m4]) + +# api.value.type=variant is valid. +m4_define([b4_value_type_setup_variant]) + +# parse.lac +b4_percent_define_default([[parse.lac]], [[none]]) +b4_percent_define_check_values([[[[parse.lac]], [[full]], [[none]]]]) +b4_define_flag_if([lac]) +m4_define([b4_lac_flag], + [m4_if(b4_percent_define_get([[parse.lac]]), + [none], [[0]], [[1]])]) + + +# b4_tname_if(TNAME-NEEDED, TNAME-NOT-NEEDED) +# ------------------------------------------- +m4_define([b4_tname_if], +[m4_case(b4_percent_define_get([[parse.error]]), + [verbose], [$1], + [b4_token_table_if([$1], + [$2])])]) + + +# b4_integral_parser_table_declare(TABLE-NAME, CONTENT, COMMENT) +# -------------------------------------------------------------- +# Declare "parser::yy_" whose contents is CONTENT. 
+m4_define([b4_integral_parser_table_declare], +[m4_ifval([$3], [b4_comment([$3], [ ]) +])dnl + static const b4_int_type_for([$2]) yy$1_[[]];dnl +]) + +# b4_integral_parser_table_define(TABLE-NAME, CONTENT, COMMENT) +# ------------------------------------------------------------- +# Define "parser::yy_" whose contents is CONTENT. +m4_define([b4_integral_parser_table_define], +[ const b4_int_type_for([$2]) + b4_parser_class::yy$1_[[]] = + { + $2 + };dnl +]) + + +# b4_symbol_kind(NUM) +# ------------------- +m4_define([b4_symbol_kind], +[symbol_kind::b4_symbol_kind_base($@)]) + + +# b4_symbol_value_template(VAL, SYMBOL-NUM, [TYPE]) +# ------------------------------------------------- +# Same as b4_symbol_value, but used in a template method. It makes +# a difference when using variants. Note that b4_value_type_setup_union +# overrides b4_symbol_value, so we must override it again. +m4_copy([b4_symbol_value], [b4_symbol_value_template]) +m4_append([b4_value_type_setup_union], +[m4_copy_force([b4_symbol_value_union], [b4_symbol_value_template])]) + +# b4_lhs_value(SYMBOL-NUM, [TYPE]) +# -------------------------------- +# See README. +m4_define([b4_lhs_value], +[b4_symbol_value([yylhs.value], [$1], [$2])]) + + +# b4_lhs_location() +# ----------------- +# Expansion of @$. +m4_define([b4_lhs_location], +[yylhs.location]) + + +# b4_rhs_data(RULE-LENGTH, POS) +# ----------------------------- +# See README. +m4_define([b4_rhs_data], +[yystack_@{b4_subtract($@)@}]) + + +# b4_rhs_state(RULE-LENGTH, POS) +# ------------------------------ +# The state corresponding to the symbol #POS, where the current +# rule has RULE-LENGTH symbols on RHS. +m4_define([b4_rhs_state], +[b4_rhs_data([$1], [$2]).state]) + + +# b4_rhs_value(RULE-LENGTH, POS, SYMBOL-NUM, [TYPE]) +# -------------------------------------------------- +# See README. 
+m4_define([_b4_rhs_value], +[b4_symbol_value([b4_rhs_data([$1], [$2]).value], [$3], [$4])]) + +m4_define([b4_rhs_value], +[b4_percent_define_ifdef([api.value.automove], + [YY_MOVE (_b4_rhs_value($@))], + [_b4_rhs_value($@)])]) + + +# b4_rhs_location(RULE-LENGTH, POS) +# --------------------------------- +# Expansion of @POS, where the current rule has RULE-LENGTH symbols +# on RHS. +m4_define([b4_rhs_location], +[b4_rhs_data([$1], [$2]).location]) + + +# b4_symbol_action(SYMBOL-NUM, KIND) +# ---------------------------------- +# Run the action KIND (destructor or printer) for SYMBOL-NUM. +# Same as in C, but using references instead of pointers. +m4_define([b4_symbol_action], +[b4_symbol_if([$1], [has_$2], +[m4_pushdef([b4_symbol_value], m4_defn([b4_symbol_value_template]))[]dnl +b4_dollar_pushdef([yysym.value], + [$1], + [], + [yysym.location])dnl + _b4_symbol_case([$1])[]dnl +b4_syncline([b4_symbol([$1], [$2_line])], [b4_symbol([$1], [$2_file])])dnl + b4_symbol([$1], [$2]) +b4_syncline([@oline@], [@ofile@])dnl + break; + +m4_popdef([b4_symbol_value])[]dnl +b4_dollar_popdef[]dnl +])]) + + +# b4_yylex +# -------- +# Call yylex. +m4_define([b4_yylex], +[b4_token_ctor_if( +[b4_function_call([yylex], + [symbol_type], m4_ifdef([b4_lex_param], b4_lex_param))], +[b4_function_call([yylex], [int], + [[value_type *], [&yyla.value]][]dnl +b4_locations_if([, [[location_type *], [&yyla.location]]])dnl +m4_ifdef([b4_lex_param], [, ]b4_lex_param))])]) + + +m4_pushdef([b4_copyright_years], + [2002-2015, 2018-2021]) + +m4_define([b4_parser_class], + [b4_percent_define_get([[api.parser.class]])]) + +b4_bison_locations_if([# Backward compatibility. 
+ m4_define([b4_location_constructors]) + m4_include(b4_skeletonsdir/[location.cc])]) +m4_include(b4_skeletonsdir/[stack.hh]) +b4_variant_if([m4_include(b4_skeletonsdir/[variant.hh])]) + + +# b4_shared_declarations(hh|cc) +# ----------------------------- +# Declaration that might either go into the header (if --header, $1 = hh) +# or in the implementation file. +m4_define([b4_shared_declarations], +[b4_percent_code_get([[requires]])[ +]b4_parse_assert_if([# include ])[ +# include // std::abort +# include +# include +# include +# include + +]b4_cxx_portability[ +]m4_ifdef([b4_location_include], + [[# include ]b4_location_include])[ +]b4_variant_if([b4_variant_includes])[ + +]b4_attribute_define[ +]b4_cast_define[ +]b4_null_define[ + +]b4_YYDEBUG_define[ + +]b4_namespace_open[ + +]b4_bison_locations_if([m4_ifndef([b4_location_file], + [b4_location_define])])[ + + /// A Bison parser. + class ]b4_parser_class[ + { + public: +]b4_public_types_declare[ +]b4_symbol_type_define[ + /// Build a parser object. + ]b4_parser_class[ (]b4_parse_param_decl[); + virtual ~]b4_parser_class[ (); + +#if 201103L <= YY_CPLUSPLUS + /// Non copyable. + ]b4_parser_class[ (const ]b4_parser_class[&) = delete; + /// Non copyable. + ]b4_parser_class[& operator= (const ]b4_parser_class[&) = delete; +#endif + + /// Parse. An alias for parse (). + /// \returns 0 iff parsing succeeded. + int operator() (); + + /// Parse. + /// \returns 0 iff parsing succeeded. + virtual int parse (); + +#if ]b4_api_PREFIX[DEBUG + /// The current debugging stream. + std::ostream& debug_stream () const YY_ATTRIBUTE_PURE; + /// Set the current debugging stream. + void set_debug_stream (std::ostream &); + + /// Type for debugging levels. + typedef int debug_level_type; + /// The current debugging level. + debug_level_type debug_level () const YY_ATTRIBUTE_PURE; + /// Set the current debugging level. 
+ void set_debug_level (debug_level_type l); +#endif + + /// Report a syntax error.]b4_locations_if([[ + /// \param loc where the syntax error is found.]])[ + /// \param msg a description of the syntax error. + virtual void error (]b4_locations_if([[const location_type& loc, ]])[const std::string& msg); + + /// Report a syntax error. + void error (const syntax_error& err); + +]b4_parse_error_bmatch( +[custom\|detailed], +[[ /// The user-facing name of the symbol whose (internal) number is + /// YYSYMBOL. No bounds checking. + static const char *symbol_name (symbol_kind_type yysymbol);]], +[simple], +[[#if ]b4_api_PREFIX[DEBUG || ]b4_token_table_flag[ + /// The user-facing name of the symbol whose (internal) number is + /// YYSYMBOL. No bounds checking. + static const char *symbol_name (symbol_kind_type yysymbol); +#endif // #if ]b4_api_PREFIX[DEBUG || ]b4_token_table_flag[ +]], +[verbose], +[[ /// The user-facing name of the symbol whose (internal) number is + /// YYSYMBOL. No bounds checking. + static std::string symbol_name (symbol_kind_type yysymbol);]])[ + +]b4_token_constructor_define[ +]b4_parse_error_bmatch([custom\|detailed\|verbose], [[ + class context + { + public: + context (const ]b4_parser_class[& yyparser, const symbol_type& yyla); + const symbol_type& lookahead () const YY_NOEXCEPT { return yyla_; } + symbol_kind_type token () const YY_NOEXCEPT { return yyla_.kind (); }]b4_locations_if([[ + const location_type& location () const YY_NOEXCEPT { return yyla_.location; } +]])[ + /// Put in YYARG at most YYARGN of the expected tokens, and return the + /// number of tokens stored in YYARG. If YYARG is null, return the + /// number of expected tokens (guaranteed to be less than YYNTOKENS). + int expected_tokens (symbol_kind_type yyarg[], int yyargn) const; + + private: + const ]b4_parser_class[& yyparser_; + const symbol_type& yyla_; + }; +]])[ + private: +#if YY_CPLUSPLUS < 201103L + /// Non copyable. 
+ ]b4_parser_class[ (const ]b4_parser_class[&); + /// Non copyable. + ]b4_parser_class[& operator= (const ]b4_parser_class[&); +#endif +]b4_lac_if([[ + /// Check the lookahead yytoken. + /// \returns true iff the token will be eventually shifted. + bool yy_lac_check_ (symbol_kind_type yytoken) const; + /// Establish the initial context if no initial context currently exists. + /// \returns true iff the token will be eventually shifted. + bool yy_lac_establish_ (symbol_kind_type yytoken); + /// Discard any previous initial lookahead context because of event. + /// \param event the event which caused the lookahead to be discarded. + /// Only used for debbuging output. + void yy_lac_discard_ (const char* event);]])[ + + /// Stored state numbers (used for stacks). + typedef ]b4_int_type(0, m4_eval(b4_states_number - 1))[ state_type; +]b4_parse_error_bmatch( +[custom], [[ + /// Report a syntax error + /// \param yyctx the context in which the error occurred. + void report_syntax_error (const context& yyctx) const;]], +[detailed\|verbose], [[ + /// The arguments of the error message. + int yy_syntax_error_arguments_ (const context& yyctx, + symbol_kind_type yyarg[], int yyargn) const; + + /// Generate an error message. + /// \param yyctx the context in which the error occurred. + virtual std::string yysyntax_error_ (const context& yyctx) const;]])[ + /// Compute post-reduction state. + /// \param yystate the current state + /// \param yysym the nonterminal to push on the stack + static state_type yy_lr_goto_state_ (state_type yystate, int yysym); + + /// Whether the given \c yypact_ value indicates a defaulted state. + /// \param yyvalue the value to check + static bool yy_pact_value_is_default_ (int yyvalue) YY_NOEXCEPT; + + /// Whether the given \c yytable_ value indicates a syntax error. 
+ /// \param yyvalue the value to check + static bool yy_table_value_is_error_ (int yyvalue) YY_NOEXCEPT; + + static const ]b4_int_type(b4_pact_ninf, b4_pact_ninf)[ yypact_ninf_; + static const ]b4_int_type(b4_table_ninf, b4_table_ninf)[ yytable_ninf_; + + /// Convert a scanner token kind \a t to a symbol kind. + /// In theory \a t should be a token_kind_type, but character literals + /// are valid, yet not members of the token_kind_type enum. + static symbol_kind_type yytranslate_ (int t) YY_NOEXCEPT; + +]b4_parse_error_bmatch( +[simple], +[[#if ]b4_api_PREFIX[DEBUG || ]b4_token_table_flag[ + /// For a symbol, its name in clear. + static const char* const yytname_[]; +#endif // #if ]b4_api_PREFIX[DEBUG || ]b4_token_table_flag[ +]], +[verbose], +[[ /// Convert the symbol name \a n to a form suitable for a diagnostic. + static std::string yytnamerr_ (const char *yystr); + + /// For a symbol, its name in clear. + static const char* const yytname_[]; +]])[ + + // Tables. +]b4_parser_tables_declare[ + +#if ]b4_api_PREFIX[DEBUG +]b4_integral_parser_table_declare([rline], [b4_rline], + [[YYRLINE[YYN] -- Source line where rule number YYN was defined.]])[ + /// Report on the debug stream that the rule \a r is going to be reduced. + virtual void yy_reduce_print_ (int r) const; + /// Print the state stack on the debug stream. + virtual void yy_stack_print_ () const; + + /// Debugging level. + int yydebug_; + /// Debug stream. + std::ostream* yycdebug_; + + /// \brief Display a symbol kind, value and location. + /// \param yyo The output stream. + /// \param yysym The symbol. + template + void yy_print_ (std::ostream& yyo, const basic_symbol& yysym) const; +#endif + + /// \brief Reclaim the memory associated to a symbol. + /// \param yymsg Why this token is reclaimed. + /// If null, print nothing. + /// \param yysym The symbol. + template + void yy_destroy_ (const char* yymsg, basic_symbol& yysym) const; + + private: + /// Type access provider for state based symbols. 
+ struct by_state + { + /// Default constructor. + by_state () YY_NOEXCEPT; + + /// The symbol kind as needed by the constructor. + typedef state_type kind_type; + + /// Constructor. + by_state (kind_type s) YY_NOEXCEPT; + + /// Copy constructor. + by_state (const by_state& that) YY_NOEXCEPT; + + /// Record that this symbol is empty. + void clear () YY_NOEXCEPT; + + /// Steal the symbol kind from \a that. + void move (by_state& that); + + /// The symbol kind (corresponding to \a state). + /// \a ]b4_symbol(empty, kind)[ when empty. + symbol_kind_type kind () const YY_NOEXCEPT; + + /// The state number used to denote an empty symbol. + /// We use the initial state, as it does not have a value. + enum { empty_state = 0 }; + + /// The state. + /// \a empty when empty. + state_type state; + }; + + /// "Internal" symbol: element of the stack. + struct stack_symbol_type : basic_symbol + { + /// Superclass. + typedef basic_symbol super_type; + /// Construct an empty symbol. + stack_symbol_type (); + /// Move or copy construction. + stack_symbol_type (YY_RVREF (stack_symbol_type) that); + /// Steal the contents from \a sym to build this. + stack_symbol_type (state_type s, YY_MOVE_REF (symbol_type) sym); +#if YY_CPLUSPLUS < 201103L + /// Assignment, needed by push_back by some old implementations. + /// Moves the contents of that. + stack_symbol_type& operator= (stack_symbol_type& that); + + /// Assignment, needed by push_back by other implementations. + /// Needed by some other old implementations. + stack_symbol_type& operator= (const stack_symbol_type& that); +#endif + }; + +]b4_stack_define[ + + /// Stack type. + typedef stack stack_type; + + /// The stack. + stack_type yystack_;]b4_lac_if([[ + /// The stack for LAC. + /// Logically, the yy_lac_stack's lifetime is confined to the function + /// yy_lac_check_. We just store it as a member of this class to hold + /// on to the memory and to avoid frequent reallocations. 
+ /// Since yy_lac_check_ is const, this member must be mutable. + mutable std::vector yylac_stack_; + /// Whether an initial LAC context was established. + bool yy_lac_established_; +]])[ + + /// Push a new state on the stack. + /// \param m a debug message to display + /// if null, no trace is output. + /// \param sym the symbol + /// \warning the contents of \a s.value is stolen. + void yypush_ (const char* m, YY_MOVE_REF (stack_symbol_type) sym); + + /// Push a new look ahead token on the state on the stack. + /// \param m a debug message to display + /// if null, no trace is output. + /// \param s the state + /// \param sym the symbol (for its value and location). + /// \warning the contents of \a sym.value is stolen. + void yypush_ (const char* m, state_type s, YY_MOVE_REF (symbol_type) sym); + + /// Pop \a n symbols from the stack. + void yypop_ (int n = 1) YY_NOEXCEPT; + + /// Constants. + enum + { + yylast_ = ]b4_last[, ///< Last index in yytable_. + yynnts_ = ]b4_nterms_number[, ///< Number of nonterminal symbols. + yyfinal_ = ]b4_final_state_number[ ///< Termination state number. + }; + +]b4_parse_param_vars[ +]b4_percent_code_get([[yy_bison_internal_hook]])[ + }; + +]b4_token_ctor_if([b4_yytranslate_define([$1])[ +]b4_public_types_define([$1])])[ +]b4_namespace_close[ + +]b4_percent_code_get([[provides]])[ +]])[ + + +## -------------- ## +## Output files. ## +## -------------- ## + +# ------------- # +# Header file. # +# ------------- # + +]b4_header_if([[ +]b4_output_begin([b4_spec_header_file])[ +]b4_copyright([Skeleton interface for Bison LALR(1) parsers in C++])[ + +/** + ** \file ]b4_spec_mapped_header_file[ + ** Define the ]b4_namespace_ref[::parser class. + */ + +// C++ LALR(1) parser skeleton written by Akim Demaille. + +]b4_disclaimer[ +]b4_cpp_guard_open([b4_spec_mapped_header_file])[ +]b4_shared_declarations(hh)[ +]b4_cpp_guard_close([b4_spec_mapped_header_file])[ +]b4_output_end[ +]])[ + + +# --------------------- # +# Implementation file. 
# +# --------------------- # + +]b4_output_begin([b4_parser_file_name])[ +]b4_copyright([Skeleton implementation for Bison LALR(1) parsers in C++])[ +]b4_disclaimer[ +]b4_percent_code_get([[top]])[]dnl +m4_if(b4_prefix, [yy], [], +[ +// Take the name prefix into account. +[#]define yylex b4_prefix[]lex])[ + +]b4_user_pre_prologue[ + +]b4_header_if([[#include "@basename(]b4_spec_header_file[@)"]], + [b4_shared_declarations([cc])])[ + +]b4_user_post_prologue[ +]b4_percent_code_get[ + +#ifndef YY_ +# if defined YYENABLE_NLS && YYENABLE_NLS +# if ENABLE_NLS +# include // FIXME: INFRINGES ON USER NAME SPACE. +# define YY_(msgid) dgettext ("bison-runtime", msgid) +# endif +# endif +# ifndef YY_ +# define YY_(msgid) msgid +# endif +#endif +]b4_has_translations_if([ +#ifndef N_ +# define N_(Msgid) Msgid +#endif +])[ + +// Whether we are compiled with exception support. +#ifndef YY_EXCEPTIONS +# if defined __GNUC__ && !defined __EXCEPTIONS +# define YY_EXCEPTIONS 0 +# else +# define YY_EXCEPTIONS 1 +# endif +#endif + +]b4_locations_if([dnl +[#define YYRHSLOC(Rhs, K) ((Rhs)[K].location) +]b4_yylloc_default_define])[ + +// Enable debugging if requested. +#if ]b4_api_PREFIX[DEBUG + +// A pseudo ostream that takes yydebug_ into account. 
+# define YYCDEBUG if (yydebug_) (*yycdebug_) + +# define YY_SYMBOL_PRINT(Title, Symbol) \ + do { \ + if (yydebug_) \ + { \ + *yycdebug_ << Title << ' '; \ + yy_print_ (*yycdebug_, Symbol); \ + *yycdebug_ << '\n'; \ + } \ + } while (false) + +# define YY_REDUCE_PRINT(Rule) \ + do { \ + if (yydebug_) \ + yy_reduce_print_ (Rule); \ + } while (false) + +# define YY_STACK_PRINT() \ + do { \ + if (yydebug_) \ + yy_stack_print_ (); \ + } while (false) + +#else // !]b4_api_PREFIX[DEBUG + +# define YYCDEBUG if (false) std::cerr +# define YY_SYMBOL_PRINT(Title, Symbol) YY_USE (Symbol) +# define YY_REDUCE_PRINT(Rule) static_cast (0) +# define YY_STACK_PRINT() static_cast (0) + +#endif // !]b4_api_PREFIX[DEBUG + +#define yyerrok (yyerrstatus_ = 0) +#define yyclearin (yyla.clear ()) + +#define YYACCEPT goto yyacceptlab +#define YYABORT goto yyabortlab +#define YYERROR goto yyerrorlab +#define YYRECOVERING() (!!yyerrstatus_) + +]b4_namespace_open[ + /// Build a parser object. + ]b4_parser_class::b4_parser_class[ (]b4_parse_param_decl[) +#if ]b4_api_PREFIX[DEBUG + : yydebug_ (false), + yycdebug_ (&std::cerr)]b4_lac_if([,], [m4_ifset([b4_parse_param], [,])])[ +#else +]b4_lac_if([ :], [m4_ifset([b4_parse_param], [ :])])[ +#endif]b4_lac_if([[ + yy_lac_established_ (false)]m4_ifset([b4_parse_param], [,])])[]b4_parse_param_cons[ + {} + + ]b4_parser_class::~b4_parser_class[ () + {} + + ]b4_parser_class[::syntax_error::~syntax_error () YY_NOEXCEPT YY_NOTHROW + {} + + /*---------. + | symbol. | + `---------*/ + +]b4_token_ctor_if([], [b4_public_types_define([cc])])[ + + // by_state. 
+ ]b4_parser_class[::by_state::by_state () YY_NOEXCEPT + : state (empty_state) + {} + + ]b4_parser_class[::by_state::by_state (const by_state& that) YY_NOEXCEPT + : state (that.state) + {} + + void + ]b4_parser_class[::by_state::clear () YY_NOEXCEPT + { + state = empty_state; + } + + void + ]b4_parser_class[::by_state::move (by_state& that) + { + state = that.state; + that.clear (); + } + + ]b4_parser_class[::by_state::by_state (state_type s) YY_NOEXCEPT + : state (s) + {} + + ]b4_parser_class[::symbol_kind_type + ]b4_parser_class[::by_state::kind () const YY_NOEXCEPT + { + if (state == empty_state) + return ]b4_symbol(empty, kind)[; + else + return YY_CAST (symbol_kind_type, yystos_[+state]); + } + + ]b4_parser_class[::stack_symbol_type::stack_symbol_type () + {} + + ]b4_parser_class[::stack_symbol_type::stack_symbol_type (YY_RVREF (stack_symbol_type) that) + : super_type (YY_MOVE (that.state)]b4_variant_if([], [, YY_MOVE (that.value)])b4_locations_if([, YY_MOVE (that.location)])[) + {]b4_variant_if([ + b4_symbol_variant([that.kind ()], + [value], [YY_MOVE_OR_COPY], [YY_MOVE (that.value)])])[ +#if 201103L <= YY_CPLUSPLUS + // that is emptied. + that.state = empty_state; +#endif + } + + ]b4_parser_class[::stack_symbol_type::stack_symbol_type (state_type s, YY_MOVE_REF (symbol_type) that) + : super_type (s]b4_variant_if([], [, YY_MOVE (that.value)])[]b4_locations_if([, YY_MOVE (that.location)])[) + {]b4_variant_if([ + b4_symbol_variant([that.kind ()], + [value], [move], [YY_MOVE (that.value)])])[ + // that is emptied. 
+ that.kind_ = ]b4_symbol(empty, kind)[; + } + +#if YY_CPLUSPLUS < 201103L + ]b4_parser_class[::stack_symbol_type& + ]b4_parser_class[::stack_symbol_type::operator= (const stack_symbol_type& that) + { + state = that.state; + ]b4_variant_if([b4_symbol_variant([that.kind ()], + [value], [copy], [that.value])], + [[value = that.value;]])[]b4_locations_if([ + location = that.location;])[ + return *this; + } + + ]b4_parser_class[::stack_symbol_type& + ]b4_parser_class[::stack_symbol_type::operator= (stack_symbol_type& that) + { + state = that.state; + ]b4_variant_if([b4_symbol_variant([that.kind ()], + [value], [move], [that.value])], + [[value = that.value;]])[]b4_locations_if([ + location = that.location;])[ + // that is emptied. + that.state = empty_state; + return *this; + } +#endif + + template + void + ]b4_parser_class[::yy_destroy_ (const char* yymsg, basic_symbol& yysym) const + { + if (yymsg) + YY_SYMBOL_PRINT (yymsg, yysym);]b4_variant_if([], [ + + // User destructor. + b4_symbol_actions([destructor], [yysym.kind ()])])[ + } + +#if ]b4_api_PREFIX[DEBUG + template + void + ]b4_parser_class[::yy_print_ (std::ostream& yyo, const basic_symbol& yysym) const + { + std::ostream& yyoutput = yyo; + YY_USE (yyoutput); + if (yysym.empty ()) + yyo << "empty symbol"; + else + { + symbol_kind_type yykind = yysym.kind (); + yyo << (yykind < YYNTOKENS ? 
"token" : "nterm") + << ' ' << yysym.name () << " ("]b4_locations_if([ + << yysym.location << ": "])[; + ]b4_symbol_actions([printer])[ + yyo << ')'; + } + } +#endif + + void + ]b4_parser_class[::yypush_ (const char* m, YY_MOVE_REF (stack_symbol_type) sym) + { + if (m) + YY_SYMBOL_PRINT (m, sym); + yystack_.push (YY_MOVE (sym)); + } + + void + ]b4_parser_class[::yypush_ (const char* m, state_type s, YY_MOVE_REF (symbol_type) sym) + { +#if 201103L <= YY_CPLUSPLUS + yypush_ (m, stack_symbol_type (s, std::move (sym))); +#else + stack_symbol_type ss (s, sym); + yypush_ (m, ss); +#endif + } + + void + ]b4_parser_class[::yypop_ (int n) YY_NOEXCEPT + { + yystack_.pop (n); + } + +#if ]b4_api_PREFIX[DEBUG + std::ostream& + ]b4_parser_class[::debug_stream () const + { + return *yycdebug_; + } + + void + ]b4_parser_class[::set_debug_stream (std::ostream& o) + { + yycdebug_ = &o; + } + + + ]b4_parser_class[::debug_level_type + ]b4_parser_class[::debug_level () const + { + return yydebug_; + } + + void + ]b4_parser_class[::set_debug_level (debug_level_type l) + { + yydebug_ = l; + } +#endif // ]b4_api_PREFIX[DEBUG + + ]b4_parser_class[::state_type + ]b4_parser_class[::yy_lr_goto_state_ (state_type yystate, int yysym) + { + int yyr = yypgoto_[yysym - YYNTOKENS] + yystate; + if (0 <= yyr && yyr <= yylast_ && yycheck_[yyr] == yystate) + return yytable_[yyr]; + else + return yydefgoto_[yysym - YYNTOKENS]; + } + + bool + ]b4_parser_class[::yy_pact_value_is_default_ (int yyvalue) YY_NOEXCEPT + { + return yyvalue == yypact_ninf_; + } + + bool + ]b4_parser_class[::yy_table_value_is_error_ (int yyvalue) YY_NOEXCEPT + { + return yyvalue == yytable_ninf_; + } + + int + ]b4_parser_class[::operator() () + { + return parse (); + } + + int + ]b4_parser_class[::parse () + { + int yyn; + /// Length of the RHS of the rule being reduced. + int yylen = 0; + + // Error handling. + int yynerrs_ = 0; + int yyerrstatus_ = 0; + + /// The lookahead symbol. 
+ symbol_type yyla;]b4_locations_if([[ + + /// The locations where the error started and ended. + stack_symbol_type yyerror_range[3];]])[ + + /// The return value of parse (). + int yyresult;]b4_lac_if([[ + + // Discard the LAC context in case there still is one left from a + // previous invocation. + yy_lac_discard_ ("init");]])[ + +#if YY_EXCEPTIONS + try +#endif // YY_EXCEPTIONS + { + YYCDEBUG << "Starting parse\n"; + +]m4_ifdef([b4_initial_action], [ +b4_dollar_pushdef([yyla.value], [], [], [yyla.location])dnl + b4_user_initial_action +b4_dollar_popdef])[]dnl + + [ /* Initialize the stack. The initial state will be set in + yynewstate, since the latter expects the semantical and the + location values to have been already stored, initialize these + stacks with a primary value. */ + yystack_.clear (); + yypush_ (YY_NULLPTR, 0, YY_MOVE (yyla)); + + /*-----------------------------------------------. + | yynewstate -- push a new symbol on the stack. | + `-----------------------------------------------*/ + yynewstate: + YYCDEBUG << "Entering state " << int (yystack_[0].state) << '\n'; + YY_STACK_PRINT (); + + // Accept? + if (yystack_[0].state == yyfinal_) + YYACCEPT; + + goto yybackup; + + + /*-----------. + | yybackup. | + `-----------*/ + yybackup: + // Try to take a decision without lookahead. + yyn = yypact_[+yystack_[0].state]; + if (yy_pact_value_is_default_ (yyn)) + goto yydefault; + + // Read a lookahead token. 
+ if (yyla.empty ()) + { + YYCDEBUG << "Reading a token\n"; +#if YY_EXCEPTIONS + try +#endif // YY_EXCEPTIONS + {]b4_token_ctor_if([[ + symbol_type yylookahead (]b4_yylex[); + yyla.move (yylookahead);]], [[ + yyla.kind_ = yytranslate_ (]b4_yylex[);]])[ + } +#if YY_EXCEPTIONS + catch (const syntax_error& yyexc) + { + YYCDEBUG << "Caught exception: " << yyexc.what() << '\n'; + error (yyexc); + goto yyerrlab1; + } +#endif // YY_EXCEPTIONS + } + YY_SYMBOL_PRINT ("Next token is", yyla); + + if (yyla.kind () == ]b4_symbol(error, kind)[) + { + // The scanner already issued an error message, process directly + // to error recovery. But do not keep the error token as + // lookahead, it is too special and may lead us to an endless + // loop in error recovery. */ + yyla.kind_ = ]b4_symbol(undef, kind)[; + goto yyerrlab1; + } + + /* If the proper action on seeing token YYLA.TYPE is to reduce or + to detect an error, take that action. */ + yyn += yyla.kind (); + if (yyn < 0 || yylast_ < yyn || yycheck_[yyn] != yyla.kind ()) + {]b4_lac_if([[ + if (!yy_lac_establish_ (yyla.kind ())) + goto yyerrlab;]])[ + goto yydefault; + } + + // Reduce or error. + yyn = yytable_[yyn]; + if (yyn <= 0) + { + if (yy_table_value_is_error_ (yyn)) + goto yyerrlab;]b4_lac_if([[ + if (!yy_lac_establish_ (yyla.kind ())) + goto yyerrlab; +]])[ + yyn = -yyn; + goto yyreduce; + } + + // Count tokens shifted since error; after three, turn off error status. + if (yyerrstatus_) + --yyerrstatus_; + + // Shift the lookahead token. + yypush_ ("Shifting", state_type (yyn), YY_MOVE (yyla));]b4_lac_if([[ + yy_lac_discard_ ("shift");]])[ + goto yynewstate; + + + /*-----------------------------------------------------------. + | yydefault -- do the default action for the current state. | + `-----------------------------------------------------------*/ + yydefault: + yyn = yydefact_[+yystack_[0].state]; + if (yyn == 0) + goto yyerrlab; + goto yyreduce; + + + /*-----------------------------. 
+ | yyreduce -- do a reduction. | + `-----------------------------*/ + yyreduce: + yylen = yyr2_[yyn]; + { + stack_symbol_type yylhs; + yylhs.state = yy_lr_goto_state_ (yystack_[yylen].state, yyr1_[yyn]);]b4_variant_if([[ + /* Variants are always initialized to an empty instance of the + correct type. The default '$$ = $1' action is NOT applied + when using variants. */ + ]b4_symbol_variant([[yyr1_@{yyn@}]], [yylhs.value], [emplace])], [[ + /* If YYLEN is nonzero, implement the default value of the + action: '$$ = $1'. Otherwise, use the top of the stack. + + Otherwise, the following line sets YYLHS.VALUE to garbage. + This behavior is undocumented and Bison users should not rely + upon it. */ + if (yylen) + yylhs.value = yystack_@{yylen - 1@}.value; + else + yylhs.value = yystack_@{0@}.value;]])[ +]b4_locations_if([dnl +[ + // Default location. + { + stack_type::slice range (yystack_, yylen); + YYLLOC_DEFAULT (yylhs.location, range, yylen); + yyerror_range[1].location = yylhs.location; + }]])[ + + // Perform the reduction. + YY_REDUCE_PRINT (yyn); +#if YY_EXCEPTIONS + try +#endif // YY_EXCEPTIONS + { + switch (yyn) + { +]b4_user_actions[ + default: + break; + } + } +#if YY_EXCEPTIONS + catch (const syntax_error& yyexc) + { + YYCDEBUG << "Caught exception: " << yyexc.what() << '\n'; + error (yyexc); + YYERROR; + } +#endif // YY_EXCEPTIONS + YY_SYMBOL_PRINT ("-> $$ =", yylhs); + yypop_ (yylen); + yylen = 0; + + // Shift the result of the reduction. + yypush_ (YY_NULLPTR, YY_MOVE (yylhs)); + } + goto yynewstate; + + + /*--------------------------------------. + | yyerrlab -- here on detecting error. | + `--------------------------------------*/ + yyerrlab: + // If not already recovering from an error, report this error. 
+ if (!yyerrstatus_) + { + ++yynerrs_;]b4_parse_error_case( + [simple], [[ + std::string msg = YY_("syntax error"); + error (]b4_join(b4_locations_if([yyla.location]), [[YY_MOVE (msg)]])[);]], + [custom], [[ + context yyctx (*this, yyla); + report_syntax_error (yyctx);]], + [[ + context yyctx (*this, yyla); + std::string msg = yysyntax_error_ (yyctx); + error (]b4_join(b4_locations_if([yyla.location]), [[YY_MOVE (msg)]])[);]])[ + } + +]b4_locations_if([[ + yyerror_range[1].location = yyla.location;]])[ + if (yyerrstatus_ == 3) + { + /* If just tried and failed to reuse lookahead token after an + error, discard it. */ + + // Return failure if at end of input. + if (yyla.kind () == ]b4_symbol(eof, kind)[) + YYABORT; + else if (!yyla.empty ()) + { + yy_destroy_ ("Error: discarding", yyla); + yyla.clear (); + } + } + + // Else will try to reuse lookahead token after shifting the error token. + goto yyerrlab1; + + + /*---------------------------------------------------. + | yyerrorlab -- error raised explicitly by YYERROR. | + `---------------------------------------------------*/ + yyerrorlab: + /* Pacify compilers when the user code never invokes YYERROR and + the label yyerrorlab therefore never appears in user code. */ + if (false) + YYERROR; + + /* Do not reclaim the symbols of the rule whose action triggered + this YYERROR. */ + yypop_ (yylen); + yylen = 0; + YY_STACK_PRINT (); + goto yyerrlab1; + + + /*-------------------------------------------------------------. + | yyerrlab1 -- common code for both syntax error and YYERROR. | + `-------------------------------------------------------------*/ + yyerrlab1: + yyerrstatus_ = 3; // Each real token shifted decrements this. + // Pop stack until we find a state that shifts the error token. 
+ for (;;) + { + yyn = yypact_[+yystack_[0].state]; + if (!yy_pact_value_is_default_ (yyn)) + { + yyn += ]b4_symbol(error, kind)[; + if (0 <= yyn && yyn <= yylast_ + && yycheck_[yyn] == ]b4_symbol(error, kind)[) + { + yyn = yytable_[yyn]; + if (0 < yyn) + break; + } + } + + // Pop the current state because it cannot handle the error token. + if (yystack_.size () == 1) + YYABORT; +]b4_locations_if([[ + yyerror_range[1].location = yystack_[0].location;]])[ + yy_destroy_ ("Error: popping", yystack_[0]); + yypop_ (); + YY_STACK_PRINT (); + } + { + stack_symbol_type error_token; +]b4_locations_if([[ + yyerror_range[2].location = yyla.location; + YYLLOC_DEFAULT (error_token.location, yyerror_range, 2);]])[ + + // Shift the error token.]b4_lac_if([[ + yy_lac_discard_ ("error recovery");]])[ + error_token.state = state_type (yyn); + yypush_ ("Shifting", YY_MOVE (error_token)); + } + goto yynewstate; + + + /*-------------------------------------. + | yyacceptlab -- YYACCEPT comes here. | + `-------------------------------------*/ + yyacceptlab: + yyresult = 0; + goto yyreturn; + + + /*-----------------------------------. + | yyabortlab -- YYABORT comes here. | + `-----------------------------------*/ + yyabortlab: + yyresult = 1; + goto yyreturn; + + + /*-----------------------------------------------------. + | yyreturn -- parsing is finished, return the result. | + `-----------------------------------------------------*/ + yyreturn: + if (!yyla.empty ()) + yy_destroy_ ("Cleanup: discarding lookahead", yyla); + + /* Do not reclaim the symbols of the rule whose action triggered + this YYABORT or YYACCEPT. */ + yypop_ (yylen); + YY_STACK_PRINT (); + while (1 < yystack_.size ()) + { + yy_destroy_ ("Cleanup: popping", yystack_[0]); + yypop_ (); + } + + return yyresult; + } +#if YY_EXCEPTIONS + catch (...) 
+ { + YYCDEBUG << "Exception caught: cleaning lookahead and stack\n"; + // Do not try to display the values of the reclaimed symbols, + // as their printers might throw an exception. + if (!yyla.empty ()) + yy_destroy_ (YY_NULLPTR, yyla); + + while (1 < yystack_.size ()) + { + yy_destroy_ (YY_NULLPTR, yystack_[0]); + yypop_ (); + } + throw; + } +#endif // YY_EXCEPTIONS + } + + void + ]b4_parser_class[::error (const syntax_error& yyexc) + { + error (]b4_join(b4_locations_if([yyexc.location]), + [[yyexc.what ()]])[); + } + +]b4_parse_error_bmatch([custom\|detailed], +[[ const char * + ]b4_parser_class[::symbol_name (symbol_kind_type yysymbol) + { + static const char *const yy_sname[] = + { + ]b4_symbol_names[ + };]b4_has_translations_if([[ + /* YYTRANSLATABLE[SYMBOL-NUM] -- Whether YY_SNAME[SYMBOL-NUM] is + internationalizable. */ + static ]b4_int_type_for([b4_translatable])[ yytranslatable[] = + { + ]b4_translatable[ + }; + return (yysymbol < YYNTOKENS && yytranslatable[yysymbol] + ? _(yy_sname[yysymbol]) + : yy_sname[yysymbol]);]], [[ + return yy_sname[yysymbol];]])[ + } +]], +[simple], +[[#if ]b4_api_PREFIX[DEBUG || ]b4_token_table_flag[ + const char * + ]b4_parser_class[::symbol_name (symbol_kind_type yysymbol) + { + return yytname_[yysymbol]; + } +#endif // #if ]b4_api_PREFIX[DEBUG || ]b4_token_table_flag[ +]], +[verbose], +[[ /* Return YYSTR after stripping away unnecessary quotes and + backslashes, so that it's suitable for yyerror. The heuristic is + that double-quoting is unnecessary unless the string contains an + apostrophe, a comma, or backslash (other than backslash-backslash). + YYSTR is taken from yytname. 
*/ + std::string + ]b4_parser_class[::yytnamerr_ (const char *yystr) + { + if (*yystr == '"') + { + std::string yyr; + char const *yyp = yystr; + + for (;;) + switch (*++yyp) + { + case '\'': + case ',': + goto do_not_strip_quotes; + + case '\\': + if (*++yyp != '\\') + goto do_not_strip_quotes; + else + goto append; + + append: + default: + yyr += *yyp; + break; + + case '"': + return yyr; + } + do_not_strip_quotes: ; + } + + return yystr; + } + + std::string + ]b4_parser_class[::symbol_name (symbol_kind_type yysymbol) + { + return yytnamerr_ (yytname_[yysymbol]); + } +]])[ + +]b4_parse_error_bmatch([custom\|detailed\|verbose], [[ + // ]b4_parser_class[::context. + ]b4_parser_class[::context::context (const ]b4_parser_class[& yyparser, const symbol_type& yyla) + : yyparser_ (yyparser) + , yyla_ (yyla) + {} + + int + ]b4_parser_class[::context::expected_tokens (symbol_kind_type yyarg[], int yyargn) const + { + // Actual number of expected tokens + int yycount = 0; +]b4_lac_if([[ +#if ]b4_api_PREFIX[DEBUG + // Execute LAC once. We don't care if it is successful, we + // only do it for the sake of debugging output. + if (!yyparser_.yy_lac_established_) + yyparser_.yy_lac_check_ (yyla_.kind ()); +#endif + + for (int yyx = 0; yyx < YYNTOKENS; ++yyx) + { + symbol_kind_type yysym = YY_CAST (symbol_kind_type, yyx); + if (yysym != ]b4_symbol(error, kind)[ + && yysym != ]b4_symbol(undef, kind)[ + && yyparser_.yy_lac_check_ (yysym)) + { + if (!yyarg) + ++yycount; + else if (yycount == yyargn) + return 0; + else + yyarg[yycount++] = yysym; + } + }]], [[ + const int yyn = yypact_[+yyparser_.yystack_[0].state]; + if (!yy_pact_value_is_default_ (yyn)) + { + /* Start YYX at -YYN if negative to avoid negative indexes in + YYCHECK. In other words, skip the first -YYN actions for + this state because they are default actions. */ + const int yyxbegin = yyn < 0 ? -yyn : 0; + // Stay within bounds of both yycheck and yytname. 
+ const int yychecklim = yylast_ - yyn + 1; + const int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; + for (int yyx = yyxbegin; yyx < yyxend; ++yyx) + if (yycheck_[yyx + yyn] == yyx && yyx != ]b4_symbol(error, kind)[ + && !yy_table_value_is_error_ (yytable_[yyx + yyn])) + { + if (!yyarg) + ++yycount; + else if (yycount == yyargn) + return 0; + else + yyarg[yycount++] = YY_CAST (symbol_kind_type, yyx); + } + } +]])[ + if (yyarg && yycount == 0 && 0 < yyargn) + yyarg[0] = ]b4_symbol(empty, kind)[; + return yycount; + } + +]])[ + +]b4_lac_if([[ + bool + ]b4_parser_class[::yy_lac_check_ (symbol_kind_type yytoken) const + { + // Logically, the yylac_stack's lifetime is confined to this function. + // Clear it, to get rid of potential left-overs from previous call. + yylac_stack_.clear (); + // Reduce until we encounter a shift and thereby accept the token. +#if ]b4_api_PREFIX[DEBUG + YYCDEBUG << "LAC: checking lookahead " << symbol_name (yytoken) << ':'; +#endif + std::ptrdiff_t lac_top = 0; + while (true) + { + state_type top_state = (yylac_stack_.empty () + ? yystack_[lac_top].state + : yylac_stack_.back ()); + int yyrule = yypact_[+top_state]; + if (yy_pact_value_is_default_ (yyrule) + || (yyrule += yytoken) < 0 || yylast_ < yyrule + || yycheck_[yyrule] != yytoken) + { + // Use the default action. + yyrule = yydefact_[+top_state]; + if (yyrule == 0) + { + YYCDEBUG << " Err\n"; + return false; + } + } + else + { + // Use the action from yytable. + yyrule = yytable_[yyrule]; + if (yy_table_value_is_error_ (yyrule)) + { + YYCDEBUG << " Err\n"; + return false; + } + if (0 < yyrule) + { + YYCDEBUG << " S" << yyrule << '\n'; + return true; + } + yyrule = -yyrule; + } + // By now we know we have to simulate a reduce. + YYCDEBUG << " R" << yyrule - 1; + // Pop the corresponding number of values from the stack. + { + std::ptrdiff_t yylen = yyr2_[yyrule]; + // First pop from the LAC stack as many tokens as possible. 
+ std::ptrdiff_t lac_size = std::ptrdiff_t (yylac_stack_.size ()); + if (yylen < lac_size) + { + yylac_stack_.resize (std::size_t (lac_size - yylen)); + yylen = 0; + } + else if (lac_size) + { + yylac_stack_.clear (); + yylen -= lac_size; + } + // Only afterwards look at the main stack. + // We simulate popping elements by incrementing lac_top. + lac_top += yylen; + } + // Keep top_state in sync with the updated stack. + top_state = (yylac_stack_.empty () + ? yystack_[lac_top].state + : yylac_stack_.back ()); + // Push the resulting state of the reduction. + state_type state = yy_lr_goto_state_ (top_state, yyr1_[yyrule]); + YYCDEBUG << " G" << int (state); + yylac_stack_.push_back (state); + } + } + + // Establish the initial context if no initial context currently exists. + bool + ]b4_parser_class[::yy_lac_establish_ (symbol_kind_type yytoken) + { + /* Establish the initial context for the current lookahead if no initial + context is currently established. + + We define a context as a snapshot of the parser stacks. We define + the initial context for a lookahead as the context in which the + parser initially examines that lookahead in order to select a + syntactic action. Thus, if the lookahead eventually proves + syntactically unacceptable (possibly in a later context reached via a + series of reductions), the initial context can be used to determine + the exact set of tokens that would be syntactically acceptable in the + lookahead's place. Moreover, it is the context after which any + further semantic actions would be erroneous because they would be + determined by a syntactically unacceptable token. + + yy_lac_establish_ should be invoked when a reduction is about to be + performed in an inconsistent state (which, for the purposes of LAC, + includes consistent states that don't know they're consistent because + their default reductions have been disabled). + + For parse.lac=full, the implementation of yy_lac_establish_ is as + follows. 
If no initial context is currently established for the + current lookahead, then check if that lookahead can eventually be + shifted if syntactic actions continue from the current context. */ + if (yy_lac_established_) + return true; + else + { +#if ]b4_api_PREFIX[DEBUG + YYCDEBUG << "LAC: initial context established for " + << symbol_name (yytoken) << '\n'; +#endif + yy_lac_established_ = true; + return yy_lac_check_ (yytoken); + } + } + + // Discard any previous initial lookahead context. + void + ]b4_parser_class[::yy_lac_discard_ (const char* event) + { + /* Discard any previous initial lookahead context because of Event, + which may be a lookahead change or an invalidation of the currently + established initial context for the current lookahead. + + The most common example of a lookahead change is a shift. An example + of both cases is syntax error recovery. That is, a syntax error + occurs when the lookahead is syntactically erroneous for the + currently established initial context, so error recovery manipulates + the parser stacks to try to find a new initial context in which the + current lookahead is syntactically acceptable. If it fails to find + such a context, it discards the lookahead. */ + if (yy_lac_established_) + { + YYCDEBUG << "LAC: initial context discarded due to " + << event << '\n'; + yy_lac_established_ = false; + } + }]])[ + +]b4_parse_error_bmatch([detailed\|verbose], [[ + int + ]b4_parser_class[::yy_syntax_error_arguments_ (const context& yyctx, + symbol_kind_type yyarg[], int yyargn) const + { + /* There are many possibilities here to consider: + - If this state is a consistent state with a default action, then + the only way this function was invoked is if the default action + is an error action. In that case, don't check for expected + tokens because there are none. + - The only way there can be no lookahead present (in yyla) is + if this state is a consistent state with a default action. 
+ Thus, detecting the absence of a lookahead is sufficient to + determine that there is no unexpected or expected token to + report. In that case, just report a simple "syntax error". + - Don't assume there isn't a lookahead just because this state is + a consistent state with a default action. There might have + been a previous inconsistent state, consistent state with a + non-default action, or user semantic action that manipulated + yyla. (However, yyla is currently not documented for users.)]b4_lac_if([[ + In the first two cases, it might appear that the current syntax + error should have been detected in the previous state when + yy_lac_check was invoked. However, at that time, there might + have been a different syntax error that discarded a different + initial context during error recovery, leaving behind the + current lookahead.]], [[ + - Of course, the expected token list depends on states to have + correct lookahead information, and it depends on the parser not + to perform extra reductions after fetching a lookahead from the + scanner and before detecting a syntax error. Thus, state merging + (from LALR or IELR) and default reductions corrupt the expected + token list. However, the list is correct for canonical LR with + one exception: it will still contain any token that will not be + accepted due to an error action in a later state.]])[ + */ + + if (!yyctx.lookahead ().empty ()) + { + if (yyarg) + yyarg[0] = yyctx.token (); + int yyn = yyctx.expected_tokens (yyarg ? yyarg + 1 : yyarg, yyargn - 1); + return yyn + 1; + } + return 0; + } + + // Generate an error message. + std::string + ]b4_parser_class[::yysyntax_error_ (const context& yyctx) const + { + // Its maximum. + enum { YYARGS_MAX = 5 }; + // Arguments of yyformat. 
+ symbol_kind_type yyarg[YYARGS_MAX]; + int yycount = yy_syntax_error_arguments_ (yyctx, yyarg, YYARGS_MAX); + + char const* yyformat = YY_NULLPTR; + switch (yycount) + { +#define YYCASE_(N, S) \ + case N: \ + yyformat = S; \ + break + default: // Avoid compiler warnings. + YYCASE_ (0, YY_("syntax error")); + YYCASE_ (1, YY_("syntax error, unexpected %s")); + YYCASE_ (2, YY_("syntax error, unexpected %s, expecting %s")); + YYCASE_ (3, YY_("syntax error, unexpected %s, expecting %s or %s")); + YYCASE_ (4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); + YYCASE_ (5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); +#undef YYCASE_ + } + + std::string yyres; + // Argument number. + std::ptrdiff_t yyi = 0; + for (char const* yyp = yyformat; *yyp; ++yyp) + if (yyp[0] == '%' && yyp[1] == 's' && yyi < yycount) + { + yyres += symbol_name (yyarg[yyi++]); + ++yyp; + } + else + yyres += *yyp; + return yyres; + }]])[ + + + const ]b4_int_type(b4_pact_ninf, b4_pact_ninf) b4_parser_class::yypact_ninf_ = b4_pact_ninf[; + + const ]b4_int_type(b4_table_ninf, b4_table_ninf) b4_parser_class::yytable_ninf_ = b4_table_ninf[; + +]b4_parser_tables_define[ + +]b4_parse_error_bmatch([simple\|verbose], +[[#if ]b4_api_PREFIX[DEBUG]b4_tname_if([[ || 1]])[ + // YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. + // First, the terminals, then, starting at \a YYNTOKENS, nonterminals. 
+ const char* + const ]b4_parser_class[::yytname_[] = + { + ]b4_tname[ + }; +#endif +]])[ + +#if ]b4_api_PREFIX[DEBUG][ +]b4_integral_parser_table_define([rline], [b4_rline])[ + + void + ]b4_parser_class[::yy_stack_print_ () const + { + *yycdebug_ << "Stack now"; + for (stack_type::const_iterator + i = yystack_.begin (), + i_end = yystack_.end (); + i != i_end; ++i) + *yycdebug_ << ' ' << int (i->state); + *yycdebug_ << '\n'; + } + + void + ]b4_parser_class[::yy_reduce_print_ (int yyrule) const + { + int yylno = yyrline_[yyrule]; + int yynrhs = yyr2_[yyrule]; + // Print the symbols being reduced, and their result. + *yycdebug_ << "Reducing stack by rule " << yyrule - 1 + << " (line " << yylno << "):\n"; + // The symbols being reduced. + for (int yyi = 0; yyi < yynrhs; yyi++) + YY_SYMBOL_PRINT (" $" << yyi + 1 << " =", + ]b4_rhs_data(yynrhs, yyi + 1)[); + } +#endif // ]b4_api_PREFIX[DEBUG + +]b4_token_ctor_if([], [b4_yytranslate_define([cc])])[ +]b4_namespace_close[ +]b4_epilogue[]dnl +b4_output_end + + +m4_popdef([b4_copyright_years])dnl diff --git a/platform/dbops/binaries/build/share/bison/skeletons/lalr1.d b/platform/dbops/binaries/build/share/bison/skeletons/lalr1.d new file mode 100644 index 0000000000000000000000000000000000000000..97303772227dc16f4d83c1040d5531848af2be48 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/lalr1.d @@ -0,0 +1,1326 @@ +# D skeleton for Bison -*- autoconf -*- + +# Copyright (C) 2007-2012, 2019-2021 Free Software Foundation, Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +m4_include(b4_skeletonsdir/[d.m4]) + +b4_header_if([b4_complain([%header/%defines does not make sense in D])]) + +# parse.lac +b4_percent_define_default([[parse.lac]], [[none]]) +b4_percent_define_check_values([[[[parse.lac]], [[full]], [[none]]]]) +b4_define_flag_if([lac]) +m4_define([b4_lac_flag], + [m4_if(b4_percent_define_get([[parse.lac]]), + [none], [[0]], [[1]])]) + + +## --------------- ## +## api.push-pull. ## +## --------------- ## + +b4_percent_define_default([[api.push-pull]], [[pull]]) +b4_percent_define_check_values([[[[api.push-pull]], + [[pull]], [[push]], [[both]]]]) + +# Define m4 conditional macros that encode the value +# of the api.push-pull flag. +b4_define_flag_if([pull]) m4_define([b4_pull_flag], [[1]]) +b4_define_flag_if([push]) m4_define([b4_push_flag], [[1]]) +m4_case(b4_percent_define_get([[api.push-pull]]), + [pull], [m4_define([b4_push_flag], [[0]])], + [push], [m4_define([b4_pull_flag], [[0]])]) + +# Define a macro to be true when api.push-pull has the value "both". +m4_define([b4_both_if],[b4_push_if([b4_pull_if([$1],[$2])],[$2])]) + +# Handle BISON_USE_PUSH_FOR_PULL for the test suite. So that push parsing +# tests function as written, do not let BISON_USE_PUSH_FOR_PULL modify the +# behavior of Bison at all when push parsing is already requested. +b4_define_flag_if([use_push_for_pull]) +b4_use_push_for_pull_if([ + b4_push_if([m4_define([b4_use_push_for_pull_flag], [[0]])], + [m4_define([b4_push_flag], [[1]])])]) + + +# Define a macro to encapsulate the parse state variables. This +# allows them to be defined either in parse() when doing pull parsing, +# or as class instance variable when doing push parsing. 
+b4_output_begin([b4_parser_file_name]) +b4_copyright([Skeleton implementation for Bison LALR(1) parsers in D], + [2007-2012, 2019-2021])[ +]b4_disclaimer[ +]b4_percent_define_ifdef([package], [module b4_percent_define_get([package]); +])[ +version(D_Version2) { +} else { + static assert(false, "need compiler for D Version 2"); +} + +]b4_user_pre_prologue[ +]b4_user_post_prologue[ +]b4_percent_code_get([[imports]])[ +import std.format; +import std.conv; + +/** + * Handle error message internationalisation. + */ +static if (!is(typeof(YY_))) { + version(YYENABLE_NLS) + { + version(ENABLE_NLS) + { + extern(C) char* dgettext(const char*, const char*); + string YY_(const char* s) + { + return to!string(dgettext("bison-runtime", s)); + } + } + } + static if (!is(typeof(YY_))) + { + pragma(inline, true) + string YY_(string msg) { return msg; } + } +} + +/** + * A Bison parser, automatically generated from ]m4_bpatsubst(b4_file_name, [^"\(.*\)"$], [\1])[. + * + * @@author LALR (1) parser skeleton written by Paolo Bonzini. + * Port to D language was done by Oliver Mangold. + */ + +/** + * Communication interface between the scanner and the Bison-generated + * parser ]b4_parser_class[. + */ +public interface Lexer +{ + /** + * Entry point for the scanner. Returns the token identifier corresponding + * to the next token and prepares to return the semantic value + * ]b4_locations_if([and beginning/ending positions ])[of the token. + * @@return the token identifier corresponding to the next token. */ + Symbol yylex (); + + /** + * Entry point for error reporting. Emits an error + * ]b4_locations_if([referring to the given location ])[in a user-defined way. + *]b4_locations_if([[ + * @@param loc The location of the element to which the + * error message is related]])[ + * @@param s The string for the error message. 
*/ + void yyerror (]b4_locations_if([[const Location loc, ]])[string s); +]b4_parse_error_bmatch([custom], [[ + /** + * Build and emit a "syntax error" message in a user-defined way. + * + * @@param ctx The context of the error. + */ + void reportSyntaxError(]b4_parser_class[.Context ctx); +]])[ +} + +]b4_public_types_declare[ + +]b4_locations_if([b4_position_type_if([[ +static assert(__traits(compiles, + (new Position[1])[0]=(new Position[1])[0]), + "struct/class Position must be default-constructible " + "and assignable"); +static assert(__traits(compiles, (new string[1])[0]=(new Position).toString()), + "error: struct/class Position must have toString method"); +]], [[ + /** + * A struct denoting a point in the input.*/ +public struct ]b4_position_type[ { + + /** The column index within the line of input. */ + public int column = 1; + /** The line number within an input file. */ + public int line = 1; + /** The name of the input file. */ + public string filename = null; + + /** + * A string representation of the position. 
*/ + public string toString() const { + if (filename) + return format("%s:%d.%d", filename, line, column); + else + return format("%d.%d", line, column); + } +} +]])b4_location_type_if([[ +static assert(__traits(compiles, (new Location((new Position[1])[0]))) && + __traits(compiles, (new Location((new Position[1])[0], (new Position[1])[0]))), + "error: struct/class Location must have " + "default constructor and constructors this(Position) and this(Position, Position)."); +static assert(__traits(compiles, (new Location[1])[0].begin=(new Location[1])[0].begin) && + __traits(compiles, (new Location[1])[0].begin=(new Location[1])[0].end) && + __traits(compiles, (new Location[1])[0].end=(new Location[1])[0].begin) && + __traits(compiles, (new Location[1])[0].end=(new Location[1])[0].end), + "error: struct/class Location must have assignment-compatible " + "members/properties 'begin' and 'end'."); +static assert(__traits(compiles, (new string[1])[0]=(new Location[1])[0].toString()), + "error: struct/class Location must have toString method."); + +private immutable bool yy_location_is_class = !__traits(compiles, *(new Location((new Position[1])[0])));]], [[ +/** + * A struct defining a pair of positions. Positions, defined by the + * Position struct, denote a point in the input. + * Locations represent a part of the input through the beginning + * and ending positions. */ +public struct ]b4_location_type[ +{ + /** The first, inclusive, position in the range. */ + public Position begin; + + /** The first position beyond the range. */ + public Position end; + + /** + * Create a Location denoting an empty range located at + * a given point. + * @@param loc The position at which the range is anchored. */ + public this(Position loc) + { + this.begin = this.end = loc; + } + + /** + * Create a Location from the endpoints of the range. + * @@param begin The first position included in the range. + * @@param end The first position beyond the range. 
*/ + public this(Position begin, Position end) + { + this.begin = begin; + this.end = end; + } + + /** + * Reset initial location to final location. + */ + public void step() + { + this.begin = this.end; + } + + /** + * A representation of the location. + */ + public string toString() const + { + auto end_col = 0 < end.column ? end.column - 1 : 0; + auto res = begin.toString (); + if (end.filename && begin.filename != end.filename) + res ~= "-" ~ format("%s:%d.%d", end.filename, end.line, end_col); + else if (begin.line < end.line) + res ~= "-" ~ format("%d.%d", end.line, end_col); + else if (begin.column < end_col) + res ~= "-" ~ format("%d", end_col); + return res; + } +} + +private immutable bool yy_location_is_class = false; + +]])])[]b4_value_type_setup[]m4_ifdef([b4_user_union_members], [private union YYSemanticType +{ +b4_user_union_members +};], +[m4_if(b4_tag_seen_flag, 0, +[[private alias int YYSemanticType;]])])[ +]b4_token_enums[ +]b4_parser_class_declaration[ +{ + ]b4_identification[ + +]b4_declare_symbol_enum[ + +]b4_locations_if([[ + private final Location yylloc_from_stack (ref YYStack rhs, int n) + { + static if (yy_location_is_class) { + if (n > 0) + return new Location (rhs.locationAt (n-1).begin, rhs.locationAt (0).end); + else + return new Location (rhs.locationAt (0).end); + } else { + if (n > 0) + return Location (rhs.locationAt (n-1).begin, rhs.locationAt (0).end); + else + return Location (rhs.locationAt (0).end); + } + }]])[ + +]b4_lexer_if([[ private class YYLexer implements Lexer { +]b4_percent_code_get([[lexer]])[ + } +]])[ + /** The object doing lexical analysis for us. */ + private Lexer yylexer; + +]b4_parse_param_vars[ + +]b4_lexer_if([[ + /** + * Instantiate the Bison-generated parser. 
+ */ + public this] (b4_parse_param_decl([b4_lex_param_decl])[) { +]b4_percent_code_get([[init]])[]b4_lac_if([[ + this.yylacStack = new int[]; + this.yylacEstablished = false;]])[ + this (new YYLexer(]b4_lex_param_call[)); + } +]])[ + + /** + * Instantiate the Bison-generated parser. + * @@param yylexer The scanner that will supply tokens to the parser. + */ + ]b4_lexer_if([[protected]], [[public]]) [this (]b4_parse_param_decl([[Lexer yylexer]])[) { + this.yylexer = yylexer;]b4_parse_trace_if([[ + this.yyDebugStream = stderr;]])[ +]b4_parse_param_cons[ + } +]b4_parse_trace_if([[ + import std.stdio; + private File yyDebugStream; + + /** + * The File on which the debugging output is + * printed. + */ + public File getDebugStream () { return yyDebugStream; } + + /** + * Set the std.File on which the debug output is printed. + * @@param s The stream that is used for debugging output. + */ + public final void setDebugStream(File s) { yyDebugStream = s; } + + private int yydebug = 0; + + /** + * Answer the verbosity of the debugging output; 0 means that all kinds of + * output from the parser are suppressed. + */ + public final int getDebugLevel() { return yydebug; } + + /** + * Set the verbosity of the debugging output; 0 means that all kinds of + * output from the parser are suppressed. + * @@param level The verbosity level for debugging output. + */ + public final void setDebugLevel(int level) { yydebug = level; } + + protected final void yycdebug (string s) { + if (0 < yydebug) + yyDebugStream.write (s); + } + + protected final void yycdebugln (string s) { + if (0 < yydebug) + yyDebugStream.writeln (s); + } +]])[ + private final ]b4_parser_class[.Symbol yylex () { + return yylexer.yylex (); + } + + protected final void yyerror (]b4_locations_if([[const Location loc, ]])[string s) { + yylexer.yyerror (]b4_locations_if([loc, ])[s); + } + + /** + * The number of syntax errors so far. 
+ */ + public int numberOfErrors() const { return yynerrs_; } + private int yynerrs_ = 0; + + /** + * Returned by a Bison action in order to stop the parsing process and + * return success (true). */ + public static immutable int YYACCEPT = 0; + + /** + * Returned by a Bison action in order to stop the parsing process and + * return failure (false). */ + public static immutable int YYABORT = 1; +]b4_push_if([ + /** + * Returned by a Bison action in order to request a new token. + */ + public static immutable int YYPUSH_MORE = 4;])[ + + /** + * Returned by a Bison action in order to start error recovery without + * printing an error message. */ + public static immutable int YYERROR = 2; + + // Internal return codes that are not supported for user semantic + // actions. + private static immutable int YYERRLAB = 3; + private static immutable int YYNEWSTATE = 4; + private static immutable int YYDEFAULT = 5; + private static immutable int YYREDUCE = 6; + private static immutable int YYERRLAB1 = 7; + private static immutable int YYRETURN = 8; +]b4_push_if([[ private static immutable int YYGETTOKEN = 9; /* Signify that a new token is expected when doing push-parsing. */]])[ + +]b4_locations_if([ + private static immutable YYSemanticType yy_semantic_null;])[ + private int yyerrstatus_ = 0; + + private void yyerrok() + { + yyerrstatus_ = 0; + } + + // Lookahead symbol kind. + SymbolKind yytoken = ]b4_symbol(empty, kind)[; + + /* State. */ + int yyn = 0; + int yylen = 0; + int yystate = 0; + + YYStack yystack; + + int label = YYNEWSTATE; + + /* Error handling. */ +]b4_locations_if([[ + /// The location where the error started. + Location yyerrloc; + + /// Location of the lookahead. + Location yylloc; + + /// @@$. + Location yyloc;]])[ + + /// Semantic value of the lookahead. + Value yylval; + + /** + * Whether error recovery is being done. In this state, the parser + * reads token until it reaches a known state, and then restarts normal + * operation. 
*/ + public final bool recovering () + { + return yyerrstatus_ == 0; + } + + /** Compute post-reduction state. + * @@param yystate the current state + * @@param yysym the nonterminal to push on the stack + */ + private int yyLRGotoState(int yystate, int yysym) { + int yyr = yypgoto_[yysym - yyntokens_] + yystate; + if (0 <= yyr && yyr <= yylast_ && yycheck_[yyr] == yystate) + return yytable_[yyr]; + else + return yydefgoto_[yysym - yyntokens_]; + } + + private int yyaction (int yyn, ref YYStack yystack, int yylen) + { + Value yyval;]b4_locations_if([[ + Location yyloc = yylloc_from_stack (yystack, yylen);]])[ + + /* If YYLEN is nonzero, implement the default value of the action: + `$$ = $1'. Otherwise, use the top of the stack. + + Otherwise, the following line sets YYVAL to garbage. + This behavior is undocumented and Bison + users should not rely upon it. */ + if (yylen > 0) + yyval = yystack.valueAt (yylen - 1); + else + yyval = yystack.valueAt (0); + +]b4_parse_trace_if([[ + yy_reduce_print (yyn, yystack);]])[ + + switch (yyn) + { +]b4_user_actions[ + default: break; + } + +]b4_parse_trace_if([[ + yy_symbol_print ("-> $$ =", to!SymbolKind (yyr1_[yyn]), yyval]b4_locations_if([, yyloc])[);]])[ + + yystack.pop (yylen); + yylen = 0; + + /* Shift the result of the reduction. */ + int yystate = yyLRGotoState(yystack.stateAt(0), yyr1_[yyn]); + yystack.push (yystate, yyval]b4_locations_if([, yyloc])[); + return YYNEWSTATE; + } + +]b4_parse_trace_if([[ + /*--------------------------------. + | Print this symbol on YYOUTPUT. | + `--------------------------------*/ + + private final void yy_symbol_print (string s, SymbolKind yykind, + ref Value yyval]b4_locations_if([, ref Location yyloc])[) + { + if (0 < yydebug) + { + File yyo = yyDebugStream; + yyo.write(s); + yyo.write(yykind < yyntokens_ ? 
" token " : " nterm "); + yyo.write(format("%s", yykind)); + yyo.write(" ("]b4_locations_if([ ~ yyloc.toString() ~ ": "])[); + ]b4_symbol_actions([printer])[ + yyo.write(")\n"); + } + } +]])[ +]b4_symbol_type_define[ +]b4_push_if([[ + /** + * Push Parse input from external lexer + * + * @@param yyla current Symbol + * + * @@return YYACCEPT, YYABORT, YYPUSH_MORE + */ + public int pushParse(Symbol yyla)]], [[ + /** + * Parse input from the scanner that was specified at object construction + * time. Return whether the end of the input was reached successfully. + * + * @@return true if the parsing succeeds. Note that this does not + * imply that there were no syntax errors. + */ + public bool parse()]])[ + {]b4_push_if([[ + if (!this.pushParseInitialized) + { + pushParseInitialize(); + yyerrstatus_ = 0; + } + else + label = YYGETTOKEN; + + bool push_token_consumed = true; +]], [[ bool yyresult;]b4_lac_if([[ + // Discard the LAC context in case there still is one left from a + // previous invocation. + yylacDiscard("init");]])[]b4_parse_trace_if([[ + + yycdebugln ("Starting parse");]])[ + yyerrstatus_ = 0; + +]m4_ifdef([b4_initial_action], [ +m4_pushdef([b4_at_dollar], [yylloc])dnl +m4_pushdef([b4_dollar_dollar], [yylval])dnl + /* User initialization code. */ + b4_user_initial_action +m4_popdef([b4_dollar_dollar])dnl +m4_popdef([b4_at_dollar])])dnl + + [ /* Initialize the stack. */ + yystack.push (yystate, yylval]b4_locations_if([, yylloc])[); + + label = YYNEWSTATE;]])[ + for (;;) + final switch (label) + { + /* New state. Unlike in the C/C++ skeletons, the state is already + pushed when we come here. */ + case YYNEWSTATE:]b4_parse_trace_if([[ + yycdebugln (format("Entering state %d", yystate)); + if (0 < yydebug) + yystack.print (yyDebugStream);]])[ + + /* Accept? */ + if (yystate == yyfinal_)]b4_push_if([[ + { + label = YYACCEPT; + break; + }]], [[ + return true;]])[ + + /* Take a decision. First try without lookahead. 
*/ + yyn = yypact_[yystate]; + if (yyPactValueIsDefault(yyn)) + { + label = YYDEFAULT; + break; + }]b4_push_if([[ + goto case; + + case YYGETTOKEN:]])[ + + /* Read a lookahead token. */ + if (yytoken == ]b4_symbol(empty, kind)[) + {]b4_push_if([[ + if (!push_token_consumed) + return YYPUSH_MORE;]])[]b4_parse_trace_if([[ + yycdebugln ("Reading a token");]])[]b4_push_if([[ + yytoken = yyla.token; + yylval = yyla.value;]b4_locations_if([[ + yylloc = yyla.location;]])[ + push_token_consumed = false;]], [[ + Symbol yysymbol = yylex(); + yytoken = yysymbol.token(); + yylval = yysymbol.value();]b4_locations_if([[ + yylloc = yysymbol.location();]])[]])[ + } + + /* Token already converted to internal form. */]b4_parse_trace_if([[ + yy_symbol_print ("Next token is", yytoken, yylval]b4_locations_if([, yylloc])[);]])[ + + if (yytoken == ]b4_symbol(error, kind)[) + { + // The scanner already issued an error message, process directly + // to error recovery. But do not keep the error token as + // lookahead, it is too special and may lead us to an endless + // loop in error recovery. */ + yytoken = ]b4_symbol(undef, kind)[;]b4_locations_if([[ + yyerrloc = yylloc;]])[ + label = YYERRLAB1; + } + else + { + /* If the proper action on seeing token YYTOKEN is to reduce or to + detect an error, take that action. */ + yyn += yytoken; + if (yyn < 0 || yylast_ < yyn || yycheck_[yyn] != yytoken) {]b4_lac_if([[ + if (!yylacEstablish(yystack, yytoken)) + label = YYERRLAB; + else]])[ + label = YYDEFAULT; + } + /* <= 0 means reduce or error. */ + else if ((yyn = yytable_[yyn]) <= 0) + { + if (yyTableValueIsError(yyn)) + label = YYERRLAB;]b4_lac_if([[ + else if (!yylacEstablish(yystack, yytoken)) + label = YYERRLAB;]])[ + else + { + yyn = -yyn; + label = YYREDUCE; + } + } + else + { + /* Shift the lookahead token. */]b4_parse_trace_if([[ + yy_symbol_print ("Shifting", yytoken, yylval]b4_locations_if([, yylloc])[);]])[ + + /* Discard the token being shifted. 
*/ + yytoken = ]b4_symbol(empty, kind)[; + + /* Count tokens shifted since error; after three, turn off error + * status. */ + if (yyerrstatus_ > 0) + --yyerrstatus_; + + yystate = yyn; + yystack.push (yystate, yylval]b4_locations_if([, yylloc])[);]b4_lac_if([[ + yylacDiscard("shift");]])[ + label = YYNEWSTATE; + } + } + break; + + /*-----------------------------------------------------------. + | yydefault -- do the default action for the current state. | + `-----------------------------------------------------------*/ + case YYDEFAULT: + yyn = yydefact_[yystate]; + if (yyn == 0) + label = YYERRLAB; + else + label = YYREDUCE; + break; + + /*-----------------------------. + | yyreduce -- Do a reduction. | + `-----------------------------*/ + case YYREDUCE: + yylen = yyr2_[yyn]; + label = yyaction (yyn, yystack, yylen); + yystate = yystack.stateAt (0); + break; + + /*--------------------------------------. + | yyerrlab -- here on detecting error. | + `--------------------------------------*/ + case YYERRLAB: + /* If not already recovering from an error, report this error. */ + if (yyerrstatus_ == 0) + { + ++yynerrs_; + yyreportSyntaxError(new Context(]b4_lac_if([[this, ]])[yystack, yytoken]b4_locations_if([[, yylloc]])[)); + } +]b4_locations_if([ + yyerrloc = yylloc;])[ + if (yyerrstatus_ == 3) + { + /* If just tried and failed to reuse lookahead token after an + * error, discard it. */ + + /* Return failure if at end of input. */ + if (yytoken == ]b4_symbol(eof, [kind])[)]b4_push_if([[ + { + label = YYABORT; + break; + }]], [[ + return false;]])[ + else + yytoken = ]b4_symbol(empty, kind)[; + } + + /* Else will try to reuse lookahead token after shifting the error + * token. */ + label = YYERRLAB1; + break; + + /*-------------------------------------------------. + | errorlab -- error raised explicitly by YYERROR. 
| + `-------------------------------------------------*/ + case YYERROR:]b4_locations_if([ + yyerrloc = yystack.locationAt (yylen - 1);])[ + /* Do not reclaim the symbols of the rule which action triggered + this YYERROR. */ + yystack.pop (yylen); + yylen = 0; + yystate = yystack.stateAt (0); + label = YYERRLAB1; + break; + + /*-------------------------------------------------------------. + | yyerrlab1 -- common code for both syntax error and YYERROR. | + `-------------------------------------------------------------*/ + case YYERRLAB1: + yyerrstatus_ = 3; /* Each real token shifted decrements this. */ + + // Pop stack until we find a state that shifts the error token. + for (;;) + { + yyn = yypact_[yystate]; + if (!yyPactValueIsDefault(yyn)) + { + yyn += ]b4_symbol(error, kind)[; + if (0 <= yyn && yyn <= yylast_ && yycheck_[yyn] == ]b4_symbol(error, kind)[) + { + yyn = yytable_[yyn]; + if (0 < yyn) + break; + } + } + + /* Pop the current state because it cannot handle the error token. */ + if (yystack.height == 1)]b4_push_if([[ + { + label = YYABORT; + break; + }]],[[ + return false;]])[ + +]b4_locations_if([ yyerrloc = yystack.locationAt (0);])[ + yystack.pop (); + yystate = yystack.stateAt (0);]b4_parse_trace_if([[ + if (0 < yydebug) + yystack.print (yyDebugStream);]])[ + }]b4_push_if([[ + if (label == YYABORT) + /* Leave the switch. */ + break; +]])[ +]b4_locations_if([ + /* Muck with the stack to setup for yylloc. */ + yystack.push (0, yy_semantic_null, yylloc); + yystack.push (0, yy_semantic_null, yyerrloc); + yyloc = yylloc_from_stack (yystack, 2); + yystack.pop (2);])[ + + /* Shift the error token. */]b4_lac_if([[ + yylacDiscard("error recovery");]])[]b4_parse_trace_if([[ + yy_symbol_print ("Shifting", to!SymbolKind (yystos_[yyn]), yylval]b4_locations_if([, yyloc])[);]])[ + yystate = yyn; + yystack.push (yyn, yylval]b4_locations_if([, yyloc])[); + label = YYNEWSTATE; + break; + + /* Accept. 
*/ + case YYACCEPT:]b4_push_if([[ + this.pushParseInitialized = false;]b4_parse_trace_if([[ + if (0 < yydebug) + yystack.print (yyDebugStream);]])[ + return YYACCEPT;]], [[ + yyresult = true; + label = YYRETURN; + break;]])[ + + /* Abort. */ + case YYABORT:]b4_push_if([[ + this.pushParseInitialized = false;]b4_parse_trace_if([[ + if (0 < yydebug) + yystack.print (yyDebugStream);]])[ + return YYABORT;]], [[ + yyresult = false; + label = YYRETURN; + break;]])[ +]b4_push_if([[]], [[ ][case YYRETURN:]b4_parse_trace_if([[ + if (0 < yydebug) + yystack.print (yyDebugStream);]])[ + return yyresult;]])[ + } + assert(0); + } + +]b4_push_if([[ + bool pushParseInitialized = false; + + /** + * (Re-)Initialize the state of the push parser. + */ + public void pushParseInitialize() + { + + /* Lookahead and lookahead in internal form. */ + this.yytoken = ]b4_symbol(empty, kind)[; + + /* State. */ + this.yyn = 0; + this.yylen = 0; + this.yystate = 0; + destroy(this.yystack); + this.label = YYNEWSTATE; +]b4_lac_if([[ + destroy(this.yylacStack); + this.yylacEstablished = false;]])[ + + /* Error handling. */ + this.yynerrs_ = 0; +]b4_locations_if([ + /* The location where the error started. */ + this.yyerrloc = Location(Position(), Position()); + this.yylloc = Location(Position(), Position());])[ + + /* Semantic value of the lookahead. */ + //destroy(this.yylval); + + /* Initialize the stack. */ + yystack.push(this.yystate, this.yylval]b4_locations_if([, this.yylloc])[); + + this.pushParseInitialized = true; + }]])[]b4_both_if([[ + /** + * Parse input from the scanner that was specified at object construction + * time. Return whether the end of the input was reached successfully. + * This version of parse() is defined only when api.push-push=both. + * + * @@return true if the parsing succeeds. Note that this does not + * imply that there were no syntax errors. 
+ */ + bool parse() + { + int status = 0; + do { + status = this.pushParse(yylex()); + } while (status == YYPUSH_MORE); + return status == YYACCEPT; + }]])[ + + // Generate an error message. + private final void yyreportSyntaxError(Context yyctx) + {]b4_parse_error_bmatch( +[custom], [[ + yylexer.reportSyntaxError(yyctx);]], +[detailed], [[ + if (yyctx.getToken() != ]b4_symbol(empty, kind)[) + { + // FIXME: This method of building the message is not compatible + // with internationalization. + immutable int argmax = 5; + SymbolKind[] yyarg = new SymbolKind[argmax]; + int yycount = yysyntaxErrorArguments(yyctx, yyarg, argmax); + string res, yyformat; + switch (yycount) + { + case 1: + yyformat = YY_("syntax error, unexpected %s"); + res = format(yyformat, yyarg[0]); + break; + case 2: + yyformat = YY_("syntax error, unexpected %s, expecting %s"); + res = format(yyformat, yyarg[0], yyarg[1]); + break; + case 3: + yyformat = YY_("syntax error, unexpected %s, expecting %s or %s"); + res = format(yyformat, yyarg[0], yyarg[1], yyarg[2]); + break; + case 4: + yyformat = YY_("syntax error, unexpected %s, expecting %s or %s or %s"); + res = format(yyformat, yyarg[0], yyarg[1], yyarg[2], yyarg[3]); + break; + case 5: + yyformat = YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"); + res = format(yyformat, yyarg[0], yyarg[1], yyarg[2], yyarg[3], yyarg[4]); + break; + default: + res = YY_("syntax error"); + break; + } + yyerror(]b4_locations_if([yyctx.getLocation(), ])[res); + }]], +[[simple]], [[ + yyerror(]b4_locations_if([yyctx.getLocation(), ])[YY_("syntax error"));]])[ + } + +]b4_parse_error_bmatch( +[detailed], [[ + private int yysyntaxErrorArguments(Context yyctx, SymbolKind[] yyarg, int yyargn) { + /* There are many possibilities here to consider: + - If this state is a consistent state with a default action, + then the only way this function was invoked is if the + default action is an error action. 
In that case, don't + check for expected tokens because there are none. + - The only way there can be no lookahead present (in tok) is + if this state is a consistent state with a default action. + Thus, detecting the absence of a lookahead is sufficient to + determine that there is no unexpected or expected token to + report. In that case, just report a simple "syntax error". + - Don't assume there isn't a lookahead just because this + state is a consistent state with a default action. There + might have been a previous inconsistent state, consistent + state with a non-default action, or user semantic action + that manipulated yychar. (However, yychar is currently out + of scope during semantic actions.) + - Of course, the expected token list depends on states to + have correct lookahead information, and it depends on the + parser not to perform extra reductions after fetching a + lookahead from the scanner and before detecting a syntax + error. Thus, state merging (from LALR or IELR) and default + reductions corrupt the expected token list. However, the + list is correct for canonical LR with one exception: it + will still contain any token that will not be accepted due + to an error action in a later state. + */ + int yycount = 0; + if (yyctx.getToken() != ]b4_symbol(empty, kind)[) + { + if (yyarg !is null) + yyarg[yycount] = yyctx.getToken(); + yycount += 1; + yycount += yyctx.getExpectedTokens(yyarg, 1, yyargn); + } + return yycount; + } +]])[ + + + /** + * Information needed to get the list of expected tokens and to forge + * a syntax error diagnostic. 
+ */ + public static final class Context + {]b4_lac_if([[ + private ]b4_parser_class[ yyparser;]])[ + private const(YYStack) yystack; + private SymbolKind yytoken;]b4_locations_if([[ + private const(Location) yylocation;]])[ + + this(]b4_lac_if([[]b4_parser_class[ parser, ]])[YYStack stack, SymbolKind kind]b4_locations_if([[, Location loc]])[) + {]b4_lac_if([[ + yyparser = parser;]])[ + yystack = stack; + yytoken = kind;]b4_locations_if([[ + yylocation = loc;]])[ + } + + final SymbolKind getToken() const + { + return yytoken; + }]b4_locations_if([[ + + final const(Location) getLocation() const + { + return yylocation; + }]])[ + /** + * Put in YYARG at most YYARGN of the expected tokens given the + * current YYCTX, and return the number of tokens stored in YYARG. If + * YYARG is null, return the number of expected tokens (guaranteed to + * be less than YYNTOKENS). + */ + int getExpectedTokens(SymbolKind[] yyarg, int yyargn)]b4_lac_if([[]], [[ const]])[ + { + return getExpectedTokens(yyarg, 0, yyargn); + } + + int getExpectedTokens(SymbolKind[] yyarg, int yyoffset, int yyargn)]b4_lac_if([[]], [[ const]])[ + { + int yycount = yyoffset;]b4_lac_if([b4_parse_trace_if([[ + // Execute LAC once. We don't care if it is successful, we + // only do it for the sake of debugging output. + + if (!yyparser.yylacEstablished) + yyparser.yylacCheck(yystack, yytoken); +]])[ + for (int yyx = 0; yyx < yyntokens_; ++yyx) + { + SymbolKind yysym = SymbolKind(yyx); + if (yysym != ]b4_symbol(error, kind)[ + && yysym != ]b4_symbol(undef, kind)[ + && yyparser.yylacCheck(yystack, yysym)) + { + if (yyarg == null) + yycount += 1; + else if (yycount == yyargn) + return 0; + else + yyarg[yycount++] = yysym; + } + }]], [[ + int yyn = yypact_[this.yystack.stateAt(0)]; + if (!yyPactValueIsDefault(yyn)) + { + /* Start YYX at -YYN if negative to avoid negative + indexes in YYCHECK. In other words, skip the first + -YYN actions for this state because they are default + actions. 
*/ + int yyxbegin = yyn < 0 ? -yyn : 0; + /* Stay within bounds of both yycheck and yytname. */ + int yychecklim = yylast_ - yyn + 1; + int yyxend = yychecklim < yyntokens_ ? yychecklim : yyntokens_; + for (int yyx = yyxbegin; yyx < yyxend; ++yyx) + if (yycheck_[yyx + yyn] == yyx && yyx != ]b4_symbol(error, kind)[ + && !yyTableValueIsError(yytable_[yyx + yyn])) + { + if (yyarg is null) + ++yycount; + else if (yycount == yyargn) + return 0; + else + yyarg[yycount++] = SymbolKind(yyx); + } + }]])[ + if (yyarg !is null && yycount == yyoffset && yyoffset < yyargn) + yyarg[yyoffset] = ]b4_symbol(empty, kind)[; + return yycount - yyoffset; + } + } + +]b4_lac_if([[ + /** Check the lookahead yytoken. + * \returns true iff the token will be eventually shifted. + */ + bool yylacCheck(const YYStack yystack, SymbolKind yytoken) + { + // Logically, the yylacStack's lifetime is confined to this function. + // Clear it, to get rid of potential left-overs from previous call. + destroy(yylacStack); + // Reduce until we encounter a shift and thereby accept the token. +]b4_parse_trace_if([[ + yycdebug("LAC: checking lookahead " ~ format("%s", yytoken) ~ ":");]])[ + int lacTop = 0; + while (true) + { + int topState = (yylacStack.length == 0 + ? yystack.stateAt(lacTop) + : yylacStack[$ - 1]); + int yyrule = yypact_[topState]; + if (yyPactValueIsDefault(yyrule) + || (yyrule += yytoken) < 0 || yylast_ < yyrule + || yycheck_[yyrule] != yytoken) + { + // Use the default action. + yyrule = yydefact_[+topState]; + if (yyrule == 0) + {]b4_parse_trace_if([[ + yycdebugln(" Err");]])[ + return false; + } + } + else + { + // Use the action from yytable. + yyrule = yytable_[yyrule]; + if (yyTableValueIsError(yyrule)) + {]b4_parse_trace_if([[ + yycdebugln(" Err");]])[ + return false; + } + if (0 < yyrule) + {]b4_parse_trace_if([[ + yycdebugln(" S" ~ to!string(yyrule));]])[ + return true; + } + yyrule = -yyrule; + } + // By now we know we have to simulate a reduce. 
+]b4_parse_trace_if([[ + yycdebug(" R" ~ to!string(yyrule - 1));]])[ + // Pop the corresponding number of values from the stack. + { + int yylen = yyr2_[yyrule]; + // First pop from the LAC stack as many tokens as possible. + int lacSize = cast (int) yylacStack.length; + if (yylen < lacSize) + { + yylacStack.length -= yylen; + yylen = 0; + } + else if (lacSize != 0) + { + destroy(yylacStack); + yylen -= lacSize; + } + // Only afterwards look at the main stack. + // We simulate popping elements by incrementing lacTop. + lacTop += yylen; + } + // Keep topState in sync with the updated stack. + topState = (yylacStack.length == 0 + ? yystack.stateAt(lacTop) + : yylacStack[$ - 1]); + // Push the resulting state of the reduction. + int state = yyLRGotoState(topState, yyr1_[yyrule]);]b4_parse_trace_if([[ + yycdebug(" G" ~ to!string(state));]])[ + yylacStack.length++; + yylacStack[$ - 1] = state; + } + } + + /** Establish the initial context if no initial context currently exists. + * \returns true iff the token will be eventually shifted. + */ + bool yylacEstablish(YYStack yystack, SymbolKind yytoken) + { + /* Establish the initial context for the current lookahead if no initial + context is currently established. + + We define a context as a snapshot of the parser stacks. We define + the initial context for a lookahead as the context in which the + parser initially examines that lookahead in order to select a + syntactic action. Thus, if the lookahead eventually proves + syntactically unacceptable (possibly in a later context reached via a + series of reductions), the initial context can be used to determine + the exact set of tokens that would be syntactically acceptable in the + lookahead's place. Moreover, it is the context after which any + further semantic actions would be erroneous because they would be + determined by a syntactically unacceptable token. 
+ + yylacEstablish should be invoked when a reduction is about to be + performed in an inconsistent state (which, for the purposes of LAC, + includes consistent states that don't know they're consistent because + their default reductions have been disabled). + + For parse.lac=full, the implementation of yylacEstablish is as + follows. If no initial context is currently established for the + current lookahead, then check if that lookahead can eventually be + shifted if syntactic actions continue from the current context. */ + if (yylacEstablished) + return true; + else + {]b4_parse_trace_if([[ + yycdebugln("LAC: initial context established for " ~ format("%s", yytoken));]])[ + yylacEstablished = true; + return yylacCheck(yystack, yytoken); + } + } + + /** Discard any previous initial lookahead context because of event. + * \param event the event which caused the lookahead to be discarded. + * Only used for debbuging output. */ + void yylacDiscard(string event) + { + /* Discard any previous initial lookahead context because of Event, + which may be a lookahead change or an invalidation of the currently + established initial context for the current lookahead. + + The most common example of a lookahead change is a shift. An example + of both cases is syntax error recovery. That is, a syntax error + occurs when the lookahead is syntactically erroneous for the + currently established initial context, so error recovery manipulates + the parser stacks to try to find a new initial context in which the + current lookahead is syntactically acceptable. If it fails to find + such a context, it discards the lookahead. */ + if (yylacEstablished) + {]b4_parse_trace_if([[ + yycdebugln("LAC: initial context discarded due to " ~ event);]])[ + yylacEstablished = false; + } + } + + /** The stack for LAC. + * Logically, the yylacStack's lifetime is confined to the function + * yylacCheck. 
We just store it as a member of this class to hold + * on to the memory and to avoid frequent reallocations. + */ + int[] yylacStack; + /** Whether an initial LAC context was established. */ + bool yylacEstablished; +]])[ + + /** + * Whether the given yypact_ value indicates a defaulted state. + * @@param yyvalue the value to check + */ + private static bool yyPactValueIsDefault(int yyvalue) + { + return yyvalue == yypact_ninf_; + } + + /** + * Whether the given yytable_ value indicates a syntax error. + * @@param yyvalue the value to check + */ + private static bool yyTableValueIsError(int yyvalue) + { + return yyvalue == yytable_ninf_; + } + + /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing + STATE-NUM. */ + private static immutable ]b4_int_type_for([b4_pact])[ yypact_ninf_ = ]b4_pact_ninf[; + + /* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If + positive, shift that token. If negative, reduce the rule which + number is the opposite. If YYTABLE_NINF_, syntax error. */ + private static immutable ]b4_int_type_for([b4_table])[ yytable_ninf_ = ]b4_table_ninf[; + + ]b4_parser_tables_define[ + +]b4_parse_trace_if([[ + /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */ + private static immutable ]b4_int_type_for([b4_rline])[[] yyrline_ = + @{ + ]b4_rline[ + @}; + + // Report on the debug stream that the rule yyrule is going to be reduced. + private final void yy_reduce_print (int yyrule, ref YYStack yystack) + { + if (yydebug == 0) + return; + + int yylno = yyrline_[yyrule]; + int yynrhs = yyr2_[yyrule]; + /* Print the symbols being reduced, and their result. */ + yycdebugln (format("Reducing stack by rule %d (line %d):", + yyrule - 1, yylno)); + + /* The symbols being reduced. 
*/ + for (int yyi = 0; yyi < yynrhs; yyi++) + yy_symbol_print (format(" $%d =", yyi + 1), + to!SymbolKind (yystos_[yystack.stateAt(yynrhs - (yyi + 1))]), + ]b4_rhs_value(yynrhs, yyi + 1)b4_locations_if([, + b4_rhs_location(yynrhs, yyi + 1)])[); + } +]])[ + + private static auto yytranslate_ (int t) + { +]b4_api_token_raw_if( +[[ return SymbolKind(t);]], +[[ /* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */ + immutable ]b4_int_type_for([b4_translate])[[] translate_table = + @{ + ]b4_translate[ + @}; + + // Last valid token kind. + immutable int code_max = ]b4_code_max[; + + if (t <= 0) + return ]b4_symbol(eof, kind)[; + else if (t <= code_max) + return SymbolKind(translate_table[t]); + else + return ]b4_symbol(undef, kind)[;]])[ + } + + private static immutable int yylast_ = ]b4_last[; + private static immutable int yynnts_ = ]b4_nterms_number[; + private static immutable int yyfinal_ = ]b4_final_state_number[; + private static immutable int yyntokens_ = ]b4_tokens_number[; + + private final struct YYStackElement { + int state; + Value value;]b4_locations_if( + b4_location_type[[] location;])[ + } + + private final struct YYStack { + private YYStackElement[] stack = []; + + public final ulong height() + { + return stack.length; + } + + public final void push (int state, Value value]dnl + b4_locations_if([, ref Location loc])[) + { + stack ~= YYStackElement(state, value]b4_locations_if([, loc])[); + } + + public final void pop () + { + pop (1); + } + + public final void pop (int num) + { + stack.length -= num; + } + + public final int stateAt (int i) const + { + return stack[$-i-1].state; + } + +]b4_locations_if([[ + public final ref Location locationAt (int i) + { + return stack[$-i-1].location; + }]])[ + + public final ref Value valueAt (int i) + { + return stack[$-i-1].value; + } +]b4_parse_trace_if([[ + // Print the state stack on the debug stream. 
+ public final void print (File stream) + { + stream.write ("Stack now"); + for (int i = 0; i < stack.length; i++) + stream.write (" ", stack[i].state); + stream.writeln (); + }]])[ + } +]b4_percent_code_get[ +} +]b4_percent_code_get([[epilogue]])[]dnl +b4_epilogue[]dnl +b4_output_end diff --git a/platform/dbops/binaries/build/share/bison/skeletons/lalr1.java b/platform/dbops/binaries/build/share/bison/skeletons/lalr1.java new file mode 100644 index 0000000000000000000000000000000000000000..1bbecca1382c7d00ec34d7b69b702347519a898b --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/lalr1.java @@ -0,0 +1,1303 @@ +# Java skeleton for Bison -*- java -*- + +# Copyright (C) 2007-2015, 2018-2021 Free Software Foundation, Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +m4_include(b4_skeletonsdir/[java.m4]) + +b4_header_if([b4_complain([%header/%defines does not make sense in Java])]) + +m4_define([b4_symbol_no_destructor_assert], +[b4_symbol_if([$1], [has_destructor], + [b4_complain_at(m4_unquote(b4_symbol([$1], [destructor_loc])), + [%destructor does not make sense in Java])])]) +b4_symbol_foreach([b4_symbol_no_destructor_assert]) + +## --------------- ## +## api.push-pull. 
## +## --------------- ## + +b4_percent_define_default([[api.push-pull]], [[pull]]) +b4_percent_define_check_values([[[[api.push-pull]], + [[pull]], [[push]], [[both]]]]) + +# Define m4 conditional macros that encode the value +# of the api.push-pull flag. +b4_define_flag_if([pull]) m4_define([b4_pull_flag], [[1]]) +b4_define_flag_if([push]) m4_define([b4_push_flag], [[1]]) +m4_case(b4_percent_define_get([[api.push-pull]]), + [pull], [m4_define([b4_push_flag], [[0]])], + [push], [m4_define([b4_pull_flag], [[0]])]) + +# Define a macro to be true when api.push-pull has the value "both". +m4_define([b4_both_if],[b4_push_if([b4_pull_if([$1],[$2])],[$2])]) + +# Handle BISON_USE_PUSH_FOR_PULL for the test suite. So that push parsing +# tests function as written, do not let BISON_USE_PUSH_FOR_PULL modify the +# behavior of Bison at all when push parsing is already requested. +b4_define_flag_if([use_push_for_pull]) +b4_use_push_for_pull_if([ + b4_push_if([m4_define([b4_use_push_for_pull_flag], [[0]])], + [m4_define([b4_push_flag], [[1]])])]) + +# Define a macro to encapsulate the parse state variables. This +# allows them to be defined either in parse() when doing pull parsing, +# or as class instance variable when doing push parsing. +m4_define([b4_define_state], +[[ + /* Lookahead token kind. */ + int yychar = YYEMPTY_; + /* Lookahead symbol kind. */ + SymbolKind yytoken = null; + + /* State. */ + int yyn = 0; + int yylen = 0; + int yystate = 0; + YYStack yystack = new YYStack (); + int label = YYNEWSTATE; + +]b4_locations_if([[ + /* The location where the error started. */ + ]b4_location_type[ yyerrloc = null; + + /* Location. */ + ]b4_location_type[ yylloc = new ]b4_location_type[ (null, null);]])[ + + /* Semantic value of the lookahead. 
*/ + ]b4_yystype[ yylval = null; +]]) + +# parse.lac +b4_percent_define_default([[parse.lac]], [[none]]) +b4_percent_define_check_values([[[[parse.lac]], [[full]], [[none]]]]) +b4_define_flag_if([lac]) +m4_define([b4_lac_flag], + [m4_if(b4_percent_define_get([[parse.lac]]), + [none], [[0]], [[1]])]) + + +## ------------- ## +## Parser File. ## +## ------------- ## + +b4_output_begin([b4_parser_file_name])[ +]b4_copyright([Skeleton implementation for Bison LALR(1) parsers in Java], + [2007-2015, 2018-2021])[ +]b4_disclaimer[ +]b4_percent_define_ifdef([api.package], [package b4_percent_define_get([api.package]);[ +]])[ +]b4_user_pre_prologue[ +]b4_user_post_prologue[ +import java.text.MessageFormat; +import java.util.ArrayList; +]b4_percent_code_get([[imports]])[ +/** + * A Bison parser, automatically generated from ]m4_bpatsubst(b4_file_name, [^"\(.*\)"$], [\1])[. + * + * @@author LALR (1) parser skeleton written by Paolo Bonzini. + */ +]b4_parser_class_declaration[ +{ +]b4_identification[ +][ +]b4_parse_error_bmatch( + [detailed\|verbose], [[ + /** + * True if verbose error messages are enabled. + */ + private boolean yyErrorVerbose = true; + + /** + * Whether verbose error messages are enabled. + */ + public final boolean getErrorVerbose() { return yyErrorVerbose; } + + /** + * Set the verbosity of error messages. + * @@param verbose True to request verbose error messages. + */ + public final void setErrorVerbose(boolean verbose) + { yyErrorVerbose = verbose; } +]])[ + +]b4_locations_if([[ + /** + * A class defining a pair of positions. Positions, defined by the + * ]b4_position_type[ class, denote a point in the input. + * Locations represent a part of the input through the beginning + * and ending positions. + */ + public static class ]b4_location_type[ { + /** + * The first, inclusive, position in the range. + */ + public ]b4_position_type[ begin; + + /** + * The first position beyond the range. 
+ */ + public ]b4_position_type[ end; + + /** + * Create a ]b4_location_type[ denoting an empty range located at + * a given point. + * @@param loc The position at which the range is anchored. + */ + public ]b4_location_type[ (]b4_position_type[ loc) { + this.begin = this.end = loc; + } + + /** + * Create a ]b4_location_type[ from the endpoints of the range. + * @@param begin The first position included in the range. + * @@param end The first position beyond the range. + */ + public ]b4_location_type[ (]b4_position_type[ begin, ]b4_position_type[ end) { + this.begin = begin; + this.end = end; + } + + /** + * Print a representation of the location. For this to be correct, + * ]b4_position_type[ should override the equals + * method. + */ + public String toString() { + if (begin.equals (end)) + return begin.toString(); + else + return begin.toString() + "-" + end.toString(); + } + } + + private ]b4_location_type[ yylloc(YYStack rhs, int n) + { + if (0 < n) + return new ]b4_location_type[(rhs.locationAt(n-1).begin, rhs.locationAt(0).end); + else + return new ]b4_location_type[(rhs.locationAt(0).end); + }]])[ + +]b4_declare_symbol_enum[ + + /** + * Communication interface between the scanner and the Bison-generated + * parser ]b4_parser_class[. + */ + public interface Lexer { +]b4_token_enums[ + /** Deprecated, use ]b4_symbol(eof, id)[ instead. */ + public static final int EOF = ]b4_symbol(eof, id)[; +]b4_pull_if([b4_locations_if([[ + /** + * Method to retrieve the beginning position of the last scanned token. + * @@return the position at which the last scanned token starts. + */ + ]b4_position_type[ getStartPos(); + + /** + * Method to retrieve the ending position of the last scanned token. + * @@return the first position beyond the last scanned token. + */ + ]b4_position_type[ getEndPos();]])[ + + /** + * Method to retrieve the semantic value of the last scanned token. + * @@return the semantic value of the last scanned token. 
+ */ + ]b4_yystype[ getLVal(); + + /** + * Entry point for the scanner. Returns the token identifier corresponding + * to the next token and prepares to return the semantic value + * ]b4_locations_if([and beginning/ending positions ])[of the token. + * @@return the token identifier corresponding to the next token. + */ + int yylex()]b4_maybe_throws([b4_lex_throws])[; +]])[ + /** + * Emit an error]b4_locations_if([ referring to the given location])[in a user-defined way. + * + *]b4_locations_if([[ @@param loc The location of the element to which the + * error message is related.]])[ + * @@param msg The string for the error message. + */ + void yyerror(]b4_locations_if([b4_location_type[ loc, ]])[String msg); + +]b4_parse_error_bmatch( + [custom], [[ + /** + * Build and emit a "syntax error" message in a user-defined way. + * + * @@param ctx The context of the error. + */ + void reportSyntaxError(Context ctx); +]])[ + } + +]b4_lexer_if([[ + private class YYLexer implements Lexer { +]b4_percent_code_get([[lexer]])[ + } + +]])[ + /** + * The object doing lexical analysis for us. + */ + private Lexer yylexer; + +]b4_parse_param_vars[ + +]b4_lexer_if([[ + /** + * Instantiates the Bison-generated parser. + */ + public ]b4_parser_class[(]b4_parse_param_decl([b4_lex_param_decl])[)]b4_maybe_throws([b4_init_throws])[ + { +]b4_percent_code_get([[init]])[]b4_lac_if([[ + this.yylacStack = new ArrayList(); + this.yylacEstablished = false;]])[ + this.yylexer = new YYLexer(]b4_lex_param_call[); +]b4_parse_param_cons[ + } +]])[ + + /** + * Instantiates the Bison-generated parser. + * @@param yylexer The scanner that will supply tokens to the parser. 
+ */ + ]b4_lexer_if([[protected]], [[public]]) b4_parser_class[(]b4_parse_param_decl([[Lexer yylexer]])[)]b4_maybe_throws([b4_init_throws])[ + { +]b4_percent_code_get([[init]])[]b4_lac_if([[ + this.yylacStack = new ArrayList(); + this.yylacEstablished = false;]])[ + this.yylexer = yylexer; +]b4_parse_param_cons[ + } + +]b4_parse_trace_if([[ + private java.io.PrintStream yyDebugStream = System.err; + + /** + * The PrintStream on which the debugging output is printed. + */ + public final java.io.PrintStream getDebugStream() { return yyDebugStream; } + + /** + * Set the PrintStream on which the debug output is printed. + * @@param s The stream that is used for debugging output. + */ + public final void setDebugStream(java.io.PrintStream s) { yyDebugStream = s; } + + private int yydebug = 0; + + /** + * Answer the verbosity of the debugging output; 0 means that all kinds of + * output from the parser are suppressed. + */ + public final int getDebugLevel() { return yydebug; } + + /** + * Set the verbosity of the debugging output; 0 means that all kinds of + * output from the parser are suppressed. + * @@param level The verbosity level for debugging output. + */ + public final void setDebugLevel(int level) { yydebug = level; } +]])[ + + private int yynerrs = 0; + + /** + * The number of syntax errors so far. + */ + public final int getNumberOfErrors() { return yynerrs; } + + /** + * Print an error message via the lexer. + *]b4_locations_if([[ Use a null location.]])[ + * @@param msg The error message. + */ + public final void yyerror(String msg) { + yylexer.yyerror(]b4_locations_if([[(]b4_location_type[)null, ]])[msg); + } +]b4_locations_if([[ + /** + * Print an error message via the lexer. + * @@param loc The location associated with the message. + * @@param msg The error message. + */ + public final void yyerror(]b4_location_type[ loc, String msg) { + yylexer.yyerror(loc, msg); + } + + /** + * Print an error message via the lexer. 
+ * @@param pos The position associated with the message. + * @@param msg The error message. + */ + public final void yyerror(]b4_position_type[ pos, String msg) { + yylexer.yyerror(new ]b4_location_type[ (pos), msg); + }]])[ +]b4_parse_trace_if([[ + protected final void yycdebugNnl(String s) { + if (0 < yydebug) + yyDebugStream.print(s); + } + + protected final void yycdebug(String s) { + if (0 < yydebug) + yyDebugStream.println(s); + }]])[ + + private final class YYStack { + private int[] stateStack = new int[16];]b4_locations_if([[ + private ]b4_location_type[[] locStack = new ]b4_location_type[[16];]])[ + private ]b4_yystype[[] valueStack = new ]b4_yystype[[16]; + + public int size = 16; + public int height = -1; + + public final void push(int state, ]b4_yystype[ value]b4_locations_if([, ]b4_location_type[ loc])[) { + height++; + if (size == height) { + int[] newStateStack = new int[size * 2]; + System.arraycopy(stateStack, 0, newStateStack, 0, height); + stateStack = newStateStack;]b4_locations_if([[ + ]b4_location_type[[] newLocStack = new ]b4_location_type[[size * 2]; + System.arraycopy(locStack, 0, newLocStack, 0, height); + locStack = newLocStack;]]) + + b4_yystype[[] newValueStack = new ]b4_yystype[[size * 2]; + System.arraycopy(valueStack, 0, newValueStack, 0, height); + valueStack = newValueStack; + + size *= 2; + } + + stateStack[height] = state;]b4_locations_if([[ + locStack[height] = loc;]])[ + valueStack[height] = value; + } + + public final void pop() { + pop(1); + } + + public final void pop(int num) { + // Avoid memory leaks... garbage collection is a white lie! 
+ if (0 < num) { + java.util.Arrays.fill(valueStack, height - num + 1, height + 1, null);]b4_locations_if([[ + java.util.Arrays.fill(locStack, height - num + 1, height + 1, null);]])[ + } + height -= num; + } + + public final int stateAt(int i) { + return stateStack[height - i]; + } +]b4_locations_if([[ + + public final ]b4_location_type[ locationAt(int i) { + return locStack[height - i]; + } +]])[ + public final ]b4_yystype[ valueAt(int i) { + return valueStack[height - i]; + } + + // Print the state stack on the debug stream. + public void print(java.io.PrintStream out) { + out.print ("Stack now"); + + for (int i = 0; i <= height; i++) { + out.print(' '); + out.print(stateStack[i]); + } + out.println(); + } + } + + /** + * Returned by a Bison action in order to stop the parsing process and + * return success (true). + */ + public static final int YYACCEPT = 0; + + /** + * Returned by a Bison action in order to stop the parsing process and + * return failure (false). + */ + public static final int YYABORT = 1; + +]b4_push_if([ + /** + * Returned by a Bison action in order to request a new token. + */ + public static final int YYPUSH_MORE = 4;])[ + + /** + * Returned by a Bison action in order to start error recovery without + * printing an error message. + */ + public static final int YYERROR = 2; + + /** + * Internal return codes that are not supported for user semantic + * actions. + */ + private static final int YYERRLAB = 3; + private static final int YYNEWSTATE = 4; + private static final int YYDEFAULT = 5; + private static final int YYREDUCE = 6; + private static final int YYERRLAB1 = 7; + private static final int YYRETURN = 8; +]b4_push_if([[ private static final int YYGETTOKEN = 9; /* Signify that a new token is expected when doing push-parsing. */]])[ + + private int yyerrstatus_ = 0; + +]b4_push_if([b4_define_state])[ + /** + * Whether error recovery is being done. 
In this state, the parser + * reads token until it reaches a known state, and then restarts normal + * operation. + */ + public final boolean recovering () + { + return yyerrstatus_ == 0; + } + + /** Compute post-reduction state. + * @@param yystate the current state + * @@param yysym the nonterminal to push on the stack + */ + private int yyLRGotoState(int yystate, int yysym) { + int yyr = yypgoto_[yysym - YYNTOKENS_] + yystate; + if (0 <= yyr && yyr <= YYLAST_ && yycheck_[yyr] == yystate) + return yytable_[yyr]; + else + return yydefgoto_[yysym - YYNTOKENS_]; + } + + private int yyaction(int yyn, YYStack yystack, int yylen)]b4_maybe_throws([b4_throws])[ + { + /* If YYLEN is nonzero, implement the default value of the action: + '$$ = $1'. Otherwise, use the top of the stack. + + Otherwise, the following line sets YYVAL to garbage. + This behavior is undocumented and Bison + users should not rely upon it. */ + ]b4_yystype[ yyval = (0 < yylen) ? yystack.valueAt(yylen - 1) : yystack.valueAt(0);]b4_locations_if([[ + ]b4_location_type[ yyloc = yylloc(yystack, yylen);]])[]b4_parse_trace_if([[ + + yyReducePrint(yyn, yystack);]])[ + + switch (yyn) + { + ]b4_user_actions[ + default: break; + }]b4_parse_trace_if([[ + + yySymbolPrint("-> $$ =", SymbolKind.get(yyr1_[yyn]), yyval]b4_locations_if([, yyloc])[);]])[ + + yystack.pop(yylen); + yylen = 0; + /* Shift the result of the reduction. */ + int yystate = yyLRGotoState(yystack.stateAt(0), yyr1_[yyn]); + yystack.push(yystate, yyval]b4_locations_if([, yyloc])[); + return YYNEWSTATE; + } + +]b4_parse_trace_if([[ + /*--------------------------------. + | Print this symbol on YYOUTPUT. | + `--------------------------------*/ + + private void yySymbolPrint(String s, SymbolKind yykind, + ]b4_yystype[ yyvalue]b4_locations_if([, ]b4_location_type[ yylocation])[) { + if (0 < yydebug) { + yycdebug(s + + (yykind.getCode() < YYNTOKENS_ ? 
" token " : " nterm ") + + yykind.getName() + " ("]b4_locations_if([ + + yylocation + ": "])[ + + (yyvalue == null ? "(null)" : yyvalue.toString()) + ")"); + } + }]])[ + +]b4_push_if([],[[ + /** + * Parse input from the scanner that was specified at object construction + * time. Return whether the end of the input was reached successfully. + * + * @@return true if the parsing succeeds. Note that this does not + * imply that there were no syntax errors. + */ + public boolean parse()]b4_maybe_throws([b4_list2([b4_lex_throws], [b4_throws])])[]])[ +]b4_push_if([ + /** + * Push Parse input from external lexer + * + * @@param yylextoken current token + * @@param yylexval current lval]b4_locations_if([[ + * @@param yylexloc current position]])[ + * + * @@return YYACCEPT, YYABORT, YYPUSH_MORE + */ + public int push_parse(int yylextoken, b4_yystype yylexval[]b4_locations_if([, b4_location_type yylexloc]))b4_maybe_throws([b4_list2([b4_lex_throws], [b4_throws])])])[ + {]b4_locations_if([[ + /* @@$. */ + ]b4_location_type[ yyloc;]])[ +]b4_push_if([],[[ +]b4_define_state[ +]b4_lac_if([[ + // Discard the LAC context in case there still is one left from a + // previous invocation. + yylacDiscard("init");]])[ +]b4_parse_trace_if([[ + yycdebug ("Starting parse");]])[ + yyerrstatus_ = 0; + yynerrs = 0; + + /* Initialize the stack. */ + yystack.push (yystate, yylval]b4_locations_if([, yylloc])[); +]m4_ifdef([b4_initial_action], [ +b4_dollar_pushdef([yylval], [], [], [yylloc])dnl + b4_user_initial_action +b4_dollar_popdef[]dnl +])[ +]])[ +]b4_push_if([[ + if (!this.push_parse_initialized) + { + push_parse_initialize (); +]m4_ifdef([b4_initial_action], [ +b4_dollar_pushdef([yylval], [], [], [yylloc])dnl + b4_user_initial_action +b4_dollar_popdef[]dnl +])[]b4_parse_trace_if([[ + yycdebug ("Starting parse");]])[ + yyerrstatus_ = 0; + } else + label = YYGETTOKEN; + + boolean push_token_consumed = true; +]])[ + for (;;) + switch (label) + { + /* New state. 
Unlike in the C/C++ skeletons, the state is already + pushed when we come here. */ + case YYNEWSTATE:]b4_parse_trace_if([[ + yycdebug ("Entering state " + yystate); + if (0 < yydebug) + yystack.print (yyDebugStream);]])[ + + /* Accept? */ + if (yystate == YYFINAL_) + ]b4_push_if([{label = YYACCEPT; break;}], + [return true;])[ + + /* Take a decision. First try without lookahead. */ + yyn = yypact_[yystate]; + if (yyPactValueIsDefault (yyn)) + { + label = YYDEFAULT; + break; + } +]b4_push_if([ /* Fall Through */ + + case YYGETTOKEN:])[ + /* Read a lookahead token. */ + if (yychar == YYEMPTY_) + { +]b4_push_if([[ + if (!push_token_consumed) + return YYPUSH_MORE;]b4_parse_trace_if([[ + yycdebug ("Reading a token");]])[ + yychar = yylextoken; + yylval = yylexval;]b4_locations_if([ + yylloc = yylexloc;])[ + push_token_consumed = false;]], [b4_parse_trace_if([[ + yycdebug ("Reading a token");]])[ + yychar = yylexer.yylex (); + yylval = yylexer.getLVal();]b4_locations_if([[ + yylloc = new ]b4_location_type[(yylexer.getStartPos(), + yylexer.getEndPos());]])[ +]])[ + } + + /* Convert token to internal form. */ + yytoken = yytranslate_ (yychar);]b4_parse_trace_if([[ + yySymbolPrint("Next token is", yytoken, + yylval]b4_locations_if([, yylloc])[);]])[ + + if (yytoken == ]b4_symbol(error, kind)[) + { + // The scanner already issued an error message, process directly + // to error recovery. But do not keep the error token as + // lookahead, it is too special and may lead us to an endless + // loop in error recovery. */ + yychar = Lexer.]b4_symbol(undef, id)[; + yytoken = ]b4_symbol(undef, kind)[;]b4_locations_if([[ + yyerrloc = yylloc;]])[ + label = YYERRLAB1; + } + else + { + /* If the proper action on seeing token YYTOKEN is to reduce or to + detect an error, take that action. 
*/ + yyn += yytoken.getCode(); + if (yyn < 0 || YYLAST_ < yyn || yycheck_[yyn] != yytoken.getCode()) {]b4_lac_if([[ + if (!yylacEstablish(yystack, yytoken)) { + label = YYERRLAB; + } else]])[ + label = YYDEFAULT; + } + + /* <= 0 means reduce or error. */ + else if ((yyn = yytable_[yyn]) <= 0) + { + if (yyTableValueIsError(yyn)) { + label = YYERRLAB; + }]b4_lac_if([[ else if (!yylacEstablish(yystack, yytoken)) { + label = YYERRLAB; + }]])[ else { + yyn = -yyn; + label = YYREDUCE; + } + } + + else + { + /* Shift the lookahead token. */]b4_parse_trace_if([[ + yySymbolPrint("Shifting", yytoken, + yylval]b4_locations_if([, yylloc])[); +]])[ + /* Discard the token being shifted. */ + yychar = YYEMPTY_; + + /* Count tokens shifted since error; after three, turn off error + status. */ + if (yyerrstatus_ > 0) + --yyerrstatus_; + + yystate = yyn; + yystack.push(yystate, yylval]b4_locations_if([, yylloc])[);]b4_lac_if([[ + yylacDiscard("shift");]])[ + label = YYNEWSTATE; + } + } + break; + + /*-----------------------------------------------------------. + | yydefault -- do the default action for the current state. | + `-----------------------------------------------------------*/ + case YYDEFAULT: + yyn = yydefact_[yystate]; + if (yyn == 0) + label = YYERRLAB; + else + label = YYREDUCE; + break; + + /*-----------------------------. + | yyreduce -- Do a reduction. | + `-----------------------------*/ + case YYREDUCE: + yylen = yyr2_[yyn]; + label = yyaction(yyn, yystack, yylen); + yystate = yystack.stateAt(0); + break; + + /*------------------------------------. + | yyerrlab -- here on detecting error | + `------------------------------------*/ + case YYERRLAB: + /* If not already recovering from an error, report this error. 
*/ + if (yyerrstatus_ == 0) + { + ++yynerrs; + if (yychar == YYEMPTY_) + yytoken = null; + yyreportSyntaxError(new Context(this, yystack, yytoken]b4_locations_if([[, yylloc]])[)); + } +]b4_locations_if([[ + yyerrloc = yylloc;]])[ + if (yyerrstatus_ == 3) + { + /* If just tried and failed to reuse lookahead token after an + error, discard it. */ + + if (yychar <= Lexer.]b4_symbol(eof, id)[) + { + /* Return failure if at end of input. */ + if (yychar == Lexer.]b4_symbol(eof, id)[) + ]b4_push_if([{label = YYABORT; break;}], [return false;])[ + } + else + yychar = YYEMPTY_; + } + + /* Else will try to reuse lookahead token after shifting the error + token. */ + label = YYERRLAB1; + break; + + /*-------------------------------------------------. + | errorlab -- error raised explicitly by YYERROR. | + `-------------------------------------------------*/ + case YYERROR:]b4_locations_if([[ + yyerrloc = yystack.locationAt (yylen - 1);]])[ + /* Do not reclaim the symbols of the rule which action triggered + this YYERROR. */ + yystack.pop (yylen); + yylen = 0; + yystate = yystack.stateAt(0); + label = YYERRLAB1; + break; + + /*-------------------------------------------------------------. + | yyerrlab1 -- common code for both syntax error and YYERROR. | + `-------------------------------------------------------------*/ + case YYERRLAB1: + yyerrstatus_ = 3; /* Each real token shifted decrements this. */ + + // Pop stack until we find a state that shifts the error token. + for (;;) + { + yyn = yypact_[yystate]; + if (!yyPactValueIsDefault (yyn)) + { + yyn += ]b4_symbol(error, kind)[.getCode(); + if (0 <= yyn && yyn <= YYLAST_ + && yycheck_[yyn] == ]b4_symbol(error, kind)[.getCode()) + { + yyn = yytable_[yyn]; + if (0 < yyn) + break; + } + } + + /* Pop the current state because it cannot handle the + * error token. 
*/ + if (yystack.height == 0) + ]b4_push_if([{label = YYABORT; break;}],[return false;])[ + +]b4_locations_if([[ + yyerrloc = yystack.locationAt (0);]])[ + yystack.pop (); + yystate = yystack.stateAt(0);]b4_parse_trace_if([[ + if (0 < yydebug) + yystack.print (yyDebugStream);]])[ + } + + if (label == YYABORT) + /* Leave the switch. */ + break; + +]b4_locations_if([[ + /* Muck with the stack to setup for yylloc. */ + yystack.push (0, null, yylloc); + yystack.push (0, null, yyerrloc); + yyloc = yylloc (yystack, 2); + yystack.pop (2);]])[ + + /* Shift the error token. */]b4_lac_if([[ + yylacDiscard("error recovery");]])[]b4_parse_trace_if([[ + yySymbolPrint("Shifting", SymbolKind.get(yystos_[yyn]), + yylval]b4_locations_if([, yyloc])[);]])[ + + yystate = yyn; + yystack.push (yyn, yylval]b4_locations_if([, yyloc])[); + label = YYNEWSTATE; + break; + + /* Accept. */ + case YYACCEPT: + ]b4_push_if([this.push_parse_initialized = false; return YYACCEPT;], + [return true;])[ + + /* Abort. */ + case YYABORT: + ]b4_push_if([this.push_parse_initialized = false; return YYABORT;], + [return false;])[ + } +} +]b4_push_if([[ + boolean push_parse_initialized = false; + + /** + * (Re-)Initialize the state of the push parser. + */ + public void push_parse_initialize () + { + /* Lookahead and lookahead in internal form. */ + this.yychar = YYEMPTY_; + this.yytoken = null; + + /* State. */ + this.yyn = 0; + this.yylen = 0; + this.yystate = 0; + this.yystack = new YYStack();]b4_lac_if([[ + this.yylacStack = new ArrayList(); + this.yylacEstablished = false;]])[ + this.label = YYNEWSTATE; + + /* Error handling. */ + this.yynerrs = 0;]b4_locations_if([[ + /* The location where the error started. */ + this.yyerrloc = null; + this.yylloc = new ]b4_location_type[ (null, null);]])[ + + /* Semantic value of the lookahead. 
*/ + this.yylval = null; + + yystack.push (this.yystate, this.yylval]b4_locations_if([, this.yylloc])[); + + this.push_parse_initialized = true; + + } +]b4_locations_if([[ + /** + * Push parse given input from an external lexer. + * + * @@param yylextoken current token + * @@param yylexval current lval + * @@param yyylexpos current position + * + * @@return YYACCEPT, YYABORT, YYPUSH_MORE + */ + public int push_parse(int yylextoken, ]b4_yystype[ yylexval, ]b4_position_type[ yylexpos)]b4_maybe_throws([b4_list2([b4_lex_throws], [b4_throws])])[ { + return push_parse(yylextoken, yylexval, new ]b4_location_type[(yylexpos)); + } +]])])[ + +]b4_both_if([[ + /** + * Parse input from the scanner that was specified at object construction + * time. Return whether the end of the input was reached successfully. + * This version of parse() is defined only when api.push-push=both. + * + * @@return true if the parsing succeeds. Note that this does not + * imply that there were no syntax errors. + */ + public boolean parse()]b4_maybe_throws([b4_list2([b4_lex_throws], [b4_throws])])[ { + if (yylexer == null) + throw new NullPointerException("Null Lexer"); + int status; + do { + int token = yylexer.yylex(); + ]b4_yystype[ lval = yylexer.getLVal();]b4_locations_if([[ + ]b4_location_type[ yyloc = new ]b4_location_type[(yylexer.getStartPos(), yylexer.getEndPos()); + status = push_parse(token, lval, yyloc);]], [[ + status = push_parse(token, lval);]])[ + } while (status == YYPUSH_MORE); + return status == YYACCEPT; + } +]])[ + + /** + * Information needed to get the list of expected tokens and to forge + * a syntax error diagnostic. 
+ */ + public static final class Context { + Context(]b4_parser_class[ parser, YYStack stack, SymbolKind token]b4_locations_if([[, ]b4_location_type[ loc]])[) { + yyparser = parser; + yystack = stack; + yytoken = token;]b4_locations_if([[ + yylocation = loc;]])[ + } + + private ]b4_parser_class[ yyparser; + private YYStack yystack; + + + /** + * The symbol kind of the lookahead token. + */ + public final SymbolKind getToken() { + return yytoken; + } + + private SymbolKind yytoken;]b4_locations_if([[ + + /** + * The location of the lookahead. + */ + public final ]b4_location_type[ getLocation() { + return yylocation; + } + + private ]b4_location_type[ yylocation;]])[ + static final int NTOKENS = ]b4_parser_class[.YYNTOKENS_; + + /** + * Put in YYARG at most YYARGN of the expected tokens given the + * current YYCTX, and return the number of tokens stored in YYARG. If + * YYARG is null, return the number of expected tokens (guaranteed to + * be less than YYNTOKENS). + */ + int getExpectedTokens(SymbolKind yyarg[], int yyargn) { + return getExpectedTokens (yyarg, 0, yyargn); + } + + int getExpectedTokens(SymbolKind yyarg[], int yyoffset, int yyargn) { + int yycount = yyoffset;]b4_lac_if([b4_parse_trace_if([[ + // Execute LAC once. We don't care if it is successful, we + // only do it for the sake of debugging output. + if (!yyparser.yylacEstablished) + yyparser.yylacCheck(yystack, yytoken); +]])[ + for (int yyx = 0; yyx < YYNTOKENS_; ++yyx) + { + SymbolKind yysym = SymbolKind.get(yyx); + if (yysym != ]b4_symbol(error, kind)[ + && yysym != ]b4_symbol(undef, kind)[ + && yyparser.yylacCheck(yystack, yysym)) + { + if (yyarg == null) + yycount += 1; + else if (yycount == yyargn) + return 0; + else + yyarg[yycount++] = yysym; + } + }]], [[ + int yyn = yypact_[this.yystack.stateAt(0)]; + if (!yyPactValueIsDefault(yyn)) + { + /* Start YYX at -YYN if negative to avoid negative + indexes in YYCHECK. 
In other words, skip the first + -YYN actions for this state because they are default + actions. */ + int yyxbegin = yyn < 0 ? -yyn : 0; + /* Stay within bounds of both yycheck and yytname. */ + int yychecklim = YYLAST_ - yyn + 1; + int yyxend = yychecklim < NTOKENS ? yychecklim : NTOKENS; + for (int yyx = yyxbegin; yyx < yyxend; ++yyx) + if (yycheck_[yyx + yyn] == yyx && yyx != ]b4_symbol(error, kind)[.getCode() + && !yyTableValueIsError(yytable_[yyx + yyn])) + { + if (yyarg == null) + yycount += 1; + else if (yycount == yyargn) + return 0; // FIXME: this is incorrect. + else + yyarg[yycount++] = SymbolKind.get(yyx); + } + }]])[ + if (yyarg != null && yycount == yyoffset && yyoffset < yyargn) + yyarg[yycount] = null; + return yycount - yyoffset; + } + } + +]b4_lac_if([[ + /** Check the lookahead yytoken. + * \returns true iff the token will be eventually shifted. + */ + boolean yylacCheck(YYStack yystack, SymbolKind yytoken) + { + // Logically, the yylacStack's lifetime is confined to this function. + // Clear it, to get rid of potential left-overs from previous call. + yylacStack.clear(); + // Reduce until we encounter a shift and thereby accept the token. + yycdebugNnl("LAC: checking lookahead " + yytoken.getName() + ":"); + int lacTop = 0; + while (true) + { + int topState = (yylacStack.isEmpty() + ? yystack.stateAt(lacTop) + : yylacStack.get(yylacStack.size() - 1)); + int yyrule = yypact_[topState]; + if (yyPactValueIsDefault(yyrule) + || (yyrule += yytoken.getCode()) < 0 || YYLAST_ < yyrule + || yycheck_[yyrule] != yytoken.getCode()) + { + // Use the default action. + yyrule = yydefact_[+topState]; + if (yyrule == 0) { + yycdebug(" Err"); + return false; + } + } + else + { + // Use the action from yytable. + yyrule = yytable_[yyrule]; + if (yyTableValueIsError(yyrule)) { + yycdebug(" Err"); + return false; + } + if (0 < yyrule) { + yycdebug(" S" + yyrule); + return true; + } + yyrule = -yyrule; + } + // By now we know we have to simulate a reduce. 
+ yycdebugNnl(" R" + (yyrule - 1)); + // Pop the corresponding number of values from the stack. + { + int yylen = yyr2_[yyrule]; + // First pop from the LAC stack as many tokens as possible. + int lacSize = yylacStack.size(); + if (yylen < lacSize) { + // yylacStack.setSize(lacSize - yylen); + for (/* Nothing */; 0 < yylen; yylen -= 1) { + yylacStack.remove(yylacStack.size() - 1); + } + yylen = 0; + } else if (lacSize != 0) { + yylacStack.clear(); + yylen -= lacSize; + } + // Only afterwards look at the main stack. + // We simulate popping elements by incrementing lacTop. + lacTop += yylen; + } + // Keep topState in sync with the updated stack. + topState = (yylacStack.isEmpty() + ? yystack.stateAt(lacTop) + : yylacStack.get(yylacStack.size() - 1)); + // Push the resulting state of the reduction. + int state = yyLRGotoState(topState, yyr1_[yyrule]); + yycdebugNnl(" G" + state); + yylacStack.add(state); + } + } + + /** Establish the initial context if no initial context currently exists. + * \returns true iff the token will be eventually shifted. + */ + boolean yylacEstablish(YYStack yystack, SymbolKind yytoken) { + /* Establish the initial context for the current lookahead if no initial + context is currently established. + + We define a context as a snapshot of the parser stacks. We define + the initial context for a lookahead as the context in which the + parser initially examines that lookahead in order to select a + syntactic action. Thus, if the lookahead eventually proves + syntactically unacceptable (possibly in a later context reached via a + series of reductions), the initial context can be used to determine + the exact set of tokens that would be syntactically acceptable in the + lookahead's place. Moreover, it is the context after which any + further semantic actions would be erroneous because they would be + determined by a syntactically unacceptable token. 
+ + yylacEstablish should be invoked when a reduction is about to be + performed in an inconsistent state (which, for the purposes of LAC, + includes consistent states that don't know they're consistent because + their default reductions have been disabled). + + For parse.lac=full, the implementation of yylacEstablish is as + follows. If no initial context is currently established for the + current lookahead, then check if that lookahead can eventually be + shifted if syntactic actions continue from the current context. */ + if (yylacEstablished) { + return true; + } else { + yycdebug("LAC: initial context established for " + yytoken.getName()); + yylacEstablished = true; + return yylacCheck(yystack, yytoken); + } + } + + /** Discard any previous initial lookahead context because of event. + * \param event the event which caused the lookahead to be discarded. + * Only used for debbuging output. */ + void yylacDiscard(String event) { + /* Discard any previous initial lookahead context because of Event, + which may be a lookahead change or an invalidation of the currently + established initial context for the current lookahead. + + The most common example of a lookahead change is a shift. An example + of both cases is syntax error recovery. That is, a syntax error + occurs when the lookahead is syntactically erroneous for the + currently established initial context, so error recovery manipulates + the parser stacks to try to find a new initial context in which the + current lookahead is syntactically acceptable. If it fails to find + such a context, it discards the lookahead. */ + if (yylacEstablished) { + yycdebug("LAC: initial context discarded due to " + event); + yylacEstablished = false; + } + } + + /** The stack for LAC. + * Logically, the yylacStack's lifetime is confined to the function + * yylacCheck. We just store it as a member of this class to hold + * on to the memory and to avoid frequent reallocations. 
+ */ + ArrayList yylacStack; + /** Whether an initial LAC context was established. */ + boolean yylacEstablished; +]])[ + +]b4_parse_error_bmatch( +[detailed\|verbose], [[ + private int yysyntaxErrorArguments(Context yyctx, SymbolKind[] yyarg, int yyargn) { + /* There are many possibilities here to consider: + - If this state is a consistent state with a default action, + then the only way this function was invoked is if the + default action is an error action. In that case, don't + check for expected tokens because there are none. + - The only way there can be no lookahead present (in tok) is + if this state is a consistent state with a default action. + Thus, detecting the absence of a lookahead is sufficient to + determine that there is no unexpected or expected token to + report. In that case, just report a simple "syntax error". + - Don't assume there isn't a lookahead just because this + state is a consistent state with a default action. There + might have been a previous inconsistent state, consistent + state with a non-default action, or user semantic action + that manipulated yychar. (However, yychar is currently out + of scope during semantic actions.) + - Of course, the expected token list depends on states to + have correct lookahead information, and it depends on the + parser not to perform extra reductions after fetching a + lookahead from the scanner and before detecting a syntax + error. Thus, state merging (from LALR or IELR) and default + reductions corrupt the expected token list. However, the + list is correct for canonical LR with one exception: it + will still contain any token that will not be accepted due + to an error action in a later state. + */ + int yycount = 0; + if (yyctx.getToken() != null) + { + if (yyarg != null) + yyarg[yycount] = yyctx.getToken(); + yycount += 1; + yycount += yyctx.getExpectedTokens(yyarg, 1, yyargn); + } + return yycount; + } +]])[ + + /** + * Build and emit a "syntax error" message in a user-defined way. 
+ * + * @@param ctx The context of the error. + */ + private void yyreportSyntaxError(Context yyctx) {]b4_parse_error_bmatch( +[custom], [[ + yylexer.reportSyntaxError(yyctx);]], +[detailed\|verbose], [[ + if (yyErrorVerbose) { + final int argmax = 5; + SymbolKind[] yyarg = new SymbolKind[argmax]; + int yycount = yysyntaxErrorArguments(yyctx, yyarg, argmax); + String[] yystr = new String[yycount]; + for (int yyi = 0; yyi < yycount; ++yyi) { + yystr[yyi] = yyarg[yyi].getName(); + } + String yyformat; + switch (yycount) { + default: + case 0: yyformat = ]b4_trans(["syntax error"])[; break; + case 1: yyformat = ]b4_trans(["syntax error, unexpected {0}"])[; break; + case 2: yyformat = ]b4_trans(["syntax error, unexpected {0}, expecting {1}"])[; break; + case 3: yyformat = ]b4_trans(["syntax error, unexpected {0}, expecting {1} or {2}"])[; break; + case 4: yyformat = ]b4_trans(["syntax error, unexpected {0}, expecting {1} or {2} or {3}"])[; break; + case 5: yyformat = ]b4_trans(["syntax error, unexpected {0}, expecting {1} or {2} or {3} or {4}"])[; break; + } + yyerror(]b4_locations_if([[yyctx.yylocation, ]])[new MessageFormat(yyformat).format(yystr)); + } else { + yyerror(]b4_locations_if([[yyctx.yylocation, ]])[]b4_trans(["syntax error"])[); + }]], +[simple], [[ + yyerror(]b4_locations_if([[yyctx.yylocation, ]])[]b4_trans(["syntax error"])[);]])[ + } + + /** + * Whether the given yypact_ value indicates a defaulted state. + * @@param yyvalue the value to check + */ + private static boolean yyPactValueIsDefault(int yyvalue) { + return yyvalue == yypact_ninf_; + } + + /** + * Whether the given yytable_ + * value indicates a syntax error. 
+ * @@param yyvalue the value to check + */ + private static boolean yyTableValueIsError(int yyvalue) { + return yyvalue == yytable_ninf_; + } + + private static final ]b4_int_type_for([b4_pact])[ yypact_ninf_ = ]b4_pact_ninf[; + private static final ]b4_int_type_for([b4_table])[ yytable_ninf_ = ]b4_table_ninf[; + +]b4_parser_tables_define[ + +]b4_parse_trace_if([[ + ]b4_integral_parser_table_define([rline], [b4_rline], + [[YYRLINE[YYN] -- Source line where rule number YYN was defined.]])[ + + + // Report on the debug stream that the rule yyrule is going to be reduced. + private void yyReducePrint (int yyrule, YYStack yystack) + { + if (yydebug == 0) + return; + + int yylno = yyrline_[yyrule]; + int yynrhs = yyr2_[yyrule]; + /* Print the symbols being reduced, and their result. */ + yycdebug ("Reducing stack by rule " + (yyrule - 1) + + " (line " + yylno + "):"); + + /* The symbols being reduced. */ + for (int yyi = 0; yyi < yynrhs; yyi++) + yySymbolPrint(" $" + (yyi + 1) + " =", + SymbolKind.get(yystos_[yystack.stateAt(yynrhs - (yyi + 1))]), + ]b4_rhs_data(yynrhs, yyi + 1)b4_locations_if([, + b4_rhs_location(yynrhs, yyi + 1)])[); + }]])[ + + /* YYTRANSLATE_(TOKEN-NUM) -- Symbol number corresponding to TOKEN-NUM + as returned by yylex, with out-of-bounds checking. */ + private static final SymbolKind yytranslate_(int t) +]b4_api_token_raw_if(dnl +[[ { + return SymbolKind.get(t); + } +]], +[[ { + // Last valid token kind. 
+ int code_max = ]b4_code_max[; + if (t <= 0) + return ]b4_symbol(eof, kind)[; + else if (t <= code_max) + return SymbolKind.get(yytranslate_table_[t]); + else + return ]b4_symbol(undef, kind)[; + } + ]b4_integral_parser_table_define([translate_table], [b4_translate])[ +]])[ + + private static final int YYLAST_ = ]b4_last[; + private static final int YYEMPTY_ = -2; + private static final int YYFINAL_ = ]b4_final_state_number[; + private static final int YYNTOKENS_ = ]b4_tokens_number[; + +]b4_percent_code_get[ +} +]b4_percent_code_get([[epilogue]])[]dnl +b4_epilogue[]dnl +b4_output_end diff --git a/platform/dbops/binaries/build/share/bison/skeletons/location.cc b/platform/dbops/binaries/build/share/bison/skeletons/location.cc new file mode 100644 index 0000000000000000000000000000000000000000..3870b2bcc7770378f2c1d7fad9011ed2bd6bfc95 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/location.cc @@ -0,0 +1,380 @@ +# C++ skeleton for Bison + +# Copyright (C) 2002-2015, 2018-2021 Free Software Foundation, Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +m4_pushdef([b4_copyright_years], + [2002-2015, 2018-2021]) + + +# b4_location_file +# ---------------- +# Name of the file containing the position/location class, +# if we want this file. 
+b4_percent_define_check_file([b4_location_file], + [[api.location.file]], + b4_header_if([[location.hh]])) + +# b4_location_include +# ------------------- +# If location.hh is to be generated, the name under which should it be +# included. +# +# b4_location_path +# ---------------- +# The path to use for the CPP guard. +m4_ifdef([b4_location_file], +[m4_define([b4_location_include], + [b4_percent_define_get([[api.location.include]], + ["b4_location_file"])]) + m4_define([b4_location_path], + b4_percent_define_get([[api.location.include]], + ["b4_mapped_dir_prefix[]b4_location_file"])) + m4_define([b4_location_path], + m4_substr(m4_defn([b4_location_path]), 1, m4_eval(m4_len(m4_defn([b4_location_path])) - 2))) + ]) + + +# b4_position_file +# ---------------- +# Name of the file containing the position class, if we want this file. +b4_header_if( + [b4_required_version_if( + [30200], [], + [m4_ifdef([b4_location_file], + [m4_define([b4_position_file], [position.hh])])])]) + + + +# b4_location_define +# ------------------ +# Define the position and location classes. +m4_define([b4_location_define], +[[ /// A point in a source file. + class position + { + public: + /// Type for file name. + typedef ]b4_percent_define_get([[api.filename.type]])[ filename_type; + /// Type for line and column numbers. + typedef int counter_type; +]m4_ifdef([b4_location_constructors], [[ + /// Construct a position. + explicit position (filename_type* f = YY_NULLPTR, + counter_type l = ]b4_location_initial_line[, + counter_type c = ]b4_location_initial_column[) + : filename (f) + , line (l) + , column (c) + {} + +]])[ + /// Initialization. + void initialize (filename_type* fn = YY_NULLPTR, + counter_type l = ]b4_location_initial_line[, + counter_type c = ]b4_location_initial_column[) + { + filename = fn; + line = l; + column = c; + } + + /** \name Line and Column related manipulators + ** \{ */ + /// (line related) Advance to the COUNT next lines. 
+ void lines (counter_type count = 1) + { + if (count) + { + column = ]b4_location_initial_column[; + line = add_ (line, count, ]b4_location_initial_line[); + } + } + + /// (column related) Advance to the COUNT next columns. + void columns (counter_type count = 1) + { + column = add_ (column, count, ]b4_location_initial_column[); + } + /** \} */ + + /// File name to which this position refers. + filename_type* filename; + /// Current line number. + counter_type line; + /// Current column number. + counter_type column; + + private: + /// Compute max (min, lhs+rhs). + static counter_type add_ (counter_type lhs, counter_type rhs, counter_type min) + { + return lhs + rhs < min ? min : lhs + rhs; + } + }; + + /// Add \a width columns, in place. + inline position& + operator+= (position& res, position::counter_type width) + { + res.columns (width); + return res; + } + + /// Add \a width columns. + inline position + operator+ (position res, position::counter_type width) + { + return res += width; + } + + /// Subtract \a width columns, in place. + inline position& + operator-= (position& res, position::counter_type width) + { + return res += -width; + } + + /// Subtract \a width columns. + inline position + operator- (position res, position::counter_type width) + { + return res -= width; + } +]b4_percent_define_flag_if([[define_location_comparison]], [[ + /// Compare two position objects. + inline bool + operator== (const position& pos1, const position& pos2) + { + return (pos1.line == pos2.line + && pos1.column == pos2.column + && (pos1.filename == pos2.filename + || (pos1.filename && pos2.filename + && *pos1.filename == *pos2.filename))); + } + + /// Compare two position objects. + inline bool + operator!= (const position& pos1, const position& pos2) + { + return !(pos1 == pos2); + } +]])[ + /** \brief Intercept output stream redirection. 
+ ** \param ostr the destination output stream + ** \param pos a reference to the position to redirect + */ + template + std::basic_ostream& + operator<< (std::basic_ostream& ostr, const position& pos) + { + if (pos.filename) + ostr << *pos.filename << ':'; + return ostr << pos.line << '.' << pos.column; + } + + /// Two points in a source file. + class location + { + public: + /// Type for file name. + typedef position::filename_type filename_type; + /// Type for line and column numbers. + typedef position::counter_type counter_type; +]m4_ifdef([b4_location_constructors], [ + /// Construct a location from \a b to \a e. + location (const position& b, const position& e) + : begin (b) + , end (e) + {} + + /// Construct a 0-width location in \a p. + explicit location (const position& p = position ()) + : begin (p) + , end (p) + {} + + /// Construct a 0-width location in \a f, \a l, \a c. + explicit location (filename_type* f, + counter_type l = ]b4_location_initial_line[, + counter_type c = ]b4_location_initial_column[) + : begin (f, l, c) + , end (f, l, c) + {} + +])[ + /// Initialization. + void initialize (filename_type* f = YY_NULLPTR, + counter_type l = ]b4_location_initial_line[, + counter_type c = ]b4_location_initial_column[) + { + begin.initialize (f, l, c); + end = begin; + } + + /** \name Line and Column related manipulators + ** \{ */ + public: + /// Reset initial location to final location. + void step () + { + begin = end; + } + + /// Extend the current location to the COUNT next columns. + void columns (counter_type count = 1) + { + end += count; + } + + /// Extend the current location to the COUNT next lines. + void lines (counter_type count = 1) + { + end.lines (count); + } + /** \} */ + + + public: + /// Beginning of the located region. + position begin; + /// End of the located region. + position end; + }; + + /// Join two locations, in place. 
+ inline location& + operator+= (location& res, const location& end) + { + res.end = end.end; + return res; + } + + /// Join two locations. + inline location + operator+ (location res, const location& end) + { + return res += end; + } + + /// Add \a width columns to the end position, in place. + inline location& + operator+= (location& res, location::counter_type width) + { + res.columns (width); + return res; + } + + /// Add \a width columns to the end position. + inline location + operator+ (location res, location::counter_type width) + { + return res += width; + } + + /// Subtract \a width columns to the end position, in place. + inline location& + operator-= (location& res, location::counter_type width) + { + return res += -width; + } + + /// Subtract \a width columns to the end position. + inline location + operator- (location res, location::counter_type width) + { + return res -= width; + } +]b4_percent_define_flag_if([[define_location_comparison]], [[ + /// Compare two location objects. + inline bool + operator== (const location& loc1, const location& loc2) + { + return loc1.begin == loc2.begin && loc1.end == loc2.end; + } + + /// Compare two location objects. + inline bool + operator!= (const location& loc1, const location& loc2) + { + return !(loc1 == loc2); + } +]])[ + /** \brief Intercept output stream redirection. + ** \param ostr the destination output stream + ** \param loc a reference to the location to redirect + ** + ** Avoid duplicate information. + */ + template + std::basic_ostream& + operator<< (std::basic_ostream& ostr, const location& loc) + { + location::counter_type end_col + = 0 < loc.end.column ? loc.end.column - 1 : 0; + ostr << loc.begin; + if (loc.end.filename + && (!loc.begin.filename + || *loc.begin.filename != *loc.end.filename)) + ostr << '-' << loc.end.filename << ':' << loc.end.line << '.' << end_col; + else if (loc.begin.line < loc.end.line) + ostr << '-' << loc.end.line << '.' 
<< end_col; + else if (loc.begin.column < end_col) + ostr << '-' << end_col; + return ostr; + } +]]) + + +m4_ifdef([b4_position_file], [[ +]b4_output_begin([b4_dir_prefix], [b4_position_file])[ +]b4_generated_by[ +// Starting with Bison 3.2, this file is useless: the structure it +// used to define is now defined in "]b4_location_file[". +// +// To get rid of this file: +// 1. add '%require "3.2"' (or newer) to your grammar file +// 2. remove references to this file from your build system +// 3. if you used to include it, include "]b4_location_file[" instead. + +#include ]b4_location_include[ +]b4_output_end[ +]]) + + +m4_ifdef([b4_location_file], [[ +]b4_output_begin([b4_dir_prefix], [b4_location_file])[ +]b4_copyright([Locations for Bison parsers in C++])[ +/** + ** \file ]b4_location_path[ + ** Define the ]b4_namespace_ref[::location class. + */ + +]b4_cpp_guard_open([b4_location_path])[ + +# include +# include + +]b4_null_define[ + +]b4_namespace_open[ +]b4_location_define[ +]b4_namespace_close[ +]b4_cpp_guard_close([b4_location_path])[ +]b4_output_end[ +]]) + + +m4_popdef([b4_copyright_years]) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/stack.hh b/platform/dbops/binaries/build/share/bison/skeletons/stack.hh new file mode 100644 index 0000000000000000000000000000000000000000..98913258fbeea8cbfb4fd58598e56b0c15742e75 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/stack.hh @@ -0,0 +1,157 @@ +# C++ skeleton for Bison + +# Copyright (C) 2002-2015, 2018-2021 Free Software Foundation, Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# b4_stack_file +# ------------- +# Name of the file containing the stack class, if we want this file. +b4_header_if([b4_required_version_if([30200], [], + [m4_define([b4_stack_file], [stack.hh])])]) + + +# b4_stack_define +# --------------- +m4_define([b4_stack_define], +[[ /// A stack with random access from its top. + template > + class stack + { + public: + // Hide our reversed order. + typedef typename S::iterator iterator; + typedef typename S::const_iterator const_iterator; + typedef typename S::size_type size_type; + typedef typename std::ptrdiff_t index_type; + + stack (size_type n = 200) YY_NOEXCEPT + : seq_ (n) + {} + +#if 201103L <= YY_CPLUSPLUS + /// Non copyable. + stack (const stack&) = delete; + /// Non copyable. + stack& operator= (const stack&) = delete; +#endif + + /// Random access. + /// + /// Index 0 returns the topmost element. + const T& + operator[] (index_type i) const + { + return seq_[size_type (size () - 1 - i)]; + } + + /// Random access. + /// + /// Index 0 returns the topmost element. + T& + operator[] (index_type i) + { + return seq_[size_type (size () - 1 - i)]; + } + + /// Steal the contents of \a t. + /// + /// Close to move-semantics. + void + push (YY_MOVE_REF (T) t) + { + seq_.push_back (T ()); + operator[] (0).move (t); + } + + /// Pop elements from the stack. + void + pop (std::ptrdiff_t n = 1) YY_NOEXCEPT + { + for (; 0 < n; --n) + seq_.pop_back (); + } + + /// Pop all elements from the stack. + void + clear () YY_NOEXCEPT + { + seq_.clear (); + } + + /// Number of elements on the stack. 
+ index_type + size () const YY_NOEXCEPT + { + return index_type (seq_.size ()); + } + + /// Iterator on top of the stack (going downwards). + const_iterator + begin () const YY_NOEXCEPT + { + return seq_.begin (); + } + + /// Bottom of the stack. + const_iterator + end () const YY_NOEXCEPT + { + return seq_.end (); + } + + /// Present a slice of the top of a stack. + class slice + { + public: + slice (const stack& stack, index_type range) YY_NOEXCEPT + : stack_ (stack) + , range_ (range) + {} + + const T& + operator[] (index_type i) const + { + return stack_[range_ - i]; + } + + private: + const stack& stack_; + index_type range_; + }; + + private: +#if YY_CPLUSPLUS < 201103L + /// Non copyable. + stack (const stack&); + /// Non copyable. + stack& operator= (const stack&); +#endif + /// The wrapped container. + S seq_; + }; +]]) + +m4_ifdef([b4_stack_file], +[b4_output_begin([b4_dir_prefix], [b4_stack_file])[ +]b4_generated_by[ +// Starting with Bison 3.2, this file is useless: the structure it +// used to define is now defined with the parser itself. +// +// To get rid of this file: +// 1. add '%require "3.2"' (or newer) to your grammar file +// 2. remove references to this file from your build system. +]b4_output_end[ +]]) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/traceon.m4 b/platform/dbops/binaries/build/share/bison/skeletons/traceon.m4 new file mode 100644 index 0000000000000000000000000000000000000000..344d7d1c553e3599d6eeb63154848da36ed878ba --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/traceon.m4 @@ -0,0 +1,2 @@ +dnl GNU M4 treats -dV in a position-independent manner. 
+m4_debugmode(V)m4_traceon()dnl diff --git a/platform/dbops/binaries/build/share/bison/skeletons/variant.hh b/platform/dbops/binaries/build/share/bison/skeletons/variant.hh new file mode 100644 index 0000000000000000000000000000000000000000..2a490e8fa0b816ad457824f9b71cdb03d2e373c5 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/variant.hh @@ -0,0 +1,525 @@ +# C++ skeleton for Bison + +# Copyright (C) 2002-2015, 2018-2021 Free Software Foundation, Inc. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +## --------- ## +## variant. ## +## --------- ## + +# b4_assert +# --------- +# The name of YY_ASSERT. +m4_define([b4_assert], + [b4_api_PREFIX[]_ASSERT]) + + +# b4_symbol_variant(YYTYPE, YYVAL, ACTION, [ARGS]) +# ------------------------------------------------ +# Run some ACTION ("build", or "destroy") on YYVAL of symbol type +# YYTYPE. +m4_define([b4_symbol_variant], +[m4_pushdef([b4_dollar_dollar], + [$2.$3< $][3 > (m4_shift3($@))])dnl +switch ($1) + { +b4_type_foreach([_b4_type_action])[]dnl + default: + break; + } +m4_popdef([b4_dollar_dollar])dnl +]) + + +# _b4_char_sizeof_counter +# ----------------------- +# A counter used by _b4_char_sizeof_dummy to create fresh symbols. +m4_define([_b4_char_sizeof_counter], +[0]) + +# _b4_char_sizeof_dummy +# --------------------- +# At each call return a new C++ identifier. 
+m4_define([_b4_char_sizeof_dummy], +[m4_define([_b4_char_sizeof_counter], m4_incr(_b4_char_sizeof_counter))dnl +dummy[]_b4_char_sizeof_counter]) + + +# b4_char_sizeof(SYMBOL-NUMS) +# --------------------------- +# To be mapped on the list of type names to produce: +# +# char dummy1[sizeof (type_name_1)]; +# char dummy2[sizeof (type_name_2)]; +# +# for defined type names. +m4_define([b4_char_sizeof], +[b4_symbol_if([$1], [has_type], +[ +m4_map([ b4_symbol_tag_comment], [$@])dnl + char _b4_char_sizeof_dummy@{sizeof (b4_symbol([$1], [type]))@}; +])]) + + +# b4_variant_includes +# ------------------- +# The needed includes for variants support. +m4_define([b4_variant_includes], +[b4_parse_assert_if([[#include +#ifndef ]b4_assert[ +# include +# define ]b4_assert[ assert +#endif +]])]) + + + +## -------------------------- ## +## Adjustments for variants. ## +## -------------------------- ## + + +# b4_value_type_declare +# --------------------- +# Define value_type. +m4_define([b4_value_type_declare], +[[ /// A buffer to store and retrieve objects. + /// + /// Sort of a variant, but does not keep track of the nature + /// of the stored data, since that knowledge is available + /// via the current parser state. + class value_type + { + public: + /// Type of *this. + typedef value_type self_type; + + /// Empty construction. + value_type () YY_NOEXCEPT + : yyraw_ ()]b4_parse_assert_if([ + , yytypeid_ (YY_NULLPTR)])[ + {} + + /// Construct and fill. + template + value_type (YY_RVREF (T) t)]b4_parse_assert_if([ + : yytypeid_ (&typeid (T))])[ + {]b4_parse_assert_if([[ + ]b4_assert[ (sizeof (T) <= size);]])[ + new (yyas_ ()) T (YY_MOVE (t)); + } + +#if 201103L <= YY_CPLUSPLUS + /// Non copyable. + value_type (const self_type&) = delete; + /// Non copyable. + self_type& operator= (const self_type&) = delete; +#endif + + /// Destruction, allowed only if empty. 
+ ~value_type () YY_NOEXCEPT + {]b4_parse_assert_if([ + ]b4_assert[ (!yytypeid_); + ])[} + +# if 201103L <= YY_CPLUSPLUS + /// Instantiate a \a T in here from \a t. + template + T& + emplace (U&&... u) + {]b4_parse_assert_if([[ + ]b4_assert[ (!yytypeid_); + ]b4_assert[ (sizeof (T) <= size); + yytypeid_ = & typeid (T);]])[ + return *new (yyas_ ()) T (std::forward (u)...); + } +# else + /// Instantiate an empty \a T in here. + template + T& + emplace () + {]b4_parse_assert_if([[ + ]b4_assert[ (!yytypeid_); + ]b4_assert[ (sizeof (T) <= size); + yytypeid_ = & typeid (T);]])[ + return *new (yyas_ ()) T (); + } + + /// Instantiate a \a T in here from \a t. + template + T& + emplace (const T& t) + {]b4_parse_assert_if([[ + ]b4_assert[ (!yytypeid_); + ]b4_assert[ (sizeof (T) <= size); + yytypeid_ = & typeid (T);]])[ + return *new (yyas_ ()) T (t); + } +# endif + + /// Instantiate an empty \a T in here. + /// Obsolete, use emplace. + template + T& + build () + { + return emplace (); + } + + /// Instantiate a \a T in here from \a t. + /// Obsolete, use emplace. + template + T& + build (const T& t) + { + return emplace (t); + } + + /// Accessor to a built \a T. + template + T& + as () YY_NOEXCEPT + {]b4_parse_assert_if([[ + ]b4_assert[ (yytypeid_); + ]b4_assert[ (*yytypeid_ == typeid (T)); + ]b4_assert[ (sizeof (T) <= size);]])[ + return *yyas_ (); + } + + /// Const accessor to a built \a T (for %printer). + template + const T& + as () const YY_NOEXCEPT + {]b4_parse_assert_if([[ + ]b4_assert[ (yytypeid_); + ]b4_assert[ (*yytypeid_ == typeid (T)); + ]b4_assert[ (sizeof (T) <= size);]])[ + return *yyas_ (); + } + + /// Swap the content with \a that, of same type. + /// + /// Both variants must be built beforehand, because swapping the actual + /// data requires reading it (with as()), and this is not possible on + /// unconstructed variants: it would require some dynamic testing, which + /// should not be the variant's responsibility. 
+ /// Swapping between built and (possibly) non-built is done with + /// self_type::move (). + template + void + swap (self_type& that) YY_NOEXCEPT + {]b4_parse_assert_if([[ + ]b4_assert[ (yytypeid_); + ]b4_assert[ (*yytypeid_ == *that.yytypeid_);]])[ + std::swap (as (), that.as ()); + } + + /// Move the content of \a that to this. + /// + /// Destroys \a that. + template + void + move (self_type& that) + { +# if 201103L <= YY_CPLUSPLUS + emplace (std::move (that.as ())); +# else + emplace (); + swap (that); +# endif + that.destroy (); + } + +# if 201103L <= YY_CPLUSPLUS + /// Move the content of \a that to this. + template + void + move (self_type&& that) + { + emplace (std::move (that.as ())); + that.destroy (); + } +#endif + + /// Copy the content of \a that to this. + template + void + copy (const self_type& that) + { + emplace (that.as ()); + } + + /// Destroy the stored \a T. + template + void + destroy () + { + as ().~T ();]b4_parse_assert_if([ + yytypeid_ = YY_NULLPTR;])[ + } + + private: +#if YY_CPLUSPLUS < 201103L + /// Non copyable. + value_type (const self_type&); + /// Non copyable. + self_type& operator= (const self_type&); +#endif + + /// Accessor to raw memory as \a T. + template + T* + yyas_ () YY_NOEXCEPT + { + void *yyp = yyraw_; + return static_cast (yyp); + } + + /// Const accessor to raw memory as \a T. + template + const T* + yyas_ () const YY_NOEXCEPT + { + const void *yyp = yyraw_; + return static_cast (yyp); + } + + /// An auxiliary type to compute the largest semantic type. + union union_type + {]b4_type_foreach([b4_char_sizeof])[ }; + + /// The size of the largest semantic type. + enum { size = sizeof (union_type) }; + + /// A buffer to store semantic values. + union + { + /// Strongest alignment constraints. + long double yyalign_me_; + /// A buffer large enough to store any of the semantic values. + char yyraw_[size]; + };]b4_parse_assert_if([ + + /// Whether the content is built: if defined, the name of the stored type. 
+ const std::type_info *yytypeid_;])[ + }; +]]) + + +# How the semantic value is extracted when using variants. + +# b4_symbol_value(VAL, SYMBOL-NUM, [TYPE]) +# ---------------------------------------- +# See README. +m4_define([b4_symbol_value], +[m4_ifval([$3], + [$1.as< $3 > ()], + [m4_ifval([$2], + [b4_symbol_if([$2], [has_type], + [$1.as < b4_symbol([$2], [type]) > ()], + [$1])], + [$1])])]) + +# b4_symbol_value_template(VAL, SYMBOL-NUM, [TYPE]) +# ------------------------------------------------- +# Same as b4_symbol_value, but used in a template method. +m4_define([b4_symbol_value_template], +[m4_ifval([$3], + [$1.template as< $3 > ()], + [m4_ifval([$2], + [b4_symbol_if([$2], [has_type], + [$1.template as < b4_symbol([$2], [type]) > ()], + [$1])], + [$1])])]) + + + +## ------------- ## +## make_SYMBOL. ## +## ------------- ## + + +# _b4_includes_tokens(SYMBOL-NUM...) +# ---------------------------------- +# Expands to non-empty iff one of the SYMBOL-NUM denotes +# a token. +m4_define([_b4_is_token], + [b4_symbol_if([$1], [is_token], [1])]) +m4_define([_b4_includes_tokens], + [m4_map([_b4_is_token], [$@])]) + + +# _b4_token_maker_define(SYMBOL-NUM) +# ---------------------------------- +# Declare make_SYMBOL for SYMBOL-NUM. Use at class-level. 
+m4_define([_b4_token_maker_define], +[b4_token_visible_if([$1], +[#if 201103L <= YY_CPLUSPLUS + static + symbol_type + make_[]_b4_symbol([$1], [id]) (b4_join( + b4_symbol_if([$1], [has_type], + [b4_symbol([$1], [type]) v]), + b4_locations_if([location_type l]))) + { + return symbol_type (b4_join([token::b4_symbol([$1], [id])], + b4_symbol_if([$1], [has_type], [std::move (v)]), + b4_locations_if([std::move (l)]))); + } +#else + static + symbol_type + make_[]_b4_symbol([$1], [id]) (b4_join( + b4_symbol_if([$1], [has_type], + [const b4_symbol([$1], [type])& v]), + b4_locations_if([const location_type& l]))) + { + return symbol_type (b4_join([token::b4_symbol([$1], [id])], + b4_symbol_if([$1], [has_type], [v]), + b4_locations_if([l]))); + } +#endif +])]) + + +# b4_token_kind(SYMBOL-NUM) +# ------------------------- +# Some tokens don't have an ID. +m4_define([b4_token_kind], +[b4_symbol_if([$1], [has_id], + [token::b4_symbol([$1], [id])], + [b4_symbol([$1], [code])])]) + + +# _b4_tok_in(SYMBOL-NUM, ...) +# --------------------------- +# See b4_tok_in below. The SYMBOL-NUMs... are tokens only. +# +# We iterate over the tokens to group them by "range" of token numbers (not +# symbols numbers!). +# +# b4_fst is the start of that range. +# b4_prev is the previous value. +# b4_val is the current value. +# If b4_val is the successor of b4_prev in token numbers, update the latter, +# otherwise emit the code for range b4_fst .. b4_prev. +# $1 is also used as a terminator in the foreach, but it will not be printed. 
+# +m4_define([_b4_tok_in], +[m4_pushdef([b4_prev], [$1])dnl +m4_pushdef([b4_fst], [$1])dnl +m4_pushdef([b4_sep], [])dnl +m4_foreach([b4_val], m4_dquote(m4_shift($@, $1)), + [m4_if(b4_symbol(b4_val, [code]), m4_eval(b4_symbol(b4_prev, [code]) + 1), [], + [b4_sep[]m4_if(b4_fst, b4_prev, + [tok == b4_token_kind(b4_fst)], + [(b4_token_kind(b4_fst) <= tok && tok <= b4_token_kind(b4_prev))])[]dnl +m4_define([b4_fst], b4_val)dnl +m4_define([b4_sep], [ + || ])])dnl +m4_define([b4_prev], b4_val)])dnl +m4_popdef([b4_sep])dnl +m4_popdef([b4_fst])dnl +m4_popdef([b4_prev])dnl +]) + + +# _b4_filter_tokens(SYMBOL-NUM, ...) +# ---------------------------------- +# Expand as the list of tokens amongst SYMBOL-NUM. +m4_define([_b4_filter_tokens], +[m4_pushdef([b4_sep])dnl +m4_foreach([b4_val], [$@], + [b4_symbol_if(b4_val, [is_token], [b4_sep[]b4_val[]m4_define([b4_sep], [,])])])dnl +m4_popdef([b4_sep])dnl +]) + + +# b4_tok_in(SYMBOL-NUM, ...) +# --------------------------- +# A C++ conditional that checks that `tok` is a member of this list of symbol +# numbers. +m4_define([b4_tok_in], + [_$0(_b4_filter_tokens($@))]) + + + + +# _b4_symbol_constructor_define(SYMBOL-NUM...) +# -------------------------------------------- +# Define a symbol_type constructor common to all the SYMBOL-NUM (they +# have the same type). Use at class-level. 
+m4_define([_b4_symbol_constructor_define], +[m4_ifval(_b4_includes_tokens($@), +[[#if 201103L <= YY_CPLUSPLUS + symbol_type (]b4_join( + [int tok], + b4_symbol_if([$1], [has_type], + [b4_symbol([$1], [type]) v]), + b4_locations_if([location_type l]))[) + : super_type (]b4_join([token_kind_type (tok)], + b4_symbol_if([$1], [has_type], [std::move (v)]), + b4_locations_if([std::move (l)]))[) +#else + symbol_type (]b4_join( + [int tok], + b4_symbol_if([$1], [has_type], + [const b4_symbol([$1], [type])& v]), + b4_locations_if([const location_type& l]))[) + : super_type (]b4_join([token_kind_type (tok)], + b4_symbol_if([$1], [has_type], [v]), + b4_locations_if([l]))[) +#endif + {]b4_parse_assert_if([[ +#if !defined _MSC_VER || defined __clang__ + ]b4_assert[ (]b4_tok_in($@)[); +#endif + ]])[} +]])]) + + +# b4_basic_symbol_constructor_define(SYMBOL-NUM) +# ---------------------------------------------- +# Generate a constructor for basic_symbol from given type. +m4_define([b4_basic_symbol_constructor_define], +[[#if 201103L <= YY_CPLUSPLUS + basic_symbol (]b4_join( + [typename Base::kind_type t], + b4_symbol_if([$1], [has_type], [b4_symbol([$1], [type])&& v]), + b4_locations_if([location_type&& l]))[) + : Base (t)]b4_symbol_if([$1], [has_type], [ + , value (std::move (v))])[]b4_locations_if([ + , location (std::move (l))])[ + {} +#else + basic_symbol (]b4_join( + [typename Base::kind_type t], + b4_symbol_if([$1], [has_type], [const b4_symbol([$1], [type])& v]), + b4_locations_if([const location_type& l]))[) + : Base (t)]b4_symbol_if([$1], [has_type], [ + , value (v)])[]b4_locations_if([ + , location (l)])[ + {} +#endif +]]) + + +# b4_token_constructor_define +# --------------------------- +# Define the overloaded versions of make_FOO for all the token kinds. +m4_define([b4_token_constructor_define], +[ // Implementation of make_symbol for each token kind. 
+b4_symbol_foreach([_b4_token_maker_define])]) diff --git a/platform/dbops/binaries/build/share/bison/skeletons/yacc.c b/platform/dbops/binaries/build/share/bison/skeletons/yacc.c new file mode 100644 index 0000000000000000000000000000000000000000..64b9ac6257c553509049a8a5d779c00e9bb86b29 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/skeletons/yacc.c @@ -0,0 +1,2209 @@ +# -*- C -*- +# Yacc compatible skeleton for Bison + +# Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2021 Free Software +# Foundation, Inc. + +m4_pushdef([b4_copyright_years], + [1984, 1989-1990, 2000-2015, 2018-2021]) + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +m4_include(b4_skeletonsdir/[c.m4]) + + +## ---------- ## +## api.pure. ## +## ---------- ## + +b4_percent_define_default([[api.pure]], [[false]]) +b4_percent_define_check_values([[[[api.pure]], + [[false]], [[true]], [[]], [[full]]]]) + +m4_define([b4_pure_flag], [[0]]) +m4_case(b4_percent_define_get([[api.pure]]), + [false], [m4_define([b4_pure_flag], [[0]])], + [true], [m4_define([b4_pure_flag], [[1]])], + [], [m4_define([b4_pure_flag], [[1]])], + [full], [m4_define([b4_pure_flag], [[2]])]) + +m4_define([b4_pure_if], +[m4_case(b4_pure_flag, + [0], [$2], + [1], [$1], + [2], [$1])]) + [m4_fatal([invalid api.pure value: ]$1)])]) + +## --------------- ## +## api.push-pull. 
## +## --------------- ## + +# b4_pull_if, b4_push_if +# ---------------------- +# Whether the pull/push APIs are needed. Both can be enabled. + +b4_percent_define_default([[api.push-pull]], [[pull]]) +b4_percent_define_check_values([[[[api.push-pull]], + [[pull]], [[push]], [[both]]]]) +b4_define_flag_if([pull]) m4_define([b4_pull_flag], [[1]]) +b4_define_flag_if([push]) m4_define([b4_push_flag], [[1]]) +m4_case(b4_percent_define_get([[api.push-pull]]), + [pull], [m4_define([b4_push_flag], [[0]])], + [push], [m4_define([b4_pull_flag], [[0]])]) + +# Handle BISON_USE_PUSH_FOR_PULL for the test suite. So that push parsing +# tests function as written, do not let BISON_USE_PUSH_FOR_PULL modify the +# behavior of Bison at all when push parsing is already requested. +b4_define_flag_if([use_push_for_pull]) +b4_use_push_for_pull_if([ + b4_push_if([m4_define([b4_use_push_for_pull_flag], [[0]])], + [m4_define([b4_push_flag], [[1]])])]) + +## ----------- ## +## parse.lac. ## +## ----------- ## + +b4_percent_define_default([[parse.lac]], [[none]]) +b4_percent_define_default([[parse.lac.es-capacity-initial]], [[20]]) +b4_percent_define_default([[parse.lac.memory-trace]], [[failures]]) +b4_percent_define_check_values([[[[parse.lac]], [[full]], [[none]]]], + [[[[parse.lac.memory-trace]], + [[failures]], [[full]]]]) +b4_define_flag_if([lac]) +m4_define([b4_lac_flag], + [m4_if(b4_percent_define_get([[parse.lac]]), + [none], [[0]], [[1]])]) + +## ---------------- ## +## Default values. ## +## ---------------- ## + +# Stack parameters. +m4_define_default([b4_stack_depth_max], [10000]) +m4_define_default([b4_stack_depth_init], [200]) + + +# b4_yyerror_arg_loc_if(ARG) +# -------------------------- +# Expand ARG iff yyerror is to be given a location as argument. 
+m4_define([b4_yyerror_arg_loc_if], +[b4_locations_if([m4_case(b4_pure_flag, + [1], [m4_ifset([b4_parse_param], [$1])], + [2], [$1])])]) + +# b4_yyerror_formals +# ------------------ +m4_define([b4_yyerror_formals], +[b4_pure_if([b4_locations_if([, [[const ]b4_api_PREFIX[LTYPE *yyllocp], [&yylloc]]])[]dnl +m4_ifdef([b4_parse_param], [, b4_parse_param])[]dnl +,])dnl +[[const char *msg], [msg]]]) + + + +# b4_yyerror_args +# --------------- +# Arguments passed to yyerror: user args plus yylloc. +m4_define([b4_yyerror_args], +[b4_yyerror_arg_loc_if([&yylloc, ])dnl +m4_ifset([b4_parse_param], [b4_args(b4_parse_param), ])]) + + + +## ----------------- ## +## Semantic Values. ## +## ----------------- ## + + +# b4_accept([SYMBOL-NUM]) +# ----------------------- +# Used in actions of the rules of accept, the initial symbol, to call +# YYACCEPT. If SYMBOL-NUM is specified, run "yyvalue->SLOT = $2;" +# before, using the slot of SYMBOL-NUM. +m4_define([b4_accept], +[m4_ifval([$1], + [b4_symbol_value(yyimpl->yyvalue, [$1]) = b4_rhs_value(2, 1, [$1]); ]) YYACCEPT]) + + +# b4_lhs_value(SYMBOL-NUM, [TYPE]) +# -------------------------------- +# See README. +m4_define([b4_lhs_value], +[b4_symbol_value(yyval, [$1], [$2])]) + + +# b4_rhs_value(RULE-LENGTH, POS, [SYMBOL-NUM], [TYPE]) +# ---------------------------------------------------- +# See README. +m4_define([b4_rhs_value], +[b4_symbol_value([yyvsp@{b4_subtract([$2], [$1])@}], [$3], [$4])]) + + +## ----------- ## +## Locations. ## +## ----------- ## + +# b4_lhs_location() +# ----------------- +# Expansion of @$. +# Overparenthetized to avoid obscure problems with "foo$$bar = foo$1bar". +m4_define([b4_lhs_location], +[(yyloc)]) + + +# b4_rhs_location(RULE-LENGTH, POS) +# --------------------------------- +# Expansion of @POS, where the current rule has RULE-LENGTH symbols +# on RHS. +# Overparenthetized to avoid obscure problems with "foo$$bar = foo$1bar". 
+m4_define([b4_rhs_location], +[(yylsp@{b4_subtract([$2], [$1])@})]) + + +## -------------- ## +## Declarations. ## +## -------------- ## + +# _b4_declare_sub_yyparse(START-SYMBOL-NUM, SWITCHING-TOKEN-SYMBOL-NUM) +# --------------------------------------------------------------------- +# Define the return type of the parsing function for SYMBOL-NUM, and +# declare its parsing function. +m4_define([_b4_declare_sub_yyparse], +[[ +// Return type when parsing one ]_b4_symbol($1, tag)[. +typedef struct +{]b4_symbol_if([$1], [has_type], [[ + ]_b4_symbol($1, type)[ yyvalue;]])[ + int yystatus; + int yynerrs; +} ]b4_prefix[parse_]_b4_symbol($1, id)[_t; + +// Parse one ]_b4_symbol($1, tag)[. +]b4_prefix[parse_]_b4_symbol($1, id)[_t ]b4_prefix[parse_]_b4_symbol($1, id)[ (]m4_ifset([b4_parse_param], [b4_formals(b4_parse_param)], [void])[); +]]) + + +# _b4_first_switching_token +# ------------------------- +m4_define([b4_first], [$1]) +m4_define([b4_second], [$2]) +m4_define([_b4_first_switching_token], +[b4_second(b4_first(b4_start_symbols))]) + + +# _b4_define_sub_yyparse(START-SYMBOL-NUM, SWITCHING-TOKEN-SYMBOL-NUM) +# -------------------------------------------------------------------- +# Define the parsing function for START-SYMBOL-NUM. +m4_define([_b4_define_sub_yyparse], +[[ +]b4_prefix[parse_]_b4_symbol($1, id)[_t +]b4_prefix[parse_]_b4_symbol($1, id)[ (]m4_ifset([b4_parse_param], [b4_formals(b4_parse_param)], [void])[) +{ + ]b4_prefix[parse_]_b4_symbol($1, id)[_t yyres; + yy_parse_impl_t yyimpl; + yyres.yystatus = yy_parse_impl (]b4_symbol($2, id)[, &yyimpl]m4_ifset([b4_parse_param], + [[, ]b4_args(b4_parse_param)])[);]b4_symbol_if([$1], [has_type], [[ + yyres.yyvalue = yyimpl.yyvalue.]b4_symbol($1, slot)[;]])[ + yyres.yynerrs = yyimpl.yynerrs; + return yyres; +} +]]) + + +# b4_declare_scanner_communication_variables +# ------------------------------------------ +# Declare the variables that are global, or local to YYPARSE if +# pure-parser. 
+m4_define([b4_declare_scanner_communication_variables], [[ +]m4_ifdef([b4_start_symbols], [], +[[/* Lookahead token kind. */ +int yychar; +]])[ +]b4_pure_if([[ +/* The semantic value of the lookahead symbol. */ +/* Default value used for initialization, for pacifying older GCCs + or non-GCC compilers. */ +YY_INITIAL_VALUE (static YYSTYPE yyval_default;) +YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default);]b4_locations_if([[ + +/* Location data for the lookahead symbol. */ +static YYLTYPE yyloc_default]b4_yyloc_default[; +YYLTYPE yylloc = yyloc_default;]])], +[[/* The semantic value of the lookahead symbol. */ +YYSTYPE yylval;]b4_locations_if([[ +/* Location data for the lookahead symbol. */ +YYLTYPE yylloc]b4_yyloc_default[;]])[ +/* Number of syntax errors so far. */ +int yynerrs;]])]) + + +# b4_declare_parser_state_variables([INIT]) +# ----------------------------------------- +# Declare all the variables that are needed to maintain the parser state +# between calls to yypush_parse. +# If INIT is non-null, initialize these variables. +m4_define([b4_declare_parser_state_variables], +[b4_pure_if([[ + /* Number of syntax errors so far. */ + int yynerrs]m4_ifval([$1], [ = 0])[; +]])[ + yy_state_fast_t yystate]m4_ifval([$1], [ = 0])[; + /* Number of tokens to shift before error messages enabled. */ + int yyerrstatus]m4_ifval([$1], [ = 0])[; + + /* Refer to the stacks through separate pointers, to allow yyoverflow + to reallocate them elsewhere. */ + + /* Their size. */ + YYPTRDIFF_T yystacksize]m4_ifval([$1], [ = YYINITDEPTH])[; + + /* The state stack: array, bottom, top. */ + yy_state_t yyssa[YYINITDEPTH]; + yy_state_t *yyss]m4_ifval([$1], [ = yyssa])[; + yy_state_t *yyssp]m4_ifval([$1], [ = yyss])[; + + /* The semantic value stack: array, bottom, top. */ + YYSTYPE yyvsa[YYINITDEPTH]; + YYSTYPE *yyvs]m4_ifval([$1], [ = yyvsa])[; + YYSTYPE *yyvsp]m4_ifval([$1], [ = yyvs])[;]b4_locations_if([[ + + /* The location stack: array, bottom, top. 
*/ + YYLTYPE yylsa[YYINITDEPTH]; + YYLTYPE *yyls]m4_ifval([$1], [ = yylsa])[; + YYLTYPE *yylsp]m4_ifval([$1], [ = yyls])[;]])[]b4_lac_if([[ + + yy_state_t yyesa@{]b4_percent_define_get([[parse.lac.es-capacity-initial]])[@}; + yy_state_t *yyes]m4_ifval([$1], [ = yyesa])[; + YYPTRDIFF_T yyes_capacity][]m4_ifval([$1], + [m4_do([ = b4_percent_define_get([[parse.lac.es-capacity-initial]]) < YYMAXDEPTH], + [ ? b4_percent_define_get([[parse.lac.es-capacity-initial]])], + [ : YYMAXDEPTH])])[;]])]) + + +m4_define([b4_macro_define], +[[#]define $1 $2]) + +m4_define([b4_macro_undef], +[[#]undef $1]) + +m4_define([b4_pstate_macro_define], +[b4_macro_define([$1], [yyps->$1])]) + +# b4_parse_state_variable_macros(b4_macro_define|b4_macro_undef) +# -------------------------------------------------------------- +m4_define([b4_parse_state_variable_macros], +[b4_pure_if([$1([b4_prefix[]nerrs])]) +$1([yystate]) +$1([yyerrstatus]) +$1([yyssa]) +$1([yyss]) +$1([yyssp]) +$1([yyvsa]) +$1([yyvs]) +$1([yyvsp])[]b4_locations_if([ +$1([yylsa]) +$1([yyls]) +$1([yylsp])]) +$1([yystacksize])[]b4_lac_if([ +$1([yyesa]) +$1([yyes]) +$1([yyes_capacity])])]) + + + + +# _b4_declare_yyparse_push +# ------------------------ +# Declaration of yyparse (and dependencies) when using the push parser +# (including in pull mode). 
+m4_define([_b4_declare_yyparse_push], +[[#ifndef YYPUSH_MORE_DEFINED +# define YYPUSH_MORE_DEFINED +enum { YYPUSH_MORE = 4 }; +#endif + +typedef struct ]b4_prefix[pstate ]b4_prefix[pstate; + +]b4_pull_if([[ +int ]b4_prefix[parse (]m4_ifset([b4_parse_param], [b4_formals(b4_parse_param)], [void])[);]])[ +int ]b4_prefix[push_parse (]b4_prefix[pstate *ps]b4_pure_if([[, + int pushed_char, ]b4_api_PREFIX[STYPE const *pushed_val]b4_locations_if([[, ]b4_api_PREFIX[LTYPE *pushed_loc]])])b4_user_formals[); +]b4_pull_if([[int ]b4_prefix[pull_parse (]b4_prefix[pstate *ps]b4_user_formals[);]])[ +]b4_prefix[pstate *]b4_prefix[pstate_new (void); +void ]b4_prefix[pstate_delete (]b4_prefix[pstate *ps); +]]) + + +# _b4_declare_yyparse +# ------------------- +# When not the push parser. +m4_define([_b4_declare_yyparse], +[[int ]b4_prefix[parse (]m4_ifset([b4_parse_param], [b4_formals(b4_parse_param)], [void])[); +]m4_ifdef([b4_start_symbols], + [m4_map([_b4_declare_sub_yyparse], m4_defn([b4_start_symbols]))])]) + + +# b4_declare_yyparse +# ------------------ +m4_define([b4_declare_yyparse], +[b4_push_if([_b4_declare_yyparse_push], + [_b4_declare_yyparse])[]dnl +]) + + +# b4_declare_yyerror_and_yylex +# ---------------------------- +# Comply with POSIX Yacc. +# +m4_define([b4_declare_yyerror_and_yylex], +[b4_posix_if([[#if !defined ]b4_prefix[error && !defined ]b4_api_PREFIX[ERROR_IS_DECLARED +]b4_function_declare([b4_prefix[error]], void, b4_yyerror_formals)[ +#endif +#if !defined ]b4_prefix[lex && !defined ]b4_api_PREFIX[LEX_IS_DECLARED +]b4_function_declare([b4_prefix[lex]], int, b4_yylex_formals)[ +#endif +]])dnl +]) + + +# b4_shared_declarations +# ---------------------- +# Declarations that might either go into the header (if --header) +# or into the implementation file. 
+m4_define([b4_shared_declarations], +[b4_cpp_guard_open([b4_spec_mapped_header_file])[ +]b4_declare_yydebug[ +]b4_percent_code_get([[requires]])[ +]b4_token_enums_defines[ +]b4_declare_yylstype[ +]b4_declare_yyerror_and_yylex[ +]b4_declare_yyparse[ +]b4_percent_code_get([[provides]])[ +]b4_cpp_guard_close([b4_spec_mapped_header_file])[]dnl +]) + + +# b4_header_include_if(IF-TRUE, IF-FALSE) +# --------------------------------------- +# Run IF-TRUE if we generate an output file and api.header.include +# is defined. +m4_define([b4_header_include_if], +[m4_ifval(m4_quote(b4_spec_header_file), + [b4_percent_define_ifdef([[api.header.include]], + [$1], + [$2])], + [$2])]) + +m4_if(b4_spec_header_file, [y.tab.h], [], + [b4_percent_define_default([[api.header.include]], + [["@basename(]b4_spec_header_file[@)"]])]) + + + + +## -------------- ## +## Output files. ## +## -------------- ## + + +b4_header_if([[ +]b4_output_begin([b4_spec_header_file])[ +]b4_copyright([Bison interface for Yacc-like parsers in C])[ +]b4_disclaimer[ +]b4_shared_declarations[ +]b4_output_end[ +]])# b4_header_if + +b4_output_begin([b4_parser_file_name])[ +]b4_copyright([Bison implementation for Yacc-like parsers in C])[ +/* C LALR(1) parser skeleton written by Richard Stallman, by + simplifying the original so-called "semantic" parser. */ + +]b4_disclaimer[ +/* All symbols defined below should begin with yy or YY, to avoid + infringing on user name space. This should be done even for local + variables, as they might otherwise be expanded by user macros. + There are some unavoidable exceptions within include files to + define necessary library symbols; they are noted "INFRINGES ON + USER NAME SPACE" below. */ + +]b4_identification[ +]b4_percent_code_get([[top]])[]dnl +m4_if(b4_api_prefix, [yy], [], +[[/* Substitute the type names. 
*/ +#define YYSTYPE ]b4_api_PREFIX[STYPE]b4_locations_if([[ +#define YYLTYPE ]b4_api_PREFIX[LTYPE]])])[ +]m4_if(b4_prefix, [yy], [], +[[/* Substitute the variable and function names. */]b4_pull_if([[ +#define yyparse ]b4_prefix[parse]])b4_push_if([[ +#define yypush_parse ]b4_prefix[push_parse]b4_pull_if([[ +#define yypull_parse ]b4_prefix[pull_parse]])[ +#define yypstate_new ]b4_prefix[pstate_new +#define yypstate_clear ]b4_prefix[pstate_clear +#define yypstate_delete ]b4_prefix[pstate_delete +#define yypstate ]b4_prefix[pstate]])[ +#define yylex ]b4_prefix[lex +#define yyerror ]b4_prefix[error +#define yydebug ]b4_prefix[debug +#define yynerrs ]b4_prefix[nerrs]]b4_pure_if([], [[ +#define yylval ]b4_prefix[lval +#define yychar ]b4_prefix[char]b4_locations_if([[ +#define yylloc ]b4_prefix[lloc]])]))[ + +]b4_user_pre_prologue[ +]b4_cast_define[ +]b4_null_define[ + +]b4_header_include_if([[#include ]b4_percent_define_get([[api.header.include]])], + [m4_ifval(m4_quote(b4_spec_header_file), + [/* Use api.header.include to #include this header + instead of duplicating it here. */ +])b4_shared_declarations])[ +]b4_declare_symbol_enum[ + +]b4_user_post_prologue[ +]b4_percent_code_get[ +]b4_c99_int_type_define[ + +]b4_sizes_types_define[ + +/* Stored state numbers (used for stacks). */ +typedef ]b4_int_type(0, m4_eval(b4_states_number - 1))[ yy_state_t; + +/* State numbers in computations. 
*/ +typedef int yy_state_fast_t; + +#ifndef YY_ +# if defined YYENABLE_NLS && YYENABLE_NLS +# if ENABLE_NLS +# include /* INFRINGES ON USER NAME SPACE */ +# define YY_(Msgid) dgettext ("bison-runtime", Msgid) +# endif +# endif +# ifndef YY_ +# define YY_(Msgid) Msgid +# endif +#endif +]b4_has_translations_if([ +#ifndef N_ +# define N_(Msgid) Msgid +#endif +])[ + +]b4_attribute_define[ + +]b4_parse_assert_if([[#ifdef NDEBUG +# define YY_ASSERT(E) ((void) (0 && (E))) +#else +# include /* INFRINGES ON USER NAME SPACE */ +# define YY_ASSERT(E) assert (E) +#endif +]], +[[#define YY_ASSERT(E) ((void) (0 && (E)))]])[ + +#if ]b4_lac_if([[1]], [b4_parse_error_case([simple], [[!defined yyoverflow]], [[1]])])[ + +/* The parser invokes alloca or malloc; define the necessary symbols. */]dnl +b4_push_if([], [b4_lac_if([], [[ + +# ifdef YYSTACK_USE_ALLOCA +# if YYSTACK_USE_ALLOCA +# ifdef __GNUC__ +# define YYSTACK_ALLOC __builtin_alloca +# elif defined __BUILTIN_VA_ARG_INCR +# include /* INFRINGES ON USER NAME SPACE */ +# elif defined _AIX +# define YYSTACK_ALLOC __alloca +# elif defined _MSC_VER +# include /* INFRINGES ON USER NAME SPACE */ +# define alloca _alloca +# else +# define YYSTACK_ALLOC alloca +# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS +# include /* INFRINGES ON USER NAME SPACE */ + /* Use EXIT_SUCCESS as a witness for stdlib.h. */ +# ifndef EXIT_SUCCESS +# define EXIT_SUCCESS 0 +# endif +# endif +# endif +# endif +# endif]])])[ + +# ifdef YYSTACK_ALLOC + /* Pacify GCC's 'empty if-body' warning. */ +# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0) +# ifndef YYSTACK_ALLOC_MAXIMUM + /* The OS might guarantee only one guard page at the bottom of the stack, + and a page size can be as small as 4096 bytes. So we cannot safely + invoke alloca (N) if N exceeds 4096. Use a slightly smaller number + to allow for a few compiler-allocated temporary stack slots. 
*/ +# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */ +# endif +# else +# define YYSTACK_ALLOC YYMALLOC +# define YYSTACK_FREE YYFREE +# ifndef YYSTACK_ALLOC_MAXIMUM +# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM +# endif +# if (defined __cplusplus && ! defined EXIT_SUCCESS \ + && ! ((defined YYMALLOC || defined malloc) \ + && (defined YYFREE || defined free))) +# include /* INFRINGES ON USER NAME SPACE */ +# ifndef EXIT_SUCCESS +# define EXIT_SUCCESS 0 +# endif +# endif +# ifndef YYMALLOC +# define YYMALLOC malloc +# if ! defined malloc && ! defined EXIT_SUCCESS +void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ +# endif +# endif +# ifndef YYFREE +# define YYFREE free +# if ! defined free && ! defined EXIT_SUCCESS +void free (void *); /* INFRINGES ON USER NAME SPACE */ +# endif +# endif +# endif]b4_lac_if([[ +# define YYCOPY_NEEDED 1]])[ +#endif /* ]b4_lac_if([[1]], [b4_parse_error_case([simple], [[!defined yyoverflow]], [[1]])])[ */ + +#if (! defined yyoverflow \ + && (! defined __cplusplus \ + || (]b4_locations_if([[defined ]b4_api_PREFIX[LTYPE_IS_TRIVIAL && ]b4_api_PREFIX[LTYPE_IS_TRIVIAL \ + && ]])[defined ]b4_api_PREFIX[STYPE_IS_TRIVIAL && ]b4_api_PREFIX[STYPE_IS_TRIVIAL))) + +/* A type that is properly aligned for any stack member. */ +union yyalloc +{ + yy_state_t yyss_alloc; + YYSTYPE yyvs_alloc;]b4_locations_if([ + YYLTYPE yyls_alloc;])[ +}; + +/* The size of the maximum gap between one aligned stack and the next. */ +# define YYSTACK_GAP_MAXIMUM (YYSIZEOF (union yyalloc) - 1) + +/* The size of an array large to enough to hold all stacks, each with + N elements. */ +]b4_locations_if( +[# define YYSTACK_BYTES(N) \ + ((N) * (YYSIZEOF (yy_state_t) + YYSIZEOF (YYSTYPE) \ + + YYSIZEOF (YYLTYPE)) \ + + 2 * YYSTACK_GAP_MAXIMUM)], +[# define YYSTACK_BYTES(N) \ + ((N) * (YYSIZEOF (yy_state_t) + YYSIZEOF (YYSTYPE)) \ + + YYSTACK_GAP_MAXIMUM)])[ + +# define YYCOPY_NEEDED 1 + +/* Relocate STACK from its old location to the new one. 
The + local variables YYSIZE and YYSTACKSIZE give the old and new number of + elements in the stack, and YYPTR gives the new location of the + stack. Advance YYPTR to a properly aligned location for the next + stack. */ +# define YYSTACK_RELOCATE(Stack_alloc, Stack) \ + do \ + { \ + YYPTRDIFF_T yynewbytes; \ + YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ + Stack = &yyptr->Stack_alloc; \ + yynewbytes = yystacksize * YYSIZEOF (*Stack) + YYSTACK_GAP_MAXIMUM; \ + yyptr += yynewbytes / YYSIZEOF (*yyptr); \ + } \ + while (0) + +#endif + +#if defined YYCOPY_NEEDED && YYCOPY_NEEDED +/* Copy COUNT objects from SRC to DST. The source and destination do + not overlap. */ +# ifndef YYCOPY +# if defined __GNUC__ && 1 < __GNUC__ +# define YYCOPY(Dst, Src, Count) \ + __builtin_memcpy (Dst, Src, YY_CAST (YYSIZE_T, (Count)) * sizeof (*(Src))) +# else +# define YYCOPY(Dst, Src, Count) \ + do \ + { \ + YYPTRDIFF_T yyi; \ + for (yyi = 0; yyi < (Count); yyi++) \ + (Dst)[yyi] = (Src)[yyi]; \ + } \ + while (0) +# endif +# endif +#endif /* !YYCOPY_NEEDED */ + +/* YYFINAL -- State number of the termination state. */ +#define YYFINAL ]b4_final_state_number[ +/* YYLAST -- Last index in YYTABLE. */ +#define YYLAST ]b4_last[ + +/* YYNTOKENS -- Number of terminals. */ +#define YYNTOKENS ]b4_tokens_number[ +/* YYNNTS -- Number of nonterminals. */ +#define YYNNTS ]b4_nterms_number[ +/* YYNRULES -- Number of rules. */ +#define YYNRULES ]b4_rules_number[ +/* YYNSTATES -- Number of states. */ +#define YYNSTATES ]b4_states_number[ + +/* YYMAXUTOK -- Last valid token kind. */ +#define YYMAXUTOK ]b4_code_max[ + + +/* YYTRANSLATE(TOKEN-NUM) -- Symbol number corresponding to TOKEN-NUM + as returned by yylex, with out-of-bounds checking. */ +]b4_api_token_raw_if(dnl +[[#define YYTRANSLATE(YYX) YY_CAST (yysymbol_kind_t, YYX)]], +[[#define YYTRANSLATE(YYX) \ + (0 <= (YYX) && (YYX) <= YYMAXUTOK \ + ? 
YY_CAST (yysymbol_kind_t, yytranslate[YYX]) \ + : ]b4_symbol_prefix[YYUNDEF) + +/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM + as returned by yylex. */ +static const ]b4_int_type_for([b4_translate])[ yytranslate[] = +{ + ]b4_translate[ +};]])[ + +#if ]b4_api_PREFIX[DEBUG +]b4_integral_parser_table_define([rline], [b4_rline], + [[YYRLINE[YYN] -- Source line where rule number YYN was defined.]])[ +#endif + +/** Accessing symbol of state STATE. */ +#define YY_ACCESSING_SYMBOL(State) YY_CAST (yysymbol_kind_t, yystos[State]) + +#if ]b4_parse_error_case([simple], [b4_api_PREFIX[DEBUG || ]b4_token_table_flag], [[1]])[ +/* The user-facing name of the symbol whose (internal) number is + YYSYMBOL. No bounds checking. */ +static const char *yysymbol_name (yysymbol_kind_t yysymbol) YY_ATTRIBUTE_UNUSED; + +]b4_parse_error_bmatch([simple\|verbose], +[[/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. + First, the terminals, then, starting at YYNTOKENS, nonterminals. */ +static const char *const yytname[] = +{ + ]b4_tname[ +}; + +static const char * +yysymbol_name (yysymbol_kind_t yysymbol) +{ + return yytname[yysymbol]; +}]], +[[static const char * +yysymbol_name (yysymbol_kind_t yysymbol) +{ + static const char *const yy_sname[] = + { + ]b4_symbol_names[ + };]b4_has_translations_if([[ + /* YYTRANSLATABLE[SYMBOL-NUM] -- Whether YY_SNAME[SYMBOL-NUM] is + internationalizable. */ + static ]b4_int_type_for([b4_translatable])[ yytranslatable[] = + { + ]b4_translatable[ + }; + return (yysymbol < YYNTOKENS && yytranslatable[yysymbol] + ? 
_(yy_sname[yysymbol]) + : yy_sname[yysymbol]);]], [[ + return yy_sname[yysymbol];]])[ +}]])[ +#endif + +#define YYPACT_NINF (]b4_pact_ninf[) + +#define yypact_value_is_default(Yyn) \ + ]b4_table_value_equals([[pact]], [[Yyn]], [b4_pact_ninf], [YYPACT_NINF])[ + +#define YYTABLE_NINF (]b4_table_ninf[) + +#define yytable_value_is_error(Yyn) \ + ]b4_table_value_equals([[table]], [[Yyn]], [b4_table_ninf], [YYTABLE_NINF])[ + +]b4_parser_tables_define[ + +enum { YYENOMEM = -2 }; + +#define yyerrok (yyerrstatus = 0) +#define yyclearin (yychar = ]b4_symbol(empty, id)[) + +#define YYACCEPT goto yyacceptlab +#define YYABORT goto yyabortlab +#define YYERROR goto yyerrorlab +#define YYNOMEM goto yyexhaustedlab + + +#define YYRECOVERING() (!!yyerrstatus) + +#define YYBACKUP(Token, Value) \ + do \ + if (yychar == ]b4_symbol(empty, id)[) \ + { \ + yychar = (Token); \ + yylval = (Value); \ + YYPOPSTACK (yylen); \ + yystate = *yyssp; \]b4_lac_if([[ + YY_LAC_DISCARD ("YYBACKUP"); \]])[ + goto yybackup; \ + } \ + else \ + { \ + yyerror (]b4_yyerror_args[YY_("syntax error: cannot back up")); \ + YYERROR; \ + } \ + while (0) + +/* Backward compatibility with an undocumented macro. + Use ]b4_symbol(error, id)[ or ]b4_symbol(undef, id)[. */ +#define YYERRCODE ]b4_symbol(undef, id)[ +]b4_locations_if([[ +]b4_yylloc_default_define[ +#define YYRHSLOC(Rhs, K) ((Rhs)[K]) +]])[ + +/* Enable debugging if requested. 
*/ +#if ]b4_api_PREFIX[DEBUG + +# ifndef YYFPRINTF +# include /* INFRINGES ON USER NAME SPACE */ +# define YYFPRINTF fprintf +# endif + +# define YYDPRINTF(Args) \ +do { \ + if (yydebug) \ + YYFPRINTF Args; \ +} while (0) + +]b4_yylocation_print_define[ + +# define YY_SYMBOL_PRINT(Title, Kind, Value, Location) \ +do { \ + if (yydebug) \ + { \ + YYFPRINTF (stderr, "%s ", Title); \ + yy_symbol_print (stderr, \ + Kind, Value]b4_locations_if([, Location])[]b4_user_args[); \ + YYFPRINTF (stderr, "\n"); \ + } \ +} while (0) + +]b4_yy_symbol_print_define[ + +/*------------------------------------------------------------------. +| yy_stack_print -- Print the state stack from its BOTTOM up to its | +| TOP (included). | +`------------------------------------------------------------------*/ + +static void +yy_stack_print (yy_state_t *yybottom, yy_state_t *yytop) +{ + YYFPRINTF (stderr, "Stack now"); + for (; yybottom <= yytop; yybottom++) + { + int yybot = *yybottom; + YYFPRINTF (stderr, " %d", yybot); + } + YYFPRINTF (stderr, "\n"); +} + +# define YY_STACK_PRINT(Bottom, Top) \ +do { \ + if (yydebug) \ + yy_stack_print ((Bottom), (Top)); \ +} while (0) + + +/*------------------------------------------------. +| Report that the YYRULE is going to be reduced. | +`------------------------------------------------*/ + +static void +yy_reduce_print (yy_state_t *yyssp, YYSTYPE *yyvsp,]b4_locations_if([[ YYLTYPE *yylsp,]])[ + int yyrule]b4_user_formals[) +{ + int yylno = yyrline[yyrule]; + int yynrhs = yyr2[yyrule]; + int yyi; + YYFPRINTF (stderr, "Reducing stack by rule %d (line %d):\n", + yyrule - 1, yylno); + /* The symbols being reduced. 
*/ + for (yyi = 0; yyi < yynrhs; yyi++) + { + YYFPRINTF (stderr, " $%d = ", yyi + 1); + yy_symbol_print (stderr, + YY_ACCESSING_SYMBOL (+yyssp[yyi + 1 - yynrhs]), + &]b4_rhs_value(yynrhs, yyi + 1)[]b4_locations_if([, + &]b4_rhs_location(yynrhs, yyi + 1))[]b4_user_args[); + YYFPRINTF (stderr, "\n"); + } +} + +# define YY_REDUCE_PRINT(Rule) \ +do { \ + if (yydebug) \ + yy_reduce_print (yyssp, yyvsp, ]b4_locations_if([yylsp, ])[Rule]b4_user_args[); \ +} while (0) + +/* Nonzero means print parse trace. It is left uninitialized so that + multiple parsers can coexist. */ +int yydebug; +#else /* !]b4_api_PREFIX[DEBUG */ +# define YYDPRINTF(Args) ((void) 0) +# define YY_SYMBOL_PRINT(Title, Kind, Value, Location) +# define YY_STACK_PRINT(Bottom, Top) +# define YY_REDUCE_PRINT(Rule) +#endif /* !]b4_api_PREFIX[DEBUG */ + + +/* YYINITDEPTH -- initial size of the parser's stacks. */ +#ifndef YYINITDEPTH +# define YYINITDEPTH ]b4_stack_depth_init[ +#endif + +/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only + if the built-in stack extension method is used). + + Do not make this value too large; the results are undefined if + YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH) + evaluated with infinite-precision integer arithmetic. */ + +#ifndef YYMAXDEPTH +# define YYMAXDEPTH ]b4_stack_depth_max[ +#endif]b4_push_if([[ +/* Parser data structure. */ +struct yypstate + {]b4_declare_parser_state_variables[ + /* Whether this instance has not started parsing yet. + * If 2, it corresponds to a finished parsing. */ + int yynew; + };]b4_pure_if([], [[ + +/* Whether the only allowed instance of yypstate is allocated. 
*/ +static char yypstate_allocated = 0;]])])[ +]b4_lac_if([[ + +/* Given a state stack such that *YYBOTTOM is its bottom, such that + *YYTOP is either its top or is YYTOP_EMPTY to indicate an empty + stack, and such that *YYCAPACITY is the maximum number of elements it + can hold without a reallocation, make sure there is enough room to + store YYADD more elements. If not, allocate a new stack using + YYSTACK_ALLOC, copy the existing elements, and adjust *YYBOTTOM, + *YYTOP, and *YYCAPACITY to reflect the new capacity and memory + location. If *YYBOTTOM != YYBOTTOM_NO_FREE, then free the old stack + using YYSTACK_FREE. Return 0 if successful or if no reallocation is + required. Return YYENOMEM if memory is exhausted. */ +static int +yy_lac_stack_realloc (YYPTRDIFF_T *yycapacity, YYPTRDIFF_T yyadd, +#if ]b4_api_PREFIX[DEBUG + char const *yydebug_prefix, + char const *yydebug_suffix, +#endif + yy_state_t **yybottom, + yy_state_t *yybottom_no_free, + yy_state_t **yytop, yy_state_t *yytop_empty) +{ + YYPTRDIFF_T yysize_old = + *yytop == yytop_empty ? 0 : *yytop - *yybottom + 1; + YYPTRDIFF_T yysize_new = yysize_old + yyadd; + if (*yycapacity < yysize_new) + { + YYPTRDIFF_T yyalloc = 2 * yysize_new; + yy_state_t *yybottom_new; + /* Use YYMAXDEPTH for maximum stack size given that the stack + should never need to grow larger than the main state stack + needs to grow without LAC. 
*/ + if (YYMAXDEPTH < yysize_new) + { + YYDPRINTF ((stderr, "%smax size exceeded%s", yydebug_prefix, + yydebug_suffix)); + return YYENOMEM; + } + if (YYMAXDEPTH < yyalloc) + yyalloc = YYMAXDEPTH; + yybottom_new = + YY_CAST (yy_state_t *, + YYSTACK_ALLOC (YY_CAST (YYSIZE_T, + yyalloc * YYSIZEOF (*yybottom_new)))); + if (!yybottom_new) + { + YYDPRINTF ((stderr, "%srealloc failed%s", yydebug_prefix, + yydebug_suffix)); + return YYENOMEM; + } + if (*yytop != yytop_empty) + { + YYCOPY (yybottom_new, *yybottom, yysize_old); + *yytop = yybottom_new + (yysize_old - 1); + } + if (*yybottom != yybottom_no_free) + YYSTACK_FREE (*yybottom); + *yybottom = yybottom_new; + *yycapacity = yyalloc;]m4_if(b4_percent_define_get([[parse.lac.memory-trace]]), + [full], [[ + YY_IGNORE_USELESS_CAST_BEGIN + YYDPRINTF ((stderr, "%srealloc to %ld%s", yydebug_prefix, + YY_CAST (long, yyalloc), yydebug_suffix)); + YY_IGNORE_USELESS_CAST_END]])[ + } + return 0; +} + +/* Establish the initial context for the current lookahead if no initial + context is currently established. + + We define a context as a snapshot of the parser stacks. We define + the initial context for a lookahead as the context in which the + parser initially examines that lookahead in order to select a + syntactic action. Thus, if the lookahead eventually proves + syntactically unacceptable (possibly in a later context reached via a + series of reductions), the initial context can be used to determine + the exact set of tokens that would be syntactically acceptable in the + lookahead's place. Moreover, it is the context after which any + further semantic actions would be erroneous because they would be + determined by a syntactically unacceptable token. + + YY_LAC_ESTABLISH should be invoked when a reduction is about to be + performed in an inconsistent state (which, for the purposes of LAC, + includes consistent states that don't know they're consistent because + their default reductions have been disabled). 
Iff there is a + lookahead token, it should also be invoked before reporting a syntax + error. This latter case is for the sake of the debugging output. + + For parse.lac=full, the implementation of YY_LAC_ESTABLISH is as + follows. If no initial context is currently established for the + current lookahead, then check if that lookahead can eventually be + shifted if syntactic actions continue from the current context. + Report a syntax error if it cannot. */ +#define YY_LAC_ESTABLISH \ +do { \ + if (!yy_lac_established) \ + { \ + YYDPRINTF ((stderr, \ + "LAC: initial context established for %s\n", \ + yysymbol_name (yytoken))); \ + yy_lac_established = 1; \ + switch (yy_lac (yyesa, &yyes, &yyes_capacity, yyssp, yytoken)) \ + { \ + case YYENOMEM: \ + YYNOMEM; \ + case 1: \ + goto yyerrlab; \ + } \ + } \ +} while (0) + +/* Discard any previous initial lookahead context because of Event, + which may be a lookahead change or an invalidation of the currently + established initial context for the current lookahead. + + The most common example of a lookahead change is a shift. An example + of both cases is syntax error recovery. That is, a syntax error + occurs when the lookahead is syntactically erroneous for the + currently established initial context, so error recovery manipulates + the parser stacks to try to find a new initial context in which the + current lookahead is syntactically acceptable. If it fails to find + such a context, it discards the lookahead. */ +#if ]b4_api_PREFIX[DEBUG +# define YY_LAC_DISCARD(Event) \ +do { \ + if (yy_lac_established) \ + { \ + YYDPRINTF ((stderr, "LAC: initial context discarded due to " \ + Event "\n")); \ + yy_lac_established = 0; \ + } \ +} while (0) +#else +# define YY_LAC_DISCARD(Event) yy_lac_established = 0 +#endif + +/* Given the stack whose top is *YYSSP, return 0 iff YYTOKEN can + eventually (after perhaps some reductions) be shifted, return 1 if + not, or return YYENOMEM if memory is exhausted. 
As preconditions and + postconditions: *YYES_CAPACITY is the allocated size of the array to + which *YYES points, and either *YYES = YYESA or *YYES points to an + array allocated with YYSTACK_ALLOC. yy_lac may overwrite the + contents of either array, alter *YYES and *YYES_CAPACITY, and free + any old *YYES other than YYESA. */ +static int +yy_lac (yy_state_t *yyesa, yy_state_t **yyes, + YYPTRDIFF_T *yyes_capacity, yy_state_t *yyssp, yysymbol_kind_t yytoken) +{ + yy_state_t *yyes_prev = yyssp; + yy_state_t *yyesp = yyes_prev; + /* Reduce until we encounter a shift and thereby accept the token. */ + YYDPRINTF ((stderr, "LAC: checking lookahead %s:", yysymbol_name (yytoken))); + if (yytoken == ]b4_symbol_prefix[YYUNDEF) + { + YYDPRINTF ((stderr, " Always Err\n")); + return 1; + } + while (1) + { + int yyrule = yypact[+*yyesp]; + if (yypact_value_is_default (yyrule) + || (yyrule += yytoken) < 0 || YYLAST < yyrule + || yycheck[yyrule] != yytoken) + { + /* Use the default action. */ + yyrule = yydefact[+*yyesp]; + if (yyrule == 0) + { + YYDPRINTF ((stderr, " Err\n")); + return 1; + } + } + else + { + /* Use the action from yytable. */ + yyrule = yytable[yyrule]; + if (yytable_value_is_error (yyrule)) + { + YYDPRINTF ((stderr, " Err\n")); + return 1; + } + if (0 < yyrule) + { + YYDPRINTF ((stderr, " S%d\n", yyrule)); + return 0; + } + yyrule = -yyrule; + } + /* By now we know we have to simulate a reduce. */ + YYDPRINTF ((stderr, " R%d", yyrule - 1)); + { + /* Pop the corresponding number of values from the stack. */ + YYPTRDIFF_T yylen = yyr2[yyrule]; + /* First pop from the LAC stack as many tokens as possible. */ + if (yyesp != yyes_prev) + { + YYPTRDIFF_T yysize = yyesp - *yyes + 1; + if (yylen < yysize) + { + yyesp -= yylen; + yylen = 0; + } + else + { + yyesp = yyes_prev; + yylen -= yysize; + } + } + /* Only afterwards look at the main stack. */ + if (yylen) + yyesp = yyes_prev -= yylen; + } + /* Push the resulting state of the reduction. 
*/ + { + yy_state_fast_t yystate; + { + const int yylhs = yyr1[yyrule] - YYNTOKENS; + const int yyi = yypgoto[yylhs] + *yyesp; + yystate = (0 <= yyi && yyi <= YYLAST && yycheck[yyi] == *yyesp + ? yytable[yyi] + : yydefgoto[yylhs]); + } + if (yyesp == yyes_prev) + { + yyesp = *yyes; + YY_IGNORE_USELESS_CAST_BEGIN + *yyesp = YY_CAST (yy_state_t, yystate); + YY_IGNORE_USELESS_CAST_END + } + else + { + if (yy_lac_stack_realloc (yyes_capacity, 1, +#if ]b4_api_PREFIX[DEBUG + " (", ")", +#endif + yyes, yyesa, &yyesp, yyes_prev)) + { + YYDPRINTF ((stderr, "\n")); + return YYENOMEM; + } + YY_IGNORE_USELESS_CAST_BEGIN + *++yyesp = YY_CAST (yy_state_t, yystate); + YY_IGNORE_USELESS_CAST_END + } + YYDPRINTF ((stderr, " G%d", yystate)); + } + } +}]])[ + +]b4_parse_error_case([simple], [], +[[/* Context of a parse error. */ +typedef struct +{]b4_push_if([[ + yypstate* yyps;]], [[ + yy_state_t *yyssp;]b4_lac_if([[ + yy_state_t *yyesa; + yy_state_t **yyes; + YYPTRDIFF_T *yyes_capacity;]])])[ + yysymbol_kind_t yytoken;]b4_locations_if([[ + YYLTYPE *yylloc;]])[ +} yypcontext_t; + +/* Put in YYARG at most YYARGN of the expected tokens given the + current YYCTX, and return the number of tokens stored in YYARG. If + YYARG is null, return the number of expected tokens (guaranteed to + be less than YYNTOKENS). Return YYENOMEM on memory exhaustion. + Return 0 if there are more than YYARGN expected tokens, yet fill + YYARG up to YYARGN. */]b4_push_if([[ +static int +yypstate_expected_tokens (yypstate *yyps, + yysymbol_kind_t yyarg[], int yyargn)]], [[ +static int +yypcontext_expected_tokens (const yypcontext_t *yyctx, + yysymbol_kind_t yyarg[], int yyargn)]])[ +{ + /* Actual size of YYARG. 
*/ + int yycount = 0; +]b4_lac_if([[ + int yyx; + for (yyx = 0; yyx < YYNTOKENS; ++yyx) + { + yysymbol_kind_t yysym = YY_CAST (yysymbol_kind_t, yyx); + if (yysym != ]b4_symbol(error, kind)[ && yysym != ]b4_symbol_prefix[YYUNDEF) + switch (yy_lac (]b4_push_if([[yyps->yyesa, &yyps->yyes, &yyps->yyes_capacity, yyps->yyssp, yysym]], + [[yyctx->yyesa, yyctx->yyes, yyctx->yyes_capacity, yyctx->yyssp, yysym]])[)) + { + case YYENOMEM: + return YYENOMEM; + case 1: + continue; + default: + if (!yyarg) + ++yycount; + else if (yycount == yyargn) + return 0; + else + yyarg[yycount++] = yysym; + } + }]], +[[ int yyn = yypact@{+*]b4_push_if([yyps], [yyctx])[->yyssp@}; + if (!yypact_value_is_default (yyn)) + { + /* Start YYX at -YYN if negative to avoid negative indexes in + YYCHECK. In other words, skip the first -YYN actions for + this state because they are default actions. */ + int yyxbegin = yyn < 0 ? -yyn : 0; + /* Stay within bounds of both yycheck and yytname. */ + int yychecklim = YYLAST - yyn + 1; + int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; + int yyx; + for (yyx = yyxbegin; yyx < yyxend; ++yyx) + if (yycheck[yyx + yyn] == yyx && yyx != ]b4_symbol(error, kind)[ + && !yytable_value_is_error (yytable[yyx + yyn])) + { + if (!yyarg) + ++yycount; + else if (yycount == yyargn) + return 0; + else + yyarg[yycount++] = YY_CAST (yysymbol_kind_t, yyx); + } + }]])[ + if (yyarg && yycount == 0 && 0 < yyargn) + yyarg[0] = ]b4_symbol(empty, kind)[; + return yycount; +} + +]b4_push_if([[ +/* Similar to the previous function. */ +static int +yypcontext_expected_tokens (const yypcontext_t *yyctx, + yysymbol_kind_t yyarg[], int yyargn) +{ + return yypstate_expected_tokens (yyctx->yyps, yyarg, yyargn); +}]])[ +]])[ + +]b4_parse_error_bmatch( + [custom], +[[/* The kind of the lookahead of this context. 
*/ +static yysymbol_kind_t +yypcontext_token (const yypcontext_t *yyctx) YY_ATTRIBUTE_UNUSED; + +static yysymbol_kind_t +yypcontext_token (const yypcontext_t *yyctx) +{ + return yyctx->yytoken; +} + +]b4_locations_if([[/* The location of the lookahead of this context. */ +static YYLTYPE * +yypcontext_location (const yypcontext_t *yyctx) YY_ATTRIBUTE_UNUSED; + +static YYLTYPE * +yypcontext_location (const yypcontext_t *yyctx) +{ + return yyctx->yylloc; +}]])[ + +/* User defined function to report a syntax error. */ +static int +yyreport_syntax_error (const yypcontext_t *yyctx]b4_user_formals[);]], + [detailed\|verbose], +[[#ifndef yystrlen +# if defined __GLIBC__ && defined _STRING_H +# define yystrlen(S) (YY_CAST (YYPTRDIFF_T, strlen (S))) +# else +/* Return the length of YYSTR. */ +static YYPTRDIFF_T +yystrlen (const char *yystr) +{ + YYPTRDIFF_T yylen; + for (yylen = 0; yystr[yylen]; yylen++) + continue; + return yylen; +} +# endif +#endif + +#ifndef yystpcpy +# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE +# define yystpcpy stpcpy +# else +/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in + YYDEST. */ +static char * +yystpcpy (char *yydest, const char *yysrc) +{ + char *yyd = yydest; + const char *yys = yysrc; + + while ((*yyd++ = *yys++) != '\0') + continue; + + return yyd - 1; +} +# endif +#endif + +]b4_parse_error_case( + [verbose], +[[#ifndef yytnamerr +/* Copy to YYRES the contents of YYSTR after stripping away unnecessary + quotes and backslashes, so that it's suitable for yyerror. The + heuristic is that double-quoting is unnecessary unless the string + contains an apostrophe, a comma, or backslash (other than + backslash-backslash). YYSTR is taken from yytname. If YYRES is + null, do not copy; instead, return the length of what the result + would have been. 
*/ +static YYPTRDIFF_T +yytnamerr (char *yyres, const char *yystr) +{ + if (*yystr == '"') + { + YYPTRDIFF_T yyn = 0; + char const *yyp = yystr; + for (;;) + switch (*++yyp) + { + case '\'': + case ',': + goto do_not_strip_quotes; + + case '\\': + if (*++yyp != '\\') + goto do_not_strip_quotes; + else + goto append; + + append: + default: + if (yyres) + yyres[yyn] = *yyp; + yyn++; + break; + + case '"': + if (yyres) + yyres[yyn] = '\0'; + return yyn; + } + do_not_strip_quotes: ; + } + + if (yyres) + return yystpcpy (yyres, yystr) - yyres; + else + return yystrlen (yystr); +} +#endif +]])[ + +static int +yy_syntax_error_arguments (const yypcontext_t *yyctx, + yysymbol_kind_t yyarg[], int yyargn) +{ + /* Actual size of YYARG. */ + int yycount = 0; + /* There are many possibilities here to consider: + - If this state is a consistent state with a default action, then + the only way this function was invoked is if the default action + is an error action. In that case, don't check for expected + tokens because there are none. + - The only way there can be no lookahead present (in yychar) is if + this state is a consistent state with a default action. Thus, + detecting the absence of a lookahead is sufficient to determine + that there is no unexpected or expected token to report. In that + case, just report a simple "syntax error". + - Don't assume there isn't a lookahead just because this state is a + consistent state with a default action. There might have been a + previous inconsistent state, consistent state with a non-default + action, or user semantic action that manipulated yychar.]b4_lac_if([[ + In the first two cases, it might appear that the current syntax + error should have been detected in the previous state when yy_lac + was invoked. 
However, at that time, there might have been a + different syntax error that discarded a different initial context + during error recovery, leaving behind the current lookahead.]], [[ + - Of course, the expected token list depends on states to have + correct lookahead information, and it depends on the parser not + to perform extra reductions after fetching a lookahead from the + scanner and before detecting a syntax error. Thus, state merging + (from LALR or IELR) and default reductions corrupt the expected + token list. However, the list is correct for canonical LR with + one exception: it will still contain any token that will not be + accepted due to an error action in a later state.]])[ + */ + if (yyctx->yytoken != ]b4_symbol(empty, kind)[) + { + int yyn;]b4_lac_if([[ + YYDPRINTF ((stderr, "Constructing syntax error message\n"));]])[ + if (yyarg) + yyarg[yycount] = yyctx->yytoken; + ++yycount; + yyn = yypcontext_expected_tokens (yyctx, + yyarg ? yyarg + 1 : yyarg, yyargn - 1); + if (yyn == YYENOMEM) + return YYENOMEM;]b4_lac_if([[ + else if (yyn == 0) + YYDPRINTF ((stderr, "No expected tokens.\n"));]])[ + else + yycount += yyn; + } + return yycount; +} + +/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message + about the unexpected token YYTOKEN for the state stack whose top is + YYSSP.]b4_lac_if([[ In order to see if a particular token T is a + valid looakhead, invoke yy_lac (YYESA, YYES, YYES_CAPACITY, YYSSP, T).]])[ + + Return 0 if *YYMSG was successfully written. Return -1 if *YYMSG is + not large enough to hold the message. In that case, also set + *YYMSG_ALLOC to the required number of bytes. Return YYENOMEM if the + required number of bytes is too large to store]b4_lac_if([[ or if + yy_lac returned YYENOMEM]])[. */ +static int +yysyntax_error (YYPTRDIFF_T *yymsg_alloc, char **yymsg, + const yypcontext_t *yyctx) +{ + enum { YYARGS_MAX = 5 }; + /* Internationalized format string. 
*/ + const char *yyformat = YY_NULLPTR; + /* Arguments of yyformat: reported tokens (one for the "unexpected", + one per "expected"). */ + yysymbol_kind_t yyarg[YYARGS_MAX]; + /* Cumulated lengths of YYARG. */ + YYPTRDIFF_T yysize = 0; + + /* Actual size of YYARG. */ + int yycount = yy_syntax_error_arguments (yyctx, yyarg, YYARGS_MAX); + if (yycount == YYENOMEM) + return YYENOMEM; + + switch (yycount) + { +#define YYCASE_(N, S) \ + case N: \ + yyformat = S; \ + break + default: /* Avoid compiler warnings. */ + YYCASE_(0, YY_("syntax error")); + YYCASE_(1, YY_("syntax error, unexpected %s")); + YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s")); + YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s")); + YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); + YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); +#undef YYCASE_ + } + + /* Compute error message size. Don't count the "%s"s, but reserve + room for the terminator. */ + yysize = yystrlen (yyformat) - 2 * yycount + 1; + { + int yyi; + for (yyi = 0; yyi < yycount; ++yyi) + { + YYPTRDIFF_T yysize1 + = yysize + ]b4_parse_error_case( + [verbose], [[yytnamerr (YY_NULLPTR, yytname[yyarg[yyi]])]], + [[yystrlen (yysymbol_name (yyarg[yyi]))]]);[ + if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM) + yysize = yysize1; + else + return YYENOMEM; + } + } + + if (*yymsg_alloc < yysize) + { + *yymsg_alloc = 2 * yysize; + if (! (yysize <= *yymsg_alloc + && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM)) + *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM; + return -1; + } + + /* Avoid sprintf, as that infringes on the user's name space. + Don't have undefined behavior even if the translation + produced a string with the wrong number of "%s"s. 
*/ + { + char *yyp = *yymsg; + int yyi = 0; + while ((*yyp = *yyformat) != '\0') + if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount) + {]b4_parse_error_case([verbose], [[ + yyp += yytnamerr (yyp, yytname[yyarg[yyi++]]);]], [[ + yyp = yystpcpy (yyp, yysymbol_name (yyarg[yyi++]));]])[ + yyformat += 2; + } + else + { + ++yyp; + ++yyformat; + } + } + return 0; +} +]])[ + +]b4_yydestruct_define[ + +]b4_pure_if([], [b4_declare_scanner_communication_variables])[ + +]b4_push_if([b4_pull_if([[ + +int +yyparse (]m4_ifset([b4_parse_param], [b4_formals(b4_parse_param)], [void])[) +{ + yypstate *yyps = yypstate_new (); + if (!yyps) + {]b4_pure_if([b4_locations_if([[ + static YYLTYPE yyloc_default][]b4_yyloc_default[; + YYLTYPE yylloc = yyloc_default;]])[ + yyerror (]b4_yyerror_args[YY_("memory exhausted"));]], [[ + if (!yypstate_allocated) + yyerror (]b4_yyerror_args[YY_("memory exhausted"));]])[ + return 2; + } + int yystatus = yypull_parse (yyps]b4_user_args[); + yypstate_delete (yyps); + return yystatus; +} + +int +yypull_parse (yypstate *yyps]b4_user_formals[) +{ + YY_ASSERT (yyps);]b4_pure_if([b4_locations_if([[ + static YYLTYPE yyloc_default][]b4_yyloc_default[; + YYLTYPE yylloc = yyloc_default;]])])[ + int yystatus; + do { +]b4_pure_if([[ YYSTYPE yylval; + int ]])[yychar = ]b4_yylex[; + yystatus = yypush_parse (yyps]b4_pure_if([[, yychar, &yylval]b4_locations_if([[, &yylloc]])])m4_ifset([b4_parse_param], [, b4_args(b4_parse_param)])[); + } while (yystatus == YYPUSH_MORE); + return yystatus; +}]])[ + +]b4_parse_state_variable_macros([b4_pstate_macro_define])[ + +/* Initialize the parser data structure. */ +static void +yypstate_clear (yypstate *yyps) +{ + yynerrs = 0; + yystate = 0; + yyerrstatus = 0; + + yyssp = yyss; + yyvsp = yyvs;]b4_locations_if([[ + yylsp = yyls;]])[ + + /* Initialize the state stack, in case yypcontext_expected_tokens is + called before the first call to yyparse. 
*/ + *yyssp = 0; + yyps->yynew = 1; +} + +/* Initialize the parser data structure. */ +yypstate * +yypstate_new (void) +{ + yypstate *yyps;]b4_pure_if([], [[ + if (yypstate_allocated) + return YY_NULLPTR;]])[ + yyps = YY_CAST (yypstate *, YYMALLOC (sizeof *yyps)); + if (!yyps) + return YY_NULLPTR;]b4_pure_if([], [[ + yypstate_allocated = 1;]])[ + yystacksize = YYINITDEPTH; + yyss = yyssa; + yyvs = yyvsa;]b4_locations_if([[ + yyls = yylsa;]])[]b4_lac_if([[ + yyes = yyesa; + yyes_capacity = ]b4_percent_define_get([[parse.lac.es-capacity-initial]])[; + if (YYMAXDEPTH < yyes_capacity) + yyes_capacity = YYMAXDEPTH;]])[ + yypstate_clear (yyps); + return yyps; +} + +void +yypstate_delete (yypstate *yyps) +{ + if (yyps) + { +#ifndef yyoverflow + /* If the stack was reallocated but the parse did not complete, then the + stack still needs to be freed. */ + if (yyss != yyssa) + YYSTACK_FREE (yyss); +#endif]b4_lac_if([[ + if (yyes != yyesa) + YYSTACK_FREE (yyes);]])[ + YYFREE (yyps);]b4_pure_if([], [[ + yypstate_allocated = 0;]])[ + } +} +]])[ + +]b4_push_if([[ +/*---------------. +| yypush_parse. | +`---------------*/ + +int +yypush_parse (yypstate *yyps]b4_pure_if([[, + int yypushed_char, YYSTYPE const *yypushed_val]b4_locations_if([[, YYLTYPE *yypushed_loc]])])b4_user_formals[)]], +[[ +/*----------. +| yyparse. | +`----------*/ + +]m4_ifdef([b4_start_symbols], +[[// Extract data from the parser. +typedef struct +{ + YYSTYPE yyvalue; + int yynerrs; +} yy_parse_impl_t; + +// Run a full parse, using YYCHAR as switching token. 
+static int +yy_parse_impl (int yychar, yy_parse_impl_t *yyimpl]m4_ifset([b4_parse_param], [, b4_formals(b4_parse_param)])[); + +]m4_map([_b4_define_sub_yyparse], m4_defn([b4_start_symbols]))[ + +int +yyparse (]m4_ifset([b4_parse_param], [b4_formals(b4_parse_param)], [void])[) +{ + return yy_parse_impl (]b4_symbol(_b4_first_switching_token, id)[, YY_NULLPTR]m4_ifset([b4_parse_param], + [[, ]b4_args(b4_parse_param)])[); +} + +static int +yy_parse_impl (int yychar, yy_parse_impl_t *yyimpl]m4_ifset([b4_parse_param], [, b4_formals(b4_parse_param)])[)]], +[[int +yyparse (]m4_ifset([b4_parse_param], [b4_formals(b4_parse_param)], [void])[)]])])[ +{]b4_pure_if([b4_declare_scanner_communication_variables +])b4_push_if([b4_pure_if([], [[ + int yypushed_char = yychar; + YYSTYPE yypushed_val = yylval;]b4_locations_if([[ + YYLTYPE yypushed_loc = yylloc;]]) +])], + [b4_declare_parser_state_variables([init]) +])b4_lac_if([[ + /* Whether LAC context is established. A Boolean. */ + int yy_lac_established = 0;]])[ + int yyn; + /* The return value of yyparse. */ + int yyresult; + /* Lookahead symbol kind. */ + yysymbol_kind_t yytoken = ]b4_symbol(empty, kind)[; + /* The variables used to return semantic value and location from the + action routines. */ + YYSTYPE yyval;]b4_locations_if([[ + YYLTYPE yyloc; + + /* The locations where the error started and ended. */ + YYLTYPE yyerror_range[3];]])[ + +]b4_parse_error_bmatch([detailed\|verbose], +[[ /* Buffer for error messages, and its allocated size. */ + char yymsgbuf[128]; + char *yymsg = yymsgbuf; + YYPTRDIFF_T yymsg_alloc = sizeof yymsgbuf;]])[ + +#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)]b4_locations_if([, yylsp -= (N)])[) + + /* The number of symbols on the RHS of the reduced rule. + Keep to zero when no symbol should be popped. 
*/ + int yylen = 0;]b4_push_if([[ + + switch (yyps->yynew) + { + case 0: + yyn = yypact[yystate]; + goto yyread_pushed_token; + + case 2: + yypstate_clear (yyps); + break; + + default: + break; + }]])[ + + YYDPRINTF ((stderr, "Starting parse\n")); + +]m4_ifdef([b4_start_symbols], [], +[[ yychar = ]b4_symbol(empty, id)[; /* Cause a token to be read. */ +]])[ +]m4_ifdef([b4_initial_action], [ +b4_dollar_pushdef([m4_define([b4_dollar_dollar_used])yylval], [], [], + [b4_push_if([b4_pure_if([*])yypushed_loc], [yylloc])])dnl +b4_user_initial_action +b4_dollar_popdef[]dnl +m4_ifdef([b4_dollar_dollar_used],[[ yyvsp[0] = yylval; +]])])dnl +b4_locations_if([[ yylsp[0] = ]b4_push_if([b4_pure_if([*])yypushed_loc], [yylloc])[; +]])dnl +[ goto yysetstate; + + +/*------------------------------------------------------------. +| yynewstate -- push a new state, which is found in yystate. | +`------------------------------------------------------------*/ +yynewstate: + /* In all cases, when you get here, the value and location stacks + have just been pushed. So pushing a state here evens the stacks. */ + yyssp++; + + +/*--------------------------------------------------------------------. +| yysetstate -- set current state (the top of the stack) to yystate. | +`--------------------------------------------------------------------*/ +yysetstate: + YYDPRINTF ((stderr, "Entering state %d\n", yystate)); + YY_ASSERT (0 <= yystate && yystate < YYNSTATES); + YY_IGNORE_USELESS_CAST_BEGIN + *yyssp = YY_CAST (yy_state_t, yystate); + YY_IGNORE_USELESS_CAST_END + YY_STACK_PRINT (yyss, yyssp); + + if (yyss + yystacksize - 1 <= yyssp) +#if !defined yyoverflow && !defined YYSTACK_RELOCATE + YYNOMEM; +#else + { + /* Get the current used size of the three stacks, in elements. */ + YYPTRDIFF_T yysize = yyssp - yyss + 1; + +# if defined yyoverflow + { + /* Give user a chance to reallocate the stack. Use copies of + these so that the &'s don't force the real ones into + memory. 
*/ + yy_state_t *yyss1 = yyss; + YYSTYPE *yyvs1 = yyvs;]b4_locations_if([ + YYLTYPE *yyls1 = yyls;])[ + + /* Each stack pointer address is followed by the size of the + data in use in that stack, in bytes. This used to be a + conditional around just the two extra args, but that might + be undefined if yyoverflow is a macro. */ + yyoverflow (YY_("memory exhausted"), + &yyss1, yysize * YYSIZEOF (*yyssp), + &yyvs1, yysize * YYSIZEOF (*yyvsp),]b4_locations_if([ + &yyls1, yysize * YYSIZEOF (*yylsp),])[ + &yystacksize); + yyss = yyss1; + yyvs = yyvs1;]b4_locations_if([ + yyls = yyls1;])[ + } +# else /* defined YYSTACK_RELOCATE */ + /* Extend the stack our own way. */ + if (YYMAXDEPTH <= yystacksize) + YYNOMEM; + yystacksize *= 2; + if (YYMAXDEPTH < yystacksize) + yystacksize = YYMAXDEPTH; + + { + yy_state_t *yyss1 = yyss; + union yyalloc *yyptr = + YY_CAST (union yyalloc *, + YYSTACK_ALLOC (YY_CAST (YYSIZE_T, YYSTACK_BYTES (yystacksize)))); + if (! yyptr) + YYNOMEM; + YYSTACK_RELOCATE (yyss_alloc, yyss); + YYSTACK_RELOCATE (yyvs_alloc, yyvs);]b4_locations_if([ + YYSTACK_RELOCATE (yyls_alloc, yyls);])[ +# undef YYSTACK_RELOCATE + if (yyss1 != yyssa) + YYSTACK_FREE (yyss1); + } +# endif + + yyssp = yyss + yysize - 1; + yyvsp = yyvs + yysize - 1;]b4_locations_if([ + yylsp = yyls + yysize - 1;])[ + + YY_IGNORE_USELESS_CAST_BEGIN + YYDPRINTF ((stderr, "Stack size increased to %ld\n", + YY_CAST (long, yystacksize))); + YY_IGNORE_USELESS_CAST_END + + if (yyss + yystacksize - 1 <= yyssp) + YYABORT; + } +#endif /* !defined yyoverflow && !defined YYSTACK_RELOCATE */ + +]m4_ifdef([b4_start_symbols], [], [[ + if (yystate == YYFINAL) + YYACCEPT;]])[ + + goto yybackup; + + +/*-----------. +| yybackup. | +`-----------*/ +yybackup: + /* Do appropriate processing given the current state. Read a + lookahead token if we need one and don't already have one. */ + + /* First try to decide what to do without reference to lookahead token. 
*/ + yyn = yypact[yystate]; + if (yypact_value_is_default (yyn)) + goto yydefault; + + /* Not known => get a lookahead token if don't already have one. */ + + /* YYCHAR is either empty, or end-of-input, or a valid lookahead. */ + if (yychar == ]b4_symbol(empty, id)[) + {]b4_push_if([[ + if (!yyps->yynew) + {]b4_use_push_for_pull_if([], [[ + YYDPRINTF ((stderr, "Return for a new token:\n"));]])[ + yyresult = YYPUSH_MORE; + goto yypushreturn; + } + yyps->yynew = 0;]b4_pure_if([], [[ + /* Restoring the pushed token is only necessary for the first + yypush_parse invocation since subsequent invocations don't overwrite + it before jumping to yyread_pushed_token. */ + yychar = yypushed_char; + yylval = yypushed_val;]b4_locations_if([[ + yylloc = yypushed_loc;]])])[ +yyread_pushed_token:]])[ + YYDPRINTF ((stderr, "Reading a token\n"));]b4_push_if([b4_pure_if([[ + yychar = yypushed_char; + if (yypushed_val) + yylval = *yypushed_val;]b4_locations_if([[ + if (yypushed_loc) + yylloc = *yypushed_loc;]])])], [[ + yychar = ]b4_yylex[;]])[ + } + + if (yychar <= ]b4_symbol(eof, [id])[) + { + yychar = ]b4_symbol(eof, [id])[; + yytoken = ]b4_symbol(eof, [kind])[; + YYDPRINTF ((stderr, "Now at end of input.\n")); + } + else if (yychar == ]b4_symbol(error, [id])[) + { + /* The scanner already issued an error message, process directly + to error recovery. But do not keep the error token as + lookahead, it is too special and may lead us to an endless + loop in error recovery. */ + yychar = ]b4_symbol(undef, [id])[; + yytoken = ]b4_symbol(error, [kind])[;]b4_locations_if([[ + yyerror_range[1] = yylloc;]])[ + goto yyerrlab1; + } + else + { + yytoken = YYTRANSLATE (yychar); + YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); + } + + /* If the proper action on seeing token YYTOKEN is to reduce or to + detect an error, take that action. 
*/ + yyn += yytoken; + if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)]b4_lac_if([[ + { + YY_LAC_ESTABLISH; + goto yydefault; + }]], [[ + goto yydefault;]])[ + yyn = yytable[yyn]; + if (yyn <= 0) + { + if (yytable_value_is_error (yyn)) + goto yyerrlab; + yyn = -yyn;]b4_lac_if([[ + YY_LAC_ESTABLISH;]])[ + goto yyreduce; + } + + /* Count tokens shifted since error; after three, turn off error + status. */ + if (yyerrstatus) + yyerrstatus--; + + /* Shift the lookahead token. */ + YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); + yystate = yyn; + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + *++yyvsp = yylval; + YY_IGNORE_MAYBE_UNINITIALIZED_END]b4_locations_if([ + *++yylsp = yylloc;])[ + + /* Discard the shifted token. */ + yychar = ]b4_symbol(empty, id)[;]b4_lac_if([[ + YY_LAC_DISCARD ("shift");]])[ + goto yynewstate; + + +/*-----------------------------------------------------------. +| yydefault -- do the default action for the current state. | +`-----------------------------------------------------------*/ +yydefault: + yyn = yydefact[yystate]; + if (yyn == 0) + goto yyerrlab; + goto yyreduce; + + +/*-----------------------------. +| yyreduce -- do a reduction. | +`-----------------------------*/ +yyreduce: + /* yyn is the number of a rule to reduce with. */ + yylen = yyr2[yyn]; + + /* If YYLEN is nonzero, implement the default value of the action: + '$$ = $1'. + + Otherwise, the following line sets YYVAL to garbage. + This behavior is undocumented and Bison + users should not rely upon it. Assigning to YYVAL + unconditionally makes the parser a bit smaller, and it avoids a + GCC warning that YYVAL may be used uninitialized. */ + yyval = yyvsp[1-yylen]; + +]b4_locations_if( +[[ /* Default location. 
*/ + YYLLOC_DEFAULT (yyloc, (yylsp - yylen), yylen); + yyerror_range[1] = yyloc;]])[ + YY_REDUCE_PRINT (yyn);]b4_lac_if([[ + { + int yychar_backup = yychar; + switch (yyn) + { +]b4_user_actions[ + default: break; + } + if (yychar_backup != yychar) + YY_LAC_DISCARD ("yychar change"); + }]], [[ + switch (yyn) + { +]b4_user_actions[ + default: break; + }]])[ + /* User semantic actions sometimes alter yychar, and that requires + that yytoken be updated with the new translation. We take the + approach of translating immediately before every use of yytoken. + One alternative is translating here after every semantic action, + but that translation would be missed if the semantic action invokes + YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or + if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an + incorrect destructor might then be invoked immediately. In the + case of YYERROR or YYBACKUP, subsequent parser actions might lead + to an incorrect destructor call or verbose syntax error message + before the lookahead is translated. */ + YY_SYMBOL_PRINT ("-> $$ =", YY_CAST (yysymbol_kind_t, yyr1[yyn]), &yyval, &yyloc); + + YYPOPSTACK (yylen); + yylen = 0; + + *++yyvsp = yyval;]b4_locations_if([ + *++yylsp = yyloc;])[ + + /* Now 'shift' the result of the reduction. Determine what state + that goes to, based on the state we popped back to and the rule + number reduced by. */ + { + const int yylhs = yyr1[yyn] - YYNTOKENS; + const int yyi = yypgoto[yylhs] + *yyssp; + yystate = (0 <= yyi && yyi <= YYLAST && yycheck[yyi] == *yyssp + ? yytable[yyi] + : yydefgoto[yylhs]); + } + + goto yynewstate; + + +/*--------------------------------------. +| yyerrlab -- here on detecting error. | +`--------------------------------------*/ +yyerrlab: + /* Make sure we have latest lookahead translation. See comments at + user semantic actions for why this is necessary. */ + yytoken = yychar == ]b4_symbol(empty, id)[ ? 
]b4_symbol(empty, kind)[ : YYTRANSLATE (yychar); + /* If not already recovering from an error, report this error. */ + if (!yyerrstatus) + { + ++yynerrs; +]b4_parse_error_case( + [custom], +[[ { + yypcontext_t yyctx + = {]b4_push_if([[yyps]], [[yyssp]b4_lac_if([[, yyesa, &yyes, &yyes_capacity]])])[, yytoken]b4_locations_if([[, &yylloc]])[};]b4_lac_if([[ + if (yychar != ]b4_symbol(empty, id)[) + YY_LAC_ESTABLISH;]])[ + if (yyreport_syntax_error (&yyctx]m4_ifset([b4_parse_param], + [[, ]b4_args(b4_parse_param)])[) == 2) + YYNOMEM; + }]], + [simple], +[[ yyerror (]b4_yyerror_args[YY_("syntax error"));]], +[[ { + yypcontext_t yyctx + = {]b4_push_if([[yyps]], [[yyssp]b4_lac_if([[, yyesa, &yyes, &yyes_capacity]])])[, yytoken]b4_locations_if([[, &yylloc]])[}; + char const *yymsgp = YY_("syntax error"); + int yysyntax_error_status;]b4_lac_if([[ + if (yychar != ]b4_symbol(empty, id)[) + YY_LAC_ESTABLISH;]])[ + yysyntax_error_status = yysyntax_error (&yymsg_alloc, &yymsg, &yyctx); + if (yysyntax_error_status == 0) + yymsgp = yymsg; + else if (yysyntax_error_status == -1) + { + if (yymsg != yymsgbuf) + YYSTACK_FREE (yymsg); + yymsg = YY_CAST (char *, + YYSTACK_ALLOC (YY_CAST (YYSIZE_T, yymsg_alloc))); + if (yymsg) + { + yysyntax_error_status + = yysyntax_error (&yymsg_alloc, &yymsg, &yyctx); + yymsgp = yymsg; + } + else + { + yymsg = yymsgbuf; + yymsg_alloc = sizeof yymsgbuf; + yysyntax_error_status = YYENOMEM; + } + } + yyerror (]b4_yyerror_args[yymsgp); + if (yysyntax_error_status == YYENOMEM) + YYNOMEM; + }]])[ + } +]b4_locations_if([[ + yyerror_range[1] = yylloc;]])[ + if (yyerrstatus == 3) + { + /* If just tried and failed to reuse lookahead token after an + error, discard it. */ + + if (yychar <= ]b4_symbol(eof, [id])[) + { + /* Return failure if at end of input. 
*/ + if (yychar == ]b4_symbol(eof, [id])[) + YYABORT; + } + else + { + yydestruct ("Error: discarding", + yytoken, &yylval]b4_locations_if([, &yylloc])[]b4_user_args[); + yychar = ]b4_symbol(empty, id)[; + } + } + + /* Else will try to reuse lookahead token after shifting the error + token. */ + goto yyerrlab1; + + +/*---------------------------------------------------. +| yyerrorlab -- error raised explicitly by YYERROR. | +`---------------------------------------------------*/ +yyerrorlab: + /* Pacify compilers when the user code never invokes YYERROR and the + label yyerrorlab therefore never appears in user code. */ + if (0) + YYERROR; + ++yynerrs; + + /* Do not reclaim the symbols of the rule whose action triggered + this YYERROR. */ + YYPOPSTACK (yylen); + yylen = 0; + YY_STACK_PRINT (yyss, yyssp); + yystate = *yyssp; + goto yyerrlab1; + + +/*-------------------------------------------------------------. +| yyerrlab1 -- common code for both syntax error and YYERROR. | +`-------------------------------------------------------------*/ +yyerrlab1: + yyerrstatus = 3; /* Each real token shifted decrements this. */ + + /* Pop stack until we find a state that shifts the error token. */ + for (;;) + { + yyn = yypact[yystate]; + if (!yypact_value_is_default (yyn)) + { + yyn += ]b4_symbol(error, kind)[; + if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == ]b4_symbol(error, kind)[) + { + yyn = yytable[yyn]; + if (0 < yyn) + break; + } + } + + /* Pop the current state because it cannot handle the error token. */ + if (yyssp == yyss) + YYABORT; + +]b4_locations_if([[ yyerror_range[1] = *yylsp;]])[ + yydestruct ("Error: popping", + YY_ACCESSING_SYMBOL (yystate), yyvsp]b4_locations_if([, yylsp])[]b4_user_args[); + YYPOPSTACK (1); + yystate = *yyssp; + YY_STACK_PRINT (yyss, yyssp); + }]b4_lac_if([[ + + /* If the stack popping above didn't lose the initial context for the + current lookahead token, the shift below will for sure. 
*/ + YY_LAC_DISCARD ("error recovery");]])[ + + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + *++yyvsp = yylval; + YY_IGNORE_MAYBE_UNINITIALIZED_END +]b4_locations_if([[ + yyerror_range[2] = yylloc; + ++yylsp; + YYLLOC_DEFAULT (*yylsp, yyerror_range, 2);]])[ + + /* Shift the error token. */ + YY_SYMBOL_PRINT ("Shifting", YY_ACCESSING_SYMBOL (yyn), yyvsp, yylsp); + + yystate = yyn; + goto yynewstate; + + +/*-------------------------------------. +| yyacceptlab -- YYACCEPT comes here. | +`-------------------------------------*/ +yyacceptlab: + yyresult = 0; + goto yyreturnlab; + + +/*-----------------------------------. +| yyabortlab -- YYABORT comes here. | +`-----------------------------------*/ +yyabortlab: + yyresult = 1; + goto yyreturnlab; + + +/*-----------------------------------------------------------. +| yyexhaustedlab -- YYNOMEM (memory exhaustion) comes here. | +`-----------------------------------------------------------*/ +yyexhaustedlab: + yyerror (]b4_yyerror_args[YY_("memory exhausted")); + yyresult = 2; + goto yyreturnlab; + + +/*----------------------------------------------------------. +| yyreturnlab -- parsing is finished, clean up and return. | +`----------------------------------------------------------*/ +yyreturnlab: + if (yychar != ]b4_symbol(empty, id)[) + { + /* Make sure we have latest lookahead translation. See comments at + user semantic actions for why this is necessary. */ + yytoken = YYTRANSLATE (yychar); + yydestruct ("Cleanup: discarding lookahead", + yytoken, &yylval]b4_locations_if([, &yylloc])[]b4_user_args[); + } + /* Do not reclaim the symbols of the rule whose action triggered + this YYABORT or YYACCEPT. */ + YYPOPSTACK (yylen); + YY_STACK_PRINT (yyss, yyssp); + while (yyssp != yyss) + { + yydestruct ("Cleanup: popping", + YY_ACCESSING_SYMBOL (+*yyssp), yyvsp]b4_locations_if([, yylsp])[]b4_user_args[); + YYPOPSTACK (1); + }]b4_push_if([[ + yyps->yynew = 2; + goto yypushreturn; + + +/*-------------------------. 
+| yypushreturn -- return. | +`-------------------------*/ +yypushreturn:]], [[ +#ifndef yyoverflow + if (yyss != yyssa) + YYSTACK_FREE (yyss); +#endif]b4_lac_if([[ + if (yyes != yyesa) + YYSTACK_FREE (yyes);]])])[ +]b4_parse_error_bmatch([detailed\|verbose], +[[ if (yymsg != yymsgbuf) + YYSTACK_FREE (yymsg);]])[]m4_ifdef([b4_start_symbols], [[ + if (yyimpl) + yyimpl->yynerrs = yynerrs;]])[ + return yyresult; +} +]b4_push_if([b4_parse_state_variable_macros([b4_macro_undef])])[ +]b4_percent_code_get([[epilogue]])[]dnl +b4_epilogue[]dnl +b4_output_end diff --git a/platform/dbops/binaries/build/share/bison/xslt/bison.xsl b/platform/dbops/binaries/build/share/bison/xslt/bison.xsl new file mode 100644 index 0000000000000000000000000000000000000000..989a3437f4dd52c00e3377a93f9b7e1033ad54ea --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/xslt/bison.xsl @@ -0,0 +1,105 @@ + + + + + + + + + + + + + + + + + + + + + + + + + s + + + r + + + + + + , + + + + + 0 + + + + + + + + + + + diff --git a/platform/dbops/binaries/build/share/bison/xslt/xml2dot.xsl b/platform/dbops/binaries/build/share/bison/xslt/xml2dot.xsl new file mode 100644 index 0000000000000000000000000000000000000000..7715d1ac3ecf9eb20cf074ad7490c5c9e7a89fbd --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/xslt/xml2dot.xsl @@ -0,0 +1,401 @@ + + + + + + + + + + + + + + + // Generated by GNU Bison + + . + // Report bugs to < + + >. + // Home page: < + + >. + + + + + + + + digraph " + + + + " { + node [fontname = courier, shape = box, colorscheme = paired6] + edge [fontname = courier] + + + + } + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 + + + + + + + + + + + + + + + + + + label="[ + + + + + + , + + + ]", + + + + style=solid] + + + + + + + + + 3 + + + 5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + : + + + + . + + + + + + + . 
+ + + + + + + + + + + %empty + + + + [ + + ] + + + + + + , + + + + + + + + + + + -> " + + R + + + d + + " [ + + + + + + + + " + + R + + + d + + " [label=" + + + Acc", fillcolor=1 + + + R + + ", fillcolor= + + + + , shape=diamond, style=filled] + + + + + + + + + + dotted + + + solid + + + dashed + + + + + + + + + + + + + + + + + [label=" + State + + \n + + + + \l"] + + + + + + + + + + -> + + [style= + + + label=" + + + + " + + ] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/platform/dbops/binaries/build/share/bison/xslt/xml2text.xsl b/platform/dbops/binaries/build/share/bison/xslt/xml2text.xsl new file mode 100644 index 0000000000000000000000000000000000000000..1fc5731a76d5323909a03bad46457fa5c3ffca33 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/xslt/xml2text.xsl @@ -0,0 +1,572 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Nonterminals useless in grammar + + + + + + + + + + + + Terminals unused in grammar + + + + + + + + + + + + + + Rules useless in grammar + + + + + + + + + + + Rules useless in parser due to conflicts + + + + + + + + + Grammar + + + + + + + + + + + + + + + + + + + + + + + + + Terminals, with rules where they appear + + + + + + Nonterminals, with rules where they appear + + + + + + + + + + 0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + on@left: + + + + + + + + + on@right: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + State + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + : + + + + + + + + + + + + + + + + + + + + + + + + + + + %empty + + + + [ + + ] + + + + + + , + + + + + + + + + + + + + shift, and go to state + + + + go to state + + + + + + + + + + + + + + error + ( + + ) + + + + + + + + + 
+ + + [ + + + + accept + + + reduce using rule + + ( + + ) + + + + ] + + + + + + + + + + + + + Conflict between rule + + and token + + resolved as + + an + + + ( + + ). + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/platform/dbops/binaries/build/share/bison/xslt/xml2xhtml.xsl b/platform/dbops/binaries/build/share/bison/xslt/xml2xhtml.xsl new file mode 100644 index 0000000000000000000000000000000000000000..aaa5dbaa21b0870004dac3da39401ce0184302e4 --- /dev/null +++ b/platform/dbops/binaries/build/share/bison/xslt/xml2xhtml.xsl @@ -0,0 +1,765 @@ + + + + + + + + + + + + + + + + <xsl:value-of select="bison-xml-report/filename"/> + <xsl:text> - GNU Bison XML Automaton Report</xsl:text> + + + + + + + + + + + + +

GNU Bison XML Automaton Report

+

+ input grammar: +

+ + +

Table of Contents

+ + + + + + +
+ + +

+ + Reductions +

+ + + +
+ + +

+ + Nonterminals useless in grammar +

+ + +

+ + + + + + +

+
+ + + +

+ + Terminals unused in grammar +

+ + +

+ + + + + + + +

+
+ + + +

+ + Rules useless in grammar +

+ + + +

+ + + + +

+
+ + + + + +

+ + Rules useless in parser due to conflicts +

+ +

+ + + +

+ + + + + +

+ + Grammar +

+ +

+ + + + +

+ + + + + + + + + + + + + + + + + + + + + + + +

+ + Conflicts +

+ + + + + +

+ + +

+
+ + + + + + + + + +
+ + + + + + conflicts: + + + + + + + + + + + + + + +

+ + Terminals, with rules where they appear +

+ +
    + + +
+ +
+ + +

+ + Nonterminals, with rules where they appear +

+ +
    + + +
+ + + + +
  • + + + + + + + + +
  • + +
    + + + +
  • + + + + + + +
      + + + +
    • + on left: + + + +
    • + +
      + + +
    • + on right: + + + +
    • + +
      + +
    + +
  • + +
    + + + +
    + + + + + + + + +

    + + Automaton +

    + + + +
    + + + + +

    + + + + + + State + +

    + +

    + + + + + + + + + + + + +

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + %empty + + + + [ + + ] + + + + + + , + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + error + ( + + ) + + + + + + + + + + + + [ + + + + accept + + + + + + + + + ( + + ) + + + + ] + + + + + + + + + + + + + Conflict between + + + + + + + and token + + resolved as + + an + + + ( + + ). + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 + + + + + + + + + + +
    diff --git a/platform/dbops/binaries/build/share/doc/bison/AUTHORS b/platform/dbops/binaries/build/share/doc/bison/AUTHORS new file mode 100644 index 0000000000000000000000000000000000000000..b3d8d8edb3ebf6e38526234b097ad2235cf73073 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/AUTHORS @@ -0,0 +1,42 @@ +Authors of GNU Bison. + +Bison was written primarily by Robert Corbett. + +Richard Stallman made it Yacc-compatible. + +Wilfred Hansen of Carnegie Mellon University added multicharacter +string literals and other features (Bison 1.25, 1995). + +Akim Demaille rewrote the parser in Bison, and changed the back end to +use M4 (1.50, 2002). + +Paul Hilfinger added GLR support (Bison 1.50, 2002). + +Joel E. Denny contributed canonical-LR support, and invented and added +IELR and LAC (Lookahead Correction) support (Bison 2.5, 2011). + +Paolo Bonzini contributed Java support (Bison 2.4, 2008). + +Alex Rozenman added named reference support (Bison 2.5, 2011). + +Paul Eggert fixed a million portability issues, arbitrary limitations, +and nasty bugs. + +----- + +Copyright (C) 1998-2015, 2018-2021 Free Software Foundation, Inc. + +This file is part of Bison, the GNU Compiler Compiler. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . 
diff --git a/platform/dbops/binaries/build/share/doc/bison/COPYING b/platform/dbops/binaries/build/share/doc/bison/COPYING new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/platform/dbops/binaries/build/share/doc/bison/NEWS b/platform/dbops/binaries/build/share/doc/bison/NEWS new file mode 100644 index 0000000000000000000000000000000000000000..3385e40a8a35714b0d8292bce6b659b300ff7d7c --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/NEWS @@ -0,0 +1,4741 @@ +GNU Bison NEWS + +* Noteworthy changes in release 3.8.2 (2021-09-25) [stable] + + Fixed portability issues of bison on Cygwin. + + Improvements in glr2.cc: add support for custom error messages (`%define + parse.error custom`), allow linking several parsers together. + +* Noteworthy changes in release 3.8.1 (2021-09-11) [stable] + + The generation of prototypes for yylex and yyerror in Yacc mode is + breaking existing grammar files. To avoid breaking too many grammars, the + prototypes are now generated when `-y/--yacc` is used *and* the + `POSIXLY_CORRECT` environment variable is defined. + + Avoid using `-y`/`--yacc` simply to comply with Yacc's file name + conventions, rather, use `-o y.tab.c`. Autoconf's AC_PROG_YACC macro uses + `-y`. Avoid it if possible, for instance by using gnulib's gl_PROG_BISON. + +* Noteworthy changes in release 3.8 (2021-09-07) [stable] + +** Backward incompatible changes + + In conformance with the recommendations of the Graphviz team + (https://marc.info/?l=graphviz-devel&m=129418103126092), `-g`/`--graph` + now generates a *.gv file by default, instead of *.dot. A transition + started in Bison 3.4. + + To comply with the latest POSIX standard, in Yacc compatibility mode + (options `-y`/`--yacc`) Bison now generates prototypes for yyerror and + yylex. In some situations, this is breaking compatibility: if the user + has already declared these functions but with some differences (e.g., to + declare them as static, or to use specific attributes), the generated + parser will fail to compile. To disable these prototypes, #define yyerror + (to `yyerror`), and likewise for yylex. 
+ +** Deprecated features + + Support for the YYPRINT macro is removed. It worked only with yacc.c and + only for tokens. It was obsoleted by %printer, introduced in Bison 1.50 + (November 2002). + + It has always been recommended to prefer `%define api.value.type foo` to + `#define YYSTYPE foo`. The latter is supported in C for compatibility + with Yacc, but not in C++. Warnings are now issued if `#define YYSTYPE` + is used in C++, and eventually support will be removed. + + In C++ code, prefer value_type to semantic_type to denote the semantic + value type, which is specified by the `api.value.type` %define variable. + +** New features + +*** A skeleton for the D programming language + + The "lalr1.d" skeleton is now officially part of Bison. + + It was originally contributed by Oliver Mangold, based on Paolo Bonzini's + lalr1.java, and was improved by H. S. Teoh. Adela Vais then took over + maintenance and invested a lot of efforts to complete, test and document + it. + + It now supports all the bells and whistles of the other deterministic + parsers, which include: pull/push interfaces, verbose and custom error + messages, lookahead correction, token constructors, internationalization, + locations, printers, token and symbol prefixes, etc. + + Two examples demonstrate the D parsers: a basic one (examples/d/simple), + and an advanced one (examples/d/calc). + +*** Option -H, --header and directive %header + + The option `-H`/`--header` supersedes the option `--defines`, and the + directive %header supersedes %defines. Both `--defines` and `%defines` + are, of course, maintained for backward compatibility. + +*** Option --html + + Since version 2.4 Bison can be used to generate HTML reports. However it + was a two-step process: first bison must be invoked with option `--xml`, + and then xsltproc must be run to the convert the XML reports into HTML. + + The new option `--html` combines these steps. The xsltproc program must + be available. 
+ +*** A C++ native GLR parser + + A new version of the C++ GLR parser was added: "glr2.cc". It generates + "true C++11", instead of a C++ wrapper around a C parser as does the + existing "glr.cc" parser. As a first significant consequence, it supports + `%define api.value.type variant`, contrary to glr.cc. + + It should be upward compatible in terms of interface, feature and + performance to "glr.cc". To try it out, simply use + + %skeleton "glr2.cc" + + It will eventually replace "glr.cc". However we need user feedback on + this skeleton. _Please_ report your results and comments about it. + +*** Counterexamples + + Counterexamples now show the rule numbers, and always show ε for rules + with an empty right-hand side. For instance + + exp + ↳ 1: e1 e2 "a" + ↳ 3: ε • ↳ 1: ε + + instead of + + exp + ↳ e1 e2 "a" + ↳ • ↳ ε + +*** Lookahead correction in Java + + The Java skeleton (lalr1.java) now supports LAC, via the `parse.lac` + %define variable. + +*** Abort parsing for memory exhaustion (C) + + User actions may now use `YYNOMEM` (similar to `YYACCEPT` and `YYABORT`) + to abort the current parse with memory exhaustion. + +*** Printing locations in debug traces (C) + + The `YYLOCATION_PRINT(File, Loc)` macro prints a location. It is defined + when (i) locations are enabled, (ii) the default type for locations is + used, (iii) debug traces are enabled, and (iv) `YYLOCATION_PRINT` is not + already defined. + + Users may define `YYLOCATION_PRINT` to cover other cases. + +*** GLR traces + + There were no debug traces for deferred calls to user actions. They are + logged now. + + +* Noteworthy changes in release 3.7.6 (2021-03-08) [stable] + +** Bug fixes + +*** Reused Push Parsers + + When a push-parser state structure is used for multiple parses, it was + possible for some state to leak from one run into the following one. + +*** Fix Table Generation + + In some very rare conditions, when there are many useless tokens, it was + possible to generate incorrect parsers. 
+ + +* Noteworthy changes in release 3.7.5 (2021-01-24) [stable] + +** Bug fixes + +*** Counterexample Generation + + In some cases counterexample generation could crash. This is fixed. + +*** Fix Table Generation + + In some very rare conditions, when there are many useless tokens, it was + possible to generate incorrect parsers. + +*** GLR parsers now support %merge together with api.value.type=union. + +*** C++ parsers use noexcept in more places. + +*** Generated parsers avoid some warnings about signedness issues. + +*** C-language parsers now avoid warnings from pedantic clang. + +*** C-language parsers now work around quirks of HP-UX 11.23 (2003). + + +* Noteworthy changes in release 3.7.4 (2020-11-14) [stable] + +** Bug fixes + +*** Bug fixes in yacc.c + + In Yacc mode, all the tokens are defined twice: once as an enum, and then + as a macro. YYEMPTY was missing its macro. + +*** Bug fixes in lalr1.cc + + The lalr1.cc skeleton used to emit internal assertions (using YY_ASSERT) + even when the `parse.assert` %define variable is not enabled. It no + longer does. + + The private internal macro YY_ASSERT now obeys the `api.prefix` %define + variable. + + When there is a very large number of tokens, some assertions could be long + enough to hit arbitrary limits in Visual C++. They have been rewritten to + work around this limitation. + +** Changes + + The YYBISON macro in generated "regular C parsers" (from the "yacc.c" + skeleton) used to be defined to 1. It is now defined to the version of + Bison as an integer (e.g., 30704 for version 3.7.4). + + +* Noteworthy changes in release 3.7.3 (2020-10-13) [stable] + +** Bug fixes + + Fix concurrent build issues. + + The bison executable is no longer linked uselessly against libreadline. + + Fix incorrect use of yytname in glr.cc. 
+ + +* Noteworthy changes in release 3.7.2 (2020-09-05) [stable] + + This release of Bison fixes all known bugs reported for Bison in MITRE's + Common Vulnerabilities and Exposures (CVE) system. These vulnerabilities + are only about bison-the-program itself, not the generated code. + + Although these bugs are typically irrelevant to how Bison is used, they + are worth fixing if only to give users peace of mind. + + There is no known vulnerability in the generated parsers. + +** Bug fixes + + Fix concurrent build issues (introduced in Bison 3.5). + + Push parsers always use YYMALLOC/YYFREE (no direct calls to malloc/free). + + Fix portability issues of the test suite, and of bison itself. + + Some unlikely crashes found by fuzzing have been fixed. This is only + about bison itself, not the generated parsers. + + +* Noteworthy changes in release 3.7.1 (2020-08-02) [stable] + +** Bug fixes + + Crash when a token alias contains a NUL byte. + + Portability issues with libtextstyle. + + Portability issues of Bison itself with MSVC. + +** Changes + + Improvements and fixes in the documentation. + + More precise location about symbol type redefinitions. + + +* Noteworthy changes in release 3.7 (2020-07-23) [stable] + +** Deprecated features + + The YYPRINT macro, which works only with yacc.c and only for tokens, was + obsoleted long ago by %printer, introduced in Bison 1.50 (November 2002). + It is deprecated and its support will be removed eventually. + + In conformance with the recommendations of the Graphviz team, in the next + version Bison the option `--graph` will generate a *.gv file by default, + instead of *.dot. A transition started in Bison 3.4. + +** New features + +*** Counterexample Generation + + Contributed by Vincent Imbimbo. + + When given `-Wcounterexamples`/`-Wcex`, bison will now output + counterexamples for conflicts. + +**** Unifying Counterexamples + + Unifying counterexamples are strings which can be parsed in two ways due + to the conflict. 
For example on a grammar that contains the usual + "dangling else" ambiguity: + + $ bison else.y + else.y: warning: 1 shift/reduce conflict [-Wconflicts-sr] + else.y: note: rerun with option '-Wcounterexamples' to generate conflict counterexamples + + $ bison else.y -Wcex + else.y: warning: 1 shift/reduce conflict [-Wconflicts-sr] + else.y: warning: shift/reduce conflict on token "else" [-Wcounterexamples] + Example: "if" exp "then" "if" exp "then" exp • "else" exp + Shift derivation + exp + ↳ "if" exp "then" exp + ↳ "if" exp "then" exp • "else" exp + Example: "if" exp "then" "if" exp "then" exp • "else" exp + Reduce derivation + exp + ↳ "if" exp "then" exp "else" exp + ↳ "if" exp "then" exp • + + When text styling is enabled, colors are used in the examples and the + derivations to highlight the structure of both analyses. In this case, + + "if" exp "then" [ "if" exp "then" exp • ] "else" exp + + vs. + + "if" exp "then" [ "if" exp "then" exp • "else" exp ] + + + The counterexamples are "focused", in two different ways. First, they do + not clutter the output with all the derivations from the start symbol, + rather they start on the "conflicted nonterminal". They go straight to the + point. Second, they don't "expand" nonterminal symbols uselessly. + +**** Nonunifying Counterexamples + + In the case of the dangling else, Bison found an example that can be + parsed in two ways (therefore proving that the grammar is ambiguous). 
+ When it cannot find such an example, it instead generates two examples + that are the same up until the dot: + + $ bison foo.y + foo.y: warning: 1 shift/reduce conflict [-Wconflicts-sr] + foo.y: note: rerun with option '-Wcounterexamples' to generate conflict counterexamples + foo.y:4.4-7: warning: rule useless in parser due to conflicts [-Wother] + 4 | a: expr + | ^~~~ + + $ bison -Wcex foo.y + foo.y: warning: 1 shift/reduce conflict [-Wconflicts-sr] + foo.y: warning: shift/reduce conflict on token ID [-Wcounterexamples] + First example: expr • ID ',' ID $end + Shift derivation + $accept + ↳ s $end + ↳ a ID + ↳ expr + ↳ expr • ID ',' + Second example: expr • ID $end + Reduce derivation + $accept + ↳ s $end + ↳ a ID + ↳ expr • + foo.y:4.4-7: warning: rule useless in parser due to conflicts [-Wother] + 4 | a: expr + | ^~~~ + + In these cases, the parser usually doesn't have enough lookahead to + differentiate the two given examples. + +**** Reports + + Counterexamples are also included in the report when given + `--report=counterexamples`/`-rcex` (or `--report=all`), with more + technical details: + + State 7 + + 1 exp: "if" exp "then" exp • [$end, "then", "else"] + 2 | "if" exp "then" exp • "else" exp + + "else" shift, and go to state 8 + + "else" [reduce using rule 1 (exp)] + $default reduce using rule 1 (exp) + + shift/reduce conflict on token "else": + 1 exp: "if" exp "then" exp • + 2 exp: "if" exp "then" exp • "else" exp + Example: "if" exp "then" "if" exp "then" exp • "else" exp + Shift derivation + exp + ↳ "if" exp "then" exp + ↳ "if" exp "then" exp • "else" exp + Example: "if" exp "then" "if" exp "then" exp • "else" exp + Reduce derivation + exp + ↳ "if" exp "then" exp "else" exp + ↳ "if" exp "then" exp • + +*** File prefix mapping + + Contributed by Joshua Watt. + + Bison learned a new argument, `--file-prefix-map OLD=NEW`. 
Any file path + in the output (specifically `#line` directives and `#ifdef` header guards) + that begins with the prefix OLD will have it replaced with the prefix NEW, + similar to the `-ffile-prefix-map` in GCC. This option can be used to + make bison output reproducible. + +** Changes + +*** Diagnostics + + When text styling is enabled and the terminal supports it, the warnings + now include hyperlinks to the documentation. + +*** Relocatable installation + + When installed to be relocatable (via `configure --enable-relocatable`), + bison will now also look for a relocated m4. + +*** C++ file names + + The `filename_type` %define variable was renamed `api.filename.type`. + Instead of + + %define filename_type "symbol" + + write + + %define api.filename.type {symbol} + + (Or let `bison --update` do it for you). + + It now defaults to `const std::string` instead of `std::string`. + +*** Deprecated %define variable names + + The following variables have been renamed for consistency. Backward + compatibility is ensured, but upgrading is recommended. + + filename_type -> api.filename.type + package -> api.package + +*** Push parsers no longer clear their state when parsing is finished + + Previously push-parsers cleared their state when parsing was finished (on + success and on failure). This made it impossible to check if there were + parse errors, since `yynerrs` was also reset. This can be especially + troublesome when used in autocompletion, since a parser with error + recovery would suggest (irrelevant) expected tokens even if there were + failures. + + Now the parser state can be examined when parsing is finished. The parser + state is reset when starting a new parse. 
+ +** Documentation + +*** Examples + + The bistromathic demonstrates %param and how to quote sources in the error + messages: + + > 123 456 + 1.5-7: syntax error: expected end of file or + or - or * or / or ^ before number + 1 | 123 456 + | ^~~ + +** Bug fixes + +*** Include the generated header (yacc.c) + + Historically, when --defines was used, bison generated a header and pasted + an exact copy of it into the generated parser implementation file. Since + Bison 3.4 it is possible to specify that the header should be `#include`d, + and how. For instance + + %define api.header.include {"parse.h"} + + or + + %define api.header.include {} + + Now api.header.include defaults to `"header-basename"`, as was intended in + Bison 3.4, where `header-basename` is the basename of the generated + header. This is disabled when the generated header is `y.tab.h`, to + comply with Automake's ylwrap. + +*** String aliases are faithfully propagated + + Bison used to interpret user strings (i.e., decoding backslash escapes) + when reading them, and to escape them (i.e., issue non-printable + characters as backslash escapes, taking the locale into account) when + outputting them. As a consequence non-ASCII strings (say in UTF-8) ended + up "ciphered" as sequences of backslash escapes. This happened not only + in the generated sources (where the compiler will reinterpret them), but + also in all the generated reports (text, xml, html, dot, etc.). Reports + were therefore not readable when string aliases were not pure ASCII. + Worse yet: the output depended on the user's locale. + + Now Bison faithfully treats the string aliases exactly the way the user + spelled them. This fixes all the aforementioned problems. However, now, + string aliases semantically equivalent but syntactically different (e.g., + "A", "\x41", "\101") are considered to be different. + +*** Crash when generating IELR + + An old, well hidden, bug in the generation of IELR parsers was fixed. 
+ + +* Noteworthy changes in release 3.6.4 (2020-06-15) [stable] + +** Bug fixes + + In glr.cc some internal macros leaked in the user's code, and could damage + access to the token kinds. + + +* Noteworthy changes in release 3.6.3 (2020-06-03) [stable] + +** Bug fixes + + Incorrect comments in the generated parsers. + + Warnings in push parsers (yacc.c). + + Incorrect display of gotos in LAC traces (lalr1.cc). + + +* Noteworthy changes in release 3.6.2 (2020-05-17) [stable] + +** Bug fixes + + Some tests were fixed. + + When token aliases contain comment delimiters: + + %token FOO "/* foo */" + + bison used to emit "nested" comments, which is invalid C. + + +* Noteworthy changes in release 3.6.1 (2020-05-10) [stable] + +** Bug fixes + + Restored ANSI-C compliance in yacc.c. + + GNU readline portability issues. + + In C++, yy::parser::symbol_name is now a public member, as was intended. + +** New features + + In C++, yy::parser::symbol_type now has a public name() member function. + + +* Noteworthy changes in release 3.6 (2020-05-08) [stable] + +** Backward incompatible changes + + TL;DR: replace "#define YYERROR_VERBOSE 1" by "%define parse.error verbose". + + The YYERROR_VERBOSE macro is no longer supported; the parsers that still + depend on it will now produce Yacc-like error messages (just "syntax + error"). It was superseded by the "%error-verbose" directive in Bison + 1.875 (2003-01-01). Bison 2.6 (2012-07-19) clearly announced that support + for YYERROR_VERBOSE would be removed. Note that since Bison 3.0 + (2013-07-25), "%error-verbose" is deprecated in favor of "%define + parse.error verbose". + +** Deprecated features + + The YYPRINT macro, which works only with yacc.c and only for tokens, was + obsoleted long ago by %printer, introduced in Bison 1.50 (November 2002). + It is deprecated and its support will be removed eventually. 
+ +** New features + +*** Improved syntax error messages + + Two new values for the %define parse.error variable offer more control to + the user. Available in all the skeletons (C, C++, Java). + +**** %define parse.error detailed + + The behavior of "%define parse.error detailed" is closely resembling that + of "%define parse.error verbose" with a few exceptions. First, it is safe + to use non-ASCII characters in token aliases (with 'verbose', the result + depends on the locale with which bison was run). Second, a yysymbol_name + function is exposed to the user, instead of the yytnamerr function and the + yytname table. Third, token internationalization is supported (see + below). + +**** %define parse.error custom + + With this directive, the user forges and emits the syntax error message + herself by defining the yyreport_syntax_error function. A new type, + yypcontext_t, captures the circumstances of the error, and provides the + user with functions to get details, such as yypcontext_expected_tokens to + get the list of expected token kinds. + + A possible implementation of yyreport_syntax_error is: + + int + yyreport_syntax_error (const yypcontext_t *ctx) + { + int res = 0; + YY_LOCATION_PRINT (stderr, *yypcontext_location (ctx)); + fprintf (stderr, ": syntax error"); + // Report the tokens expected at this point. + { + enum { TOKENMAX = 10 }; + yysymbol_kind_t expected[TOKENMAX]; + int n = yypcontext_expected_tokens (ctx, expected, TOKENMAX); + if (n < 0) + // Forward errors to yyparse. + res = n; + else + for (int i = 0; i < n; ++i) + fprintf (stderr, "%s %s", + i == 0 ? ": expected" : " or", yysymbol_name (expected[i])); + } + // Report the unexpected token. 
+ { + yysymbol_kind_t lookahead = yypcontext_token (ctx); + if (lookahead != YYSYMBOL_YYEMPTY) + fprintf (stderr, " before %s", yysymbol_name (lookahead)); + } + fprintf (stderr, "\n"); + return res; + } + +**** Token aliases internationalization + + When the %define variable parse.error is set to `custom` or `detailed`, + one may specify which token aliases are to be translated using _(). For + instance + + %token + PLUS "+" + MINUS "-" + + NUM _("number") + + FUN _("function") + VAR _("variable") + + In that case the user must define _() and N_(), and yysymbol_name returns + the translated symbol (i.e., it returns '_("variable")' rather that + '"variable"'). In Java, the user must provide an i18n() function. + +*** List of expected tokens (yacc.c) + + Push parsers may invoke yypstate_expected_tokens at any point during + parsing (including even before submitting the first token) to get the list + of possible tokens. This feature can be used to propose autocompletion + (see below the "bistromathic" example). + + It makes little sense to use this feature without enabling LAC (lookahead + correction). + +*** Returning the error token + + When the scanner returns an invalid token or the undefined token + (YYUNDEF), the parser generates an error message and enters error + recovery. Because of that error message, most scanners that find lexical + errors generate an error message, and then ignore the invalid input + without entering the error-recovery. + + The scanners may now return YYerror, the error token, to enter the + error-recovery mode without triggering an additional error message. See + the bistromathic for an example. + +*** Deep overhaul of the symbol and token kinds + + To avoid the confusion with types in programming languages, we now refer + to token and symbol "kinds" instead of token and symbol "types". The + documentation and error messages have been revised. 
+ + All the skeletons have been updated to use dedicated enum types rather + than integral types. Special symbols are now regular citizens, instead of + being declared in ad hoc ways. + +**** Token kinds + + The "token kind" is what is returned by the scanner, e.g., PLUS, NUMBER, + LPAREN, etc. While backward compatibility is of course ensured, users are + nonetheless invited to replace their uses of "enum yytokentype" by + "yytoken_kind_t". + + This type now also includes tokens that were previously hidden: YYEOF (end + of input), YYUNDEF (undefined token), and YYerror (error token). They + now have string aliases, internationalized when internationalization is + enabled. Therefore, by default, error messages now refer to "end of file" + (internationalized) rather than the cryptic "$end", or to "invalid token" + rather than "$undefined". + + Therefore in most cases it is now useless to define the end-of-line token + as follows: + + %token T_EOF 0 "end of file" + + Rather simply use "YYEOF" in your scanner. + +**** Symbol kinds + + The "symbol kinds" is what the parser actually uses. (Unless the + api.token.raw %define variable is used, the symbol kind of a terminal + differs from the corresponding token kind.) + + They are now exposed as a enum, "yysymbol_kind_t". + + This allows users to tailor the error messages the way they want, or to + process some symbols in a specific way in autocompletion (see the + bistromathic example below). + +*** Modernize display of explanatory statements in diagnostics + + Since Bison 2.7, output was indented four spaces for explanatory + statements. For example: + + input.y:2.7-13: error: %type redeclaration for exp + input.y:1.7-11: previous declaration + + Since the introduction of caret-diagnostics, it became less clear. 
This + indentation has been removed and submessages are displayed similarly as in + GCC: + + input.y:2.7-13: error: %type redeclaration for exp + 2 | %type exp + | ^~~~~~~ + input.y:1.7-11: note: previous declaration + 1 | %type exp + | ^~~~~ + + Contributed by Victor Morales Cayuela. + +*** C++ + + The token and symbol kinds are yy::parser::token_kind_type and + yy::parser::symbol_kind_type. + + The symbol_type::kind() member function allows to get the kind of a + symbol. This can be used to write unit tests for scanners, e.g., + + yy::parser::symbol_type t = make_NUMBER ("123"); + assert (t.kind () == yy::parser::symbol_kind::S_NUMBER); + assert (t.value.as () == 123); + +** Documentation + +*** User Manual + + In order to avoid ambiguities with "type" as in "typing", we now refer to + the "token kind" (e.g., `PLUS`, `NUMBER`, etc.) rather than the "token + type". We now also refer to the "symbol type" (e.g., `PLUS`, `expr`, + etc.). + +*** Examples + + There are now examples/java: a very simple calculator, and a more complete + one (push-parser, location tracking, and debug traces). + + The lexcalc example (a simple example in C based on Flex and Bison) now + also demonstrates location tracking. + + + A new C example, bistromathic, is a fully featured interactive calculator + using many Bison features: pure interface, push parser, autocompletion + based on the current parser state (using yypstate_expected_tokens), + location tracking, internationalized custom error messages, lookahead + correction, rich debug traces, etc. + + It shows how to depend on the symbol kinds to tailor autocompletion. For + instance it recognizes the symbol kind "VARIABLE" to propose + autocompletion on the existing variables, rather than of the word + "variable". + + +* Noteworthy changes in release 3.5.4 (2020-04-05) [stable] + +** WARNING: Future backward-incompatibilities! + + TL;DR: replace "#define YYERROR_VERBOSE 1" by "%define parse.error verbose". 
+ + Bison 3.6 will no longer support the YYERROR_VERBOSE macro; the parsers + that still depend on it will produce Yacc-like error messages (just + "syntax error"). It was superseded by the "%error-verbose" directive in + Bison 1.875 (2003-01-01). Bison 2.6 (2012-07-19) clearly announced that + support for YYERROR_VERBOSE would be removed. Note that since Bison 3.0 + (2013-07-25), "%error-verbose" is deprecated in favor of "%define + parse.error verbose". + +** Bug fixes + + Fix portability issues of the package itself on old compilers. + + Fix api.token.raw support in Java. + + +* Noteworthy changes in release 3.5.3 (2020-03-08) [stable] + +** Bug fixes + + Error messages could quote lines containing zero-width characters (such as + \005) with incorrect styling. Fixes for similar issues with unexpectedly + short lines (e.g., the file was changed between parsing and diagnosing). + + Some unlikely crashes found by fuzzing have been fixed. This is only + about bison itself, not the generated parsers. + + +* Noteworthy changes in release 3.5.2 (2020-02-13) [stable] + +** Bug fixes + + Portability issues and minor cosmetic issues. + + The lalr1.cc skeleton properly rejects unsupported values for parse.lac + (as yacc.c does). + + +* Noteworthy changes in release 3.5.1 (2020-01-19) [stable] + +** Bug fixes + + Portability fixes. + + Fix compiler warnings. + + +* Noteworthy changes in release 3.5 (2019-12-11) [stable] + +** Backward incompatible changes + + Lone carriage-return characters (aka \r or ^M) in the grammar files are no + longer treated as end-of-lines. This changes the diagnostics, and in + particular their locations. + + In C++, line numbers and columns are now represented as 'int' not + 'unsigned', so that integer overflow on positions is easily checkable via + 'gcc -fsanitize=undefined' and the like. This affects the API for + positions. The default position and location classes now expose + 'counter_type' (int), used to define line and column numbers. 
+
+** Deprecated features
+
+  The YYPRINT macro, which works only with yacc.c and only for tokens, was
+  obsoleted long ago by %printer, introduced in Bison 1.50 (November 2002).
+  It is deprecated and its support will be removed eventually.
+
+** New features
+
+*** Lookahead correction in C++
+
+  Contributed by Adrian Vogelsgesang.
+
+  The C++ deterministic skeleton (lalr1.cc) now supports LAC, via the
+  %define variable parse.lac.
+
+*** Variable api.token.raw: Optimized token numbers (all skeletons)
+
+  In the generated parsers, tokens have two numbers: the "external" token
+  number as returned by yylex (which starts at 257), and the "internal"
+  symbol number (which starts at 3). Each time yylex is called, a table
+  lookup maps the external token number to the internal symbol number.
+
+  When the %define variable api.token.raw is set, tokens are assigned their
+  internal number, which saves one table lookup per token, and also saves
+  the generation of the mapping table.
+
+  The gain is typically moderate, but in extreme cases (very simple user
+  actions), a 10% improvement can be observed.
+
+*** Generated parsers use better types for states
+
+  Stacks now use the best integral type for state numbers, instead of always
+  using 15 bits. As a result "small" parsers now have a smaller memory
+  footprint (they use 8 bits), and there is support for large automata (16
+  bits), and extra large (using int, i.e., typically 31 bits).
+
+*** Generated parsers prefer signed integer types
+
+  Bison skeletons now prefer signed to unsigned integer types when either
+  will do, as the signed types are less error-prone and allow for better
+  checking with 'gcc -fsanitize=undefined'. Also, the types chosen are now
+  portable to unusual machines where char, short and int are all the same
+  width. On non-GNU platforms this may entail including <limits.h> and (if
+  available) <stdint.h> to define integer types and constants.
+ +*** A skeleton for the D programming language + + For the last few releases, Bison has shipped a stealth experimental + skeleton: lalr1.d. It was first contributed by Oliver Mangold, based on + Paolo Bonzini's lalr1.java, and was cleaned and improved thanks to + H. S. Teoh. + + However, because nobody has committed to improving, testing, and + documenting this skeleton, it is not clear that it will be supported in + the future. + + The lalr1.d skeleton *is functional*, and works well, as demonstrated in + examples/d/calc.d. Please try it, enjoy it, and... commit to support it. + +*** Debug traces in Java + + The Java backend no longer emits code and data for parser tracing if the + %define variable parse.trace is not defined. + +** Diagnostics + +*** New diagnostic: -Wdangling-alias + + String literals, which allow for better error messages, are (too) + liberally accepted by Bison, which might result in silent errors. For + instance + + %type cond "condition" + + does not define "condition" as a string alias to 'cond' (nonterminal + symbols do not have string aliases). It is rather equivalent to + + %nterm cond + %token "condition" + + i.e., it gives the type 'exVal' to the "condition" token, which was + clearly not the intention. + + Also, because string aliases need not be defined, typos such as "baz" + instead of "bar" will be not reported. + + The option `-Wdangling-alias` catches these situations. On + + %token BAR "bar" + %type foo "foo" + %% + foo: "baz" {} + + bison -Wdangling-alias reports + + warning: string literal not attached to a symbol + | %type foo "foo" + | ^~~~~ + warning: string literal not attached to a symbol + | foo: "baz" {} + | ^~~~~ + + The `-Wall` option does not (yet?) include `-Wdangling-alias`. + +*** Better POSIX Yacc compatibility diagnostics + + POSIX Yacc restricts %type to nonterminals. This is now diagnosed by + -Wyacc. 
+ + %token TOKEN1 + %type TOKEN1 TOKEN2 't' + %token TOKEN2 + %% + expr: + + gives with -Wyacc + + input.y:2.15-20: warning: POSIX yacc reserves %type to nonterminals [-Wyacc] + 2 | %type TOKEN1 TOKEN2 't' + | ^~~~~~ + input.y:2.29-31: warning: POSIX yacc reserves %type to nonterminals [-Wyacc] + 2 | %type TOKEN1 TOKEN2 't' + | ^~~ + input.y:2.22-27: warning: POSIX yacc reserves %type to nonterminals [-Wyacc] + 2 | %type TOKEN1 TOKEN2 't' + | ^~~~~~ + +*** Diagnostics with insertion + + The diagnostics now display the suggestion below the underlined source. + Replacement for undeclared symbols are now also suggested. + + $ cat /tmp/foo.y + %% + list: lis '.' | + + $ bison -Wall foo.y + foo.y:2.7-9: error: symbol 'lis' is used, but is not defined as a token and has no rules; did you mean 'list'? + 2 | list: lis '.' | + | ^~~ + | list + foo.y:2.16: warning: empty rule without %empty [-Wempty-rule] + 2 | list: lis '.' | + | ^ + | %empty + foo.y: warning: fix-its can be applied. Rerun with option '--update'. [-Wother] + +*** Diagnostics about long lines + + Quoted sources may now be truncated to fit the screen. For instance, on a + 30-column wide terminal: + + $ cat foo.y + %token FOO FOO FOO + %% + exp: FOO + $ bison foo.y + foo.y:1.34-36: warning: symbol FOO redeclared [-Wother] + 1 | … FOO … + | ^~~ + foo.y:1.8-10: previous declaration + 1 | %token FOO … + | ^~~ + foo.y:1.62-64: warning: symbol FOO redeclared [-Wother] + 1 | … FOO + | ^~~ + foo.y:1.8-10: previous declaration + 1 | %token FOO … + | ^~~ + +** Changes + +*** Debugging glr.c and glr.cc + + The glr.c skeleton always had asserts to check its own behavior (not the + user's). These assertions are now under the control of the parse.assert + %define variable (disabled by default). + +*** Clean up + + Several new compiler warnings in the generated output have been avoided. + Some unused features are no longer emitted. Cleaner generated code in + general. 
+ +** Bug Fixes + + Portability issues in the test suite. + + In theory, parsers using %nonassoc could crash when reporting verbose + error messages. This unlikely bug has been fixed. + + In Java, %define api.prefix was ignored. It now behaves as expected. + + +* Noteworthy changes in release 3.4.2 (2019-09-12) [stable] + +** Bug fixes + + In some cases, when warnings are disabled, bison could emit tons of white + spaces as diagnostics. + + When running out of memory, bison could crash (found by fuzzing). + + When defining twice the EOF token, bison would crash. + + New warnings from recent compilers have been addressed in the generated + parsers (yacc.c, glr.c, glr.cc). + + When lone carriage-return characters appeared in the input file, + diagnostics could hang forever. + + +* Noteworthy changes in release 3.4.1 (2019-05-22) [stable] + +** Bug fixes + + Portability fixes. + + +* Noteworthy changes in release 3.4 (2019-05-19) [stable] + +** Deprecated features + + The %pure-parser directive is deprecated in favor of '%define api.pure' + since Bison 2.3b (2008-05-27), but no warning was issued; there is one + now. Note that since Bison 2.7 you are strongly encouraged to use + '%define api.pure full' instead of '%define api.pure'. + +** New features + +*** Colored diagnostics + + As an experimental feature, diagnostics are now colored, controlled by the + new options --color and --style. + + To use them, install the libtextstyle library before configuring Bison. + It is available from + + https://alpha.gnu.org/gnu/gettext/ + + for instance + + https://alpha.gnu.org/gnu/gettext/libtextstyle-0.8.tar.gz + + The option --color supports the following arguments: + - always, yes: Enable colors. + - never, no: Disable colors. + - auto, tty (default): Enable colors if the output device is a tty. 
+ + To customize the styles, create a CSS file similar to + + /* bison-bw.css */ + .warning { } + .error { font-weight: 800; text-decoration: underline; } + .note { } + + then invoke bison with --style=bison-bw.css, or set the BISON_STYLE + environment variable to "bison-bw.css". + +*** Disabling output + + When given -fsyntax-only, the diagnostics are reported, but no output is + generated. + + The name of this option is somewhat misleading as bison does more than + just checking the syntax: every stage is run (including checking for + conflicts for instance), except the generation of the output files. + +*** Include the generated header (yacc.c) + + Before, when --defines is used, bison generated a header, and pasted an + exact copy of it into the generated parser implementation file. If the + header name is not "y.tab.h", it is now #included instead of being + duplicated. + + To use an '#include' even if the header name is "y.tab.h" (which is what + happens with --yacc, or when using the Autotools' ylwrap), define + api.header.include to the exact argument to pass to #include. For + instance: + + %define api.header.include {"parse.h"} + + or + + %define api.header.include {} + +*** api.location.type is now supported in C (yacc.c, glr.c) + + The %define variable api.location.type defines the name of the type to use + for locations. When defined, Bison no longer defines YYLTYPE. + + This can be used in programs with several parsers to factor their + definition of locations: let one of them generate them, and the others + just use them. + +** Changes + +*** Graphviz output + + In conformance with the recommendations of the Graphviz team, if %require + "3.4" (or better) is specified, the option --graph generates a *.gv file + by default, instead of *.dot. + +*** Diagnostics overhaul + + Column numbers were wrong with multibyte characters, which would also + result in skewed diagnostics with carets. 
Beside, because we were + indenting the quoted source with a single space, lines with tab characters + were incorrectly underlined. + + To address these issues, and to be clearer, Bison now issues diagnostics + as GCC9 does. For instance it used to display (there's a tab before the + opening brace): + + foo.y:3.37-38: error: $2 of ‘expr’ has no declared type + expr: expr '+' "number" { $$ = $1 + $2; } + ^~ + It now reports + + foo.y:3.37-38: error: $2 of ‘expr’ has no declared type + 3 | expr: expr '+' "number" { $$ = $1 + $2; } + | ^~ + + Other constructs now also have better locations, resulting in more precise + diagnostics. + +*** Fix-it hints for %empty + + Running Bison with -Wempty-rules and --update will remove incorrect %empty + annotations, and add the missing ones. + +*** Generated reports + + The format of the reports (parse.output) was improved for readability. + +*** Better support for --no-line. + + When --no-line is used, the generated files are now cleaner: no lines are + generated instead of empty lines. Together with using api.header.include, + that should help people saving the generated files into version control + systems get smaller diffs. + +** Documentation + + A new example in C shows an simple infix calculator with a hand-written + scanner (examples/c/calc). + + A new example in C shows a reentrant parser (capable of recursive calls) + built with Flex and Bison (examples/c/reccalc). + + There is a new section about the history of Yaccs and Bison. + +** Bug fixes + + A few obscure bugs were fixed, including the second oldest (known) bug in + Bison: it was there when Bison was entered in the RCS version control + system, in December 1987. See the NEWS of Bison 3.3 for the previous + oldest bug. + + +* Noteworthy changes in release 3.3.2 (2019-02-03) [stable] + +** Bug fixes + + Bison 3.3 failed to generate parsers for grammars with unused nonterminal + symbols. 
+ + +* Noteworthy changes in release 3.3.1 (2019-01-27) [stable] + +** Changes + + The option -y/--yacc used to imply -Werror=yacc, which turns uses of Bison + extensions into errors. It now makes them simple warnings (-Wyacc). + + +* Noteworthy changes in release 3.3 (2019-01-26) [stable] + + A new mailing list was created, Bison Announce. It is low traffic, and is + only about announcing new releases and important messages (e.g., polls + about major decisions to make). + + https://lists.gnu.org/mailman/listinfo/bison-announce + +** Backward incompatible changes + + Support for DJGPP, which has been unmaintained and untested for years, is + removed. + +** Deprecated features + + A new feature, --update (see below) helps adjusting existing grammars to + deprecations. + +*** Deprecated directives + + The %error-verbose directive is deprecated in favor of '%define + parse.error verbose' since Bison 3.0, but no warning was issued. + + The '%name-prefix "xx"' directive is deprecated in favor of '%define + api.prefix {xx}' since Bison 3.0, but no warning was issued. These + directives are slightly different, you might need to adjust your code. + %name-prefix renames only symbols with external linkage, while api.prefix + also renames types and macros, including YYDEBUG, YYTOKENTYPE, + yytokentype, YYSTYPE, YYLTYPE, etc. + + Users of Flex that move from '%name-prefix "xx"' to '%define api.prefix + {xx}' will typically have to update YY_DECL from + + #define YY_DECL int xxlex (YYSTYPE *yylval, YYLTYPE *yylloc) + + to + + #define YY_DECL int xxlex (XXSTYPE *yylval, XXLTYPE *yylloc) + +*** Deprecated %define variable names + + The following variables, mostly related to parsers in Java, have been + renamed for consistency. Backward compatibility is ensured, but upgrading + is recommended. 
+ + abstract -> api.parser.abstract + annotations -> api.parser.annotations + extends -> api.parser.extends + final -> api.parser.final + implements -> api.parser.implements + parser_class_name -> api.parser.class + public -> api.parser.public + strictfp -> api.parser.strictfp + +** New features + +*** Generation of fix-its for IDEs/Editors + + When given the new option -ffixit (aka -fdiagnostics-parseable-fixits), + bison now generates machine readable editing instructions to fix some + issues. Currently, this is mostly limited to updating deprecated + directives and removing duplicates. For instance: + + $ cat foo.y + %error-verbose + %define parser_class_name "Parser" + %define api.parser.class "Parser" + %% + exp:; + + See the "fix-it:" lines below: + + $ bison -ffixit foo.y + foo.y:1.1-14: warning: deprecated directive, use '%define parse.error verbose' [-Wdeprecated] + %error-verbose + ^~~~~~~~~~~~~~ + fix-it:"foo.y":{1:1-1:15}:"%define parse.error verbose" + foo.y:2.1-34: warning: deprecated directive, use '%define api.parser.class {Parser}' [-Wdeprecated] + %define parser_class_name "Parser" + ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + fix-it:"foo.y":{2:1-2:35}:"%define api.parser.class {Parser}" + foo.y:3.1-33: error: %define variable 'api.parser.class' redefined + %define api.parser.class "Parser" + ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + foo.y:2.1-34: previous definition + %define parser_class_name "Parser" + ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + fix-it:"foo.y":{3:1-3:34}:"" + foo.y: warning: fix-its can be applied. Rerun with option '--update'. [-Wother] + + This uses the same output format as GCC and Clang. + +*** Updating grammar files + + Fixes can be applied on the fly. The previous example ends with the + suggestion to re-run bison with the option -u/--update, which results in a + cleaner grammar file. + + $ bison --update foo.y + [...] 
+ bison: file 'foo.y' was updated (backup: 'foo.y~') + + $ cat foo.y + %define parse.error verbose + %define api.parser.class {Parser} + %% + exp:; + +*** Bison is now relocatable + + If you pass '--enable-relocatable' to 'configure', Bison is relocatable. + + A relocatable program can be moved or copied to a different location on + the file system. It can also be used through mount points for network + sharing. It is possible to make symbolic links to the installed and moved + programs, and invoke them through the symbolic link. + +*** %expect and %expect-rr modifiers on individual rules + + One can now document (and check) which rules participate in shift/reduce + and reduce/reduce conflicts. This is particularly important GLR parsers, + where conflicts are a normal occurrence. For example, + + %glr-parser + %expect 1 + %% + + ... + + argument_list: + arguments %expect 1 + | arguments ',' + | %empty + ; + + arguments: + expression + | argument_list ',' expression + ; + + ... + + Looking at the output from -v, one can see that the shift/reduce conflict + here is due to the fact that the parser does not know whether to reduce + arguments to argument_list until it sees the token _after_ the following + ','. By marking the rule with %expect 1 (because there is a conflict in + one state), we document the source of the 1 overall shift/reduce conflict. + + In GLR parsers, we can use %expect-rr in a rule for reduce/reduce + conflicts. In this case, we mark each of the conflicting rules. For + example, + + %glr-parser + %expect-rr 1 + + %% + + stmt: + target_list '=' expr ';' + | expr_list ';' + ; + + target_list: + target + | target ',' target_list + ; + + target: + ID %expect-rr 1 + ; + + expr_list: + expr + | expr ',' expr_list + ; + + expr: + ID %expect-rr 1 + | ... + ; + + In a statement such as + + x, y = 3, 4; + + the parser must reduce x to a target or an expr, but does not know which + until it sees the '='. 
So we notate the two possible reductions to + indicate that each conflicts in one rule. + + This feature needs user feedback, and might evolve in the future. + +*** C++: Actual token constructors + + When variants and token constructors are enabled, in addition to the + type-safe named token constructors (make_ID, make_INT, etc.), we now + generate genuine constructors for symbol_type. + + For instance with these declarations + + %token ':' + ID + INT; + + you may use these constructors: + + symbol_type (int token, const std::string&); + symbol_type (int token, const int&); + symbol_type (int token); + + Correct matching between token types and value types is checked via + 'assert'; for instance, 'symbol_type (ID, 42)' would abort. Named + constructors are preferable, as they offer better type safety (for + instance 'make_ID (42)' would not even compile), but symbol_type + constructors may help when token types are discovered at run-time, e.g., + + [a-z]+ { + if (auto i = lookup_keyword (yytext)) + return yy::parser::symbol_type (i); + else + return yy::parser::make_ID (yytext); + } + +*** C++: Variadic emplace + + If your application requires C++11 and you don't use symbol constructors, + you may now use a variadic emplace for semantic values: + + %define api.value.type variant + %token > PAIR + + in your scanner: + + int yylex (parser::semantic_type *lvalp) + { + lvalp->emplace > (1, 2); + return parser::token::PAIR; + } + +*** C++: Syntax error exceptions in GLR + + The glr.cc skeleton now supports syntax_error exceptions thrown from user + actions, or from the scanner. + +*** More POSIX Yacc compatibility warnings + + More Bison specific directives are now reported with -y or -Wyacc. This + change was ready since the release of Bison 3.0 in September 2015. It was + delayed because Autoconf used to define YACC as `bison -y`, which resulted + in numerous warnings for Bison users that use the GNU Build System. 
+ + If you still experience that problem, either redefine YACC as `bison -o + y.tab.c`, or pass -Wno-yacc to Bison. + +*** The tables yyrhs and yyphrs are back + + Because no Bison skeleton uses them, these tables were removed (no longer + passed to the skeletons, not even computed) in 2008. However, some users + have expressed interest in being able to use them in their own skeletons. + +** Bug fixes + +*** Incorrect number of reduce/reduce conflicts + + On a grammar such as + + exp: "num" | "num" | "num" + + bison used to report a single RR conflict, instead of two. This is now + fixed. This was the oldest (known) bug in Bison: it was there when Bison + was entered in the RCS version control system, in December 1987. + + Some grammar files might have to adjust their %expect-rr. + +*** Parser directives that were not careful enough + + Passing invalid arguments to %nterm, for instance character literals, used + to result in unclear error messages. + +** Documentation + + The examples/ directory (installed in .../share/doc/bison/examples) has + been restructured per language for clarity. The examples come with a + README and a Makefile. Not only can they be used to toy with Bison, they + can also be starting points for your own grammars. + + There is now a Java example, and a simple example in C based on Flex and + Bison (examples/c/lexcalc/). + +** Changes + +*** Parsers in C++ + + They now use noexcept and constexpr. Please, report missing annotations. + +*** Symbol Declarations + + The syntax of the variation directives to declare symbols was overhauled + for more consistency, and also better POSIX Yacc compliance (which, for + instance, allows "%type" without actually providing a type). The %nterm + directive, supported by Bison since its inception, is now documented and + officially supported. + + The syntax is now as follows: + + %token TAG? ( ID NUMBER? STRING? )+ ( TAG ( ID NUMBER? STRING? )+ )* + %left TAG? ( ID NUMBER? )+ ( TAG ( ID NUMBER? 
)+ )* + %type TAG? ( ID | CHAR | STRING )+ ( TAG ( ID | CHAR | STRING )+ )* + %nterm TAG? ID+ ( TAG ID+ )* + + where TAG denotes a type tag such as ‘’, ID denotes an identifier + such as ‘NUM’, NUMBER a decimal or hexadecimal integer such as ‘300’ or + ‘0x12d’, CHAR a character literal such as ‘'+'’, and STRING a string + literal such as ‘"number"’. The post-fix quantifiers are ‘?’ (zero or + one), ‘*’ (zero or more) and ‘+’ (one or more). + + +* Noteworthy changes in release 3.2.4 (2018-12-24) [stable] + +** Bug fixes + + Fix the move constructor of symbol_type. + + Always provide a copy constructor for symbol_type, even in modern C++. + + +* Noteworthy changes in release 3.2.3 (2018-12-18) [stable] + +** Bug fixes + + Properly support token constructors in C++ with types that include commas + (e.g., std::pair). A regression introduced in Bison 3.2. + + +* Noteworthy changes in release 3.2.2 (2018-11-21) [stable] + +** Bug fixes + + C++ portability issues. + + +* Noteworthy changes in release 3.2.1 (2018-11-09) [stable] + +** Bug fixes + + Several portability issues have been fixed in the build system, in the + test suite, and in the generated parsers in C++. + + +* Noteworthy changes in release 3.2 (2018-10-29) [stable] + +** Backward incompatible changes + + Support for DJGPP, which has been unmaintained and untested for years, is + obsolete. Unless there is activity to revive it, it will be removed. + +** Changes + + %printers should use yyo rather than yyoutput to denote the output stream. + + Variant-based symbols in C++ should use emplace() rather than build(). + + In C++ parsers, parser::operator() is now a synonym for the parser::parse. + +** Documentation + + A new section, "A Simple C++ Example", is a tutorial for parsers in C++. + + A comment in the generated code now emphasizes that users should not + depend upon non-documented implementation details, such as macros starting + with YY_. 
+ +** New features + +*** C++: Support for move semantics (lalr1.cc) + + The lalr1.cc skeleton now fully supports C++ move semantics, while + maintaining compatibility with C++98. You may now store move-only types + when using Bison's variants. For instance: + + %code { + #include + #include + } + + %skeleton "lalr1.cc" + %define api.value.type variant + + %% + + %token INT "int"; + %type > int; + %type >> list; + + list: + %empty {} + | list int { $$ = std::move($1); $$.emplace_back(std::move($2)); } + + int: "int" { $$ = std::make_unique($1); } + +*** C++: Implicit move of right-hand side values (lalr1.cc) + + In modern C++ (C++11 and later), you should always use 'std::move' with + the values of the right-hand side symbols ($1, $2, etc.), as they will be + popped from the stack anyway. Using 'std::move' is mandatory for + move-only types such as unique_ptr, and it provides a significant speedup + for large types such as std::string, or std::vector, etc. + + If '%define api.value.automove' is set, every occurrence '$n' is replaced + by 'std::move ($n)'. The second rule in the previous grammar can be + simplified to: + + list: list int { $$ = $1; $$.emplace_back($2); } + + With automove enabled, the semantic values are no longer lvalues, so do + not use the swap idiom: + + list: list int { std::swap($$, $1); $$.emplace_back($2); } + + This idiom is anyway obsolete: it is preferable to move than to swap. + + A warning is issued when automove is enabled, and a value is used several + times. + + input.yy:16.31-32: warning: multiple occurrences of $2 with api.value.automove enabled [-Wother] + exp: "twice" exp { $$ = $2 + $2; } + ^^ + + Enabling api.value.automove does not require support for modern C++. The + generated code is valid C++98/03, but will use copies instead of moves. + + The new examples/c++/variant-11.yy shows these features in action. 
+ +*** C++: The implicit default semantic action is always run + + When variants are enabled, the default action was not run, so + + exp: "number" + + was equivalent to + + exp: "number" {} + + It now behaves like in all the other cases, as + + exp: "number" { $$ = $1; } + + possibly using std::move if automove is enabled. + + We do not expect backward compatibility issues. However, beware of + forward compatibility issues: if you rely on default actions with + variants, be sure to '%require "3.2"' to avoid older versions of Bison to + generate incorrect parsers. + +*** C++: Renaming location.hh + + When both %defines and %locations are enabled, Bison generates a + location.hh file. If you don't use locations outside of the parser, you + may avoid its creation with: + + %define api.location.file none + + However this file is useful if, for instance, your parser builds an AST + decorated with locations: you may use Bison's location independently of + Bison's parser. You can now give it another name, for instance: + + %define api.location.file "my-location.hh" + + This name can have directory components, and even be absolute. The name + under which the location file is included is controlled by + api.location.include. + + This way it is possible to have several parsers share the same location + file. + + For instance, in src/foo/parser.hh, generate the include/ast/loc.hh file: + + %locations + %define api.namespace {foo} + %define api.location.file "include/ast/loc.hh" + %define api.location.include {} + + and use it in src/bar/parser.hh: + + %locations + %define api.namespace {bar} + %code requires {#include } + %define api.location.type {bar::location} + + Absolute file names are supported, so in your Makefile, passing the flag + -Dapi.location.file='"$(top_srcdir)/include/ast/location.hh"' to bison is + safe. + +*** C++: stack.hh and position.hh are deprecated + + When asked to generate a header file (%defines), the lalr1.cc skeleton + generates a stack.hh file. 
This file had no interest for users; it is now + made useless: its content is included in the parser definition. It is + still generated for backward compatibility. + + When in addition to %defines, location support is requested (%locations), + the file position.hh is also generated. It is now also useless: its + content is now included in location.hh. + + These files are no longer generated when your grammar file requires at + least Bison 3.2 (%require "3.2"). + +** Bug fixes + + Portability issues on MinGW and VS2015. + + Portability issues in the test suite. + + Portability/warning issues with Flex. + + +* Noteworthy changes in release 3.1 (2018-08-27) [stable] + +** Backward incompatible changes + + Compiling Bison now requires a C99 compiler---as announced during the + release of Bison 3.0, five years ago. Generated parsers do not require a + C99 compiler. + + Support for DJGPP, which has been unmaintained and untested for years, is + obsolete. Unless there is activity to revive it, the next release of Bison + will have it removed. + +** New features + +*** Typed midrule actions + + Because their type is unknown to Bison, the values of midrule actions are + not treated like the others: they don't have %printer and %destructor + support. It also prevents C++ (Bison) variants to handle them properly. + + Typed midrule actions address these issues. Instead of: + + exp: { $$ = 1; } { $$ = 2; } { $$ = $1 + $2; } + + write: + + exp: { $$ = 1; } { $$ = 2; } { $$ = $1 + $2; } + +*** Reports include the type of the symbols + + The sections about terminal and nonterminal symbols of the '*.output' file + now specify their declared type. For instance, for: + + %token NUM + + the report now shows '': + + Terminals, with rules where they appear + + NUM (258) 5 + +*** Diagnostics about useless rules + + In the following grammar, the 'exp' nonterminal is trivially useless. So, + of course, its rules are useless too. 
+ + %% + input: '0' | exp + exp: exp '+' exp | exp '-' exp | '(' exp ')' + + Previously all the useless rules were reported, including those whose + left-hand side is the 'exp' nonterminal: + + warning: 1 nonterminal useless in grammar [-Wother] + warning: 4 rules useless in grammar [-Wother] + 2.14-16: warning: nonterminal useless in grammar: exp [-Wother] + input: '0' | exp + ^^^ + 2.14-16: warning: rule useless in grammar [-Wother] + input: '0' | exp + ^^^ + 3.6-16: warning: rule useless in grammar [-Wother] + exp: exp '+' exp | exp '-' exp | '(' exp ')' + ^^^^^^^^^^^ + 3.20-30: warning: rule useless in grammar [-Wother] + exp: exp '+' exp | exp '-' exp | '(' exp ')' + ^^^^^^^^^^^ + 3.34-44: warning: rule useless in grammar [-Wother] + exp: exp '+' exp | exp '-' exp | '(' exp ')' + ^^^^^^^^^^^ + + Now, rules whose left-hand side symbol is useless are no longer reported + as useless. The locations of the errors have also been adjusted to point + to the first use of the nonterminal as a left-hand side of a rule: + + warning: 1 nonterminal useless in grammar [-Wother] + warning: 4 rules useless in grammar [-Wother] + 3.1-3: warning: nonterminal useless in grammar: exp [-Wother] + exp: exp '+' exp | exp '-' exp | '(' exp ')' + ^^^ + 2.14-16: warning: rule useless in grammar [-Wother] + input: '0' | exp + ^^^ + +*** C++: Generated parsers can be compiled with -fno-exceptions (lalr1.cc) + + When compiled with exceptions disabled, the generated parsers no longer + uses try/catch clauses. + + Currently only GCC and Clang are supported. + +** Documentation + +*** A demonstration of variants + + A new example was added (installed in .../share/doc/bison/examples), + 'variant.yy', which shows how to use (Bison) variants in C++. + + The other examples were made nicer to read. 
+ +*** Some features are no longer 'experimental' + + The following features, mature enough, are no longer flagged as + experimental in the documentation: push parsers, default %printer and + %destructor (typed: <*> and untyped: <>), %define api.value.type union and + variant, Java parsers, XML output, LR family (lr, ielr, lalr), and + semantic predicates (%?). + +** Bug fixes + +*** GLR: Predicates support broken by #line directives + + Predicates (%?) in GLR such as + + widget: + %? {new_syntax} 'w' id new_args + | %?{!new_syntax} 'w' id old_args + + were issued with #lines in the middle of C code. + +*** Printer and destructor with broken #line directives + + The #line directives were not properly escaped when emitting the code for + %printer/%destructor, which resulted in compiler errors if there are + backslashes or double-quotes in the grammar file name. + +*** Portability on ICC + + The Intel compiler claims compatibility with GCC, yet rejects its _Pragma. + Generated parsers now work around this. + +*** Various + + There were several small fixes in the test suite and in the build system, + many warnings in bison and in the generated parsers were eliminated. The + documentation also received its share of minor improvements. + + Useless code was removed from C++ parsers, and some of the generated + constructors are more 'natural'. + + +* Noteworthy changes in release 3.0.5 (2018-05-27) [stable] + +** Bug fixes + +*** C++: Fix support of 'syntax_error' + + One incorrect 'inline' resulted in linking errors about the constructor of + the syntax_error exception. + +*** C++: Fix warnings + + GCC 7.3 (with -O1 or -O2 but not -O0 or -O3) issued null-dereference + warnings about yyformat being possibly null. It also warned about the + deprecated implicit definition of copy constructors when there's a + user-defined (copy) assignment operator. 
+
+*** Location of errors
+
+  In C++ parsers, out-of-bounds errors can happen when a rule with an empty
+  right-hand side raises a syntax error.  The behavior of the default parser
+  (yacc.c) in such a condition was undefined.
+
+  Now all the parsers match the behavior of glr.c: @$ is used as the
+  location of the error.  This handles gracefully rules with and without
+  rhs.
+
+*** Portability fixes in the test suite
+
+  On some platforms, some Java and/or C++ tests were failing.
+
+
+* Noteworthy changes in release 3.0.4 (2015-01-23) [stable]
+
+** Bug fixes
+
+*** C++ with Variants (lalr1.cc)
+
+  Fix a compiler warning when no %destructor uses $$.
+
+*** Test suites
+
+  Several portability issues in tests were fixed.
+
+
+* Noteworthy changes in release 3.0.3 (2015-01-15) [stable]
+
+** Bug fixes
+
+*** C++ with Variants (lalr1.cc)
+
+  Problems with %destructor and '%define parse.assert' have been fixed.
+
+*** Named %union support (yacc.c, glr.c)
+
+  Bison 3.0 introduced a regression on named %union such as
+
+    %union foo { int ival; };
+
+  The possibility to use a name was introduced "for Yacc compatibility".
+  It is however not required by POSIX Yacc, and its usefulness is not clear.
+
+*** %define api.value.type union with %defines (yacc.c, glr.c)
+
+  The C parsers were broken when %defines was used together with "%define
+  api.value.type union".
+
+*** Redeclarations are reported in proper order
+
+  On
+
+    %token FOO "foo"
+    %printer {} "foo"
+    %printer {} FOO
+
+  bison used to report:
+
+    foo.yy:2.10-11: error: %printer redeclaration for FOO
+     %printer {} "foo"
+              ^^
+    foo.yy:3.10-11: previous declaration
+     %printer {} FOO
+              ^^
+
+  Now, the "previous" declaration is always the first one.
+
+
+** Documentation
+
+  Bison now installs various files in its docdir (which defaults to
+  '/usr/local/share/doc/bison'), including the three fully blown examples
+  extracted from the documentation:
+
+  - rpcalc
+    Reverse Polish Calculator, a simple introductory example.
+ - mfcalc + Multi-function Calc, a calculator with memory and functions and located + error messages. + - calc++ + a calculator in C++ using variant support and token constructors. + + +* Noteworthy changes in release 3.0.2 (2013-12-05) [stable] + +** Bug fixes + +*** Generated source files when errors are reported + + When warnings are issued and -Werror is set, bison would still generate + the source files (*.c, *.h...). As a consequence, some runs of "make" + could fail the first time, but not the second (as the files were generated + anyway). + + This is fixed: bison no longer generates this source files, but, of + course, still produces the various reports (*.output, *.xml, etc.). + +*** %empty is used in reports + + Empty right-hand sides are denoted by '%empty' in all the reports (text, + dot, XML and formats derived from it). + +*** YYERROR and variants + + When C++ variant support is enabled, an error triggered via YYERROR, but + not caught via error recovery, resulted in a double deletion. + + +* Noteworthy changes in release 3.0.1 (2013-11-12) [stable] + +** Bug fixes + +*** Errors in caret diagnostics + + On some platforms, some errors could result in endless diagnostics. + +*** Fixes of the -Werror option + + Options such as "-Werror -Wno-error=foo" were still turning "foo" + diagnostics into errors instead of warnings. This is fixed. + + Actually, for consistency with GCC, "-Wno-error=foo -Werror" now also + leaves "foo" diagnostics as warnings. Similarly, with "-Werror=foo + -Wno-error", "foo" diagnostics are now errors. + +*** GLR Predicates + + As demonstrated in the documentation, one can now leave spaces between + "%?" and its "{". + +*** Installation + + The yacc.1 man page is no longer installed if --disable-yacc was + specified. + +*** Fixes in the test suite + + Bugs and portability issues. + + +* Noteworthy changes in release 3.0 (2013-07-25) [stable] + +** WARNING: Future backward-incompatibilities! 
+ + Like other GNU packages, Bison will start using some of the C99 features + for its own code, especially the definition of variables after statements. + The generated C parsers still aim at C90. + +** Backward incompatible changes + +*** Obsolete features + + Support for YYFAIL is removed (deprecated in Bison 2.4.2): use YYERROR. + + Support for yystype and yyltype is removed (deprecated in Bison 1.875): + use YYSTYPE and YYLTYPE. + + Support for YYLEX_PARAM and YYPARSE_PARAM is removed (deprecated in Bison + 1.875): use %lex-param, %parse-param, or %param. + + Missing semicolons at the end of actions are no longer added (as announced + in the release 2.5). + +*** Use of YACC='bison -y' + + TL;DR: With Autoconf <= 2.69, pass -Wno-yacc to (AM_)YFLAGS if you use + Bison extensions. + + Traditional Yacc generates 'y.tab.c' whatever the name of the input file. + Therefore Makefiles written for Yacc expect 'y.tab.c' (and possibly + 'y.tab.h' and 'y.output') to be generated from 'foo.y'. + + To this end, for ages, AC_PROG_YACC, Autoconf's macro to look for an + implementation of Yacc, was using Bison as 'bison -y'. While it does + ensure compatible output file names, it also enables warnings for + incompatibilities with POSIX Yacc. In other words, 'bison -y' triggers + warnings for Bison extensions. + + Autoconf 2.70+ fixes this incompatibility by using YACC='bison -o y.tab.c' + (which also generates 'y.tab.h' and 'y.output' when needed). + Alternatively, disable Yacc warnings by passing '-Wno-yacc' to your Yacc + flags (YFLAGS, or AM_YFLAGS with Automake). + +** Bug fixes + +*** The epilogue is no longer affected by internal #defines (glr.c) + + The glr.c skeleton uses defines such as #define yylval (yystackp->yyval) in + generated code. 
These weren't properly undefined before the inclusion of + the user epilogue, so functions such as the following were butchered by the + preprocessor expansion: + + int yylex (YYSTYPE *yylval); + + This is fixed: yylval, yynerrs, yychar, and yylloc are now valid + identifiers for user-provided variables. + +*** stdio.h is no longer needed when locations are enabled (yacc.c) + + Changes in Bison 2.7 introduced a dependency on FILE and fprintf when + locations are enabled. This is fixed. + +*** Warnings about useless %pure-parser/%define api.pure are restored + +** Diagnostics reported by Bison + + Most of these features were contributed by Théophile Ranquet and Victor + Santet. + +*** Carets + + Version 2.7 introduced caret errors, for a prettier output. These are now + activated by default. The old format can still be used by invoking Bison + with -fno-caret (or -fnone). + + Some error messages that reproduced excerpts of the grammar are now using + the caret information only. For instance on: + + %% + exp: 'a' | 'a'; + + Bison 2.7 reports: + + in.y: warning: 1 reduce/reduce conflict [-Wconflicts-rr] + in.y:2.12-14: warning: rule useless in parser due to conflicts: exp: 'a' [-Wother] + + Now bison reports: + + in.y: warning: 1 reduce/reduce conflict [-Wconflicts-rr] + in.y:2.12-14: warning: rule useless in parser due to conflicts [-Wother] + exp: 'a' | 'a'; + ^^^ + + and "bison -fno-caret" reports: + + in.y: warning: 1 reduce/reduce conflict [-Wconflicts-rr] + in.y:2.12-14: warning: rule useless in parser due to conflicts [-Wother] + +*** Enhancements of the -Werror option + + The -Werror=CATEGORY option is now recognized, and will treat specified + warnings as errors. The warnings need not have been explicitly activated + using the -W option, this is similar to what GCC 4.7 does. 
+ + For example, given the following command line, Bison will treat both + warnings related to POSIX Yacc incompatibilities and S/R conflicts as + errors (and only those): + + $ bison -Werror=yacc,error=conflicts-sr input.y + + If no categories are specified, -Werror will make all active warnings into + errors. For example, the following line does the same the previous example: + + $ bison -Werror -Wnone -Wyacc -Wconflicts-sr input.y + + (By default -Wconflicts-sr,conflicts-rr,deprecated,other is enabled.) + + Note that the categories in this -Werror option may not be prefixed with + "no-". However, -Wno-error[=CATEGORY] is valid. + + Note that -y enables -Werror=yacc. Therefore it is now possible to require + Yacc-like behavior (e.g., always generate y.tab.c), but to report + incompatibilities as warnings: "-y -Wno-error=yacc". + +*** The display of warnings is now richer + + The option that controls a given warning is now displayed: + + foo.y:4.6: warning: type clash on default action: != [-Wother] + + In the case of warnings treated as errors, the prefix is changed from + "warning: " to "error: ", and the suffix is displayed, in a manner similar + to GCC, as [-Werror=CATEGORY]. + + For instance, where the previous version of Bison would report (and exit + with failure): + + bison: warnings being treated as errors + input.y:1.1: warning: stray ',' treated as white space + + it now reports: + + input.y:1.1: error: stray ',' treated as white space [-Werror=other] + +*** Deprecated constructs + + The new 'deprecated' warning category flags obsolete constructs whose + support will be discontinued. It is enabled by default. These warnings + used to be reported as 'other' warnings. + +*** Useless semantic types + + Bison now warns about useless (uninhabited) semantic types. 
Since
+  semantic types are not declared to Bison (they are defined in the opaque
+  %union structure), it is %printer/%destructor directives about useless
+  types that trigger the warning:
+
+    %token <type1> term
+    %type  <type2> nterm
+    %printer    {} <type1> <type3>
+    %destructor {} <type2> <type4>
+    %%
+    nterm: term { $$ = $1; };
+
+    3.28-34: warning: type <type3> is used, but is not associated to any symbol
+    4.28-34: warning: type <type4> is used, but is not associated to any symbol
+
+*** Undefined but unused symbols
+
+  Bison used to raise an error for undefined symbols that are not used in
+  the grammar.  This is now only a warning.
+
+    %printer    {} symbol1
+    %destructor {} symbol2
+    %type <type> symbol3
+    %%
+    exp: "a";
+
+*** Useless destructors or printers
+
+  Bison now warns about useless destructors or printers.  In the following
+  example, the printer for <type1>, and the destructor for <type2> are
+  useless: all symbols of <type1> (token1) already have a printer, and all
+  symbols of type <type2> (token2) already have a destructor.
+
+    %token <type1> token1
+           <type2> token2
+           <type3> token3
+           <type4> token4
+    %printer    {} token1 <type1> <type3>
+    %destructor {} token2 <type2> <type4>
+
+*** Conflicts
+
+  The warnings and error messages about shift/reduce and reduce/reduce
+  conflicts have been normalized.
For instance on the following foo.y file: + + %glr-parser + %% + exp: exp '+' exp | '0' | '0'; + + compare the previous version of bison: + + $ bison foo.y + foo.y: conflicts: 1 shift/reduce, 2 reduce/reduce + $ bison -Werror foo.y + bison: warnings being treated as errors + foo.y: conflicts: 1 shift/reduce, 2 reduce/reduce + + with the new behavior: + + $ bison foo.y + foo.y: warning: 1 shift/reduce conflict [-Wconflicts-sr] + foo.y: warning: 2 reduce/reduce conflicts [-Wconflicts-rr] + $ bison -Werror foo.y + foo.y: error: 1 shift/reduce conflict [-Werror=conflicts-sr] + foo.y: error: 2 reduce/reduce conflicts [-Werror=conflicts-rr] + + When %expect or %expect-rr is used, such as with bar.y: + + %expect 0 + %glr-parser + %% + exp: exp '+' exp | '0' | '0'; + + Former behavior: + + $ bison bar.y + bar.y: conflicts: 1 shift/reduce, 2 reduce/reduce + bar.y: expected 0 shift/reduce conflicts + bar.y: expected 0 reduce/reduce conflicts + + New one: + + $ bison bar.y + bar.y: error: shift/reduce conflicts: 1 found, 0 expected + bar.y: error: reduce/reduce conflicts: 2 found, 0 expected + +** Incompatibilities with POSIX Yacc + + The 'yacc' category is no longer part of '-Wall', enable it explicitly + with '-Wyacc'. + +** Additional yylex/yyparse arguments + + The new directive %param declares additional arguments to both yylex and + yyparse. The %lex-param, %parse-param, and %param directives support one + or more arguments. Instead of + + %lex-param {arg1_type *arg1} + %lex-param {arg2_type *arg2} + %parse-param {arg1_type *arg1} + %parse-param {arg2_type *arg2} + + one may now declare + + %param {arg1_type *arg1} {arg2_type *arg2} + +** Types of values for %define variables + + Bison used to make no difference between '%define foo bar' and '%define + foo "bar"'. The former is now called a 'keyword value', and the latter a + 'string value'. A third kind was added: 'code values', such as '%define + foo {bar}'. 
+ + Keyword variables are used for fixed value sets, e.g., + + %define lr.type lalr + + Code variables are used for value in the target language, e.g., + + %define api.value.type {struct semantic_type} + + String variables are used remaining cases, e.g. file names. + +** Variable api.token.prefix + + The variable api.token.prefix changes the way tokens are identified in + the generated files. This is especially useful to avoid collisions + with identifiers in the target language. For instance + + %token FILE for ERROR + %define api.token.prefix {TOK_} + %% + start: FILE for ERROR; + + will generate the definition of the symbols TOK_FILE, TOK_for, and + TOK_ERROR in the generated sources. In particular, the scanner must + use these prefixed token names, although the grammar itself still + uses the short names (as in the sample rule given above). + +** Variable api.value.type + + This new %define variable supersedes the #define macro YYSTYPE. The use + of YYSTYPE is discouraged. In particular, #defining YYSTYPE *and* either + using %union or %defining api.value.type results in undefined behavior. + + Either define api.value.type, or use "%union": + + %union + { + int ival; + char *sval; + } + %token INT "integer" + %token STRING "string" + %printer { fprintf (yyo, "%d", $$); } + %destructor { free ($$); } + + /* In yylex(). */ + yylval.ival = 42; return INT; + yylval.sval = "42"; return STRING; + + The %define variable api.value.type supports both keyword and code values. + + The keyword value 'union' means that the user provides genuine types, not + union member names such as "ival" and "sval" above (WARNING: will fail if + -y/--yacc/%yacc is enabled). + + %define api.value.type union + %token INT "integer" + %token STRING "string" + %printer { fprintf (yyo, "%d", $$); } + %destructor { free ($$); } + + /* In yylex(). 
*/ + yylval.INT = 42; return INT; + yylval.STRING = "42"; return STRING; + + The keyword value variant is somewhat equivalent, but for C++ special + provision is made to allow classes to be used (more about this below). + + %define api.value.type variant + %token INT "integer" + %token STRING "string" + + Code values (in braces) denote user defined types. This is where YYSTYPE + used to be used. + + %code requires + { + struct my_value + { + enum + { + is_int, is_string + } kind; + union + { + int ival; + char *sval; + } u; + }; + } + %define api.value.type {struct my_value} + %token INT "integer" + %token STRING "string" + %printer { fprintf (yyo, "%d", $$); } + %destructor { free ($$); } + + /* In yylex(). */ + yylval.u.ival = 42; return INT; + yylval.u.sval = "42"; return STRING; + +** Variable parse.error + + This variable controls the verbosity of error messages. The use of the + %error-verbose directive is deprecated in favor of "%define parse.error + verbose". + +** Deprecated %define variable names + + The following variables have been renamed for consistency. Backward + compatibility is ensured, but upgrading is recommended. + + lr.default-reductions -> lr.default-reduction + lr.keep-unreachable-states -> lr.keep-unreachable-state + namespace -> api.namespace + stype -> api.value.type + +** Semantic predicates + + Contributed by Paul Hilfinger. + + The new, experimental, semantic-predicate feature allows actions of the + form "%?{ BOOLEAN-EXPRESSION }", which cause syntax errors (as for + YYERROR) if the expression evaluates to 0, and are evaluated immediately + in GLR parsers, rather than being deferred. The result is that they allow + the programmer to prune possible parses based on the values of run-time + expressions. + +** The directive %expect-rr is now an error in non GLR mode + + It used to be an error only if used in non GLR mode, _and_ if there are + reduce/reduce conflicts. 
+ +** Tokens are numbered in their order of appearance + + Contributed by Valentin Tolmer. + + With '%token A B', A had a number less than the one of B. However, + precedence declarations used to generate a reversed order. This is now + fixed, and introducing tokens with any of %token, %left, %right, + %precedence, or %nonassoc yields the same result. + + When mixing declarations of tokens with a literal character (e.g., 'a') or + with an identifier (e.g., B) in a precedence declaration, Bison numbered + the literal characters first. For example + + %right A B 'c' 'd' + + would lead to the tokens declared in this order: 'c' 'd' A B. Again, the + input order is now preserved. + + These changes were made so that one can remove useless precedence and + associativity declarations (i.e., map %nonassoc, %left or %right to + %precedence, or to %token) and get exactly the same output. + +** Useless precedence and associativity + + Contributed by Valentin Tolmer. + + When developing and maintaining a grammar, useless associativity and + precedence directives are common. They can be a nuisance: new ambiguities + arising are sometimes masked because their conflicts are resolved due to + the extra precedence or associativity information. Furthermore, it can + hinder the comprehension of a new grammar: one will wonder about the role + of a precedence, where in fact it is useless. The following changes aim + at detecting and reporting these extra directives. + +*** Precedence warning category + + A new category of warning, -Wprecedence, was introduced. It flags the + useless precedence and associativity directives. + +*** Useless associativity + + Bison now warns about symbols with a declared associativity that is never + used to resolve conflicts. In that case, using %precedence is sufficient; + the parsing tables will remain unchanged. Solving these warnings may raise + useless precedence warnings, as the symbols no longer have associativity. 
+ For example: + + %left '+' + %left '*' + %% + exp: + "number" + | exp '+' "number" + | exp '*' exp + ; + + will produce a + + warning: useless associativity for '+', use %precedence [-Wprecedence] + %left '+' + ^^^ + +*** Useless precedence + + Bison now warns about symbols with a declared precedence and no declared + associativity (i.e., declared with %precedence), and whose precedence is + never used. In that case, the symbol can be safely declared with %token + instead, without modifying the parsing tables. For example: + + %precedence '=' + %% + exp: "var" '=' "number"; + + will produce a + + warning: useless precedence for '=' [-Wprecedence] + %precedence '=' + ^^^ + +*** Useless precedence and associativity + + In case of both useless precedence and associativity, the issue is flagged + as follows: + + %nonassoc '=' + %% + exp: "var" '=' "number"; + + The warning is: + + warning: useless precedence and associativity for '=' [-Wprecedence] + %nonassoc '=' + ^^^ + +** Empty rules + + With help from Joel E. Denny and Gabriel Rassoul. + + Empty rules (i.e., with an empty right-hand side) can now be explicitly + marked by the new %empty directive. Using %empty on a non-empty rule is + an error. The new -Wempty-rule warning reports empty rules without + %empty. On the following grammar: + + %% + s: a b c; + a: ; + b: %empty; + c: 'a' %empty; + + bison reports: + + 3.4-5: warning: empty rule without %empty [-Wempty-rule] + a: {} + ^^ + 5.8-13: error: %empty on non-empty rule + c: 'a' %empty {}; + ^^^^^^ + +** Java skeleton improvements + + The constants for token names were moved to the Lexer interface. Also, it + is possible to add code to the parser's constructors using "%code init" + and "%define init_throws". + Contributed by Paolo Bonzini. + + The Java skeleton now supports push parsing. + Contributed by Dennis Heimbigner. + +** C++ skeletons improvements + +*** The parser header is no longer mandatory (lalr1.cc, glr.cc) + + Using %defines is now optional. 
Without it, the needed support classes + are defined in the generated parser, instead of additional files (such as + location.hh, position.hh and stack.hh). + +*** Locations are no longer mandatory (lalr1.cc, glr.cc) + + Both lalr1.cc and glr.cc no longer require %location. + +*** syntax_error exception (lalr1.cc) + + The C++ parser features a syntax_error exception, which can be + thrown from the scanner or from user rules to raise syntax errors. + This facilitates reporting errors caught in sub-functions (e.g., + rejecting too large integral literals from a conversion function + used by the scanner, or rejecting invalid combinations from a + factory invoked by the user actions). + +*** %define api.value.type variant + + This is based on a submission from Michiel De Wilde. With help + from Théophile Ranquet. + + In this mode, complex C++ objects can be used as semantic values. For + instance: + + %token <::std::string> TEXT; + %token NUMBER; + %token SEMICOLON ";" + %type <::std::string> item; + %type <::std::list> list; + %% + result: + list { std::cout << $1 << std::endl; } + ; + + list: + %empty { /* Generates an empty string list. */ } + | list item ";" { std::swap ($$, $1); $$.push_back ($2); } + ; + + item: + TEXT { std::swap ($$, $1); } + | NUMBER { $$ = string_cast ($1); } + ; + +*** %define api.token.constructor + + When variants are enabled, Bison can generate functions to build the + tokens. This guarantees that the token type (e.g., NUMBER) is consistent + with the semantic value (e.g., int): + + parser::symbol_type yylex () + { + parser::location_type loc = ...; + ... + return parser::make_TEXT ("Hello, world!", loc); + ... + return parser::make_NUMBER (42, loc); + ... + return parser::make_SEMICOLON (loc); + ... + } + +*** C++ locations + + There are operator- and operator-= for 'location'. Negative line/column + increments can no longer underflow the resulting value. 
+ + +* Noteworthy changes in release 2.7.1 (2013-04-15) [stable] + +** Bug fixes + +*** Fix compiler attribute portability (yacc.c) + + With locations enabled, __attribute__ was used unprotected. + +*** Fix some compiler warnings (lalr1.cc) + + +* Noteworthy changes in release 2.7 (2012-12-12) [stable] + +** Bug fixes + + Warnings about uninitialized yylloc in yyparse have been fixed. + + Restored C90 compliance (yet no report was ever made). + +** Diagnostics are improved + + Contributed by Théophile Ranquet. + +*** Changes in the format of error messages + + This used to be the format of many error reports: + + input.y:2.7-12: %type redeclaration for exp + input.y:1.7-12: previous declaration + + It is now: + + input.y:2.7-12: error: %type redeclaration for exp + input.y:1.7-12: previous declaration + +*** New format for error reports: carets + + Caret errors have been added to Bison: + + input.y:2.7-12: error: %type redeclaration for exp + %type exp + ^^^^^^ + input.y:1.7-12: previous declaration + %type exp + ^^^^^^ + + or + + input.y:3.20-23: error: ambiguous reference: '$exp' + exp: exp '+' exp { $exp = $1 + $3; }; + ^^^^ + input.y:3.1-3: refers to: $exp at $$ + exp: exp '+' exp { $exp = $1 + $3; }; + ^^^ + input.y:3.6-8: refers to: $exp at $1 + exp: exp '+' exp { $exp = $1 + $3; }; + ^^^ + input.y:3.14-16: refers to: $exp at $3 + exp: exp '+' exp { $exp = $1 + $3; }; + ^^^ + + The default behavior for now is still not to display these unless + explicitly asked with -fcaret (or -fall). However, in a later release, it + will be made the default behavior (but may still be deactivated with + -fno-caret). + +** New value for %define variable: api.pure full + + The %define variable api.pure requests a pure (reentrant) parser. However, + for historical reasons, using it in a location-tracking Yacc parser + resulted in a yyerror function that did not take a location as a + parameter. 
With this new value, the user may request a better pure parser, + where yyerror does take a location as a parameter (in location-tracking + parsers). + + The use of "%define api.pure true" is deprecated in favor of this new + "%define api.pure full". + +** New %define variable: api.location.type (glr.cc, lalr1.cc, lalr1.java) + + The %define variable api.location.type defines the name of the type to use + for locations. When defined, Bison no longer generates the position.hh + and location.hh files, nor does the parser will include them: the user is + then responsible to define her type. + + This can be used in programs with several parsers to factor their location + and position files: let one of them generate them, and the others just use + them. + + This feature was actually introduced, but not documented, in Bison 2.5, + under the name "location_type" (which is maintained for backward + compatibility). + + For consistency, lalr1.java's %define variables location_type and + position_type are deprecated in favor of api.location.type and + api.position.type. + +** Exception safety (lalr1.cc) + + The parse function now catches exceptions, uses the %destructors to + release memory (the lookahead symbol and the symbols pushed on the stack) + before re-throwing the exception. + + This feature is somewhat experimental. User feedback would be + appreciated. + +** Graph improvements in DOT and XSLT + + Contributed by Théophile Ranquet. + + The graphical presentation of the states is more readable: their shape is + now rectangular, the state number is clearly displayed, and the items are + numbered and left-justified. + + The reductions are now explicitly represented as transitions to other + diamond shaped nodes. + + These changes are present in both --graph output and xml2dot.xsl XSLT + processing, with minor (documented) differences. + +** %language is no longer an experimental feature. + + The introduction of this feature, in 2.4, was four years ago. 
The
+  --language option and the %language directive are no longer experimental.
+
+** Documentation
+
+  The sections about shift/reduce and reduce/reduce conflicts resolution
+  have been fixed and extended.
+
+  Although introduced more than four years ago, XML and Graphviz reports
+  were not properly documented.
+
+  The translation of midrule actions is now described.
+
+
+* Noteworthy changes in release 2.6.5 (2012-11-07) [stable]
+
+  We consider compiler warnings about Bison generated parsers to be bugs.
+  Rather than working around them in your own project, please consider
+  reporting them to us.
+
+** Bug fixes
+
+  Warnings about uninitialized yylval and/or yylloc for push parsers with a
+  pure interface have been fixed for GCC 4.0 up to 4.8, and Clang 2.9 to
+  3.2.
+
+  Other issues in the test suite have been addressed.
+
+  Null characters are correctly displayed in error messages.
+
+  When possible, yylloc is correctly initialized before calling yylex.  It
+  is no longer necessary to initialize it in the %initial-action.
+
+
+* Noteworthy changes in release 2.6.4 (2012-10-23) [stable]
+
+  Bison 2.6.3's --version was incorrect.  This release fixes this issue.
+
+
+* Noteworthy changes in release 2.6.3 (2012-10-22) [stable]
+
+** Bug fixes
+
+  Bugs and portability issues in the test suite have been fixed.
+
+  Some errors in translations have been addressed, and --help now directs
+  users to the appropriate place to report them.
+
+  Stray Info files shipped by accident are removed.
+
+  Incorrect definitions of YY_, issued by yacc.c when no parser header is
+  generated, are removed.
+
+  All the generated headers are self-contained.
+
+** Header guards (yacc.c, glr.c, glr.cc)
+
+  In order to avoid collisions, the header guards are now
+  YY_<PREFIX>_<FILE>_INCLUDED, instead of merely <PREFIX>_<FILE>.
+  For instance the header generated from
+
+    %define api.prefix "calc"
+    %defines "lib/parse.h"
+
+  will use YY_CALC_LIB_PARSE_H_INCLUDED as guard.
+ +** Fix compiler warnings in the generated parser (yacc.c, glr.c) + + The compilation of pure parsers (%define api.pure) can trigger GCC + warnings such as: + + input.c: In function 'yyparse': + input.c:1503:12: warning: 'yylval' may be used uninitialized in this + function [-Wmaybe-uninitialized] + *++yyvsp = yylval; + ^ + + This is now fixed; pragmas to avoid these warnings are no longer needed. + + Warnings from clang ("equality comparison with extraneous parentheses" and + "function declared 'noreturn' should not return") have also been + addressed. + + +* Noteworthy changes in release 2.6.2 (2012-08-03) [stable] + +** Bug fixes + + Buffer overruns, complaints from Flex, and portability issues in the test + suite have been fixed. + +** Spaces in %lex- and %parse-param (lalr1.cc, glr.cc) + + Trailing end-of-lines in %parse-param or %lex-param would result in + invalid C++. This is fixed. + +** Spurious spaces and end-of-lines + + The generated files no longer end (nor start) with empty lines. + + +* Noteworthy changes in release 2.6.1 (2012-07-30) [stable] + + Bison no longer executes user-specified M4 code when processing a grammar. + +** Future Changes + + In addition to the removal of the features announced in Bison 2.6, the + next major release will remove the "Temporary hack for adding a semicolon + to the user action", as announced in the release 2.5. Instead of: + + exp: exp "+" exp { $$ = $1 + $3 }; + + write: + + exp: exp "+" exp { $$ = $1 + $3; }; + +** Bug fixes + +*** Type names are now properly escaped. + +*** glr.cc: set_debug_level and debug_level work as expected. + +*** Stray @ or $ in actions + + While Bison used to warn about stray $ or @ in action rules, it did not + for other actions such as printers, destructors, or initial actions. It + now does. + +** Type names in actions + + For consistency with rule actions, it is now possible to qualify $$ by a + type-name in destructors, printers, and initial actions. 
For instance: + + %printer { fprintf (yyo, "(%d, %f)", $$, $$); } <*> <>; + + will display two values for each typed and untyped symbol (provided + that YYSTYPE has both "ival" and "fval" fields). + + +* Noteworthy changes in release 2.6 (2012-07-19) [stable] + +** Future changes + + The next major release of Bison will drop support for the following + deprecated features. Please report disagreements to bug-bison@gnu.org. + +*** K&R C parsers + + Support for generating parsers in K&R C will be removed. Parsers + generated for C support ISO C90, and are tested with ISO C99 and ISO C11 + compilers. + +*** Features deprecated since Bison 1.875 + + The definitions of yystype and yyltype will be removed; use YYSTYPE and + YYLTYPE. + + YYPARSE_PARAM and YYLEX_PARAM, deprecated in favor of %parse-param and + %lex-param, will no longer be supported. + + Support for the preprocessor symbol YYERROR_VERBOSE will be removed, use + %error-verbose. + +*** The generated header will be included (yacc.c) + + Instead of duplicating the content of the generated header (definition of + YYSTYPE, yyparse declaration etc.), the generated parser will include it, + as is already the case for GLR or C++ parsers. This change is deferred + because existing versions of ylwrap (e.g., Automake 1.12.1) do not support + it. + +** Generated Parser Headers + +*** Guards (yacc.c, glr.c, glr.cc) + + The generated headers are now guarded, as is already the case for C++ + parsers (lalr1.cc). For instance, with --defines=foo.h: + + #ifndef YY_FOO_H + # define YY_FOO_H + ... + #endif /* !YY_FOO_H */ + +*** New declarations (yacc.c, glr.c) + + The generated header now declares yydebug and yyparse. Both honor + --name-prefix=bar_, and yield + + int bar_parse (void); + + rather than + + #define yyparse bar_parse + int yyparse (void); + + in order to facilitate the inclusion of several parser headers inside a + single compilation unit. 
+ +*** Exported symbols in C++ + + The symbols YYTOKEN_TABLE and YYERROR_VERBOSE, which were defined in the + header, are removed, as they prevent the possibility of including several + generated headers from a single compilation unit. + +*** YYLSP_NEEDED + + For the same reasons, the undocumented and unused macro YYLSP_NEEDED is no + longer defined. + +** New %define variable: api.prefix + + Now that the generated headers are more complete and properly protected + against multiple inclusions, constant names, such as YYSTYPE are a + problem. While yyparse and others are properly renamed by %name-prefix, + YYSTYPE, YYDEBUG and others have never been affected by it. Because it + would introduce backward compatibility issues in projects not expecting + YYSTYPE to be renamed, instead of changing the behavior of %name-prefix, + it is deprecated in favor of a new %define variable: api.prefix. + + The following examples compares both: + + %name-prefix "bar_" | %define api.prefix "bar_" + %token FOO %token FOO + %union { int ival; } %union { int ival; } + %% %% + exp: 'a'; exp: 'a'; + + bison generates: + + #ifndef BAR_FOO_H #ifndef BAR_FOO_H + # define BAR_FOO_H # define BAR_FOO_H + + /* Enabling traces. */ /* Enabling traces. */ + # ifndef YYDEBUG | # ifndef BAR_DEBUG + > # if defined YYDEBUG + > # if YYDEBUG + > # define BAR_DEBUG 1 + > # else + > # define BAR_DEBUG 0 + > # endif + > # else + # define YYDEBUG 0 | # define BAR_DEBUG 0 + > # endif + # endif | # endif + + # if YYDEBUG | # if BAR_DEBUG + extern int bar_debug; extern int bar_debug; + # endif # endif + + /* Tokens. */ /* Tokens. */ + # ifndef YYTOKENTYPE | # ifndef BAR_TOKENTYPE + # define YYTOKENTYPE | # define BAR_TOKENTYPE + enum yytokentype { | enum bar_tokentype { + FOO = 258 FOO = 258 + }; }; + # endif # endif + + #if ! defined YYSTYPE \ | #if ! defined BAR_STYPE \ + && ! defined YYSTYPE_IS_DECLARED | && ! 
defined BAR_STYPE_IS_DECLARED + typedef union YYSTYPE | typedef union BAR_STYPE + { { + int ival; int ival; + } YYSTYPE; | } BAR_STYPE; + # define YYSTYPE_IS_DECLARED 1 | # define BAR_STYPE_IS_DECLARED 1 + #endif #endif + + extern YYSTYPE bar_lval; | extern BAR_STYPE bar_lval; + + int bar_parse (void); int bar_parse (void); + + #endif /* !BAR_FOO_H */ #endif /* !BAR_FOO_H */ + + +* Noteworthy changes in release 2.5.1 (2012-06-05) [stable] + +** Future changes: + + The next major release will drop support for generating parsers in K&R C. + +** yacc.c: YYBACKUP works as expected. + +** glr.c improvements: + +*** Location support is eliminated when not requested: + + GLR parsers used to include location-related code even when locations were + not requested, and therefore not even usable. + +*** __attribute__ is preserved: + + __attribute__ is no longer disabled when __STRICT_ANSI__ is defined (i.e., + when -std is passed to GCC). + +** lalr1.java: several fixes: + + The Java parser no longer throws ArrayIndexOutOfBoundsException if the + first token leads to a syntax error. Some minor clean ups. + +** Changes for C++: + +*** C++11 compatibility: + + C and C++ parsers use "nullptr" instead of "0" when __cplusplus is 201103L + or higher. + +*** Header guards + + The header files such as "parser.hh", "location.hh", etc. used a constant + name for preprocessor guards, for instance: + + #ifndef BISON_LOCATION_HH + # define BISON_LOCATION_HH + ... + #endif // !BISON_LOCATION_HH + + The inclusion guard is now computed from "PREFIX/FILE-NAME", where lower + case characters are converted to upper case, and series of + non-alphanumerical characters are converted to an underscore. + + With "bison -o lang++/parser.cc", "location.hh" would now include: + + #ifndef YY_LANG_LOCATION_HH + # define YY_LANG_LOCATION_HH + ... 
+ #endif // !YY_LANG_LOCATION_HH + +*** C++ locations: + + The position and location constructors (and their initialize methods) + accept new arguments for line and column. Several issues in the + documentation were fixed. + +** liby is no longer asking for "rpl_fprintf" on some platforms. + +** Changes in the manual: + +*** %printer is documented + + The "%printer" directive, supported since at least Bison 1.50, is finally + documented. The "mfcalc" example is extended to demonstrate it. + + For consistency with the C skeletons, the C++ parsers now also support + "yyoutput" (as an alias to "debug_stream ()"). + +*** Several improvements have been made: + + The layout for grammar excerpts was changed to a more compact scheme. + Named references are motivated. The description of the automaton + description file (*.output) is updated to the current format. Incorrect + index entries were fixed. Some other errors were fixed. + +** Building bison: + +*** Conflicting prototypes with recent/modified Flex. + + Fixed build problems with the current, unreleased, version of Flex, and + some modified versions of 2.5.35, which have modified function prototypes. + +*** Warnings during the build procedure have been eliminated. + +*** Several portability problems in the test suite have been fixed: + + This includes warnings with some compilers, unexpected behavior of tools + such as diff, warning messages from the test suite itself, etc. + +*** The install-pdf target works properly: + + Running "make install-pdf" (or -dvi, -html, -info, and -ps) no longer + halts in the middle of its course. + + +* Noteworthy changes in release 2.5 (2011-05-14) + +** Grammar symbol names can now contain non-initial dashes: + + Consistently with directives (such as %error-verbose) and with + %define variables (e.g. push-pull), grammar symbol names may contain + dashes in any position except the beginning. This is a GNU + extension over POSIX Yacc. 
Thus, use of this extension is reported
+  by -Wyacc and rejected in Yacc mode (--yacc).
+
+** Named references:
+
+  Historically, Yacc and Bison have supported positional references
+  ($n, $$) to allow access to symbol values from inside of semantic
+  actions code.
+
+  Starting from this version, Bison can also accept named references.
+  When no ambiguity is possible, original symbol names may be used
+  as named references:
+
+    if_stmt : "if" cond_expr "then" then_stmt ';'
+       { $if_stmt = mk_if_stmt($cond_expr, $then_stmt); }
+
+  In the more common case, explicit names may be declared:
+
+    stmt[res] : "if" expr[cond] "then" stmt[then] "else" stmt[else] ';'
+       { $res = mk_if_stmt($cond, $then, $else); }
+
+  Location information is also accessible using @name syntax.  When
+  accessing symbol names containing dots or dashes, explicit bracketing
+  ($[sym.1]) must be used.
+
+  These features are experimental in this version.  More user feedback
+  will help to stabilize them.
+  Contributed by Alex Rozenman.
+
+** IELR(1) and canonical LR(1):
+
+  IELR(1) is a minimal LR(1) parser table generation algorithm.  That
+  is, given any context-free grammar, IELR(1) generates parser tables
+  with the full language-recognition power of canonical LR(1) but with
+  nearly the same number of parser states as LALR(1).  This reduction
+  in parser states is often an order of magnitude.  More importantly,
+  because canonical LR(1)'s extra parser states may contain duplicate
+  conflicts in the case of non-LR(1) grammars, the number of conflicts
+  for IELR(1) is often an order of magnitude less as well.  This can
+  significantly reduce the complexity of developing a grammar.
+
+  Bison can now generate IELR(1) and canonical LR(1) parser tables in
+  place of its traditional LALR(1) parser tables, which remain the
+  default. 
You can specify the type of parser tables in the grammar + file with these directives: + + %define lr.type lalr + %define lr.type ielr + %define lr.type canonical-lr + + The default-reduction optimization in the parser tables can also be + adjusted using "%define lr.default-reductions". For details on both + of these features, see the new section "Tuning LR" in the Bison + manual. + + These features are experimental. More user feedback will help to + stabilize them. + +** LAC (Lookahead Correction) for syntax error handling + + Contributed by Joel E. Denny. + + Canonical LR, IELR, and LALR can suffer from a couple of problems + upon encountering a syntax error. First, the parser might perform + additional parser stack reductions before discovering the syntax + error. Such reductions can perform user semantic actions that are + unexpected because they are based on an invalid token, and they + cause error recovery to begin in a different syntactic context than + the one in which the invalid token was encountered. Second, when + verbose error messages are enabled (with %error-verbose or the + obsolete "#define YYERROR_VERBOSE"), the expected token list in the + syntax error message can both contain invalid tokens and omit valid + tokens. + + The culprits for the above problems are %nonassoc, default + reductions in inconsistent states, and parser state merging. Thus, + IELR and LALR suffer the most. Canonical LR can suffer only if + %nonassoc is used or if default reductions are enabled for + inconsistent states. + + LAC is a new mechanism within the parsing algorithm that solves + these problems for canonical LR, IELR, and LALR without sacrificing + %nonassoc, default reductions, or state merging. When LAC is in + use, canonical LR and IELR behave almost exactly the same for both + syntactically acceptable and syntactically unacceptable input. 
+ While LALR still does not support the full language-recognition + power of canonical LR and IELR, LAC at least enables LALR's syntax + error handling to correctly reflect LALR's language-recognition + power. + + Currently, LAC is only supported for deterministic parsers in C. + You can enable LAC with the following directive: + + %define parse.lac full + + See the new section "LAC" in the Bison manual for additional + details including a few caveats. + + LAC is an experimental feature. More user feedback will help to + stabilize it. + +** %define improvements: + +*** Can now be invoked via the command line: + + Each of these command-line options + + -D NAME[=VALUE] + --define=NAME[=VALUE] + + -F NAME[=VALUE] + --force-define=NAME[=VALUE] + + is equivalent to this grammar file declaration + + %define NAME ["VALUE"] + + except that the manner in which Bison processes multiple definitions + for the same NAME differs. Most importantly, -F and --force-define + quietly override %define, but -D and --define do not. For further + details, see the section "Bison Options" in the Bison manual. + +*** Variables renamed: + + The following %define variables + + api.push_pull + lr.keep_unreachable_states + + have been renamed to + + api.push-pull + lr.keep-unreachable-states + + The old names are now deprecated but will be maintained indefinitely + for backward compatibility. + +*** Values no longer need to be quoted in the grammar file: + + If a %define value is an identifier, it no longer needs to be placed + within quotations marks. For example, + + %define api.push-pull "push" + + can be rewritten as + + %define api.push-pull push + +*** Unrecognized variables are now errors not warnings. + +*** Multiple invocations for any variable is now an error not a warning. + +** Unrecognized %code qualifiers are now errors not warnings. + +** Character literals not of length one: + + Previously, Bison quietly converted all character literals to length + one. 
For example, without warning, Bison interpreted the operators in + the following grammar to be the same token: + + exp: exp '++' + | exp '+' exp + ; + + Bison now warns when a character literal is not of length one. In + some future release, Bison will start reporting an error instead. + +** Destructor calls fixed for lookaheads altered in semantic actions: + + Previously for deterministic parsers in C, if a user semantic action + altered yychar, the parser in some cases used the old yychar value to + determine which destructor to call for the lookahead upon a syntax + error or upon parser return. This bug has been fixed. + +** C++ parsers use YYRHSLOC: + + Similarly to the C parsers, the C++ parsers now define the YYRHSLOC + macro and use it in the default YYLLOC_DEFAULT. You are encouraged + to use it. If, for instance, your location structure has "first" + and "last" members, instead of + + # define YYLLOC_DEFAULT(Current, Rhs, N) \ + do \ + if (N) \ + { \ + (Current).first = (Rhs)[1].location.first; \ + (Current).last = (Rhs)[N].location.last; \ + } \ + else \ + { \ + (Current).first = (Current).last = (Rhs)[0].location.last; \ + } \ + while (false) + + use: + + # define YYLLOC_DEFAULT(Current, Rhs, N) \ + do \ + if (N) \ + { \ + (Current).first = YYRHSLOC (Rhs, 1).first; \ + (Current).last = YYRHSLOC (Rhs, N).last; \ + } \ + else \ + { \ + (Current).first = (Current).last = YYRHSLOC (Rhs, 0).last; \ + } \ + while (false) + +** YYLLOC_DEFAULT in C++: + + The default implementation of YYLLOC_DEFAULT used to be issued in + the header file. It is now output in the implementation file, after + the user %code sections so that its #ifndef guard does not try to + override the user's YYLLOC_DEFAULT if provided. + +** YYFAIL now produces warnings and Java parsers no longer implement it: + + YYFAIL has existed for many years as an undocumented feature of + deterministic parsers in C generated by Bison. 
More recently, it was + a documented feature of Bison's experimental Java parsers. As + promised in Bison 2.4.2's NEWS entry, any appearance of YYFAIL in a + semantic action now produces a deprecation warning, and Java parsers + no longer implement YYFAIL at all. For further details, including a + discussion of how to suppress C preprocessor warnings about YYFAIL + being unused, see the Bison 2.4.2 NEWS entry. + +** Temporary hack for adding a semicolon to the user action: + + Previously, Bison appended a semicolon to every user action for + reductions when the output language defaulted to C (specifically, when + neither %yacc, %language, %skeleton, or equivalent command-line + options were specified). This allowed actions such as + + exp: exp "+" exp { $$ = $1 + $3 }; + + instead of + + exp: exp "+" exp { $$ = $1 + $3; }; + + As a first step in removing this misfeature, Bison now issues a + warning when it appends a semicolon. Moreover, in cases where Bison + cannot easily determine whether a semicolon is needed (for example, an + action ending with a cpp directive or a braced compound initializer), + it no longer appends one. Thus, the C compiler might now complain + about a missing semicolon where it did not before. Future releases of + Bison will cease to append semicolons entirely. + +** Verbose syntax error message fixes: + + When %error-verbose or the obsolete "#define YYERROR_VERBOSE" is + specified, syntax error messages produced by the generated parser + include the unexpected token as well as a list of expected tokens. + The effect of %nonassoc on these verbose messages has been corrected + in two ways, but a more complete fix requires LAC, described above: + +*** When %nonassoc is used, there can exist parser states that accept no + tokens, and so the parser does not always require a lookahead token + in order to detect a syntax error. 
Because no unexpected token or + expected tokens can then be reported, the verbose syntax error + message described above is suppressed, and the parser instead + reports the simpler message, "syntax error". Previously, this + suppression was sometimes erroneously triggered by %nonassoc when a + lookahead was actually required. Now verbose messages are + suppressed only when all previous lookaheads have already been + shifted or discarded. + +*** Previously, the list of expected tokens erroneously included tokens + that would actually induce a syntax error because conflicts for them + were resolved with %nonassoc in the current parser state. Such + tokens are now properly omitted from the list. + +*** Expected token lists are still often wrong due to state merging + (from LALR or IELR) and default reductions, which can both add + invalid tokens and subtract valid tokens. Canonical LR almost + completely fixes this problem by eliminating state merging and + default reductions. However, there is one minor problem left even + when using canonical LR and even after the fixes above. That is, + if the resolution of a conflict with %nonassoc appears in a later + parser state than the one at which some syntax error is + discovered, the conflicted token is still erroneously included in + the expected token list. Bison's new LAC implementation, + described above, eliminates this problem and the need for + canonical LR. However, LAC is still experimental and is disabled + by default. + +** Java skeleton fixes: + +*** A location handling bug has been fixed. + +*** The top element of each of the value stack and location stack is now + cleared when popped so that it can be garbage collected. + +*** Parser traces now print the top element of the stack. 
+ +** -W/--warnings fixes: + +*** Bison now properly recognizes the "no-" versions of categories: + + For example, given the following command line, Bison now enables all + warnings except warnings for incompatibilities with POSIX Yacc: + + bison -Wall,no-yacc gram.y + +*** Bison now treats S/R and R/R conflicts like other warnings: + + Previously, conflict reports were independent of Bison's normal + warning system. Now, Bison recognizes the warning categories + "conflicts-sr" and "conflicts-rr". This change has important + consequences for the -W and --warnings command-line options. For + example: + + bison -Wno-conflicts-sr gram.y # S/R conflicts not reported + bison -Wno-conflicts-rr gram.y # R/R conflicts not reported + bison -Wnone gram.y # no conflicts are reported + bison -Werror gram.y # any conflict is an error + + However, as before, if the %expect or %expect-rr directive is + specified, an unexpected number of conflicts is an error, and an + expected number of conflicts is not reported, so -W and --warning + then have no effect on the conflict report. + +*** The "none" category no longer disables a preceding "error": + + For example, for the following command line, Bison now reports + errors instead of warnings for incompatibilities with POSIX Yacc: + + bison -Werror,none,yacc gram.y + +*** The "none" category now disables all Bison warnings: + + Previously, the "none" category disabled only Bison warnings for + which there existed a specific -W/--warning category. However, + given the following command line, Bison is now guaranteed to + suppress all warnings: + + bison -Wnone gram.y + +** Precedence directives can now assign token number 0: + + Since Bison 2.3b, which restored the ability of precedence + directives to assign token numbers, doing so for token number 0 has + produced an assertion failure. For example: + + %left END 0 + + This bug has been fixed. 
+ + +* Noteworthy changes in release 2.4.3 (2010-08-05) + +** Bison now obeys -Werror and --warnings=error for warnings about + grammar rules that are useless in the parser due to conflicts. + +** Problems with spawning M4 on at least FreeBSD 8 and FreeBSD 9 have + been fixed. + +** Failures in the test suite for GCC 4.5 have been fixed. + +** Failures in the test suite for some versions of Sun Studio C++ have + been fixed. + +** Contrary to Bison 2.4.2's NEWS entry, it has been decided that + warnings about undefined %prec identifiers will not be converted to + errors in Bison 2.5. They will remain warnings, which should be + sufficient for POSIX while avoiding backward compatibility issues. + +** Minor documentation fixes. + + +* Noteworthy changes in release 2.4.2 (2010-03-20) + +** Some portability problems that resulted in failures and livelocks + in the test suite on some versions of at least Solaris, AIX, HP-UX, + RHEL4, and Tru64 have been addressed. As a result, fatal Bison + errors should no longer cause M4 to report a broken pipe on the + affected platforms. + +** "%prec IDENTIFIER" requires IDENTIFIER to be defined separately. + + POSIX specifies that an error be reported for any identifier that does + not appear on the LHS of a grammar rule and that is not defined by + %token, %left, %right, or %nonassoc. Bison 2.3b and later lost this + error report for the case when an identifier appears only after a + %prec directive. It is now restored. However, for backward + compatibility with recent Bison releases, it is only a warning for + now. In Bison 2.5 and later, it will return to being an error. + [Between the 2.4.2 and 2.4.3 releases, it was decided that this + warning will not be converted to an error in Bison 2.5.] + +** Detection of GNU M4 1.4.6 or newer during configure is improved. + +** Warnings from gcc's -Wundef option about undefined YYENABLE_NLS, + YYLTYPE_IS_TRIVIAL, and __STRICT_ANSI__ in C/C++ parsers are now + avoided. 
+ +** %code is now a permanent feature. + + A traditional Yacc prologue directive is written in the form: + + %{CODE%} + + To provide a more flexible alternative, Bison 2.3b introduced the + %code directive with the following forms for C/C++: + + %code {CODE} + %code requires {CODE} + %code provides {CODE} + %code top {CODE} + + These forms are now considered permanent features of Bison. See the + %code entries in the section "Bison Declaration Summary" in the Bison + manual for a summary of their functionality. See the section + "Prologue Alternatives" for a detailed discussion including the + advantages of %code over the traditional Yacc prologue directive. + + Bison's Java feature as a whole including its current usage of %code + is still considered experimental. + +** YYFAIL is deprecated and will eventually be removed. + + YYFAIL has existed for many years as an undocumented feature of + deterministic parsers in C generated by Bison. Previously, it was + documented for Bison's experimental Java parsers. YYFAIL is no longer + documented for Java parsers and is formally deprecated in both cases. + Users are strongly encouraged to migrate to YYERROR, which is + specified by POSIX. + + Like YYERROR, you can invoke YYFAIL from a semantic action in order to + induce a syntax error. The most obvious difference from YYERROR is + that YYFAIL will automatically invoke yyerror to report the syntax + error so that you don't have to. However, there are several other + subtle differences between YYERROR and YYFAIL, and YYFAIL suffers from + inherent flaws when %error-verbose or "#define YYERROR_VERBOSE" is + used. For a more detailed discussion, see: + + https://lists.gnu.org/r/bison-patches/2009-12/msg00024.html + + The upcoming Bison 2.5 will remove YYFAIL from Java parsers, but + deterministic parsers in C will continue to implement it. However, + because YYFAIL is already flawed, it seems futile to try to make new + Bison features compatible with it. 
Thus, during parser generation, + Bison 2.5 will produce a warning whenever it discovers YYFAIL in a + rule action. In a later release, YYFAIL will be disabled for + %error-verbose and "#define YYERROR_VERBOSE". Eventually, YYFAIL will + be removed altogether. + + There exists at least one case where Bison 2.5's YYFAIL warning will + be a false positive. Some projects add phony uses of YYFAIL and other + Bison-defined macros for the sole purpose of suppressing C + preprocessor warnings (from GCC cpp's -Wunused-macros, for example). + To avoid Bison's future warning, such YYFAIL uses can be moved to the + epilogue (that is, after the second "%%") in the Bison input file. In + this release (2.4.2), Bison already generates its own code to suppress + C preprocessor warnings for YYFAIL, so projects can remove their own + phony uses of YYFAIL if compatibility with Bison releases prior to + 2.4.2 is not necessary. + +** Internationalization. + + Fix a regression introduced in Bison 2.4: Under some circumstances, + message translations were not installed although supported by the + host system. + + +* Noteworthy changes in release 2.4.1 (2008-12-11) + +** In the GLR defines file, unexpanded M4 macros in the yylval and yylloc + declarations have been fixed. + +** Temporary hack for adding a semicolon to the user action. + + Bison used to prepend a trailing semicolon at the end of the user + action for reductions. This allowed actions such as + + exp: exp "+" exp { $$ = $1 + $3 }; + + instead of + + exp: exp "+" exp { $$ = $1 + $3; }; + + Some grammars still depend on this "feature". Bison 2.4.1 restores + the previous behavior in the case of C output (specifically, when + neither %language or %skeleton or equivalent command-line options + are used) to leave more time for grammars depending on the old + behavior to be adjusted. Future releases of Bison will disable this + feature. + +** A few minor improvements to the Bison manual. 
+ + +* Noteworthy changes in release 2.4 (2008-11-02) + +** %language is an experimental feature. + + We first introduced this feature in test release 2.3b as a cleaner + alternative to %skeleton. Since then, we have discussed the possibility of + modifying its effect on Bison's output file names. Thus, in this release, + we consider %language to be an experimental feature that will likely evolve + in future releases. + +** Forward compatibility with GNU M4 has been improved. + +** Several bugs in the C++ skeleton and the experimental Java skeleton have been + fixed. + + +* Noteworthy changes in release 2.3b (2008-05-27) + +** The quotes around NAME that used to be required in the following directive + are now deprecated: + + %define NAME "VALUE" + +** The directive "%pure-parser" is now deprecated in favor of: + + %define api.pure + + which has the same effect except that Bison is more careful to warn about + unreasonable usage in the latter case. + +** Push Parsing + + Bison can now generate an LALR(1) parser in C with a push interface. That + is, instead of invoking "yyparse", which pulls tokens from "yylex", you can + push one token at a time to the parser using "yypush_parse", which will + return to the caller after processing each token. By default, the push + interface is disabled. Either of the following directives will enable it: + + %define api.push_pull "push" // Just push; does not require yylex. + %define api.push_pull "both" // Push and pull; requires yylex. + + See the new section "A Push Parser" in the Bison manual for details. + + The current push parsing interface is experimental and may evolve. More user + feedback will help to stabilize it. + +** The -g and --graph options now output graphs in Graphviz DOT format, + not VCG format. Like --graph, -g now also takes an optional FILE argument + and thus cannot be bundled with other short options. + +** Java + + Bison can now generate an LALR(1) parser in Java. The skeleton is + "data/lalr1.java". 
Consider using the new %language directive instead of + %skeleton to select it. + + See the new section "Java Parsers" in the Bison manual for details. + + The current Java interface is experimental and may evolve. More user + feedback will help to stabilize it. + Contributed by Paolo Bonzini. + +** %language + + This new directive specifies the programming language of the generated + parser, which can be C (the default), C++, or Java. Besides the skeleton + that Bison uses, the directive affects the names of the generated files if + the grammar file's name ends in ".y". + +** XML Automaton Report + + Bison can now generate an XML report of the LALR(1) automaton using the new + "--xml" option. The current XML schema is experimental and may evolve. More + user feedback will help to stabilize it. + Contributed by Wojciech Polak. + +** The grammar file may now specify the name of the parser header file using + %defines. For example: + + %defines "parser.h" + +** When reporting useless rules, useless nonterminals, and unused terminals, + Bison now employs the terms "useless in grammar" instead of "useless", + "useless in parser" instead of "never reduced", and "unused in grammar" + instead of "unused". + +** Unreachable State Removal + + Previously, Bison sometimes generated parser tables containing unreachable + states. A state can become unreachable during conflict resolution if Bison + disables a shift action leading to it from a predecessor state. Bison now: + + 1. Removes unreachable states. + + 2. Does not report any conflicts that appeared in unreachable states. + WARNING: As a result, you may need to update %expect and %expect-rr + directives in existing grammar files. + + 3. For any rule used only in such states, Bison now reports the rule as + "useless in parser due to conflicts". 
+ + This feature can be disabled with the following directive: + + %define lr.keep_unreachable_states + + See the %define entry in the "Bison Declaration Summary" in the Bison manual + for further discussion. + +** Lookahead Set Correction in the ".output" Report + + When instructed to generate a ".output" file including lookahead sets + (using "--report=lookahead", for example), Bison now prints each reduction's + lookahead set only next to the associated state's one item that (1) is + associated with the same rule as the reduction and (2) has its dot at the end + of its RHS. Previously, Bison also erroneously printed the lookahead set + next to all of the state's other items associated with the same rule. This + bug affected only the ".output" file and not the generated parser source + code. + +** --report-file=FILE is a new option to override the default ".output" file + name. + +** The "=" that used to be required in the following directives is now + deprecated: + + %file-prefix "parser" + %name-prefix "c_" + %output "parser.c" + +** An Alternative to "%{...%}" -- "%code QUALIFIER {CODE}" + + Bison 2.3a provided a new set of directives as a more flexible alternative to + the traditional Yacc prologue blocks. Those have now been consolidated into + a single %code directive with an optional qualifier field, which identifies + the purpose of the code and thus the location(s) where Bison should generate + it: + + 1. "%code {CODE}" replaces "%after-header {CODE}" + 2. "%code requires {CODE}" replaces "%start-header {CODE}" + 3. "%code provides {CODE}" replaces "%end-header {CODE}" + 4. "%code top {CODE}" replaces "%before-header {CODE}" + + See the %code entries in section "Bison Declaration Summary" in the Bison + manual for a summary of the new functionality. See the new section "Prologue + Alternatives" for a detailed discussion including the advantages of %code + over the traditional Yacc prologues. + + The prologue alternatives are experimental. 
More user feedback will help to + determine whether they should become permanent features. + +** Revised warning: unset or unused midrule values + + Since Bison 2.2, Bison has warned about midrule values that are set but not + used within any of the actions of the parent rule. For example, Bison warns + about unused $2 in: + + exp: '1' { $$ = 1; } '+' exp { $$ = $1 + $4; }; + + Now, Bison also warns about midrule values that are used but not set. For + example, Bison warns about unset $$ in the midrule action in: + + exp: '1' { $1 = 1; } '+' exp { $$ = $2 + $4; }; + + However, Bison now disables both of these warnings by default since they + sometimes prove to be false alarms in existing grammars employing the Yacc + constructs $0 or $-N (where N is some positive integer). + + To enable these warnings, specify the option "--warnings=midrule-values" or + "-W", which is a synonym for "--warnings=all". + +** Default %destructor or %printer with "<*>" or "<>" + + Bison now recognizes two separate kinds of default %destructor's and + %printer's: + + 1. Place "<*>" in a %destructor/%printer symbol list to define a default + %destructor/%printer for all grammar symbols for which you have formally + declared semantic type tags. + + 2. Place "<>" in a %destructor/%printer symbol list to define a default + %destructor/%printer for all grammar symbols without declared semantic + type tags. + + Bison no longer supports the "%symbol-default" notation from Bison 2.3a. + "<*>" and "<>" combined achieve the same effect with one exception: Bison no + longer applies any %destructor to a midrule value if that midrule value is + not actually ever referenced using either $$ or $n in a semantic action. + + The default %destructor's and %printer's are experimental. More user + feedback will help to determine whether they should become permanent + features. + + See the section "Freeing Discarded Symbols" in the Bison manual for further + details. 
+
+** %left, %right, and %nonassoc can now declare token numbers.  This is required
+   by POSIX.  However, see the end of section "Operator Precedence" in the Bison
+   manual for a caveat concerning the treatment of literal strings.
+
+** The nonfunctional --no-parser, -n, and %no-parser options have been
+   completely removed from Bison.
+
+
+* Noteworthy changes in release 2.3a (2006-09-13)
+
+** Instead of %union, you can define and use your own union type
+   YYSTYPE if your grammar contains at least one <type> tag.
+   Your YYSTYPE need not be a macro; it can be a typedef.
+   This change is for compatibility with other Yacc implementations,
+   and is required by POSIX.
+
+** Locations columns and lines start at 1.
+   In accordance with the GNU Coding Standards and Emacs.
+
+** You may now declare per-type and default %destructor's and %printer's:
+
+   For example:
+
+     %union { char *string; }
+     %token <string> STRING1
+     %token <string> STRING2
+     %type <string> string1
+     %type <string> string2
+     %union { char character; }
+     %token <character> CHR
+     %type <character> chr
+     %destructor { free ($$); } %symbol-default
+     %destructor { free ($$); printf ("%d", @$.first_line); } STRING1 string1
+     %destructor { } <character>
+
+   guarantees that, when the parser discards any user-defined symbol that has a
+   semantic type tag other than "<character>", it passes its semantic value to
+   "free".  However, when the parser discards a "STRING1" or a "string1", it
+   also prints its line number to "stdout".  It performs only the second
+   "%destructor" in this case, so it invokes "free" only once.
+
+   [Although we failed to mention this here in the 2.3a release, the default
+   %destructor's and %printer's were experimental, and they were rewritten in
+   future versions.]
+
+** Except for LALR(1) parsers in C with POSIX Yacc emulation enabled (with "-y",
+   "--yacc", or "%yacc"), Bison no longer generates #define statements for
+   associating token numbers with token names. 
Removing the #define statements + helps to sanitize the global namespace during preprocessing, but POSIX Yacc + requires them. Bison still generates an enum for token names in all cases. + +** Handling of traditional Yacc prologue blocks is now more consistent but + potentially incompatible with previous releases of Bison. + + As before, you declare prologue blocks in your grammar file with the + "%{ ... %}" syntax. To generate the pre-prologue, Bison concatenates all + prologue blocks that you've declared before the first %union. To generate + the post-prologue, Bison concatenates all prologue blocks that you've + declared after the first %union. + + Previous releases of Bison inserted the pre-prologue into both the header + file and the code file in all cases except for LALR(1) parsers in C. In the + latter case, Bison inserted it only into the code file. For parsers in C++, + the point of insertion was before any token definitions (which associate + token numbers with names). For parsers in C, the point of insertion was + after the token definitions. + + Now, Bison never inserts the pre-prologue into the header file. In the code + file, it always inserts it before the token definitions. + +** Bison now provides a more flexible alternative to the traditional Yacc + prologue blocks: %before-header, %start-header, %end-header, and + %after-header. + + For example, the following declaration order in the grammar file reflects the + order in which Bison will output these code blocks. However, you are free to + declare these code blocks in your grammar file in whatever order is most + convenient for you: + + %before-header { + /* Bison treats this block like a pre-prologue block: it inserts it into + * the code file before the contents of the header file. It does *not* + * insert it into the header file. This is a good place to put + * #include's that you want at the top of your code file. A common + * example is '#include "system.h"'. 
*/ + } + %start-header { + /* Bison inserts this block into both the header file and the code file. + * In both files, the point of insertion is before any Bison-generated + * token, semantic type, location type, and class definitions. This is a + * good place to define %union dependencies, for example. */ + } + %union { + /* Unlike the traditional Yacc prologue blocks, the output order for the + * new %*-header blocks is not affected by their declaration position + * relative to any %union in the grammar file. */ + } + %end-header { + /* Bison inserts this block into both the header file and the code file. + * In both files, the point of insertion is after the Bison-generated + * definitions. This is a good place to declare or define public + * functions or data structures that depend on the Bison-generated + * definitions. */ + } + %after-header { + /* Bison treats this block like a post-prologue block: it inserts it into + * the code file after the contents of the header file. It does *not* + * insert it into the header file. This is a good place to declare or + * define internal functions or data structures that depend on the + * Bison-generated definitions. */ + } + + If you have multiple occurrences of any one of the above declarations, Bison + will concatenate the contents in declaration order. + + [Although we failed to mention this here in the 2.3a release, the prologue + alternatives were experimental, and they were rewritten in future versions.] + +** The option "--report=look-ahead" has been changed to "--report=lookahead". + The old spelling still works, but is not documented and may be removed + in a future release. + + +* Noteworthy changes in release 2.3 (2006-06-05) + +** GLR grammars should now use "YYRECOVERING ()" instead of "YYRECOVERING", + for compatibility with LALR(1) grammars. + +** It is now documented that any definition of YYSTYPE or YYLTYPE should + be to a type name that does not contain parentheses or brackets. 
+ + +* Noteworthy changes in release 2.2 (2006-05-19) + +** The distribution terms for all Bison-generated parsers now permit + using the parsers in nonfree programs. Previously, this permission + was granted only for Bison-generated LALR(1) parsers in C. + +** %name-prefix changes the namespace name in C++ outputs. + +** The C++ parsers export their token_type. + +** Bison now allows multiple %union declarations, and concatenates + their contents together. + +** New warning: unused values + Right-hand side symbols whose values are not used are reported, + if the symbols have destructors. For instance: + + exp: exp "?" exp ":" exp { $1 ? $1 : $3; } + | exp "+" exp + ; + + will trigger a warning about $$ and $5 in the first rule, and $3 in + the second ($1 is copied to $$ by the default rule). This example + most likely contains three errors, and could be rewritten as: + + exp: exp "?" exp ":" exp + { $$ = $1 ? $3 : $5; free ($1 ? $5 : $3); free ($1); } + | exp "+" exp + { $$ = $1 ? $1 : $3; if ($1) free ($3); } + ; + + However, if the original actions were really intended, memory leaks + and all, the warnings can be suppressed by letting Bison believe the + values are used, e.g.: + + exp: exp "?" exp ":" exp { $1 ? $1 : $3; (void) ($$, $5); } + | exp "+" exp { $$ = $1; (void) $3; } + ; + + If there are midrule actions, the warning is issued if no action + uses it. The following triggers no warning: $1 and $3 are used. + + exp: exp { push ($1); } '+' exp { push ($3); sum (); }; + + The warning is intended to help catching lost values and memory leaks. + If a value is ignored, its associated memory typically is not reclaimed. + +** %destructor vs. YYABORT, YYACCEPT, and YYERROR. + Destructors are now called when user code invokes YYABORT, YYACCEPT, + and YYERROR, for all objects on the stack, other than objects + corresponding to the right-hand side of the current rule. 
+ +** %expect, %expect-rr + Incorrect numbers of expected conflicts are now actual errors, + instead of warnings. + +** GLR, YACC parsers. + The %parse-params are available in the destructors (and the + experimental printers) as per the documentation. + +** Bison now warns if it finds a stray "$" or "@" in an action. + +** %require "VERSION" + This specifies that the grammar file depends on features implemented + in Bison version VERSION or higher. + +** lalr1.cc: The token and value types are now class members. + The tokens were defined as free form enums and cpp macros. YYSTYPE + was defined as a free form union. They are now class members: + tokens are enumerations of the "yy::parser::token" struct, and the + semantic values have the "yy::parser::semantic_type" type. + + If you do not want or can update to this scheme, the directive + '%define "global_tokens_and_yystype" "1"' triggers the global + definition of tokens and YYSTYPE. This change is suitable both + for previous releases of Bison, and this one. + + If you wish to update, then make sure older version of Bison will + fail using '%require "2.2"'. + +** DJGPP support added. + + +* Noteworthy changes in release 2.1 (2005-09-16) + +** The C++ lalr1.cc skeleton supports %lex-param. + +** Bison-generated parsers now support the translation of diagnostics like + "syntax error" into languages other than English. The default + language is still English. For details, please see the new + Internationalization section of the Bison manual. Software + distributors should also see the new PACKAGING file. Thanks to + Bruno Haible for this new feature. + +** Wording in the Bison-generated parsers has been changed slightly to + simplify translation. In particular, the message "memory exhausted" + has replaced "parser stack overflow", as the old message was not + always accurate for modern Bison-generated parsers. + +** Destructors are now called when the parser aborts, for all symbols left + behind on the stack. 
Also, the start symbol is now destroyed after a
+   successful parse.  In both cases, the behavior was formerly inconsistent.
+
+** When generating verbose diagnostics, Bison-generated parsers no longer
+   quote the literal strings associated with tokens.  For example, for
+   a syntax error associated with '%token NUM "number"' they might
+   print 'syntax error, unexpected number' instead of 'syntax error,
+   unexpected "number"'.
+
+
+* Noteworthy changes in release 2.0 (2004-12-25)
+
+** Possibly-incompatible changes
+
+  - Bison-generated parsers no longer default to using the alloca function
+    (when available) to extend the parser stack, due to widespread
+    problems in unchecked stack-overflow detection.  You can "#define
+    YYSTACK_USE_ALLOCA 1" to require the use of alloca, but please read
+    the manual to determine safe values for YYMAXDEPTH in that case.
+
+  - Error token location.
+    During error recovery, the location of the syntax error is updated
+    to cover the whole sequence covered by the error token: it includes
+    the shifted symbols thrown away during the first part of the error
+    recovery, and the lookahead rejected during the second part.
+
+  - Semicolon changes:
+    . Stray semicolons are no longer allowed at the start of a grammar.
+    . Semicolons are now required after in-grammar declarations.
+
+  - Unescaped newlines are no longer allowed in character constants or
+    string literals.  They were never portable, and GCC 3.4.0 has
+    dropped support for them.  Better diagnostics are now generated if
+    you forget a closing quote.
+
+  - NUL bytes are no longer allowed in Bison string literals, unfortunately.
+
+** New features
+
+  - GLR grammars now support locations.
+
+  - New directive: %initial-action.
+    This directive allows the user to run arbitrary code (including
+    initializing @$) from yyparse before parsing starts.
+
+  - A new directive "%expect-rr N" specifies the expected number of
+    reduce/reduce conflicts in GLR parsers. 
+ + - %token numbers can now be hexadecimal integers, e.g., "%token FOO 0x12d". + This is a GNU extension. + + - The option "--report=lookahead" was changed to "--report=look-ahead". + [However, this was changed back after 2.3.] + + - Experimental %destructor support has been added to lalr1.cc. + + - New configure option --disable-yacc, to disable installation of the + yacc command and -ly library introduced in 1.875 for POSIX conformance. + +** Bug fixes + + - For now, %expect-count violations are now just warnings, not errors. + This is for compatibility with Bison 1.75 and earlier (when there are + reduce/reduce conflicts) and with Bison 1.30 and earlier (when there + are too many or too few shift/reduce conflicts). However, in future + versions of Bison we plan to improve the %expect machinery so that + these violations will become errors again. + + - Within Bison itself, numbers (e.g., goto numbers) are no longer + arbitrarily limited to 16-bit counts. + + - Semicolons are now allowed before "|" in grammar rules, as POSIX requires. + + +* Noteworthy changes in release 1.875 (2003-01-01) + +** The documentation license has been upgraded to version 1.2 + of the GNU Free Documentation License. + +** syntax error processing + + - In Yacc-style parsers YYLLOC_DEFAULT is now used to compute error + locations too. This fixes bugs in error-location computation. + + - %destructor + It is now possible to reclaim the memory associated to symbols + discarded during error recovery. This feature is still experimental. + + - %error-verbose + This new directive is preferred over YYERROR_VERBOSE. + + - #defining yyerror to steal internal variables is discouraged. + It is not guaranteed to work forever. + +** POSIX conformance + + - Semicolons are once again optional at the end of grammar rules. + This reverts to the behavior of Bison 1.33 and earlier, and improves + compatibility with Yacc. 
+ + - "parse error" -> "syntax error" + Bison now uniformly uses the term "syntax error"; formerly, the code + and manual sometimes used the term "parse error" instead. POSIX + requires "syntax error" in diagnostics, and it was thought better to + be consistent. + + - The documentation now emphasizes that yylex and yyerror must be + declared before use. C99 requires this. + + - Bison now parses C99 lexical constructs like UCNs and + backslash-newline within C escape sequences, as POSIX 1003.1-2001 requires. + + - File names are properly escaped in C output. E.g., foo\bar.y is + output as "foo\\bar.y". + + - Yacc command and library now available + The Bison distribution now installs a "yacc" command, as POSIX requires. + Also, Bison now installs a small library liby.a containing + implementations of Yacc-compatible yyerror and main functions. + This library is normally not useful, but POSIX requires it. + + - Type clashes now generate warnings, not errors. + + - If the user does not define YYSTYPE as a macro, Bison now declares it + using typedef instead of defining it as a macro. + For consistency, YYLTYPE is also declared instead of defined. + +** Other compatibility issues + + - %union directives can now have a tag before the "{", e.g., the + directive "%union foo {...}" now generates the C code + "typedef union foo { ... } YYSTYPE;"; this is for Yacc compatibility. + The default union tag is "YYSTYPE", for compatibility with Solaris 9 Yacc. + For consistency, YYLTYPE's struct tag is now "YYLTYPE" not "yyltype". + This is for compatibility with both Yacc and Bison 1.35. + + - ";" is output before the terminating "}" of an action, for + compatibility with Bison 1.35. + + - Bison now uses a Yacc-style format for conflict reports, e.g., + "conflicts: 2 shift/reduce, 1 reduce/reduce". + + - "yystype" and "yyltype" are now obsolescent macros instead of being + typedefs or tags; they are no longer documented and are planned to be + withdrawn in a future release. 
+ +** GLR parser notes + + - GLR and inline + Users of Bison have to decide how they handle the portability of the + C keyword "inline". + + - "parsing stack overflow..." -> "parser stack overflow" + GLR parsers now report "parser stack overflow" as per the Bison manual. + +** %parse-param and %lex-param + The macros YYPARSE_PARAM and YYLEX_PARAM provide a means to pass + additional context to yyparse and yylex. They suffer from several + shortcomings: + + - a single argument only can be added, + - their types are weak (void *), + - this context is not passed to ancillary functions such as yyerror, + - only yacc.c parsers support them. + + The new %parse-param/%lex-param directives provide a more precise control. + For instance: + + %parse-param {int *nastiness} + %lex-param {int *nastiness} + %parse-param {int *randomness} + + results in the following signatures: + + int yylex (int *nastiness); + int yyparse (int *nastiness, int *randomness); + + or, if both %pure-parser and %locations are used: + + int yylex (YYSTYPE *lvalp, YYLTYPE *llocp, int *nastiness); + int yyparse (int *nastiness, int *randomness); + +** Bison now warns if it detects conflicting outputs to the same file, + e.g., it generates a warning for "bison -d -o foo.h foo.y" since + that command outputs both code and header to foo.h. + +** #line in output files + - --no-line works properly. + +** Bison can no longer be built by a K&R C compiler; it requires C89 or + later to be built. This change originally took place a few versions + ago, but nobody noticed until we recently asked someone to try + building Bison with a K&R C compiler. + + +* Noteworthy changes in release 1.75 (2002-10-14) + +** Bison should now work on 64-bit hosts. + +** Indonesian translation thanks to Tedi Heriyanto. + +** GLR parsers + Fix spurious parse errors. + +** Pure parsers + Some people redefine yyerror to steal yyparse' private variables. + Reenable this trick until an official feature replaces it. 
+ +** Type Clashes + In agreement with POSIX and with other Yaccs, leaving a default + action is valid when $$ is untyped, and $1 typed: + + untyped: ... typed; + + but the converse remains an error: + + typed: ... untyped; + +** Values of midrule actions + The following code: + + foo: { ... } { $$ = $1; } ... + + was incorrectly rejected: $1 is defined in the second midrule + action, and is equal to the $$ of the first midrule action. + + +* Noteworthy changes in release 1.50 (2002-10-04) + +** GLR parsing + The declaration + %glr-parser + causes Bison to produce a Generalized LR (GLR) parser, capable of handling + almost any context-free grammar, ambiguous or not. The new declarations + %dprec and %merge on grammar rules allow parse-time resolution of + ambiguities. Contributed by Paul Hilfinger. + + Unfortunately Bison 1.50 does not work properly on 64-bit hosts + like the Alpha, so please stick to 32-bit hosts for now. + +** Output Directory + When not in Yacc compatibility mode, when the output file was not + specified, running "bison foo/bar.y" created "foo/bar.c". It + now creates "bar.c". + +** Undefined token + The undefined token was systematically mapped to 2 which prevented + the use of 2 by the user. This is no longer the case. + +** Unknown token numbers + If yylex returned an out of range value, yyparse could die. This is + no longer the case. + +** Error token + According to POSIX, the error token must be 256. + Bison extends this requirement by making it a preference: *if* the + user specified that one of her tokens is numbered 256, then error + will be mapped onto another number. + +** Verbose error messages + They no longer report "..., expecting error or..." for states where + error recovery is possible. + +** End token + Defaults to "$end" instead of "$". 
+ +** Error recovery now conforms to documentation and to POSIX + When a Bison-generated parser encounters a syntax error, it now pops + the stack until it finds a state that allows shifting the error + token. Formerly, it popped the stack until it found a state that + allowed some non-error action other than a default reduction on the + error token. The new behavior has long been the documented behavior, + and has long been required by POSIX. For more details, please see + Paul Eggert, "Reductions during Bison error handling" (2002-05-20) + . + +** Traces + Popped tokens and nonterminals are now reported. + +** Larger grammars + Larger grammars are now supported (larger token numbers, larger grammar + size (= sum of the LHS and RHS lengths), larger LALR tables). + Formerly, many of these numbers ran afoul of 16-bit limits; + now these limits are 32 bits on most hosts. + +** Explicit initial rule + Bison used to play hacks with the initial rule, which the user does + not write. It is now explicit, and visible in the reports and + graphs as rule 0. + +** Useless rules + Before, Bison reported the useless rules, but, although not used, + included them in the parsers. They are now actually removed. + +** Useless rules, useless nonterminals + They are now reported, as a warning, with their locations. + +** Rules never reduced + Rules that can never be reduced because of conflicts are now + reported. + +** Incorrect "Token not used" + On a grammar such as + + %token useless useful + %% + exp: '0' %prec useful; + + where a token was used to set the precedence of the last rule, + bison reported both "useful" and "useless" as useless tokens. + +** Revert the C++ namespace changes introduced in 1.31 + as they caused too many portability hassles. + +** Default locations + By an accident of design, the default computation of @$ was + performed after another default computation was performed: @$ = @1. 
+ The latter is now removed: YYLLOC_DEFAULT is fully responsible of + the computation of @$. + +** Token end-of-file + The token end of file may be specified by the user, in which case, + the user symbol is used in the reports, the graphs, and the verbose + error messages instead of "$end", which remains being the default. + For instance + %token MYEOF 0 + or + %token MYEOF 0 "end of file" + +** Semantic parser + This old option, which has been broken for ages, is removed. + +** New translations + Brazilian Portuguese, thanks to Alexandre Folle de Menezes. + Croatian, thanks to Denis Lackovic. + +** Incorrect token definitions + When given + %token 'a' "A" + bison used to output + #define 'a' 65 + +** Token definitions as enums + Tokens are output both as the traditional #define's, and, provided + the compiler supports ANSI C or is a C++ compiler, as enums. + This lets debuggers display names instead of integers. + +** Reports + In addition to --verbose, bison supports --report=THINGS, which + produces additional information: + - itemset + complete the core item sets with their closure + - lookahead [changed to "look-ahead" in 1.875e through 2.3, but changed back] + explicitly associate lookahead tokens to items + - solved + describe shift/reduce conflicts solving. + Bison used to systematically output this information on top of + the report. Solved conflicts are now attached to their states. + +** Type clashes + Previous versions don't complain when there is a type clash on + the default action if the rule has a midrule action, such as in: + + %type bar + %% + bar: '0' {} '0'; + + This is fixed. + +** GNU M4 is now required when using Bison. + + +* Noteworthy changes in release 1.35 (2002-03-25) + +** C Skeleton + Some projects use Bison's C parser with C++ compilers, and define + YYSTYPE as a class. The recent adjustment of C parsers for data + alignment and 64 bit architectures made this impossible. 
+ + Because for the time being no real solution for C++ parser + generation exists, kludges were implemented in the parser to + maintain this use. In the future, when Bison has C++ parsers, this + kludge will be disabled. + + This kludge also addresses some C++ problems when the stack was + extended. + + +* Noteworthy changes in release 1.34 (2002-03-12) + +** File name clashes are detected + $ bison foo.y -d -o foo.x + fatal error: header and parser would both be named "foo.x" + +** A missing ";" at the end of a rule triggers a warning + In accordance with POSIX, and in agreement with other + Yacc implementations, Bison will mandate this semicolon in the near + future. This eases the implementation of a Bison parser of Bison + grammars by making this grammar LALR(1) instead of LR(2). To + facilitate the transition, this release introduces a warning. + +** Revert the C++ namespace changes introduced in 1.31, as they caused too + many portability hassles. + +** DJGPP support added. + +** Fix test suite portability problems. + + +* Noteworthy changes in release 1.33 (2002-02-07) + +** Fix C++ issues + Groff could not be compiled for the definition of size_t was lacking + under some conditions. + +** Catch invalid @n + As is done with $n. + + +* Noteworthy changes in release 1.32 (2002-01-23) + +** Fix Yacc output file names + +** Portability fixes + +** Italian, Dutch translations + + +* Noteworthy changes in release 1.31 (2002-01-14) + +** Many Bug Fixes + +** GNU Gettext and %expect + GNU Gettext asserts 10 s/r conflicts, but there are 7. Now that + Bison dies on incorrect %expectations, we fear there will be + too many bug reports for Gettext, so _for the time being_, %expect + does not trigger an error when the input file is named "plural.y". + +** Use of alloca in parsers + If YYSTACK_USE_ALLOCA is defined to 0, then the parsers will use + malloc exclusively. Since 1.29, but was not NEWS'ed. 
+ + alloca is used only when compiled with GCC, to avoid portability + problems as on AIX. + +** yyparse now returns 2 if memory is exhausted; formerly it dumped core. + +** When the generated parser lacks debugging code, YYDEBUG is now 0 + (as POSIX requires) instead of being undefined. + +** User Actions + Bison has always permitted actions such as { $$ = $1 }: it adds the + ending semicolon. Now if in Yacc compatibility mode, the semicolon + is no longer output: one has to write { $$ = $1; }. + +** Better C++ compliance + The output parsers try to respect C++ namespaces. + [This turned out to be a failed experiment, and it was reverted later.] + +** Reduced Grammars + Fixed bugs when reporting useless nonterminals. + +** 64 bit hosts + The parsers work properly on 64 bit hosts. + +** Error messages + Some calls to strerror resulted in scrambled or missing error messages. + +** %expect + When the number of shift/reduce conflicts is correct, don't issue + any warning. + +** The verbose report includes the rule line numbers. + +** Rule line numbers are fixed in traces. + +** Swedish translation + +** Parse errors + Verbose parse error messages from the parsers are better looking. + Before: parse error: unexpected `'/'', expecting `"number"' or `'-'' or `'('' + Now: parse error: unexpected '/', expecting "number" or '-' or '(' + +** Fixed parser memory leaks. + When the generated parser was using malloc to extend its stacks, the + previous allocations were not freed. + +** Fixed verbose output file. + Some newlines were missing. + Some conflicts in state descriptions were missing. + +** Fixed conflict report. + Option -v was needed to get the result. + +** %expect + Was not used. + Mismatches are errors, not warnings. + +** Fixed incorrect processing of some invalid input. + +** Fixed CPP guards: 9foo.h uses BISON_9FOO_H instead of 9FOO_H. + +** Fixed some typos in the documentation. + +** %token MY_EOF 0 is supported. 
+ Before, MY_EOF was silently renumbered as 257. + +** doc/refcard.tex is updated. + +** %output, %file-prefix, %name-prefix. + New. + +** --output + New, aliasing "--output-file". + + +* Noteworthy changes in release 1.30 (2001-10-26) + +** "--defines" and "--graph" have now an optional argument which is the + output file name. "-d" and "-g" do not change; they do not take any + argument. + +** "%source_extension" and "%header_extension" are removed, failed + experiment. + +** Portability fixes. + + +* Noteworthy changes in release 1.29 (2001-09-07) + +** The output file does not define const, as this caused problems when used + with common autoconfiguration schemes. If you still use ancient compilers + that lack const, compile with the equivalent of the C compiler option + "-Dconst=". Autoconf's AC_C_CONST macro provides one way to do this. + +** Added "-g" and "--graph". + +** The Bison manual is now distributed under the terms of the GNU FDL. + +** The input and the output files has automatically a similar extension. + +** Russian translation added. + +** NLS support updated; should hopefully be less troublesome. + +** Added the old Bison reference card. + +** Added "--locations" and "%locations". + +** Added "-S" and "--skeleton". + +** "%raw", "-r", "--raw" is disabled. + +** Special characters are escaped when output. This solves the problems + of the #line lines with path names including backslashes. + +** New directives. + "%yacc", "%fixed_output_files", "%defines", "%no_parser", "%verbose", + "%debug", "%source_extension" and "%header_extension". + +** @$ + Automatic location tracking. + + +* Noteworthy changes in release 1.28 (1999-07-06) + +** Should compile better now with K&R compilers. + +** Added NLS. + +** Fixed a problem with escaping the double quote character. + +** There is now a FAQ. + + +* Noteworthy changes in release 1.27 + +** The make rule which prevented bison.simple from being created on + some systems has been fixed. 
+
+
+* Noteworthy changes in release 1.26
+
+** Bison now uses Automake.
+
+** New mailing lists: <bug-bison@gnu.org> and <help-bison@gnu.org>.
+
+** Token numbers now start at 257 as previously documented, not 258.
+
+** Bison honors the TMPDIR environment variable.
+
+** A couple of buffer overruns have been fixed.
+
+** Problems when closing files should now be reported.
+
+** Generated parsers should now work even on operating systems which do
+   not provide alloca().
+
+
+* Noteworthy changes in release 1.25 (1995-10-16)
+
+** Errors in the input grammar are not fatal; Bison keeps reading
+the grammar file, and reports all the errors found in it.
+
+** Tokens can now be specified as multiple-character strings: for
+example, you could use "<=" for a token which looks like <=, instead
+of choosing a name like LESSEQ.
+
+** The %token_table declaration says to write a table of tokens (names
+and numbers) into the parser file.  The yylex function can use this
+table to recognize multiple-character string tokens, or for other
+purposes.
+
+** The %no_lines declaration says not to generate any #line preprocessor
+directives in the parser file.
+
+** The %raw declaration says to use internal Bison token numbers, not
+Yacc-compatible token numbers, when token names are defined as macros.
+
+** The --no-parser option produces the parser tables without including
+the parser engine; a project can now use its own parser engine.
+The actions go into a separate file called NAME.act, in the form of
+a switch statement body.
+
+
+* Noteworthy changes in release 1.23
+
+The user can define YYPARSE_PARAM as the name of an argument to be
+passed into yyparse.  The argument should have type void *.  It should
+actually point to an object.  Grammar actions can access the variable
+by casting it to the proper pointer type.
+
+Line numbers in output file corrected.
+
+
+* Noteworthy changes in release 1.22
+
+--help option added.
+
+
+* Noteworthy changes in release 1.20
+
+Output file does not redefine const for C++. 
+ +----- + +LocalWords: yacc YYBACKUP glr GCC lalr ArrayIndexOutOfBoundsException nullptr +LocalWords: cplusplus liby rpl fprintf mfcalc Wyacc stmt cond expr mk sym lr +LocalWords: IELR ielr Lookahead YYERROR nonassoc LALR's api lookaheads yychar +LocalWords: destructor lookahead YYRHSLOC YYLLOC Rhs ifndef YYFAIL cpp sr rr +LocalWords: preprocessor initializer Wno Wnone Werror FreeBSD prec livelocks +LocalWords: Solaris AIX UX RHEL Tru LHS gcc's Wundef YYENABLE NLS YYLTYPE VCG +LocalWords: yyerror cpp's Wunused yylval yylloc prepend yyparse yylex yypush +LocalWords: Graphviz xml nonterminals midrule destructor's YYSTYPE typedef ly +LocalWords: CHR chr printf stdout namespace preprocessing enum pre include's +LocalWords: YYRECOVERING nonfree destructors YYABORT YYACCEPT params enums de +LocalWords: struct yystype DJGPP lex param Haible NUM alloca YYSTACK NUL goto +LocalWords: YYMAXDEPTH Unescaped UCNs YYLTYPE's yyltype typedefs inline Yaccs +LocalWords: Heriyanto Reenable dprec Hilfinger Eggert MYEOF Folle Menezes EOF +LocalWords: Lackovic define's itemset Groff Gettext malloc NEWS'ed YYDEBUG YY +LocalWords: namespaces strerror const autoconfiguration Dconst Autoconf's FDL +LocalWords: Automake TMPDIR LESSEQ ylwrap endif yydebug YYTOKEN YYLSP ival hh +LocalWords: extern YYTOKENTYPE TOKENTYPE yytokentype tokentype STYPE lval pdf +LocalWords: lang yyoutput dvi html ps POSIX lvalp llocp Wother nterm arg init +LocalWords: TOK calc yyo fval Wconflicts parsers yystackp yyval yynerrs +LocalWords: Théophile Ranquet Santet fno fnone stype associativity Tolmer +LocalWords: Wprecedence Rassoul Wempty Paolo Bonzini parser's Michiel loc +LocalWords: redeclaration sval fcaret reentrant XSLT xsl Wmaybe yyvsp Tedi +LocalWords: pragmas noreturn untyped Rozenman unexpanded Wojciech Polak +LocalWords: Alexandre MERCHANTABILITY yytype emplace ptr automove lvalues +LocalWords: nonterminal yy args Pragma dereference yyformat rhs docdir bw +LocalWords: Redeclarations rpcalc Autoconf 
YFLAGS Makefiles PROG DECL num +LocalWords: Heimbigner AST src ast Makefile srcdir MinGW xxlex XXSTYPE CVE +LocalWords: XXLTYPE strictfp IDEs ffixit fdiagnostics parseable fixits +LocalWords: Wdeprecated yytext Variadic variadic yyrhs yyphrs RCS README +LocalWords: noexcept constexpr ispell american deprecations backend Teoh +LocalWords: YYPRINT Mangold Bonzini's Wdangling exVal baz checkable gcc +LocalWords: fsanitize Vogelsgesang lis redeclared stdint automata yytname +LocalWords: yysymbol yytnamerr yyreport ctx ARGMAX yysyntax stderr LPAREN +LocalWords: symrec yypcontext TOKENMAX yyexpected YYEMPTY yypstate YYEOF +LocalWords: autocompletion bistromathic submessages Cayuela lexcalc hoc +LocalWords: yytoken YYUNDEF YYerror basename Automake's UTF ifdef ffile +LocalWords: gotos readline Imbimbo Wcounterexamples Wcex Nonunifying rcex +LocalWords: Vais xsltproc YYNOMEM YYLOCATION signedness YYBISON MITRE's +LocalWords: libreadline YYMALLOC YYFREE MSVC redefinitions POSIXLY + +Local Variables: +ispell-dictionary: "american" +mode: outline +fill-column: 76 +End: + +Copyright (C) 1995-2015, 2018-2021 Free Software Foundation, Inc. + +This file is part of Bison, the GNU Parser Generator. + +Permission is granted to copy, distribute and/or modify this document +under the terms of the GNU Free Documentation License, Version 1.3 or +any later version published by the Free Software Foundation; with no +Invariant Sections, with no Front-Cover Texts, and with no Back-Cover +Texts. A copy of the license is included in the "GNU Free +Documentation License" file as part of this distribution. 
diff --git a/platform/dbops/binaries/build/share/doc/bison/README b/platform/dbops/binaries/build/share/doc/bison/README new file mode 100644 index 0000000000000000000000000000000000000000..b60644002437e03a27f42e8bc971f832ab0b2501 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/README @@ -0,0 +1,135 @@ +GNU Bison is a general-purpose parser generator that converts an annotated +context-free grammar into a deterministic LR or generalized LR (GLR) parser +employing LALR(1) parser tables. Bison can also generate IELR(1) or +canonical LR(1) parser tables. Once you are proficient with Bison, you can +use it to develop a wide range of language parsers, from those used in +simple desk calculators to complex programming languages. + +Bison is upward compatible with Yacc: all properly-written Yacc grammars +work with Bison with no change. Anyone familiar with Yacc should be able to +use Bison with little trouble. You need to be fluent in C, C++, D or Java +programming in order to use Bison. + +Bison and the parsers it generates are portable, they do not require any +specific compilers. + +GNU Bison's home page is https://gnu.org/software/bison/. + +# Installation +## Build from git +The [README-hacking.md file](README-hacking.md) is about building, modifying +and checking Bison. See its "Working from the Repository" section to build +Bison from the git repo. Roughly, run: + + $ git submodule update --init + $ ./bootstrap + +then proceed with the usual `configure && make` steps. + +## Build from tarball +See the [INSTALL file](INSTALL) for generic compilation and installation +instructions. + +Bison requires GNU m4 1.4.6 or later. See +https://ftp.gnu.org/gnu/m4/m4-1.4.6.tar.gz. + +## Running a non installed bison +Once you ran `make`, you might want to toy with this fresh bison before +installing it. In that case, do not use `src/bison`: it would use the +*installed* files (skeletons, etc.), not the local ones. Use `tests/bison`. 
+ +## Colored diagnostics +As an experimental feature, diagnostics are now colored, controlled by the +`--color` and `--style` options. + +To use them, install the libtextstyle library, 0.20.5 or newer, before +configuring Bison. It is available from https://alpha.gnu.org/gnu/gettext/, +for instance https://alpha.gnu.org/gnu/gettext/libtextstyle-0.20.5.tar.gz, +or as part of Gettext 0.21 or newer, for instance +https://ftp.gnu.org/gnu/gettext/gettext-0.21.tar.gz. + +The option --color supports the following arguments: +- always, yes: Enable colors. +- never, no: Disable colors. +- auto, tty (default): Enable colors if the output device is a tty. + +To customize the styles, create a CSS file, say `bison-bw.css`, similar to + + /* bison-bw.css */ + .warning { } + .error { font-weight: 800; text-decoration: underline; } + .note { } + +then invoke bison with `--style=bison-bw.css`, or set the `BISON_STYLE` +environment variable to `bison-bw.css`. + +In some diagnostics, bison uses libtextstyle to emit special escapes to +generate clickable hyperlinks. The environment variable +`NO_TERM_HYPERLINKS` can be used to suppress them. This may be useful for +terminal emulators which produce garbage output when they receive the escape +sequence for a hyperlink. Currently (as of 2020), this affects some versions +of emacs, guake, konsole, lxterminal, rxvt, yakuake. + +## Relocatability +If you pass `--enable-relocatable` to `configure`, Bison is relocatable. + +A relocatable program can be moved or copied to a different location on the +file system. It can also be used through mount points for network sharing. +It is possible to make symlinks to the installed and moved programs, and +invoke them through the symlink. + +See "Enabling Relocatability" in the documentation. + +## Internationalization +Bison supports two catalogs: one for Bison itself (i.e., for the +maintainer-side parser generation), and one for the generated parsers (i.e., +for the user-side parser execution). 
The requirements between both differ: +bison needs ngettext, the generated parsers do not. To simplify the build +system, neither are installed if ngettext is not supported, even if +generated parsers could have been localized. See +https://lists.gnu.org/r/bug-bison/2009-08/msg00006.html for more +details. + +# Questions +See the section FAQ in the documentation (doc/bison.info) for frequently +asked questions. The documentation is also available in PDF and HTML, +provided you have a recent version of Texinfo installed: run `make pdf` or +`make html`. + +If you have questions about using Bison and the documentation does not +answer them, please send mail to . + +# Bug reports +Please send bug reports to . Be sure to include the +version number from `bison --version`, and a complete, self-contained test +case in each bug report. + +# Copyright statements +For any copyright year range specified as YYYY-ZZZZ in this package, note +that the range specifies every single year in that closed interval. + + diff --git a/platform/dbops/binaries/build/share/doc/bison/THANKS b/platform/dbops/binaries/build/share/doc/bison/THANKS new file mode 100644 index 0000000000000000000000000000000000000000..be743a232584c25073cbdad7831c016a1893e940 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/THANKS @@ -0,0 +1,256 @@ +Bison was originally written by Robert Corbett. 
It would not be what +it is today without the invaluable help of these people: + +Aaro Koskinen aaro.koskinen@iki.fi +Аскар Сафин safinaskar@mail.ru +Adam Sampson ats@offog.org +Adrian Vogelsgesang avogelsgesang@tableau.com +Ahcheong Lee dkcjd2000@gmail.com +Airy Andre Airy.Andre@edf.fr +Akim Demaille akim@gnu.org +Albert Chin-A-Young china@thewrittenword.com +Alexander Belopolsky alexb@rentec.com +Alexandre Duret-Lutz adl@lrde.epita.fr +Andre da Costa Barros andre.cbarros@yahoo.com +Andreas Damm adamm@onica.com +Andreas Schwab schwab@suse.de +Andrew Suffield asuffield@users.sourceforge.net +Angelo Borsotti angelo.borsotti@gmail.com +Anthony Heading ajrh@ajrh.net +Antonio Silva Correia amsilvacorreia@hotmail.com +Arnold Robbins arnold@skeeve.com +Art Haas ahaas@neosoft.com +Arthur Schwarz aschwarz1309@att.net +Askar Safin safinaskar@mail.ru +Balázs Scheidler balazs.scheidler@oneidentity.com +Baron Schwartz baron@sequent.org +Ben Pfaff blp@cs.stanford.edu +Benoit Perrot benoit.perrot@epita.fr +Bernd Edlinger bernd.edlinger@hotmail.de +Bernd Kiefer kiefer@dfki.de +Bert Deknuydt Bert.Deknuydt@esat.kuleuven.ac.be +Bill Allombert Bill.Allombert@math.u-bordeaux1.fr +Bob Rossi bob@brasko.net +Brandon Lucia blucia@gmail.com +Brooks Moses bmoses@google.com +Bruce Lilly blilly@erols.com +Bruno Belanyi bruno.belanyi@epita.fr +Bruno Haible bruno@clisp.org +Charles-Henri de Boysson de-boy_c@epita.fr +Christian Burger cburger@sunysb.edu +Clément Démoulins demoulins@lrde.epita.fr +Colin Daley colin.daley@outlook.com +Cris Bailiff c.bailiff+bison@awayweb.com +Cris van Pelt cris@amf03054.office.wxs.nl +Csaba Raduly csaba_22@yahoo.co.uk +Dagobert Michelsen dam@baltic-online.de +Daniel Frużyński daniel@poradnik-webmastera.com +Daniel Galloway dg1751@att.com +Daniela Becker daniela@lrde.epita.fr +Daniel Hagerty hag@gnu.org +David Barto david.barto@sparqlcity.com +David J. 
MacKenzie djm@gnu.org +David Kastrup dak@gnu.org +David Michael fedora.dm0@gmail.com +Dengke Du dengke.du@windriver.com +Denis Excoffier gcc@Denis-Excoffier.org +Dennis Clarke dclarke@blastwave.org +Derek Clegg derek@me.com +Derek M. Jones derek@knosof.co.uk +Di-an Jan dianj@freeshell.org +Dick Streefland dick.streefland@altium.nl +Didier Godefroy dg@ulysium.net +Don Macpherson donmac703@gmail.com +Dwight Guth dwight.guth@runtimeverification.com +Efi Fogel efifogel@gmail.com +Enrico Scholz enrico.scholz@informatik.tu-chemnitz.de +Eric Blake ebb9@byu.net +Eric S. Raymond esr@thyrsus.com +Étienne Renault renault@lrde.epita.fr +Evan Lavelle eml-bison@cyconix.com +Evan Nemerson evan@nemerson.com +Evgeny Stambulchik fnevgeny@plasma-gate.weizmann.ac.il +Fabrice Bauzac noon@cote-dazur.com +Ferdinand Thiessen ferdinand@fthiessen.de +Florian Krohm florian@edamail.fishkill.ibm.com +Frank Heckenbach frank@g-n-u.de +Frans Englich frans.englich@telia.com +Gabriel Rassoul gabriel.rassoul@epita.fr +Gary L Peskin garyp@firstech.com +Gavin Smith gavinsmith0123@gmail.com +Georg Sauthoff gsauthof@TechFak.Uni-Bielefeld.DE +George Neuner gneuner2@comcast.net +Gilles Espinasse g.esp@free.fr +Goran Uddeborg goeran@uddeborg.se +Guido Trentalancia trentalg@aston.ac.uk +H. Merijn Brand h.m.brand@hccnet.nl +Hans Åberg haberg-1@telia.com +Horst Von Brand vonbrand@inf.utfsm.cl +Jacob L. Mandelson jlm-bbison@jlm.ofb.net +Jan Nieuwenhuizen janneke@gnu.org +Jannick thirdedition@gmx.net +Jeff Hammond jeff_hammond@acm.org +Jerry Quinn jlquinn@optonline.net +Jesse Thilo jthilo@gnu.org +Jim Kent jkent@arch.sel.sony.com +Jim Meyering jim@meyering.net +Joel E. 
Denny joeldenny@joeldenny.org +Johan van Selst johans@stack.nl +John Horigan john@glyphic.com +Jonathan Fabrizio jonathan.fabrizio@lrde.epita.fr +Jonathan Nieder jrnieder@gmail.com +Josh Soref jsoref@gmail.com +Juan Manuel Guerrero juan.guerrero@gmx.de +Karl Berry karl@freefriends.org +Kees Zeelenberg kzlg@users.sourceforge.net +Keith Browne kbrowne@legato.com +Ken Moffat zarniwhoop@ntlworld.com +Kiyoshi Kanazawa yoi_no_myoujou@yahoo.co.jp +Lars Maier lars.maier@tefax.net +Lars Wendler polynomial-c@gentoo.org +László Várady laszlo.varady93@gmail.com +Laurent Mascherpa laurent.mascherpa@epita.fr +Lie Yan lie.yan@kaust.edu.sa +Maarten De Braekeleer maarten.debraekeleer@gmail.com +Magnus Fromreide magfr@lysator.liu.se +Marc Autret autret_m@epita.fr +Marc Mendiola mmendiol@usc.edu +Marc Schönefeld marc.schoenefeld@gmx.org +Mark Boyall wolfeinstein@gmail.com +Martin Blais blais@furius.ca +Martin Jacobs martin.jacobs@arcor.de +Martin Mokrejs mmokrejs@natur.cuni.cz +Martin Nylin martin.nylin@linuxmail.org +Matt Kraai kraai@alumni.cmu.edu +Matt Rosing rosing@peakfive.com +Maxim Prohorenko Maxim.Prohorenko@gmail.com +Michael Catanzaro mcatanzaro@gnome.org +Michael Felt mamfelt@gmail.com +Michael Hayes m.hayes@elec.canterbury.ac.nz +Michael Raskin 7c6f434c@mail.ru +Michel d'Hooge michel.dhooge@gmail.com +Michiel De Wilde mdewilde.agilent@gmail.com +Mickael Labau labau_m@epita.fr +Mike Castle dalgoda@ix.netcom.com +Mike Frysinger vapier@gentoo.org +Mike Sullivan Mike.sullivan@Oracle.COM +Nate Guerin nathan.guerin@riseup.net +Neil Booth NeilB@earthling.net +Nelson H. F. 
Beebe beebe@math.utah.edu +neok m4700 neok.m4700@gmail.com +Nick Bowler nbowler@elliptictech.com +Nicolas Bedon nicolas.bedon@univ-rouen.fr +Nicolas Burrus nicolas.burrus@epita.fr +Nicolas Tisserand nicolas.tisserand@epita.fr +Nikki Valen nicolettavalencia.nv@gmail.com +Noah Friedman friedman@gnu.org +Odd Arild Olsen oao@fibula.no +Oleg Smolsky oleg.smolsky@pacific-simulators.co.nz +Oleksii Taran oleksii.taran@gmail.com +Oliver Mangold o.mangold@gmail.com +Paolo Bonzini bonzini@gnu.org +Paolo Simone Gasparello djgaspa@gmail.com +Pascal Bart pascal.bart@epita.fr +Patrice Dumas pertusus@free.fr +Paul Eggert eggert@cs.ucla.edu +Paul Hilfinger Hilfinger@CS.Berkeley.EDU +Per Allansson per@appgate.com +Peter Eisentraut peter_e@gmx.net +Peter Fales psfales@lucent.com +Peter Hamorsky hamo@upjs.sk +Peter Simons simons@cryp.to +Petr Machata pmachata@redhat.com +Pho pho@cielonegro.org +Piotr Gackiewicz gacek@intertel.com.pl +Piotr Marcińczyk piomar123@gmail.com +Pramod Kumbhar pramod.s.kumbhar@gmail.com +Quentin Hocquet hocquet@gostai.com +Quoc Peyrot chojin@lrde.epita.fr +R Blake blakers@mac.com +Raja R Harinath harinath@cs.umn.edu +Ralf Wildenhues Ralf.Wildenhues@gmx.de +Ryan dev@splintermail.com +Rich Wilson richaw@gmail.com +Richard Stallman rms@gnu.org +Rici Lake ricilake@gmail.com +Rob Conde rob.conde@ai-solutions.com +Rob Vermaas rob.vermaas@gmail.com +Robert Anisko anisko_r@epita.fr +Robert Yang liezhi.yang@windriver.com +Roland Levillain roland@lrde.epita.fr +Satya Kiran Popuri satyakiran@gmail.com +Sebastian Setzer sebastian.setzer.ext@siemens.com +Sebastien Fricker sebastien.fricker@gmail.com +Sébastien Villemot sebastien@debian.org +Sergei Steshenko sergstesh@yahoo.com +Shura debil_urod@ngs.ru +Simon Sobisch simonsobisch@web.de +Stefano Lattarini stefano.lattarini@gmail.com +Stephen Cameron stephenmcameron@gmail.com +Steve Murphy murf@parsetree.com +Suhwan Song prada960808@gmail.com +Sum Wu sum@geekhouse.org +Théophile Ranquet theophile.ranquet@gmail.com +Thiru 
Ramakrishnan thiru.ramakrishnan@gmail.com +Thomas Jahns jahns@dkrz.de +Thomas Petazzoni thomas.petazzoni@bootlin.com +Tim Josling tej@melbpc.org.au +Tim Landscheidt tim@tim-landscheidt.de +Tim Van Holder tim.van.holder@pandora.be +Tobias Frost tobi@debian.org +Todd Freed todd.freed@gmail.com +Tom Kramer kramer@nist.gov +Tom Lane tgl@sss.pgh.pa.us +Tom Tromey tromey@cygnus.com +Tomasz Kłoczko kloczko.tomasz@gmail.com +Tommy Nordgren tommy.nordgren@chello.se +Troy A. Johnson troyj@ecn.purdue.edu +Tys Lefering gccbison@gmail.com +Uxio Prego uxio@uma.es +Valentin Tolmer nitnelave1@gmail.com +wcventure wcventure@126.com +Victor Khomenko victor.khomenko@newcastle.ac.uk +Victor Zverovich victor.zverovich@gmail.com +Vin Shelton acs@alumni.princeton.edu +W.C.A. Wijngaards wouter@NLnetLabs.nl +Wayne Green wayne@infosavvy.com +Wei Song wsong83@gmail.com +Wojciech Polak polak@gnu.org +Wolfgang S. Kechel wolfgang.kechel@prs.de +Wolfgang Thaller wolfgang.thaller@gmx.net +Wolfram Wagner ww@mpi-sb.mpg.de +Wwp subscript@free.fr +xolodho xolodho@gmail.com +Yuichiro Kaneko spiketeika@gmail.com +Yuriy Solodkyy solodon@gmail.com +Zack Weinberg zack@codesourcery.com +江 祖铭 jjzuming@outlook.com +長田偉伸 cbh34680@iret.co.jp +马俊 majun123@whu.edu.cn + +Many people are not named here because we lost track of them. We +thank them! Please, help us keeping this list up to date. + +Local Variables: +mode: text +coding: utf-8 +End: + +----- + +Copyright (C) 2000-2015, 2018-2021 Free Software Foundation, Inc. + +This file is part of Bison, the GNU Parser Generator. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . diff --git a/platform/dbops/binaries/build/share/doc/bison/TODO b/platform/dbops/binaries/build/share/doc/bison/TODO new file mode 100644 index 0000000000000000000000000000000000000000..d89894c7f116f29b19f91085b331a3cbef7ba920 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/TODO @@ -0,0 +1,723 @@ +* Soon +** scan-code +The default case is scanning char-per-char. + + /* By default, grow the string obstack with the input. */ + .|\n STRING_GROW (); + +make it more eager? + +** Missing tests +commit 2c294c132528ede23d8ae4959783a67e9ff05ac5 +Author: Vincent Imbimbo +Date: Sat Jan 23 13:25:18 2021 -0500 + + cex: fix state-item pruning + +See https://lists.gnu.org/r/bug-bison/2021-01/msg00002.html + +** pos_set_set +The current approach is correct, but with poor performances. Bitsets need +to support 'assign' and 'shift'. And instead of extending POS_SET just for +the out-of-range new values, we need something like doubling the size. + +** glr +There is no test with "Parse on stack %ld rejected by rule %d" in it. + +** yyrline etc. +Clarify that rule numbers in the skeletons are 1-based. + +** Macros in C++ +There are many macros that should obey api.prefix: YY_CPLUSPLUS, YY_MOVE, +etc. + +** yyerrok in Java +And add tests in calc.at, to prepare work for D. + +** YYERROR and yynerrs +We are missing some cases. Write a test case, and check all the skeletons. + +** Cex +*** Improve gnulib +Don't do this (counterexample.c): + +// This is the fastest way to get the tail node from the gl_list API. 
+gl_list_node_t +list_get_end (gl_list_t list) +{ + gl_list_node_t sentinel = gl_list_add_last (list, NULL); + gl_list_node_t res = gl_list_previous_node (list, sentinel); + gl_list_remove_node (list, sentinel); + return res; +} + +*** Ambiguous rewriting +If the user is stupid enough to have equal rules, then the derivations are +harder to read: + + Reduce/reduce conflict on tokens $end, "+", "⊕": + 2 exp: exp "+" exp . + 3 exp: exp "+" exp . + Example exp "+" exp • + First derivation exp ::=[ exp "+" exp • ] + Example exp "+" exp • + Second derivation exp ::=[ exp "+" exp • ] + +Do we care about this? In color, we use twice the same color here, but we +could try to use the same color for the same rule. + +*** XML reports +Show the counterexamples. This is going to be really hard and/or painful. +Unless we play it dumb (little structure). + +** Bistromathic +- How about not evaluating incomplete lines when the text is not finished + (as shells do). + +** Questions +*** Java +- Should i18n be part of the Lexer? Currently it's a static method of + Lexer. + +- is there a migration path that would allow to use TokenKinds in + yylex? + +- define the tokens as an enum too. + +- promote YYEOF rather than EOF. + +** YYerror +https://git.savannah.gnu.org/gitweb/?p=gettext.git;a=blob;f=gettext-runtime/intl/plural.y;h=a712255af4f2f739c93336d4ff6556d932a426a5;hb=HEAD + +should be updated to not use YYERRCODE. Returning an undef token is good +enough. + +** Java +*** calc.at +Stop hard-coding "Calc". Adjust local.at (look for FIXME). + +** doc +I feel it's ugly to use the GNU style to declare functions in the doc. It +generates tons of white space in the page, and may contribute to bad page +breaks. + +** consistency +token vs terminal. + +** api.token.raw +The YYUNDEFTOK could be assigned a semantic value so that yyerror could be +used to report invalid lexemes. + +** push parsers +Consider deprecating impure push parsers. They add a lot of complexity, for +a bad feature. 
On the other hand, that would make it much harder to sit +push parsers on top of pull parser. Which is currently not relevant, since +push parsers are measurably slower. + +** %define parse.error formatted +How about pushing Bistromathic's yyreport_syntax_error as another standard +way to generate the error message, and leave to the user the task of +providing the message formats? Currently in bistro, it reads: + + const char * + error_format_string (int argc) + { + switch (argc) + { + default: /* Avoid compiler warnings. */ + case 0: return _("%@: syntax error"); + case 1: return _("%@: syntax error: unexpected %u"); + // TRANSLATORS: '%@' is a location in a file, '%u' is an + // "unexpected token", and '%0e', '%1e'... are expected tokens + // at this point. + // + // For instance on the expression "1 + * 2", you'd get + // + // 1.5: syntax error: expected - or ( or number or function or variable before * + case 2: return _("%@: syntax error: expected %0e before %u"); + case 3: return _("%@: syntax error: expected %0e or %1e before %u"); + case 4: return _("%@: syntax error: expected %0e or %1e or %2e before %u"); + case 5: return _("%@: syntax error: expected %0e or %1e or %2e or %3e before %u"); + case 6: return _("%@: syntax error: expected %0e or %1e or %2e or %3e or %4e before %u"); + case 7: return _("%@: syntax error: expected %0e or %1e or %2e or %3e or %4e or %5e before %u"); + case 8: return _("%@: syntax error: expected %0e or %1e or %2e or %3e or %4e or %5e or %6e before %u"); + } + } + +The message would have to be generated in a string, and pushed to yyerror. +Which will be a pain in the neck in yacc.c. + +If we want to do that, we should think very carefully about the syntax of +the format string. 
+ +** yyclearin does not invoke the lookahead token's %destructor +https://lists.gnu.org/r/bug-bison/2018-02/msg00000.html +Rici: + +> Modifying yyclearin so that it calls yydestruct seems like the simplest +> solution to this issue, but it is conceivable that such a change would +> break programs which already perform some kind of workaround in order to +> destruct the lookahead symbol. So it might be necessary to use some kind of +> compatibility %define, or to create a new replacement macro with a +> different name such as yydiscardin. +> +> At a minimum, the fact that yyclearin does not invoke the %destructor +> should be highlighted in the documentation, since it is not at all obvious. + +** Issues in i18n + +Les catégories d'avertissements incluent : + conflicts-sr conflits S/R (activé par défaut) + conflicts-rr conflits R/R (activé par défaut) + dangling-alias l'alias chaîne n'est pas attaché à un symbole + deprecated construction obsolète + empty-rule règle vide sans %empty + midrule-values valeurs de règle intermédiaire non définies ou inutilisées + precedence priorité et associativité inutiles + yacc incompatibilités avec POSIX Yacc + other tous les autres avertissements (activé par défaut) + all tous les avertissements sauf « dangling-alias » et « yacc » + no-CATEGORY désactiver les avertissements dans CATEGORIE + none désactiver tous les avertissements + error[=CATEGORY] traiter les avertissements comme des erreurs + +Line -1 and -3 should mention CATEGORIE, not CATEGORY. + +* Bison 3.9 +** Rewrite glr.cc (currently glr2.cc) +*** custom error messages + +*** Remove jumps +We can probably replace setjmp/longjmp with exceptions. That would help +tremendously other languages such as D and Java that probably have no +similar feature. If we remove jumps, we probably no longer need _Noreturn, +so simplify `b4_attribute_define([noreturn])` into `b4_attribute_define`. 
+ +After discussing with Valentin, it was decided that it's better to stay with +jumps, since in some places exceptions are ruled out from C++. + +*** Coding style +Move to our coding conventions. In particular names such as yy_glr_stack, +not yyGLRStack. + +*** yydebug +It should be a member of the parser object, see lalr1.cc. Let the parser +object decide what the debug stream is, rather than open coding std::cerr. + +*** Avoid pointers +There are many places where pointers should be replaced with references. +Some occurrences were fixed, but now some have improper names: + +-yygetToken (int *yycharp, ]b4_namespace_ref[::]b4_parser_class[& yyparser][]b4_pure_if([, glr_stack* yystackp])[]b4_user_formals[) ++yygetToken (int& yycharp, ]b4_namespace_ref[::]b4_parser_class[& yyparser][]b4_pure_if([, glr_stack* yystackp])[]b4_user_formals[) + +yycharp is no longer a Pointer. And yystackp should probably also be a reference. + +*** parse.assert +Currently all the assertions are enabled. Once we are confident in glr2.cc, +let parse.assert use the same approach as in lalr1.cc. + +*** debug_stream +Stop using std::cerr everywhere. + +*** glr.c +When glr2.cc fully replaces glr.cc, get rid of the glr.cc scaffolding in +glr.c. + +* Chains +** Unit rules / Injection rules (Akim Demaille) +Maybe we could expand unit rules (or "injections", see +https://homepages.cwi.nl/~daybuild/daily-books/syntax/2-sdf/sdf.html), i.e., +transform + + exp: arith | bool; + arith: exp '+' exp; + bool: exp '&' exp; + +into + + exp: exp '+' exp | exp '&' exp; + +when there are no actions. This can significantly speed up some grammars. +I can't find the papers. In particular the book 'LR parsing: Theory and +Practice' is impossible to find, but according to 'Parsing Techniques: a +Practical Guide', it includes information about this issue. Does anybody +have it? 
+ +** clean up (Akim Demaille) +Do not work on these items now, as I (Akim) have branches with a lot of +changes in this area (hitting several files), and no desire to have to fix +conflicts. Addressing these items will happen after my branches have been +merged. + +*** lalr.c +Introduce a goto struct, and use it in place of from_state/to_state. +Rename states1 as path, length as pathlen. +Introduce inline functions for things such as nullable[*rp - ntokens] +where we need to map from symbol number to nterm number. + +There are probably a significant part of the relations management that +should be migrated on top of a bitsetv. + +*** closure +It should probably take a "state*" instead of two arguments. + +*** traces +The "automaton" and "set" categories are not so useful. We should probably +introduce lr(0) and lalr, just the way we have ielr categories. The +"closure" function is too verbose, it should probably have its own category. + +"set" can still be used for summarizing the important sets. That would make +tests easy to maintain. + +*** complain.* +Rename these guys as "diagnostics.*" (or "diagnose.*"), since that's the +name they have in GCC, clang, etc. Likewise for the complain_* series of +functions. + +*** ritem +states/nstates, rules/nrules, ..., ritem/nritems +Fix the latter. + +*** m4: slot, type, type_tag +The meaning of type_tag varies depending on api.value.type. We should avoid +that and using clear definitions with stable semantics. + +* D programming language +There's a number of features that are missing, here sorted in _suggested_ +order of implementation. + +When copying code from other skeletons, keep the comments exactly as they +are. Keep the same variable names. If you change the wording in one place, +do it in the others too. In other words: make sure to keep the +maintenance *simple* by avoiding any gratuitous difference. + +** CI +Check when gdc and ldc. + +** GLR Parser +This is very ambitious. That's the final boss. 
There are currently no +"clean" implementation to get inspiration from. + +glr.c is very clean but: +- is low-level C +- is a different skeleton from yacc.c + +glr.cc is (currently) an ugly hack: a C++ shell around glr.c. Valentin +Tolmer is currently rewriting glr.cc to be clean C++, but he is not +finished. There will be a lot a common code between lalr1.cc and glr.cc, so +eventually I would like them to be fused into a single skeleton, supporting +both deterministic and generalized parsing. + +It would be great for D to also support this. + +The basic ideas of GLR are explained here: + +https://www.codeproject.com/Articles/5259825/GLR-Parsing-in-Csharp-How-to-Use-The-Most-Powerful + +* Better error messages +The users are not provided with enough tools to forge their error messages. +See for instance "Is there an option to change the message produced by +YYERROR_VERBOSE?" by Simon Sobisch, on bison-help. + +See also +https://www.cs.tufts.edu/~nr/cs257/archive/clinton-jefferey/lr-error-messages.pdf +https://research.swtch.com/yyerror +http://gallium.inria.fr/~fpottier/publis/fpottier-reachability-cc2016.pdf + +* Modernization +Fix data/skeletons/yacc.c so that it defines YYPTRDIFF_T properly for modern +and older C++ compilers. Currently the code defaults to defining it to +'long' for non-GCC compilers, but it should use the proper C++ magic to +define it to the same type as the C ptrdiff_t type. + +* Completion +Several features are not available in all the back-ends. + +- push parsers: glr.c, glr.cc, lalr1.cc (not very difficult) +- token constructors: Java, C, D (a bit difficult) +- glr: D, Java (super difficult) + +* Bugs +** Autotest has quotation issues +tests/input.at:1730:AT_SETUP([%define errors]) + +-> + +$ ./tests/testsuite -l | grep errors | sed q + 38: input.at:1730 errors + +* Short term +** Better design for diagnostics +The current implementation of diagnostics is ad hoc, it grew organically. 
+It works as a series of calls to several functions, with dependency of the +latter calls on the former. For instance: + + complain (&sym->location, + sym->content->status == needed ? complaint : Wother, + _("symbol %s is used, but is not defined as a token" + " and has no rules; did you mean %s?"), + quote_n (0, sym->tag), + quote_n (1, best->tag)); + if (feature_flag & feature_caret) + location_caret_suggestion (sym->location, best->tag, stderr); + +We should rewrite this in a more FP way: + +1. build a rich structure that denotes the (complete) diagnostic. + "Complete" in the sense that it also contains the suggestions, the list + of possible matches, etc. + +2. send this to the pretty-printing routine. The diagnostic structure + should be sufficient so that we can generate all the 'format' of + diagnostics, including the fixits. + +If properly done, this diagnostic module can be detached from Bison and be +put in gnulib. It could be used, for instance, for errors caught by +xgettext. + +There's certainly already something alike in GCC. At least that's the +impression I get from reading the "-fdiagnostics-format=FORMAT" part of this +page: + +https://gcc.gnu.org/onlinedocs/gcc/Diagnostic-Message-Formatting-Options.html + +** Graphviz display code thoughts +The code for the --graph option is over two files: print_graph, and +graphviz. This is because Bison used to also produce VCG graphs, but since +this is no longer true, maybe we could consider these files for fusion. + +An other consideration worth noting is that print_graph.c (correct me if I +am wrong) should contain generic functions, whereas graphviz.c and other +potential files should contain just the specific code for that output +format. 
It will probably prove difficult to tell if the implementation is +actually generic whilst only having support for a single format, but it +would be nice to keep stuff a bit tidier: right now, the construction of the +bitset used to show reductions is in the graphviz-specific code, and on the +opposite side we have some use of \l, which is graphviz-specific, in what +should be generic code. + +Little effort seems to have been given to factoring these files and their +print{,-xml} counterpart. We would very much like to re-use the pretty format +of states from .output for the graphs, etc. + +Since graphviz dies on medium-to-big grammars, maybe consider an other tool? + +** push-parser +Check it too when checking the different kinds of parsers. And be +sure to check that the initial-action is performed once per parsing. + +** m4 names +b4_shared_declarations is no longer what it is. Make it +b4_parser_declaration for instance. + +** yychar in lalr1.cc +There is a large difference bw maint and master on the handling of +yychar (which was removed in lalr1.cc). See what needs to be +back-ported. + + + /* User semantic actions sometimes alter yychar, and that requires + that yytoken be updated with the new translation. We take the + approach of translating immediately before every use of yytoken. + One alternative is translating here after every semantic action, + but that translation would be missed if the semantic action + invokes YYABORT, YYACCEPT, or YYERROR immediately after altering + yychar. In the case of YYABORT or YYACCEPT, an incorrect + destructor might then be invoked immediately. In the case of + YYERROR, subsequent parser actions might lead to an incorrect + destructor call or verbose syntax error message before the + lookahead is translated. */ + + /* Make sure we have latest lookahead translation. See comments at + user semantic actions for why this is necessary. */ + yytoken = yytranslate_ (yychar); + + +** Get rid of fake #lines [Bison: ...] 
+Possibly as simple as checking whether the column number is nonnegative. + +I have seen messages like the following from GCC. + +:0: fatal error: opening dependency file .deps/libltdl/argz.Tpo: No such file or directory + + +** Discuss about %printer/%destroy in the case of C++. +It would be very nice to provide the symbol classes with an operator<< +and a destructor. Unfortunately the syntax we have chosen for +%destroy and %printer make them hard to reuse. For instance, the user +is invited to write something like + + %printer { debug_stream() << $$; } ; + +which is hard to reuse elsewhere since it wants to use +"debug_stream()" to find the stream to use. The same applies to +%destroy: we told the user she could use the members of the Parser +class in the printers/destructors, which is not good for an operator<< +since it is no longer bound to a particular parser, it's just a +(standalone symbol). + +* Various +** Rewrite glr.cc in C++ (Valentin Tolmer) +As a matter of fact, it would be very interesting to see how much we can +share between lalr1.cc and glr.cc. Most of the skeletons should be common. +It would be a very nice source of inspiration for the other languages. + +Valentin Tolmer is working on this. + +* From lalr1.cc to yacc.c +** Single stack +Merging the three stacks in lalr1.cc simplified the code, prompted for +other improvements and also made it faster (probably because memory +management is performed once instead of three times). I suggest that +we do the same in yacc.c. + +(Some time later): it's also very nice to have three stacks: it's more dense +as we don't lose bits to padding. For instance the typical stack for states +will use 8 bits, while it is likely to consume 32 bits in a struct. + +We need trustworthy benchmarks for Bison, for all our backends. Akim has a +few things scattered around; we need to put them in the repo, and make them +more useful. 
+ +* Report + +** Figures +Some statistics about the grammar and the parser would be useful, +especially when asking the user to send some information about the +grammars she is working on. We should probably also include some +information about the variables (I'm not sure for instance we even +specify what LR variant was used). + +** GLR +How would Paul like to display the conflicted actions? In particular, +what when two reductions are possible on a given lookahead token, but one is +part of $default. Should we make the two reductions explicit, or just +keep $default? See the following point. + +** Disabled Reductions +See 'tests/conflicts.at (Defaulted Conflicted Reduction)', and decide +what we want to do. + +** Documentation +Extend with error productions. The hard part will probably be finding +the right rule so that a single state does not exhibit too many yet +undocumented ''features''. Maybe an empty action ought to be +presented too. Shall we try to make a single grammar with all these +features, or should we have several very small grammars? + +* Extensions +** More languages? +Well, only if there is really some demand for it. + +*** PHP +https://github.com/scfc/bison-php/blob/master/data/lalr1.php + +*** Python +https://lists.gnu.org/r/bison-patches/2013-09/msg00000.html and following + +** Multiple start symbols +Revert a70e75b8a41755ab96ab211a0ea111ac68a4aadd. +Revert tests: disable "Multistart reports". + +Would be very useful when parsing closely related languages. The idea is to +declare several start symbols, for instance + + %start stmt expr + %% + stmt: ... + expr: ... + +and to generate parse(), parse_stmt() and parse_expr(). Technically, the +above grammar would be transformed into + + %start yy_start + %token YY_START_STMT YY_START_EXPR + %% + yy_start: YY_START_STMT stmt | YY_START_EXPR expr + +so that there are no new conflicts in the grammar (as would undoubtedly +happen with yy_start: stmt | expr). 
Then adjust the skeletons so that this +initial token (YY_START_STMT, YY_START_EXPR) be shifted first in the +corresponding parse function. + +*** Number of useless symbols +AT_TEST( +[[%start exp; +exp: exp;]], +[[input.y: warning: 2 nonterminals useless in grammar [-Wother] +input.y: warning: 2 rules useless in grammar [-Wother] +input.y:2.8-10: error: start symbol exp does not derive any sentence]]) + +We should say "1 nonterminal": the other one is $accept, which should not +participate in the count. + +*** Tokens +Do we want to disallow terminal start symbols? The limitation is not +technical. Can it be useful to someone to "parse" a token? + +** %include +This is a popular demand. We already made many changes in the parser that +should make this reasonably easy to implement. + +Bruce Mardle +https://lists.gnu.org/r/bison-patches/2015-09/msg00000.html + +However, there are many other things to do before having such a feature, +because I don't want a % equivalent to #include (which we all learned to +hate). I want something that builds "modules" of grammars, and assembles +them together, paying attention to keep separate bits separated, in pseudo +name spaces. + +** Push parsers +There is demand for push parsers in C++. + +** Generate code instead of tables +This is certainly quite a lot of work. See +https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.50.4539. + +** $-1 +We should find a means to provide an access to values deep in the +stack. For instance, instead of + + baz: qux { $$ = $-1 + $0 + $1; } + +we should be able to have: + + foo($foo) bar($bar) baz($bar): qux($qux) { $baz = $foo + $bar + $qux; } + +Or something like this. + +** %if and the like +It should be possible to have %if/%else/%endif. The implementation is +not clear: should it be lexical or syntactic. Vadim Maslow thinks it +must be in the scanner: we must not parse what is in a switched off +part of %if. 
Akim Demaille thinks it should be in the parser, so as +to avoid falling into another CPP mistake. + +(Later): I'm sure there's actually good case for this. People who need that +feature can use m4/cpp on top of Bison. I don't think it is worth the +trouble in Bison itself. + +** XML Output +There are couple of available extensions of Bison targeting some XML +output. Some day we should consider including them. One issue is +that they seem to be quite orthogonal to the parsing technique, and +seem to depend mostly on the possibility to have some code triggered +for each reduction. As a matter of fact, such hooks could also be +used to generate the yydebug traces. Some generic scheme probably +exists in there. + +XML output for GNU Bison and gcc + http://www.cs.may.ie/~jpower/Research/bisonXML/ + +XML output for GNU Bison + http://yaxx.sourceforge.net/ + +* Coding system independence +Paul notes: + + Currently Bison assumes 8-bit bytes (i.e. that UCHAR_MAX is + 255). It also assumes that the 8-bit character encoding is + the same for the invocation of 'bison' as it is for the + invocation of 'cc', but this is not necessarily true when + people run bison on an ASCII host and then use cc on an EBCDIC + host. I don't think these topics are worth our time + addressing (unless we find a gung-ho volunteer for EBCDIC or + PDP-10 ports :-) but they should probably be documented + somewhere. + + More importantly, Bison does not currently allow NUL bytes in + tokens, either via escapes (e.g., "x\0y") or via a NUL byte in + the source code. This should get fixed. + +* Broken options? +** %token-table +** Skeleton strategy +Must we keep %token-table? + +* Precedence + +** Partial order +It is unfortunate that there is a total order for precedence. It +makes it impossible to have modular precedence information. We should +move to partial orders (sounds like series/parallel orders to me). + +This is a prerequisite for modules. + +* Pre and post actions. 
+From: Florian Krohm +Subject: YYACT_EPILOGUE +To: bug-bison@gnu.org +X-Sent: 1 week, 4 days, 14 hours, 38 minutes, 11 seconds ago + +The other day I had the need for explicitly building the parse tree. I +used %locations for that and defined YYLLOC_DEFAULT to call a function +that returns the tree node for the production. Easy. But I also needed +to assign the S-attribute to the tree node. That cannot be done in +YYLLOC_DEFAULT, because it is invoked before the action is executed. +The way I solved this was to define a macro YYACT_EPILOGUE that would +be invoked after the action. For reasons of symmetry I also added +YYACT_PROLOGUE. Although I had no use for that I can envision how it +might come in handy for debugging purposes. +All is needed is to add + +#if YYLSP_NEEDED + YYACT_EPILOGUE (yyval, (yyvsp - yylen), yylen, yyloc, (yylsp - yylen)); +#else + YYACT_EPILOGUE (yyval, (yyvsp - yylen), yylen); +#endif + +at the proper place to bison.simple. Ditto for YYACT_PROLOGUE. + +I was wondering what you think about adding YYACT_PROLOGUE/EPILOGUE +to bison. If you're interested, I'll work on a patch. + +* Better graphics +Equip the parser with a means to create the (visual) parse tree. 
+ + +----- + +# LocalWords: Cex gnulib gl Bistromathic TokenKinds yylex enum YYEOF EOF +# LocalWords: YYerror gettext af hb YYERRCODE undef calc FIXME dev yyerror +# LocalWords: Autoconf YYUNDEFTOK lexemes parsers Bistromathic's yyreport +# LocalWords: const argc yacc yyclearin lookahead destructor Rici incluent +# LocalWords: yydestruct yydiscardin catégories d'avertissements sr activé +# LocalWords: conflits défaut rr l'alias chaîne n'est attaché un symbole +# LocalWords: obsolète règle vide midrule valeurs de intermédiaire ou avec +# LocalWords: définies inutilisées priorité associativité inutiles POSIX +# LocalWords: incompatibilités tous les autres avertissements sauf dans rp +# LocalWords: désactiver CATEGORIE traiter comme des erreurs glr Akim bool +# LocalWords: Demaille arith lalr goto struct pathlen nullable ntokens lr +# LocalWords: nterm bitsetv ielr ritem nstates nrules nritems yysymbol EQ +# LocalWords: SymbolKind YYEMPTY YYUNDEF YYTNAME NUM yyntokens yytname sed +# LocalWords: nonterminals yykind yycode YYNAMES yynames init getName conv +# LocalWords: TokenKind ival yychar yylval yylexer Tolmer hoc +# LocalWords: Sobisch YYPTRDIFF ptrdiff Autotest toknum yytoknum +# LocalWords: sym Wother stderr FP fixits xgettext fdiagnostics Graphviz +# LocalWords: graphviz VCG bitset xml bw maint yytoken YYABORT deps +# LocalWords: YYACCEPT yytranslate nonnegative destructors yyerrlab repo +# LocalWords: backends stmt expr yy Mardle baz qux Vadim Maslow CPP cpp +# LocalWords: yydebug gcc UCHAR EBCDIC gung PDP NUL Pre Florian Krohm utf +# LocalWords: YYACT YYLLOC YYLSP yyval yyvsp yylen yyloc yylsp endif +# LocalWords: ispell american + +Local Variables: +mode: outline +coding: utf-8 +fill-column: 76 +ispell-dictionary: "american" +End: + +Copyright (C) 2001-2004, 2006, 2008-2015, 2018-2021 Free Software +Foundation, Inc. + +This file is part of Bison, the GNU Compiler Compiler. 
+ +Permission is granted to copy, distribute and/or modify this document +under the terms of the GNU Free Documentation License, Version 1.3 or +any later version published by the Free Software Foundation; with no +Invariant Sections, with no Front-Cover Texts, and with no Back-Cover +Texts. A copy of the license is included in the "GNU Free +Documentation License" file as part of this distribution. diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/README.md b/platform/dbops/binaries/build/share/doc/bison/examples/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cc467dc0c38ba987af98a54822ce23c54be58683 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/README.md @@ -0,0 +1,28 @@ +This directory contains examples of Bison grammar files, sorted per +language. + +Several of them come from the documentation, which should be installed +together with Bison. The URLs are provided for convenience. + +These examples come with a README and a Makefile. Not only can they be used +to toy with Bison, they can also be starting points for your own grammars. + +Please, be sure to read the C examples before looking at the other +languages, as these examples are simpler. + + diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c++/Makefile b/platform/dbops/binaries/build/share/doc/bison/examples/c++/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..4742fa95cd6441e6fd948cd2b9344b3f504aafd8 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c++/Makefile @@ -0,0 +1,21 @@ +# This Makefile is designed to be simple and readable. It does not +# aim at portability. It requires GNU Make. 
+ +BISON = bison +CXX = g++ +CXXFLAGS = +PROGS = simple variant variant-11 + +simple: CXXFLAGS = -std=c++14 +variant-11: CXXFLAGS = -std=c++11 + +all: $(PROGS) + +%.cc %.hh %.html %.gv: %.yy + $(BISON) $(BISONFLAGS) --html --graph -o $*.cc $< + +%: %.cc + $(CXX) $(CXXFLAGS) -o$@ $< + +clean: + rm -f $(PROGS:=.cc) $(PROGS) diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c++/README.md b/platform/dbops/binaries/build/share/doc/bison/examples/c++/README.md new file mode 100644 index 0000000000000000000000000000000000000000..dff3783e5aba396a4ac6952f753175d8cb0e6b1a --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c++/README.md @@ -0,0 +1,68 @@ +# Examples in C++ + +This directory contains examples of Bison grammar files in C++. + +You can run `make` to compile these examples. And `make clean` to tidy +afterwards. + +## simple.yy - Simple example in C++14 +A very simple example in C++, based on variants and symbol constructors. +Variants allow to use any C++ type as semantic value type, and symbol +constructors ensure consistency between declared token kind and effective +semantic value. + +Run as `./simple`. + +Extracted from the documentation: [A Simple C++ +Example](https://www.gnu.org/software/bison/manual/html_node/A-Simple-C_002b_002b-Example.html). + +## variant.yy - Self-contained example in C++98 +A variation of simple.yy, in C++98. + +Run as `./variant`. + +## variant-11.yy - Self-contained example in modern C++ +Another variation of simple.yy, closely related to the previous one, but +exhibiting support for C++11's move semantics. + +Run as `./variant` or `./variant NUMBER`. + +## calc++ - A Complete C++ Example +A fully featured C++ version of the canonical example for parsers: a +calculator. Also uses Flex for the scanner. + +Don't look at this example first: it is fully featured and can serve as a +starting point for a clean parser in C++. 
The previous examples are better +introductory examples, and the C examples are also useful introductory +examples. + +Extracted from the documentation: [A Complete C++ +Example](https://www.gnu.org/software/bison/manual/html_node/A-Complete-C_002b_002b-Example.html). + +## glr + +This example demonstrates the use of GLR parsers to handle (local) +ambiguities in the C++ language. See the node "Merging GLR Parses" in +Bison's documentation. + +It uses (Bison) variants to store objects as semantic values. It also +demonstrates custom error messages in C++. + + diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/Makefile b/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..bf57a3639dd752e7d73c93041bb3204f8725f709 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/Makefile @@ -0,0 +1,36 @@ +# This Makefile is designed to be simple and readable. It does not +# aim at portability. It requires GNU Make. + +BASE = calc++ +BISON = bison +CXX = g++ +FLEX = flex + +all: $(BASE) + +%.cc %.hh %.html %.gv: %.yy + $(BISON) $(BISONFLAGS) --html --graph -o $*.cc $< + +%.cc: %.ll + $(FLEX) $(FLEXFLAGS) -o$@ $< + +%.o: %.cc + $(CXX) $(CXXFLAGS) -c -o$@ $< + +$(BASE): $(BASE).o driver.o parser.o scanner.o + $(CXX) -o $@ $^ + +$(BASE).o: parser.hh +parser.o: parser.hh +scanner.o: parser.hh + +run: $(BASE) + @echo "Type arithmetic expressions. Quit with ctrl-d." 
+ ./$< - + +CLEANFILES = \ + $(BASE) *.o \ + parser.hh parser.cc parser.output parser.xml parser.html parser.gv location.hh \ + scanner.cc +clean: + rm -f $(CLEANFILES) diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/README.md b/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6aaebc2aae6731193101b845a62742559864735b --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/README.md @@ -0,0 +1,49 @@ +# calc++ - A Flex+Bison calculator + +This directory contains calc++, a Bison grammar file in C++. If you never +saw the traditional implementation in C, please first read +examples/c/lexcalc, which can be seen as a C precursor of this example. + +Read the corresponding chapter in the documentation: "A Complete C++ +Example". It is also available [on +line](https://www.gnu.org/software/bison/manual/html_node/A-Complete-C_002b_002b-Example.html) +(maybe with a different version of Bison). + +To use it, copy this directory into some work directory, and run `make` to +compile the executable, and try it. It is a simple calculator which accepts +several variable definitions, one per line, and then a single expression to +evaluate. + +The program calc++ expects the file to parse as argument; pass `-` to read +the standard input (and then hit , control-d, to end your input). + +``` +$ ./calc++ - +one := 1 +two := 2 +three := 3 +(one + two * three) * two * three + +42 +``` + +You may pass `-p` to activate the parser debug traces, and `-s` to activate +the scanner's. 
+ + diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/calc++.cc b/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/calc++.cc new file mode 100644 index 0000000000000000000000000000000000000000..26738072180df3c38674dbc5d8c7d8b6f79f4049 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/calc++.cc @@ -0,0 +1,38 @@ +/* Main for calc++. -*- C++ -*- + + Copyright (C) 2005-2015, 2018-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +#include +#include "driver.hh" + +int +main (int argc, char *argv[]) +{ + int res = 0; + driver drv; + for (int i = 1; i < argc; ++i) + if (argv[i] == std::string ("-p")) + drv.trace_parsing = true; + else if (argv[i] == std::string ("-s")) + drv.trace_scanning = true; + else if (!drv.parse (argv[i])) + std::cout << drv.result << '\n'; + else + res = 1; + return res; +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/driver.cc b/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/driver.cc new file mode 100644 index 0000000000000000000000000000000000000000..ece18994e22a400aa38d275ec39e58ec94760769 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/driver.cc @@ -0,0 +1,41 @@ +/* Driver for calc++. 
-*- C++ -*- + + Copyright (C) 2005-2015, 2018-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +#include "driver.hh" +#include "parser.hh" + +driver::driver () + : trace_parsing (false), trace_scanning (false) +{ + variables["one"] = 1; + variables["two"] = 2; +} + +int +driver::parse (const std::string &f) +{ + file = f; + location.initialize (&file); + scan_begin (); + yy::parser parse (*this); + parse.set_debug_level (trace_parsing); + int res = parse (); + scan_end (); + return res; +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/driver.hh b/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/driver.hh new file mode 100644 index 0000000000000000000000000000000000000000..b288396df8c28d16333ae0ed61183bfc660bba92 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/driver.hh @@ -0,0 +1,57 @@ +/* Driver for calc++. -*- C++ -*- + + Copyright (C) 2005-2015, 2018-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +#ifndef DRIVER_HH +# define DRIVER_HH +# include +# include +# include "parser.hh" + +// Give Flex the prototype of yylex we want ... +# define YY_DECL \ + yy::parser::symbol_type yylex (driver& drv) +// ... and declare it for the parser's sake. +YY_DECL; + +// Conducting the whole scanning and parsing of Calc++. +class driver +{ +public: + driver (); + + std::map variables; + + int result; + + // Run the parser on file F. Return 0 on success. + int parse (const std::string& f); + // The name of the file being parsed. + std::string file; + // Whether to generate parser debug traces. + bool trace_parsing; + + // Handling the scanner. + void scan_begin (); + void scan_end (); + // Whether to generate scanner debug traces. + bool trace_scanning; + // The token's location used by the scanner. + yy::location location; +}; +#endif // ! DRIVER_HH diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/parser.yy b/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/parser.yy new file mode 100644 index 0000000000000000000000000000000000000000..11da515d4e992caa9eecef9b3ebe4c898138a4c5 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/parser.yy @@ -0,0 +1,92 @@ +/* Parser for calc++. -*- C++ -*- + + Copyright (C) 2005-2015, 2018-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +%skeleton "lalr1.cc" // -*- C++ -*- +%require "3.8.2" +%header + +%define api.token.raw + +%define api.token.constructor +%define api.value.type variant +%define parse.assert + +%code requires { + # include + class driver; +} + +// The parsing context. +%param { driver& drv } + +%locations + +%define parse.trace +%define parse.error detailed +%define parse.lac full + +%code { +# include "driver.hh" +} + +%define api.token.prefix {TOK_} +%token + ASSIGN ":=" + MINUS "-" + PLUS "+" + STAR "*" + SLASH "/" + LPAREN "(" + RPAREN ")" +; + +%token IDENTIFIER "identifier" +%token NUMBER "number" +%nterm exp + +%printer { yyo << $$; } <*>; + +%% +%start unit; +unit: assignments exp { drv.result = $2; }; + +assignments: + %empty {} +| assignments assignment {}; + +assignment: + "identifier" ":=" exp { drv.variables[$1] = $3; }; + +%left "+" "-"; +%left "*" "/"; +exp: + "number" +| "identifier" { $$ = drv.variables[$1]; } +| exp "+" exp { $$ = $1 + $3; } +| exp "-" exp { $$ = $1 - $3; } +| exp "*" exp { $$ = $1 * $3; } +| exp "/" exp { $$ = $1 / $3; } +| "(" exp ")" { $$ = $2; } +%% + +void +yy::parser::error (const location_type& l, const std::string& m) +{ + std::cerr << l << ": " << m << '\n'; +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/scanner.ll b/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/scanner.ll new file 
mode 100644 index 0000000000000000000000000000000000000000..a3dd6d56822c8443d525a5f721e95ce74550673a --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c++/calc++/scanner.ll @@ -0,0 +1,171 @@ +/* Scanner for calc++. -*- C++ -*- + + Copyright (C) 2005-2015, 2018-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +%{ /* -*- C++ -*- */ +# include +# include +# include +# include // strerror +# include +# include "driver.hh" +# include "parser.hh" +%} + +%{ +#if defined __clang__ +# define CLANG_VERSION (__clang_major__ * 100 + __clang_minor__) +#endif + +// Clang and ICC like to pretend they are GCC. +#if defined __GNUC__ && !defined __clang__ && !defined __ICC +# define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +#endif + +// Pacify warnings in yy_init_buffer (observed with Flex 2.6.4) +// and GCC 6.4.0, 7.3.0 with -O3. +#if defined GCC_VERSION && 600 <= GCC_VERSION +# pragma GCC diagnostic ignored "-Wnull-dereference" +#endif + +// This example uses Flex's C back end, yet compiles it as C++. +// So expect warnings about C style casts and NULL. 
+#if defined CLANG_VERSION && 500 <= CLANG_VERSION +# pragma clang diagnostic ignored "-Wold-style-cast" +# pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" +#elif defined GCC_VERSION && 407 <= GCC_VERSION +# pragma GCC diagnostic ignored "-Wold-style-cast" +# pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant" +#endif + +#define FLEX_VERSION (YY_FLEX_MAJOR_VERSION * 100 + YY_FLEX_MINOR_VERSION) + +// Old versions of Flex (2.5.35) generate an incomplete documentation comment. +// +// In file included from src/scan-code-c.c:3: +// src/scan-code.c:2198:21: error: empty paragraph passed to '@param' command +// [-Werror,-Wdocumentation] +// * @param line_number +// ~~~~~~~~~~~~~~~~~^ +// 1 error generated. +#if FLEX_VERSION < 206 && defined CLANG_VERSION +# pragma clang diagnostic ignored "-Wdocumentation" +#endif + +// Old versions of Flex (2.5.35) use 'register'. Warnings introduced in +// GCC 7 and Clang 6. +#if FLEX_VERSION < 206 +# if defined CLANG_VERSION && 600 <= CLANG_VERSION +# pragma clang diagnostic ignored "-Wdeprecated-register" +# elif defined GCC_VERSION && 700 <= GCC_VERSION +# pragma GCC diagnostic ignored "-Wregister" +# endif +#endif + +#if FLEX_VERSION < 206 +# if defined CLANG_VERSION +# pragma clang diagnostic ignored "-Wconversion" +# pragma clang diagnostic ignored "-Wdocumentation" +# pragma clang diagnostic ignored "-Wshorten-64-to-32" +# pragma clang diagnostic ignored "-Wsign-conversion" +# elif defined GCC_VERSION +# pragma GCC diagnostic ignored "-Wconversion" +# pragma GCC diagnostic ignored "-Wsign-conversion" +# endif +#endif + +// Flex 2.6.4, GCC 9 +// warning: useless cast to type 'int' [-Wuseless-cast] +// 1361 | YY_CURRENT_BUFFER_LVALUE->yy_buf_size = (int) (new_size - 2); +// | ^ +#if defined GCC_VERSION && 900 <= GCC_VERSION +# pragma GCC diagnostic ignored "-Wuseless-cast" +#endif +%} + +%option noyywrap nounput noinput batch debug + +%{ + // A number symbol corresponding to the value in S. 
+ yy::parser::symbol_type + make_NUMBER (const std::string &s, const yy::parser::location_type& loc); +%} + +id [a-zA-Z][a-zA-Z_0-9]* +int [0-9]+ +blank [ \t\r] + +%{ + // Code run each time a pattern is matched. + # define YY_USER_ACTION loc.columns (yyleng); +%} +%% +%{ + // A handy shortcut to the location held by the driver. + yy::location& loc = drv.location; + // Code run each time yylex is called. + loc.step (); +%} +{blank}+ loc.step (); +\n+ loc.lines (yyleng); loc.step (); + +"-" return yy::parser::make_MINUS (loc); +"+" return yy::parser::make_PLUS (loc); +"*" return yy::parser::make_STAR (loc); +"/" return yy::parser::make_SLASH (loc); +"(" return yy::parser::make_LPAREN (loc); +")" return yy::parser::make_RPAREN (loc); +":=" return yy::parser::make_ASSIGN (loc); + +{int} return make_NUMBER (yytext, loc); +{id} return yy::parser::make_IDENTIFIER (yytext, loc); +. { + throw yy::parser::syntax_error + (loc, "invalid character: " + std::string(yytext)); +} +<> return yy::parser::make_YYEOF (loc); +%% + +yy::parser::symbol_type +make_NUMBER (const std::string &s, const yy::parser::location_type& loc) +{ + errno = 0; + long n = strtol (s.c_str(), NULL, 10); + if (! 
(INT_MIN <= n && n <= INT_MAX && errno != ERANGE)) + throw yy::parser::syntax_error (loc, "integer is out of range: " + s); + return yy::parser::make_NUMBER ((int) n, loc); +} + +void +driver::scan_begin () +{ + yy_flex_debug = trace_scanning; + if (file.empty () || file == "-") + yyin = stdin; + else if (!(yyin = fopen (file.c_str (), "r"))) + { + std::cerr << "cannot open " << file << ": " << strerror (errno) << '\n'; + exit (EXIT_FAILURE); + } +} + +void +driver::scan_end () +{ + fclose (yyin); +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c++/simple.yy b/platform/dbops/binaries/build/share/doc/bison/examples/c++/simple.yy new file mode 100644 index 0000000000000000000000000000000000000000..0ef1dcccb66146cbe47ae23009e9a9b6f59212e9 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c++/simple.yy @@ -0,0 +1,99 @@ +/* Simple variant-based parser. -*- C++ -*- + + Copyright (C) 2018-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +%require "3.2" +%language "c++" + +%define api.value.type variant + +%code +{ + // Print a list of strings. 
+ auto + operator<< (std::ostream& o, const std::vector& ss) + -> std::ostream& + { + o << '{'; + const char *sep = ""; + for (const auto& s: ss) + { + o << sep << s; + sep = ", "; + } + return o << '}'; + } +} + +%define api.token.constructor + +%code +{ + namespace yy + { + // Return the next token. + auto yylex () -> parser::symbol_type + { + static int count = 0; + switch (int stage = count++) + { + case 0: + return parser::make_TEXT ("I have three numbers for you."); + case 1: case 2: case 3: + return parser::make_NUMBER (stage); + case 4: + return parser::make_TEXT ("And that's all!"); + default: + return parser::make_YYEOF (); + } + } + } +} +%% +result: + list { std::cout << $1 << '\n'; } +; + +%nterm > list; +list: + %empty { /* Generates an empty string list */ } +| list item { $$ = $1; $$.push_back ($2); } +; + +%nterm item; +%token TEXT; +%token NUMBER; +item: + TEXT +| NUMBER { $$ = std::to_string ($1); } +; +%% +namespace yy +{ + // Report an error to the user. + auto parser::error (const std::string& msg) -> void + { + std::cerr << msg << '\n'; + } +} + +int main () +{ + yy::parser parse; + return parse (); +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c++/variant-11.yy b/platform/dbops/binaries/build/share/doc/bison/examples/c++/variant-11.yy new file mode 100644 index 0000000000000000000000000000000000000000..edbcff8709b6a32e5a651123f23515c81598933f --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c++/variant-11.yy @@ -0,0 +1,154 @@ +/* + Copyright (C) 2008-2015, 2018-2021 Free Software Foundation, Inc. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +*/ + +%require "3.2" +%debug +%language "c++" +%define api.token.constructor +%define api.value.type variant +%define api.value.automove +%define api.location.file none +%define parse.assert +%locations + +%code requires // *.hh +{ +#include // std::unique_ptr +#include +#include + + using string_uptr = std::unique_ptr; + using string_uptrs = std::vector; +} + +%code // *.cc +{ +#include // INT_MIN, INT_MAX +#include +#include + + namespace yy + { + // Prototype of the yylex function providing subsequent tokens. + static parser::symbol_type yylex (); + + // Print a vector of strings. + std::ostream& + operator<< (std::ostream& o, const string_uptrs& ss) + { + o << '{'; + const char *sep = ""; + for (const auto& s: ss) + { + o << sep << *s; + sep = ", "; + } + return o << '}'; + } + } + + template + string_uptr + make_string_uptr (Args&&... args) + { + // std::make_unique is C++14. + return string_uptr (new std::string{std::forward (args)...}); + } +} + +%token TEXT; +%token NUMBER; +%printer { yyo << '(' << &$$ << ") " << $$; } <*>; +%printer { yyo << *$$; } ; +%token END_OF_FILE 0; + +%type item; +%type list; + +%% + +result: + list { std::cout << $1 << '\n'; } +; + +list: + %empty { /* Generates an empty string list */ } +| list item { $$ = $1; $$.emplace_back ($2); } +; + +item: + TEXT +| NUMBER { $$ = make_string_uptr (std::to_string ($1)); } +; +%% + +// The last number return by the scanner is max - 1. +int max = 4; + +namespace yy +{ + // The yylex function providing subsequent tokens: + // TEXT "I have three numbers for you." + // NUMBER 1 + // NUMBER 2 + // NUMBER ... 
+ // NUMBER max - 1 + // TEXT "And that's all!" + // END_OF_FILE + + static + parser::symbol_type + yylex () + { + static int count = 0; + const int stage = count; + ++count; + auto loc = parser::location_type{nullptr, stage + 1, stage + 1}; + if (stage == 0) + return parser::make_TEXT (make_string_uptr ("I have numbers for you."), std::move (loc)); + else if (stage < max) + return parser::make_NUMBER (stage, std::move (loc)); + else if (stage == max) + return parser::make_TEXT (make_string_uptr ("And that's all!"), std::move (loc)); + else + return parser::make_END_OF_FILE (std::move (loc)); + } + + // Mandatory error function + void + parser::error (const parser::location_type& loc, const std::string& msg) + { + std::cerr << loc << ": " << msg << '\n'; + } +} + +int +main (int argc, const char *argv[]) +{ + if (2 <= argc && isdigit (static_cast (*argv[1]))) + { + auto maxl = strtol (argv[1], nullptr, 10); + max = INT_MIN <= maxl && maxl <= INT_MAX ? int(maxl) : 4; + } + auto&& p = yy::parser{}; + p.set_debug_level (!!getenv ("YYDEBUG")); + return p.parse (); +} + +// Local Variables: +// mode: C++ +// End: diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c++/variant.yy b/platform/dbops/binaries/build/share/doc/bison/examples/c++/variant.yy new file mode 100644 index 0000000000000000000000000000000000000000..8dcab484d6ce6cbce264a764474cc301d4a05111 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c++/variant.yy @@ -0,0 +1,154 @@ +/* + Copyright (C) 2008-2015, 2018-2021 Free Software Foundation, Inc. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +*/ + +%require "3.2" +%debug +%language "c++" +%define api.token.constructor +%define api.value.type variant +%define api.location.file none +%define parse.assert +%locations + +%code requires // *.hh +{ +#include +#include +typedef std::vector strings_type; +} + +%code // *.cc +{ +#include +#include + + namespace yy + { + // Prototype of the yylex function providing subsequent tokens. + static parser::symbol_type yylex (); + + // Print a vector of strings. + std::ostream& + operator<< (std::ostream& o, const strings_type& ss) + { + o << '{'; + const char *sep = ""; + for (strings_type::const_iterator i = ss.begin (), end = ss.end (); + i != end; ++i) + { + o << sep << *i; + sep = ", "; + } + return o << '}'; + } + } + + // Convert to string. + template + std::string + to_string (const T& t) + { + std::ostringstream o; + o << t; + return o.str (); + } +} + +%token <::std::string> TEXT; +%token NUMBER; +%printer { yyo << '(' << &$$ << ") " << $$; } <*>; +%token END_OF_FILE 0; + +%type <::std::string> item; +%type <::std::vector> list; + +%% + +result: + list { std::cout << $1 << '\n'; } +; + +list: + %empty { /* Generates an empty string list */ } +| list item { std::swap ($$, $1); $$.push_back ($2); } +; + +item: + TEXT +| NUMBER { $$ = to_string ($1); } +; +%% + +namespace yy +{ + // Use nullptr with pre-C++11. +#if !defined __cplusplus || __cplusplus < 201103L +# define NULLPTR 0 +#else +# define NULLPTR nullptr +#endif + + // The yylex function providing subsequent tokens: + // TEXT "I have three numbers for you." + // NUMBER 1 + // NUMBER 2 + // NUMBER 3 + // TEXT "And that's all!" 
+ // END_OF_FILE + + static + parser::symbol_type + yylex () + { + static int count = 0; + const int stage = count; + ++count; + parser::location_type loc (NULLPTR, stage + 1, stage + 1); + switch (stage) + { + case 0: + return parser::make_TEXT ("I have three numbers for you.", loc); + case 1: + case 2: + case 3: + return parser::make_NUMBER (stage, loc); + case 4: + return parser::make_TEXT ("And that's all!", loc); + default: + return parser::make_END_OF_FILE (loc); + } + } + + // Mandatory error function + void + parser::error (const parser::location_type& loc, const std::string& msg) + { + std::cerr << loc << ": " << msg << '\n'; + } +} + +int +main () +{ + yy::parser p; + p.set_debug_level (!!getenv ("YYDEBUG")); + return p.parse (); +} + +// Local Variables: +// mode: C++ +// End: diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/README.md b/platform/dbops/binaries/build/share/doc/bison/examples/c/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a7561c68c40c3635c567915b5876e9e5d2f877fc --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/README.md @@ -0,0 +1,99 @@ +# Examples in C + +This directory contains simple examples of Bison grammar files in C. + +Some of them come from the documentation, which should be installed together +with Bison. The URLs are provided for convenience. + +## rpcalc - Reverse Polish Notation Calculator +The first example is that of a simple double-precision Reverse Polish +Notation calculator (a calculator using postfix operators). This example +provides a good starting point, since operator precedence is not an issue. + +Extracted from the documentation: [Reverse Polish Notation +Calculator](https://www.gnu.org/software/bison/manual/html_node/RPN-Calc.html). 
+ +## calc - Simple Calculator +This example is slightly more complex than rpcalc: it features infix +operators (`1 + 2`, instead of `1 2 +` in rpcalc), but it does so using a +unambiguous grammar of the arithmetic instead of using precedence +directives (%left, etc.). + +## mfcalc - Multi-Function Calculator +A more complete C example: a multi-function calculator. More complex than +the previous example. Using precedence directives to support infix +operators. + +Extracted from the documentation: [Multi-Function Calculator: +mfcalc](https://www.gnu.org/software/bison/manual/html_node/Multi_002dfunction-Calc.html). + +## lexcalc - calculator with Flex and Bison +The calculator with precedence directives and location tracking. It uses +Flex to generate the scanner. + +## reccalc - recursive calculator with Flex and Bison +This example builds on top of the previous one to provide a reentrant +parser. Such parsers can be called concurrently in different threads, or +even recursively. To demonstrate this feature, expressions in parentheses +are tokenized as strings, and then recursively parsed from the parser. So +`(((1)+(2))*((3)+(4)))` uses eight parsers, with a depth of four. + +## pushcalc - calculator implemented with a push parser +All the previous examples are so called "pull parsers": the user invokes the +parser once, which repeatedly calls the scanner until the input is drained. + +This example demonstrates the "push parsers": the user calls the scanner to +fetch the next token, passes it to the parser, and repeats the operation +until the input is drained. + +This example is a straightforward conversion of the 'calc' example to the +push-parser model. + +## bistromathic - all the bells and whistles +This example demonstrates best practices when using Bison. +- Its hand-written scanner tracks locations. +- Its interface is pure. +- It uses %params to pass user information to the parser and scanner. 
+- Its scanner uses the `error` token to signal lexical errors and enter + error recovery. +- Its interface is "incremental", well suited for interaction: it uses the + push-parser API to feed the parser with the incoming tokens. +- It features an interactive command line with completion based on the + parser state, based on `yyexpected_tokens`. +- It uses Bison's standard catalog for internationalization of generated + messages. +- It uses a custom syntax error with location, lookahead correction and + token internationalization. +- Error messages quote the source with squiggles that underline the error: +``` +> 123 456 +1.5-7: syntax error: expected end of file or + or - or * or / or ^ before number + 1 | 123 456 + | ^~~ +``` +- It supports debug traces with semantic values. +- It uses named references instead of the traditional $1, $2, etc. + + diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/bistromathic/Makefile b/platform/dbops/binaries/build/share/doc/bison/examples/c/bistromathic/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..0e61ce66134a55fedd752c26925a3034e21758f2 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/bistromathic/Makefile @@ -0,0 +1,35 @@ +# This Makefile is designed to be simple and readable. It does not +# aim at portability. It requires GNU Make. + +BASE = bistromathic +BISON = bison + +# We need to find the headers and libs for readline (and possibly intl). +# You probably need to customize this for your own environment. +CPPFLAGS = -I/opt/local/include +LDFLAGS = -L/opt/local/lib + +# Find the translation catalog for Bison's generated messagess. +BISON_LOCALEDIR = $(shell $(BISON) $(BISON_FLAGS) --print-localedir) +CPPFLAGS += -DENABLE_NLS -DBISON_LOCALEDIR='"$(BISON_LOCALEDIR)"' + +LIBS = -lreadline -lm # In some environments, -lintl is needed. 
+ +all: $(BASE) + +%.c %.h %.html %.xml %.gv: %.y + $(BISON) $(BISONFLAGS) --header --html --graph -o $*.c $< + +$(BASE): parse.o + $(CC) $(CPPFLAGS) $(CFLAGS) -o $@ $^ $(LDFLAGS) $(LIBS) + +run: $(BASE) + @echo "Type bistromathic expressions. Quit with ctrl-d." + ./$< + +CLEANFILES = \ + $(BASE) *.o \ + parse.[ch] parse.output parse.xml parse.html parse.gv + +clean: + rm -f $(CLEANFILES) diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/bistromathic/README.md b/platform/dbops/binaries/build/share/doc/bison/examples/c/bistromathic/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8528e0bd161efc39b14012f096db50d9c245d39f --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/bistromathic/README.md @@ -0,0 +1,49 @@ +# bistromathic - all the bells and whistles +This example demonstrates best practices when using Bison. +- Its hand-written scanner tracks locations. +- Its interface is pure. +- It uses %params to pass user information to the parser and scanner. +- Its scanner uses the `error` token to signal lexical errors and enter + error recovery. +- Its interface is "incremental", well suited for interaction: it uses the + push-parser API to feed the parser with the incoming tokens. +- It features an interactive command line with completion based on the + parser state, based on `yyexpected_tokens`. +- It uses Bison's standard catalog for internationalization of generated + messages. +- It uses a custom syntax error with location, lookahead correction and + token internationalization. +- Error messages quote the source with squiggles that underline the error: +``` +> 123 456 +1.5-7: syntax error: expected end of file or + or - or * or / or ^ before number + 1 | 123 456 + | ^~~ +``` +- It supports debug traces with semantic values. +- It uses named references instead of the traditional $1, $2, etc. 
+ +To customize the interaction with bistromathic, see the GNU Readline user +manual (see `info rluserman`). + + diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/bistromathic/parse.y b/platform/dbops/binaries/build/share/doc/bison/examples/c/bistromathic/parse.y new file mode 100644 index 0000000000000000000000000000000000000000..8b1591e6bd00adbb332d5a092a5e5a81d0cfc48c --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/bistromathic/parse.y @@ -0,0 +1,695 @@ +/* Parser and scanner for bistromathic. -*- C -*- + + Copyright (C) 2019-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +%require "3.7" + +// Emitted on top of the implementation file. +%code top { + #include // isdigit + #include // LC_ALL + #include // cos, sin, etc. + #include // va_start + #include // printf + #include // calloc + #include // strcmp + + #include + #include + + #if defined ENABLE_NLS && ENABLE_NLS + // Unable the translation of Bison's generated messages. + # define YYENABLE_NLS 1 + # include + // Unless specified otherwise, we expect bistromathic's own + // catalog to be installed in the same tree as Bison's catalog. + # ifndef LOCALEDIR + # define LOCALEDIR BISON_LOCALEDIR + # endif + #endif +} + +// Emitted in the header file, before the definition of YYSTYPE. +%code requires { + // Function type. 
+ typedef double (func_t) (double); + + // Data type for links in the chain of symbols. + typedef struct symrec symrec; + struct symrec + { + char *name; // name of symbol + int type; // type of symbol: either VAR or FUN + union + { + double var; // value of a VAR + func_t *fun; // value of a FUN + } value; + symrec *next; // link field + }; + + symrec *putsym (char const *name, int sym_type); + symrec *getsym (char const *name); + + // Exchanging information with the parser. + typedef struct + { + // Whether to not emit error messages. + int silent; + // The current input line. + const char *line; + } user_context; +} + +// Emitted in the header file, after the definition of YYSTYPE. +%code provides { +# ifndef __attribute__ +# ifndef __GNUC__ +# define __attribute__(Spec) /* empty */ +# endif +# endif + + yytoken_kind_t + yylex (const char **line, YYSTYPE *yylval, YYLTYPE *yylloc, + const user_context *uctx); + void yyerror (const YYLTYPE *loc, const user_context *uctx, + char const *format, ...) + __attribute__ ((__format__ (__printf__, 3, 4))); +} + +// Emitted in the implementation file. +%code { + // Print *LOC on OUT. + static void location_print (FILE *out, YYLTYPE const * const loc); + #define YYLOCATION_PRINT location_print + + #if defined ENABLE_NLS && ENABLE_NLS + # define _(Msgid) gettext (Msgid) + #else + # define _(Msgid) (Msgid) + #endif + + // Whether to quit. + int done = 0; +} + +// Include the header in the implementation rather than duplicating it. +%define api.header.include {"parse.h"} + +// Don't share global variables between the scanner and the parser. +%define api.pure full + +// Generate a push parser. +%define api.push-pull push + +// To avoid name clashes (e.g., with C's EOF) prefix token definitions +// with TOK_ (e.g., TOK_EOF). +%define api.token.prefix {TOK_} + +// Customized syntax error messages (see yyreport_syntax_error)... +%define parse.error custom + +// ... with locations... +%locations + +// ... 
and accurate list of expected tokens. +%define parse.lac full + +// Generate the parser description file (calc.output). +%verbose + +// User information exchanged with the parser and scanner. +%param {const user_context *uctx} + +// Generate YYSTYPE from the types assigned to symbols. +%define api.value.type union +%token + PLUS "+" + MINUS "-" + STAR "*" + SLASH "/" + CARET "^" + LPAREN "(" + RPAREN ")" + EQUAL "=" + EXIT "exit" + + NUM _("number") + + FUN _("function") + VAR _("variable") + +%nterm exp + +// Enable run-time traces (yydebug). +%define parse.trace + +// Formatting semantic values in debug traces. +%printer { fprintf (yyo, "%s", $$->name); } VAR; +%printer { fprintf (yyo, "%s()", $$->name); } FUN; +%printer { fprintf (yyo, "%g", $$); } ; + + +// Precedence (from lowest to highest) and associativity. +%precedence "=" +%left "+" "-" +%left "*" "/" +%precedence NEG // negation--unary minus +%right "^" // exponentiation + +%% // The grammar follows. +input: + %empty +| exp { printf ("%.10g\n", $exp); } +| "exit" { done = 1; } +; + +exp: + NUM +| VAR { $$ = $VAR->value.var; } +| VAR "=" exp { $$ = $3; $VAR->value.var = $3; } +| FUN "(" exp ")" { $$ = $FUN->value.fun ($3); } +| exp[l] "+" exp[r] { $$ = $l + $r; } +| exp[l] "-" exp[r] { $$ = $l - $r; } +| exp[l] "*" exp[r] { $$ = $l * $r; } +| exp[l] "/" exp[r] + { + if ($r == 0) + { + yyerror (&@$, uctx, _("error: division by zero")); + YYERROR; + } + else + $$ = $l / $r; + } +| "-" exp %prec NEG { $$ = -$2; } +| exp[l] "^" exp[r] { $$ = pow ($l, $r); } +| "(" exp ")" { $$ = $2; } +| "(" error ")" { $$ = 666; } +; + +// End of grammar. +%% + +/*------------. +| Functions. | +`------------*/ + +struct init +{ + char const *name; + func_t *fun; +}; + +static struct init const funs[] = +{ + { "atan", atan }, + { "cos", cos }, + { "exp", exp }, + { "ln", log }, + { "sin", sin }, + { "sqrt", sqrt }, + { 0, 0 }, +}; + +// The symbol table: a chain of 'struct symrec'. 
+static symrec *sym_table; + +// Put functions in table. +static void +init_table (void) +{ + for (int i = 0; funs[i].name; i++) + { + symrec *ptr = putsym (funs[i].name, TOK_FUN); + ptr->value.fun = funs[i].fun; + } +} + +symrec * +putsym (char const *name, int sym_type) +{ + symrec *res = (symrec *) malloc (sizeof (symrec)); + res->name = strdup (name); + res->type = sym_type; + res->value.var = 0; // Set value to 0 even if fun. + res->next = sym_table; + sym_table = res; + return res; +} + +symrec * +getsym (char const *name) +{ + for (symrec *p = sym_table; p; p = p->next) + if (strcmp (p->name, name) == 0) + return p; + return NULL; +} + +// How many symbols are registered. +static int +symbol_count (void) +{ + int res = 0; + for (symrec *p = sym_table; p; p = p->next) + ++res; + return res; +} + + + +/*------------. +| Locations. | +`------------*/ + +// Print *LOC on OUT. Do it in a compact way, that avoids redundancy. + +static void +location_print (FILE *out, YYLTYPE const * const loc) +{ + fprintf (out, "%d.%d", loc->first_line, loc->first_column); + + int end_col = 0 != loc->last_column ? loc->last_column - 1 : 0; + if (loc->first_line < loc->last_line) + fprintf (out, "-%d.%d", loc->last_line, end_col); + else if (loc->first_column < end_col) + fprintf (out, "-%d", end_col); +} + + +/*----------. +| Scanner. | +`----------*/ + +yytoken_kind_t +yylex (const char **line, YYSTYPE *yylval, YYLTYPE *yylloc, + const user_context *uctx) +{ + int c; + + // Ignore white space, get first nonwhite character. + do { + // Move the first position onto the last. 
+ yylloc->first_line = yylloc->last_line; + yylloc->first_column = yylloc->last_column; + + yylloc->last_column += 1; + c = *((*line)++); + } while (c == ' ' || c == '\t'); + + switch (c) + { + case '+': return TOK_PLUS; + case '-': return TOK_MINUS; + case '*': return TOK_STAR; + case '/': return TOK_SLASH; + case '^': return TOK_CARET; + case '=': return TOK_EQUAL; + case '(': return TOK_LPAREN; + case ')': return TOK_RPAREN; + + case '!': return TOK_YYUNDEF; + + case '\0': return TOK_YYEOF; + + // Numbers. + case '.': + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + { + int nchars = 0; + if (sscanf (*line - 1, "%lf%n", &yylval->TOK_NUM, &nchars) != 1) + abort (); + *line += nchars - 1; + yylloc->last_column += nchars - 1; + return TOK_NUM; + } + + // Identifiers. + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': case 'g': case 'h': case 'i': case 'j': + case 'k': case 'l': case 'm': case 'n': case 'o': + case 'p': case 'q': case 'r': case 's': case 't': + case 'u': case 'v': case 'w': case 'x': case 'y': + case 'z': + { + int nchars = 0; + char buf[100]; + if (sscanf (*line - 1, "%99[a-z]%n", buf, &nchars) != 1) + abort (); + *line += nchars - 1; + yylloc->last_column += nchars - 1; + if (strcmp (buf, "exit") == 0) + return TOK_EXIT; + else + { + symrec *s = getsym (buf); + if (!s) + s = putsym (buf, TOK_VAR); + yylval->TOK_VAR = s; + return s->type; + } + } + + // Stray characters. + default: + yyerror (yylloc, uctx, _("syntax error: invalid character: %c"), c); + return TOK_YYerror; + } +} + + +/*---------. +| Parser. | +`---------*/ + + +static const char * +error_format_string (int argc) +{ + switch (argc) + { + default: // Avoid compiler warnings. + case 0: return _("%@: syntax error"); + case 1: return _("%@: syntax error: unexpected %u"); + // TRANSLATORS: '%@' is a location in a file, '%u' is an + // "unexpected token", and '%0e', '%1e'... are expected tokens + // at this point. 
+ // + // For instance on the expression "1 + * 2", you'd get + // + // 1.5: syntax error: expected - or ( or number or function or variable before * + case 2: return _("%@: syntax error: expected %0e before %u"); + case 3: return _("%@: syntax error: expected %0e or %1e before %u"); + case 4: return _("%@: syntax error: expected %0e or %1e or %2e before %u"); + case 5: return _("%@: syntax error: expected %0e or %1e or %2e or %3e before %u"); + case 6: return _("%@: syntax error: expected %0e or %1e or %2e or %3e or %4e before %u"); + case 7: return _("%@: syntax error: expected %0e or %1e or %2e or %3e or %4e or %5e before %u"); + case 8: return _("%@: syntax error: expected %0e or %1e or %2e or %3e or %4e or %5e etc., before %u"); + } +} + + +int +yyreport_syntax_error (const yypcontext_t *ctx, const user_context *uctx) +{ + if (uctx->silent) + return 0; + + enum { ARGS_MAX = 6 }; + yysymbol_kind_t arg[ARGS_MAX]; + int argsize = yypcontext_expected_tokens (ctx, arg, ARGS_MAX); + if (argsize < 0) + return argsize; + const int too_many_expected_tokens = argsize == 0 && arg[0] != YYSYMBOL_YYEMPTY; + if (too_many_expected_tokens) + argsize = ARGS_MAX; + const char *format = error_format_string (1 + argsize + too_many_expected_tokens); + + const YYLTYPE *loc = yypcontext_location (ctx); + while (*format) + // %@: location. + if (format[0] == '%' && format[1] == '@') + { + YYLOCATION_PRINT (stderr, loc); + format += 2; + } + // %u: unexpected token. + else if (format[0] == '%' && format[1] == 'u') + { + fputs (yysymbol_name (yypcontext_token (ctx)), stderr); + format += 2; + } + // %0e, %1e...: expected token. + else if (format[0] == '%' + && isdigit ((unsigned char) format[1]) + && format[2] == 'e' + && (format[1] - '0') < argsize) + { + int i = format[1] - '0'; + fputs (yysymbol_name (arg[i]), stderr); + format += 3; + } + else + { + fputc (*format, stderr); + ++format; + } + fputc ('\n', stderr); + + // Quote the source line. 
+ { + fprintf (stderr, "%5d | %s\n", loc->first_line, uctx->line); + fprintf (stderr, "%5s | %*s", "", loc->first_column, "^"); + for (int i = loc->last_column - loc->first_column - 1; 0 < i; --i) + putc ('~', stderr); + putc ('\n', stderr); + } + return 0; +} + + +// Called by yyparse on errors to report the error to the user. +void +yyerror (const YYLTYPE *loc, const user_context *uctx, char const *format, ...) +{ + if (uctx->silent) + return; + + YYLOCATION_PRINT (stderr, loc); + fputs (": ", stderr); + va_list args; + va_start (args, format); + vfprintf (stderr, format, args); + va_end (args); + putc ('\n', stderr); +} + + +// Return a newly allocated copy of at most N bytes of STRING. In +// other words, return a copy of the initial segment of length N of +// STRING. +static char * +xstrndup (const char *string, size_t n) +{ + // len = strnlen (string, n), portably. + const char *end = memchr (string, '\0', n); + size_t len = end ? (size_t) (end - string) : n; + char *new = malloc (len + 1); + if (!new) + abort (); + new[len] = '\0'; + return memcpy (new, string, len); +} + + +/*-----------. +| Readline. | +`-----------*/ + +// Parse (and execute) this line. +static int +process_line (YYLTYPE *lloc, const char *line) +{ + user_context uctx = {0, line}; + yypstate *ps = yypstate_new (); + int status = 0; + do { + YYSTYPE lval; + yytoken_kind_t token = yylex (&line, &lval, lloc, &uctx); + status = yypush_parse (ps, token, &lval, lloc, &uctx); + } while (status == YYPUSH_MORE); + yypstate_delete (ps); + lloc->last_line++; + lloc->last_column = 1; + return status; +} + +// Get the list of possible tokens after INPUT was read. +// Returns a nonnegative. +static int +expected_tokens (const char *input, + int *tokens, int ntokens) +{ + YYDPRINTF ((stderr, "expected_tokens (\"%s\")", input)); + user_context uctx = {1, input}; + + // Parse the current state of the line. 
+ yypstate *ps = yypstate_new (); + int status = 0; + YYLTYPE lloc = { 1, 1, 1, 1 }; + do { + YYSTYPE lval; + yytoken_kind_t token = yylex (&input, &lval, &lloc, &uctx); + // Don't let the parse know when we reach the end of input. + if (token == TOK_YYEOF) + break; + status = yypush_parse (ps, token, &lval, &lloc, &uctx); + } while (status == YYPUSH_MORE); + + int res = 0; + // If there were parse errors, don't propose completions. + if (!ps->yynerrs) + { + // Then query for the accepted tokens at this point. + res = yypstate_expected_tokens (ps, tokens, ntokens); + if (res < 0) + abort (); + } + yypstate_delete (ps); + return res; +} + +// Attempt to complete on the contents of TEXT. START and END bound +// the region of rl_line_buffer that contains the word to complete. +// TEXT is the word to complete. We can use the entire contents of +// rl_line_buffer in case we want to do some simple parsing. Return +// the array of matches, or NULL if there aren't any. +static char ** +completion (const char *text, int start, int end) +{ + YYDPRINTF ((stderr, "completion (\"%.*s[%.*s]%s\")\n", + start, rl_line_buffer, + end - start, rl_line_buffer + start, + rl_line_buffer + end)); + + // Get list of token numbers. + int tokens[YYNTOKENS]; + char *line = xstrndup (rl_line_buffer, (size_t) start); + int ntokens = expected_tokens (line, tokens, YYNTOKENS); + free (line); + + // Build MATCHES, the list of possible completions. + const size_t len = strlen (text); + // Need initial prefix and final NULL. 
+ char **matches + = calloc ((size_t) ntokens + (size_t) symbol_count () + 2, sizeof *matches); + if (!matches) + abort (); + int match = 1; + for (int i = 0; i < ntokens; ++i) + switch (tokens[i]) + { + case YYSYMBOL_FUN: + for (symrec *s = sym_table; s; s = s->next) + if (s->type == TOK_FUN && strncmp (text, s->name, len) == 0) + matches[match++] = strdup (s->name); + break; + case YYSYMBOL_VAR: + for (symrec *s = sym_table; s; s = s->next) + if (s->type == TOK_VAR && strncmp (text, s->name, len) == 0) + matches[match++] = strdup (s->name); + break; + default: + { + const char* token = yysymbol_name (tokens[i]); + if (strncmp (text, token, len) == 0) + matches[match++] = strdup (token); + break; + } + } + + // Find the longest common prefix, and install it in matches[0], as + // required by readline. + if (match == 1) + matches[0] = strdup (text); + else + { + size_t lcplen = strlen (matches[1]); + for (int i = 2; i < match && lcplen; ++i) + for (size_t j = 0; j < lcplen; ++j) + if (matches[1][j] != matches[i][j]) + lcplen = j; + matches[0] = xstrndup (matches[1], lcplen); + } + + if (yydebug) + { + fprintf (stderr, "completion (\"%.*s[%.*s]%s\") = ", + start, rl_line_buffer, + end - start, rl_line_buffer + start, + rl_line_buffer + end); + for (int i = 1; matches[i]; ++i) + fprintf (stderr, "%s%s", + i == 1 ? "{" : ", ", + matches[i]); + fprintf (stderr, "}\n"); + } + + // Don't fall back to proposing file names. + rl_attempted_completion_over = 1; + return matches; +} + +static void +init_readline (void) +{ + // Allow conditional parsing of the ~/.inputrc file. + rl_readline_name = "bistromathic"; + + // Tell the completer that we want a crack first. + rl_attempted_completion_function = completion; + + // The basic list of characters that signal a break between words + // for the completer routine. + rl_basic_word_break_characters = " \t\n\"\\'`@$><=;|&{(+-*/^)"; +} + + + +/*-------. +| Main. 
| +`-------*/ + +int +main (int argc, char const* argv[]) +{ +#if defined ENABLE_NLS && ENABLE_NLS + // Set up internationalization. + setlocale (LC_ALL, ""); + // Use Bison's standard translation catalog for error messages + // (the generated messages). + bindtextdomain ("bison-runtime", BISON_LOCALEDIR); + // The translation catalog of bistromathic is actually included in + // Bison's. In your own project, use the name of your project. + bindtextdomain ("bison", LOCALEDIR); + textdomain ("bison"); +#endif + + // Enable parse traces on option -p. + if (1 < argc && strcmp (argv[1], "-p") == 0) + yydebug = 1; + init_table (); + init_readline (); + YYLTYPE lloc = {1, 1, 1, 1}; + while (!done) + { + char *line = readline ("> "); + if (!line) + { + // Finish the line started by the prompt. + putchar ('\n'); + break; + } + if (*line) + add_history (line); + process_line (&lloc, line); + free (line); + } +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/calc/Makefile b/platform/dbops/binaries/build/share/doc/bison/examples/c/calc/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..cdf2ae3267dbcb99adc773f242c183cdf3e1d969 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/calc/Makefile @@ -0,0 +1,23 @@ +# This Makefile is designed to be simple and readable. It does not +# aim at portability. It requires GNU Make. + +BASE = calc +BISON = bison + +all: $(BASE) + +%.c %.h %.html %.xml %.gv: %.y + $(BISON) $(BISONFLAGS) --header --html --graph -o $*.c $< + +$(BASE): $(BASE).o + $(CC) $(CFLAGS) -o $@ $^ + +run: $(BASE) + @echo "Type arithmetic expressions. Quit with ctrl-d." 
+ ./$< + +CLEANFILES = \ + $(BASE) *.o $(BASE).[ch] $(BASE).output $(BASE).xml $(BASE).html $(BASE).gv + +clean: + rm -f $(CLEANFILES) diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/calc/README.md b/platform/dbops/binaries/build/share/doc/bison/examples/c/calc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7cb5282d68495f91de964f32f9811f67d1276872 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/calc/README.md @@ -0,0 +1,23 @@ +# calc - calculator with Bison + +This directory contains calc, the traditional example of using Bison to +build a simple calculator. + + diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/calc/calc.y b/platform/dbops/binaries/build/share/doc/bison/examples/c/calc/calc.y new file mode 100644 index 0000000000000000000000000000000000000000..6daf22b33333c8050b043dadbca0edc1753bc00e --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/calc/calc.y @@ -0,0 +1,102 @@ +%code top { + #include + #include /* isdigit. */ + #include /* printf. */ + #include /* abort. */ + #include /* strcmp. */ + + int yylex (void); + void yyerror (char const *); +} + +%define api.header.include {"calc.h"} + +/* Generate YYSTYPE from the types used in %token and %type. */ +%define api.value.type union +%token NUM "number" +%type expr term fact + +/* Generate the parser description file (calc.output). */ +%verbose + +/* Nice error messages with details. */ +%define parse.error detailed + +/* Enable run-time traces (yydebug). */ +%define parse.trace + +/* Formatting semantic values in debug traces. */ +%printer { fprintf (yyo, "%g", $$); } ; + +%% /* The grammar follows. 
*/ +input: + %empty +| input line +; + +line: + '\n' +| expr '\n' { printf ("%.10g\n", $1); } +| error '\n' { yyerrok; } +; + +expr: + expr '+' term { $$ = $1 + $3; } +| expr '-' term { $$ = $1 - $3; } +| term +; + +term: + term '*' fact { $$ = $1 * $3; } +| term '/' fact { $$ = $1 / $3; } +| fact +; + +fact: + "number" +| '(' expr ')' { $$ = $2; } +; + +%% + +int +yylex (void) +{ + int c; + + /* Ignore white space, get first nonwhite character. */ + while ((c = getchar ()) == ' ' || c == '\t') + continue; + + if (c == EOF) + return 0; + + /* Char starts a number => parse the number. */ + if (c == '.' || isdigit (c)) + { + ungetc (c, stdin); + if (scanf ("%lf", &yylval.NUM) != 1) + abort (); + return NUM; + } + + /* Any other character is a token by itself. */ + return c; +} + +/* Called by yyparse on error. */ +void +yyerror (char const *s) +{ + fprintf (stderr, "%s\n", s); +} + +int +main (int argc, char const* argv[]) +{ + /* Enable parse traces on option -p. */ + for (int i = 1; i < argc; ++i) + if (!strcmp (argv[i], "-p")) + yydebug = 1; + return yyparse (); +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/glr/Makefile b/platform/dbops/binaries/build/share/doc/bison/examples/c/glr/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..4bf7149eb5d06a157314fb2e693a01a856a36763 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/glr/Makefile @@ -0,0 +1,23 @@ +# This Makefile is designed to be simple and readable. It does not +# aim at portability. It requires GNU Make. + +BASE = c++-types +BISON = bison + +all: $(BASE) + +%.c %.h %.xml %.gv: %.y + $(BISON) $(BISONFLAGS) --header --graph -o $*.c $< + +$(BASE): $(BASE).o + $(CC) $(CFLAGS) -o $@ $^ + +run: $(BASE) + @echo "Type C++ declarations or expressions. Quit with ctrl-d." 
+ ./$< + +CLEANFILES = \ + $(BASE) *.o $(BASE).[ch] $(BASE).output $(BASE).xml $(BASE).html $(BASE).gv + +clean: + rm -f $(CLEANFILES) diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/glr/README.md b/platform/dbops/binaries/build/share/doc/bison/examples/c/glr/README.md new file mode 100644 index 0000000000000000000000000000000000000000..26067c27fd7e09ea8f2de2a017c5acf10248653b --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/glr/README.md @@ -0,0 +1,24 @@ +# glr + +This example demonstrates the use of GLR parsers to handle (local) +ambiguities in the C++ language. See the node "Merging GLR Parses" in +Bison's documentation. + + diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/glr/c++-types.y b/platform/dbops/binaries/build/share/doc/bison/examples/c/glr/c++-types.y new file mode 100644 index 0000000000000000000000000000000000000000..3a14d9bf0ed4949996f42467656e8500caaef97b --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/glr/c++-types.y @@ -0,0 +1,340 @@ +/* -*- C -*- + Copyright (C) 2020-2021 Free Software Foundation, Inc. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . +*/ + +/* Simplified C++ Type and Expression Grammar. + Written by Paul Hilfinger for Bison's test suite. */ + +%define api.pure +%header +%define api.header.include {"c++-types.h"} +%locations +%debug + +/* Nice error messages with details. 
*/ +%define parse.error detailed + +%code requires +{ + union Node { + struct { + int isNterm; + int parents; + } nodeInfo; + struct { + int isNterm; /* 1 */ + int parents; + char const *form; + union Node *children[3]; + } nterm; + struct { + int isNterm; /* 0 */ + int parents; + char *text; + } term; + }; + typedef union Node Node; +} + +%define api.value.type union + +%code +{ + +#include +#include +#include +#include +#include +#include + + static Node *new_nterm (char const *, Node *, Node *, Node *); + static Node *new_term (char *); + static void free_node (Node *); + static char *node_to_string (const Node *); + static void node_print (FILE *, const Node *); + static Node *stmt_merge (YYSTYPE x0, YYSTYPE x1); + + static void yyerror (YYLTYPE const * const loc, const char *msg); + static yytoken_kind_t yylex (YYSTYPE *lval, YYLTYPE *lloc); +} + +%expect-rr 1 + +%token + TYPENAME "typename" + ID "identifier" + +%right '=' +%left '+' + +%glr-parser + +%type stmt expr decl declarator TYPENAME ID +%destructor { free_node ($$); } +%printer { node_print (yyo, $$); } + +%% + +prog : %empty + | prog stmt { + YYLOCATION_PRINT (stdout, &@2); + fputs (": ", stdout); + node_print (stdout, $2); + putc ('\n', stdout); + fflush (stdout); + free_node ($2); + } + ; + +stmt : expr ';' %merge { $$ = $1; } + | decl %merge + | error ';' { $$ = new_nterm ("", NULL, NULL, NULL); } + ; + +expr : ID + | TYPENAME '(' expr ')' + { $$ = new_nterm ("(%s, %s)", $3, $1, NULL); } + | expr '+' expr { $$ = new_nterm ("+(%s, %s)", $1, $3, NULL); } + | expr '=' expr { $$ = new_nterm ("=(%s, %s)", $1, $3, NULL); } + ; + +decl : TYPENAME declarator ';' + { $$ = new_nterm ("(%s, %s)", $1, $2, NULL); } + | TYPENAME declarator '=' expr ';' + { $$ = new_nterm ("(%s, %s, %s)", $1, + $2, $4); } + ; + +declarator + : ID + | '(' declarator ')' { $$ = $2; } + ; + +%% + +/* A C error reporting function. 
*/ +static void +yyerror (YYLTYPE const * const loc, const char *msg) +{ + YYLOCATION_PRINT (stderr, loc); + fprintf (stderr, ": %s\n", msg); +} + +/* The input file. */ +FILE * input = NULL; + +yytoken_kind_t +yylex (YYSTYPE *lval, YYLTYPE *lloc) +{ + static int lineNum = 1; + static int colNum = 0; + + while (1) + { + int c; + assert (!feof (input)); + c = getc (input); + switch (c) + { + case EOF: + return 0; + case '\t': + colNum = (colNum + 7) & ~7; + break; + case ' ': case '\f': + colNum += 1; + break; + case '\n': + lineNum += 1; + colNum = 0; + break; + default: + { + yytoken_kind_t tok; + lloc->first_line = lloc->last_line = lineNum; + lloc->first_column = colNum; + if (isalpha (c)) + { + char buffer[256]; + unsigned i = 0; + + do + { + buffer[i++] = (char) c; + colNum += 1; + assert (i != sizeof buffer - 1); + c = getc (input); + } + while (isalnum (c) || c == '_'); + + ungetc (c, input); + buffer[i++] = 0; + if (isupper ((unsigned char) buffer[0])) + { + tok = TYPENAME; + lval->TYPENAME = new_term (strcpy (malloc (i), buffer)); + } + else + { + tok = ID; + lval->ID = new_term (strcpy (malloc (i), buffer)); + } + } + else + { + colNum += 1; + tok = c; + } + lloc->last_column = colNum; + return tok; + } + } + } +} + +static Node * +new_nterm (char const *form, Node *child0, Node *child1, Node *child2) +{ + Node *res = malloc (sizeof *res); + res->nterm.isNterm = 1; + res->nterm.parents = 0; + res->nterm.form = form; + res->nterm.children[0] = child0; + if (child0) + child0->nodeInfo.parents += 1; + res->nterm.children[1] = child1; + if (child1) + child1->nodeInfo.parents += 1; + res->nterm.children[2] = child2; + if (child2) + child2->nodeInfo.parents += 1; + return res; +} + +static Node * +new_term (char *text) +{ + Node *res = malloc (sizeof *res); + res->term.isNterm = 0; + res->term.parents = 0; + res->term.text = text; + return res; +} + +static void +free_node (Node *node) +{ + if (!node) + return; + node->nodeInfo.parents -= 1; + /* Free only if 0 
(last parent) or -1 (no parents). */ + if (node->nodeInfo.parents > 0) + return; + if (node->nodeInfo.isNterm == 1) + { + free_node (node->nterm.children[0]); + free_node (node->nterm.children[1]); + free_node (node->nterm.children[2]); + } + else + free (node->term.text); + free (node); +} + +static char * +node_to_string (const Node *node) +{ + char *res; + if (!node) + { + res = malloc (1); + res[0] = 0; + } + else if (node->nodeInfo.isNterm == 1) + { + char *child0 = node_to_string (node->nterm.children[0]); + char *child1 = node_to_string (node->nterm.children[1]); + char *child2 = node_to_string (node->nterm.children[2]); + res = malloc (strlen (node->nterm.form) + strlen (child0) + + strlen (child1) + strlen (child2) + 1); + sprintf (res, node->nterm.form, child0, child1, child2); + free (child2); + free (child1); + free (child0); + } + else + res = strdup (node->term.text); + return res; +} + +static void +node_print (FILE *out, const Node *n) +{ + char *str = node_to_string (n); + fputs (str, out); + free (str); +} + + +static Node * +stmt_merge (YYSTYPE x0, YYSTYPE x1) +{ + return new_nterm ("(%s, %s)", x0.stmt, x1.stmt, NULL); +} + +static int +process (const char *file) +{ + int is_stdin = !file || strcmp (file, "-") == 0; + if (is_stdin) + input = stdin; + else + input = fopen (file, "r"); + assert (input); + int status = yyparse (); + if (!is_stdin) + fclose (input); + return status; +} + +int +main (int argc, char **argv) +{ + if (getenv ("YYDEBUG")) + yydebug = 1; + + int ran = 0; + for (int i = 1; i < argc; ++i) + // Enable parse traces on option -p. 
+ if (strcmp (argv[i], "-p") == 0) + yydebug = 1; + else + { + int status = process (argv[i]); + ran = 1; + if (!status) + return status; + } + + if (!ran) + { + int status = process (NULL); + if (!status) + return status; + } + return 0; +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/lexcalc/Makefile b/platform/dbops/binaries/build/share/doc/bison/examples/c/lexcalc/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..33069256b7574c4ab63f07b750eaea575e79bf5f --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/lexcalc/Makefile @@ -0,0 +1,29 @@ +# This Makefile is designed to be simple and readable. It does not +# aim at portability. It requires GNU Make. + +BASE = lexcalc +BISON = bison +FLEX = flex + +all: $(BASE) + +%.c %.h %.html %.xml %.gv: %.y + $(BISON) $(BISONFLAGS) --header --html --graph -o $*.c $< + +%.c: %.l + $(FLEX) $(FLEXFLAGS) -o$@ $< + +scan.o: parse.h +$(BASE): parse.o scan.o + $(CC) $(CFLAGS) -o $@ $^ + +run: $(BASE) + @echo "Type arithmetic expressions. Quit with ctrl-d." + ./$< + +CLEANFILES = \ + $(BASE) *.o \ + parse.[ch] parse.output parse.xml parse.html parse.gv \ + scan.c +clean: + rm -f $(CLEANFILES) diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/lexcalc/README.md b/platform/dbops/binaries/build/share/doc/bison/examples/c/lexcalc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2636a21d0b1859ff447e847bfdf9082ae671d7f3 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/lexcalc/README.md @@ -0,0 +1,24 @@ +# lexcalc - calculator with Flex and Bison + +This directory contains lexcalc, the traditional example of using Flex and +Bison to build a simple calculator. + +It features detailed syntax errors with locations. 
+ + diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/lexcalc/parse.y b/platform/dbops/binaries/build/share/doc/bison/examples/c/lexcalc/parse.y new file mode 100644 index 0000000000000000000000000000000000000000..ee5598874759b2b89c09a57bfbcc7209837ee361 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/lexcalc/parse.y @@ -0,0 +1,138 @@ +/* Parser for lexcalc. -*- C -*- + + Copyright (C) 2018-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +// Prologue (directives). +%expect 0 + +// Emitted in the header file, after the definition of YYSTYPE. +%code provides +{ + // Tell Flex the expected prototype of yylex. +#define YY_DECL \ + yytoken_kind_t yylex (YYSTYPE* yylval, YYLTYPE *yylloc, int *nerrs) + YY_DECL; + + void yyerror (const YYLTYPE *loc, int *nerrs, const char *msg); +} + +// Emitted on top of the implementation file. +%code top +{ +#include // printf. +#include // getenv. +#include // strcmp. +} + +// Include the header in the implementation rather than duplicating it. +%define api.header.include {"parse.h"} + +// Don't share global variables between the scanner and the parser. +%define api.pure full + +// To avoid name clashes (e.g., with C's EOF) prefix token definitions +// with TOK_ (e.g., TOK_EOF). 
+%define api.token.prefix {TOK_} + +// %token and %type use genuine types (e.g., "%token "). Let +// %bison define YYSTYPE as a union of all these types. +%define api.value.type union + +// Generate detailed error messages. +%define parse.error detailed + +// with locations. +%locations + +// Enable debug traces (see yydebug in main). +%define parse.trace + +// Error count, exchanged between main, yyparse and yylex. +%param {int *nerrs} + +%token + PLUS "+" + MINUS "-" + STAR "*" + SLASH "/" + LPAREN "(" + RPAREN ")" + EOL "end of line" +; + +%token NUM "number" +%type exp +%printer { fprintf (yyo, "%d", $$); } + +// Precedence (from lowest to highest) and associativity. +%left "+" "-" +%left "*" "/" + +%% +// Rules. +input: + %empty +| input line +; + +line: + exp EOL { printf ("%d\n", $exp); } +| error EOL { yyerrok; } +; + +exp: + exp "+" exp { $$ = $1 + $3; } +| exp "-" exp { $$ = $1 - $3; } +| exp "*" exp { $$ = $1 * $3; } +| exp "/" exp + { + if ($3 == 0) + { + yyerror (&@$, nerrs, "error: division by zero"); + YYERROR; + } + else + $$ = $1 / $3; + } +| "(" exp ")" { $$ = $2; } +| NUM { $$ = $1; } +; +%% +// Epilogue (C code). + +void yyerror (const YYLTYPE *loc, int *nerrs, const char *msg) +{ + YYLOCATION_PRINT (stderr, loc); + fprintf (stderr, ": %s\n", msg); + ++*nerrs; +} + +int main (int argc, const char *argv[]) +{ + // Possibly enable parser runtime debugging. + yydebug = !!getenv ("YYDEBUG"); + // Enable parse traces on option -p. + for (int i = 1; i < argc; ++i) + if (strcmp (argv[i], "-p") == 0) + yydebug = 1; + + int nerrs = 0; + yyparse (&nerrs); + // Exit on failure if there were errors. 
+ return !!nerrs; +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/lexcalc/scan.l b/platform/dbops/binaries/build/share/doc/bison/examples/c/lexcalc/scan.l new file mode 100644 index 0000000000000000000000000000000000000000..983fbdafc34f302f8d12b54b57cd0f45df58dbae --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/lexcalc/scan.l @@ -0,0 +1,78 @@ +/* Scanner for lexcalc. -*- C -*- + + Copyright (C) 2018-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +/* Prologue (directives). */ + +/* Disable Flex features we don't need, to avoid warnings. */ +%option nodefault noinput nounput noyywrap + +%{ +#include /* errno, ERANGE */ +#include /* INT_MIN */ +#include /* strtol */ + +#include "parse.h" + + // Each time a rule is matched, advance the end cursor/position. +#define YY_USER_ACTION \ + yylloc->last_column += (int) yyleng; + + // Move the first position onto the last. +#define LOCATION_STEP() \ + do { \ + yylloc->first_line = yylloc->last_line; \ + yylloc->first_column = yylloc->last_column; \ + } while (0) +%} + +%% +%{ + // Each time yylex is called, move the head position to the end one. + LOCATION_STEP (); +%} + /* Rules. 
*/ + +"+" return TOK_PLUS; +"-" return TOK_MINUS; +"*" return TOK_STAR; +"/" return TOK_SLASH; + +"(" return TOK_LPAREN; +")" return TOK_RPAREN; + + /* Scan an integer. */ +[0-9]+ { + errno = 0; + long n = strtol (yytext, NULL, 10); + if (! (INT_MIN <= n && n <= INT_MAX && errno != ERANGE)) + yyerror (yylloc, nerrs, "integer is out of range"); + yylval->TOK_NUM = (int) n; + return TOK_NUM; +} + +"\n" yylloc->last_line++; yylloc->last_column = 1; return TOK_EOL; + + /* Ignore white spaces. */ +[ \t]+ LOCATION_STEP (); continue; + +. yyerror (yylloc, nerrs, "syntax error, invalid character"); continue; + +<> return TOK_YYEOF; +%% +/* Epilogue (C code). */ diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/mfcalc/Makefile b/platform/dbops/binaries/build/share/doc/bison/examples/c/mfcalc/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..4014dd484bde947b02a0f2792cc97353aa84db6b --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/mfcalc/Makefile @@ -0,0 +1,20 @@ +# This Makefile is designed to be simple and readable. It does not +# aim at portability. It requires GNU Make. + +BASE = mfcalc +BISON = bison + +all: $(BASE) + +%.c %.html %.gv: %.y + $(BISON) $(BISONFLAGS) --html --graph -o $*.c $< + +%: %.c + $(CC) $(CFLAGS) -o $@ $< + +run: $(BASE) + @echo "Type arithmetic expressions. Quit with ctrl-d." + ./$< + +clean: + rm -f $(BASE) $(BASE).c $(BASE).html $(BASE).xml $(BASE).gv $(BASE).output diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/mfcalc/calc.h b/platform/dbops/binaries/build/share/doc/bison/examples/c/mfcalc/calc.h new file mode 100644 index 0000000000000000000000000000000000000000..c5baeb962fadb6b2bbd107fa336780f739fd5ad8 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/mfcalc/calc.h @@ -0,0 +1,43 @@ +/* Functions for mfcalc. -*- C -*- + + Copyright (C) 1988-1993, 1995, 1998-2015, 2018-2021 Free Software + Foundation, Inc. 
+ + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +/* Function type. */ +typedef double (func_t) (double); + +/* Data type for links in the chain of symbols. */ +struct symrec +{ + char *name; /* name of symbol */ + int type; /* type of symbol: either VAR or FUN */ + union + { + double var; /* value of a VAR */ + func_t *fun; /* value of a FUN */ + } value; + struct symrec *next; /* link field */ +}; + +typedef struct symrec symrec; + +/* The symbol table: a chain of 'struct symrec'. */ +extern symrec *sym_table; + +symrec *putsym (char const *name, int sym_type); +symrec *getsym (char const *name); diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/mfcalc/mfcalc.y b/platform/dbops/binaries/build/share/doc/bison/examples/c/mfcalc/mfcalc.y new file mode 100644 index 0000000000000000000000000000000000000000..4227d5cfd000f4f35ad1d84504e51ba1f350cac0 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/mfcalc/mfcalc.y @@ -0,0 +1,208 @@ +/* Parser for mfcalc. -*- C -*- + + Copyright (C) 1988-1993, 1995, 1998-2015, 2018-2021 Free Software + Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +%{ + #include /* For printf, etc. */ + #include /* For pow, used in the grammar. */ + #include "calc.h" /* Contains definition of 'symrec'. */ + int yylex (void); + void yyerror (char const *); +%} + +%define api.value.type union /* Generate YYSTYPE from these types: */ +%token NUM /* Double precision number. */ +%token VAR FUN /* Symbol table pointer: variable/function. */ +%nterm exp + +%precedence '=' +%left '-' '+' +%left '*' '/' +%precedence NEG /* negation--unary minus */ +%right '^' /* exponentiation */ +/* Generate the parser description file. */ +%verbose +/* Enable run-time traces (yydebug). */ +%define parse.trace + +/* Formatting semantic values. */ +%printer { fprintf (yyo, "%s", $$->name); } VAR; +%printer { fprintf (yyo, "%s()", $$->name); } FUN; +%printer { fprintf (yyo, "%g", $$); } ; +%% /* The grammar follows. */ +input: + %empty +| input line +; + +line: + '\n' +| exp '\n' { printf ("%.10g\n", $1); } +| error '\n' { yyerrok; } +; + +exp: + NUM +| VAR { $$ = $1->value.var; } +| VAR '=' exp { $$ = $3; $1->value.var = $3; } +| FUN '(' exp ')' { $$ = $1->value.fun ($3); } +| exp '+' exp { $$ = $1 + $3; } +| exp '-' exp { $$ = $1 - $3; } +| exp '*' exp { $$ = $1 * $3; } +| exp '/' exp { $$ = $1 / $3; } +| '-' exp %prec NEG { $$ = -$2; } +| exp '^' exp { $$ = pow ($1, $3); } +| '(' exp ')' { $$ = $2; } +; +/* End of grammar. 
*/ +%% + +struct init +{ + char const *name; + func_t *fun; +}; + +struct init const funs[] = +{ + { "atan", atan }, + { "cos", cos }, + { "exp", exp }, + { "ln", log }, + { "sin", sin }, + { "sqrt", sqrt }, + { 0, 0 }, +}; + +/* The symbol table: a chain of 'struct symrec'. */ +symrec *sym_table; + +/* Put functions in table. */ +static void +init_table (void) +{ + for (int i = 0; funs[i].name; i++) + { + symrec *ptr = putsym (funs[i].name, FUN); + ptr->value.fun = funs[i].fun; + } +} + +/* The mfcalc code assumes that malloc and realloc + always succeed, and that integer calculations + never overflow. Production-quality code should + not make these assumptions. */ +#include +#include /* malloc, realloc. */ +#include /* strlen. */ + +symrec * +putsym (char const *name, int sym_type) +{ + symrec *res = (symrec *) malloc (sizeof (symrec)); + res->name = strdup (name); + res->type = sym_type; + res->value.var = 0; /* Set value to 0 even if fun. */ + res->next = sym_table; + sym_table = res; + return res; +} + +symrec * +getsym (char const *name) +{ + for (symrec *p = sym_table; p; p = p->next) + if (strcmp (p->name, name) == 0) + return p; + return NULL; +} + +#include +#include + +int +yylex (void) +{ + int c = getchar (); + + /* Ignore white space, get first nonwhite character. */ + while (c == ' ' || c == '\t') + c = getchar (); + + if (c == EOF) + return YYEOF; + + /* Char starts a number => parse the number. */ + if (c == '.' || isdigit (c)) + { + ungetc (c, stdin); + if (scanf ("%lf", &yylval.NUM) != 1) + abort (); + return NUM; + } + + /* Char starts an identifier => read the name. */ + if (isalpha (c)) + { + static ptrdiff_t bufsize = 0; + static char *symbuf = 0; + ptrdiff_t i = 0; + do + { + /* If buffer is full, make it bigger. */ + if (bufsize <= i) + { + bufsize = 2 * bufsize + 40; + symbuf = realloc (symbuf, (size_t) bufsize); + } + /* Add this character to the buffer. */ + symbuf[i++] = (char) c; + /* Get another character. 
*/ + c = getchar (); + } + while (isalnum (c)); + + ungetc (c, stdin); + symbuf[i] = '\0'; + + symrec *s = getsym (symbuf); + if (!s) + s = putsym (symbuf, VAR); + yylval.VAR = s; /* or yylval.FUN = s. */ + return s->type; + } + + /* Any other character is a token by itself. */ + return c; +} + +/* Called by yyparse on error. */ +void yyerror (char const *s) +{ + fprintf (stderr, "%s\n", s); +} + +int main (int argc, char const* argv[]) +{ + /* Enable parse traces on option -p. */ + if (argc == 2 && strcmp(argv[1], "-p") == 0) + yydebug = 1; + init_table (); + return yyparse (); +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/pushcalc/Makefile b/platform/dbops/binaries/build/share/doc/bison/examples/c/pushcalc/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..8352393558e47e4946bf71b6c9e2990aa05f4691 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/pushcalc/Makefile @@ -0,0 +1,23 @@ +# This Makefile is designed to be simple and readable. It does not +# aim at portability. It requires GNU Make. + +BASE = calc +BISON = bison + +all: $(BASE) + +%.c %.h %.html %.gv: %.y + $(BISON) $(BISONFLAGS) --header --html --graph -o $*.c $< + +$(BASE): $(BASE).o + $(CC) $(CFLAGS) -o $@ $^ + +run: $(BASE) + @echo "Type arithmetic expressions. Quit with ctrl-d." 
+ ./$< + +CLEANFILES = \ + $(BASE) *.o $(BASE).[ch] $(BASE).output $(BASE).xml $(BASE).html $(BASE).gv + +clean: + rm -f $(CLEANFILES) diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/pushcalc/README.md b/platform/dbops/binaries/build/share/doc/bison/examples/c/pushcalc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8ec1a3a7569396fcbdc291e2ea5f0a9502b4cc4c --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/pushcalc/README.md @@ -0,0 +1,32 @@ +# pushcalc - push parser with Bison + +This directory contains pushcalc, the traditional calculator, implemented as +a push parser. + +Traditionally Bison is used to create so called "pull parsers": the user +invokes the parser once, which repeatedly calls (pulls) the scanner until +the input is drained. + +This example demonstrates the "push parsers": the user calls scanner to +fetch the next token, passes (pushes) it to the parser, and repeats the +operation until the input is drained. + +This example is a straightforward conversion of the 'calc' example to the +push-parser model. + + diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/pushcalc/calc.y b/platform/dbops/binaries/build/share/doc/bison/examples/c/pushcalc/calc.y new file mode 100644 index 0000000000000000000000000000000000000000..d1fd3d5a8330e412160bdb7c4253f9e1ac8e24b5 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/pushcalc/calc.y @@ -0,0 +1,134 @@ +/* Parser and scanner for pushcalc. -*- C -*- + + Copyright (C) 2020-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +%code top { + #include /* isdigit. */ + #include /* printf. */ + #include /* abort. */ + #include /* strcmp. */ +} + +%code { + int yylex (YYSTYPE *yylval); + void yyerror (char const *); +} + +%define api.header.include {"calc.h"} + +/* Generate YYSTYPE from the types used in %token and %type. */ +%define api.value.type union +%token NUM "number" +%type expr term fact + +/* Don't share global variables between the scanner and the parser. */ +%define api.pure full +/* Generate a push parser. */ +%define api.push-pull push + +/* Nice error messages with details. */ +%define parse.error detailed + +/* Generate the parser description file (calc.output). */ +%verbose + + /* Enable run-time traces (yydebug). */ +%define parse.trace + +/* Formatting semantic values in debug traces. */ +%printer { fprintf (yyo, "%g", $$); } ; + +%% /* The grammar follows. */ +input: + %empty +| input line +; + +line: + '\n' +| expr '\n' { printf ("%.10g\n", $1); } +| error '\n' { yyerrok; } +; + +expr: + expr '+' term { $$ = $1 + $3; } +| expr '-' term { $$ = $1 - $3; } +| term +; + +term: + term '*' fact { $$ = $1 * $3; } +| term '/' fact { $$ = $1 / $3; } +| fact +; + +fact: + "number" +| '(' expr ')' { $$ = $expr; } +; + +%% + +int +yylex (YYSTYPE *yylval) +{ + int c; + + /* Ignore white space, get first nonwhite character. */ + while ((c = getchar ()) == ' ' || c == '\t') + continue; + + if (c == EOF) + return 0; + + /* Char starts a number => parse the number. */ + if (c == '.' 
|| isdigit (c)) + { + ungetc (c, stdin); + if (scanf ("%lf", &yylval->NUM) != 1) + abort (); + return NUM; + } + + /* Any other character is a token by itself. */ + return c; +} + +/* Called by yyparse on error. */ +void +yyerror (char const *s) +{ + fprintf (stderr, "%s\n", s); +} + +int +main (int argc, char const* argv[]) +{ + /* Enable parse traces on option -p. */ + for (int i = 1; i < argc; ++i) + if (!strcmp (argv[i], "-p")) + yydebug = 1; + int status; + yypstate *ps = yypstate_new (); + do { + YYSTYPE lval; + status = yypush_parse (ps, yylex (&lval), &lval); + } while (status == YYPUSH_MORE); + yypstate_delete (ps); + return status; +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/reccalc/Makefile b/platform/dbops/binaries/build/share/doc/bison/examples/c/reccalc/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..69fc0c081bcfa96c413ea208468d7e5e1565e459 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/reccalc/Makefile @@ -0,0 +1,30 @@ +# This Makefile is designed to be simple and readable. It does not +# aim at portability. It requires GNU Make. + +BASE = reccalc +BISON = bison +FLEX = flex + +all: $(BASE) + +%.c %.h %.xml %.gv: %.y + $(BISON) $(BISONFLAGS) --header --graph -o $*.c $< + +%.c %.h: %.l + $(FLEX) $(FLEXFLAGS) -o$*.c --header=$*.h $< + +scan.o: parse.h +parse.o: scan.h +$(BASE): parse.o scan.o + $(CC) $(CFLAGS) -o $@ $^ + +run: $(BASE) + @echo "Type arithmetic expressions. Quit with ctrl-d." 
+ ./$< + +CLEANFILES = \ + $(BASE) *.o \ + parse.[ch] parse.output parse.xml parse.html parse.gv \ + scan.[ch] +clean: + rm -f $(CLEANFILES) diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/reccalc/README.md b/platform/dbops/binaries/build/share/doc/bison/examples/c/reccalc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b89911af2e80f1ee14aae304ea2006c835db3800 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/reccalc/README.md @@ -0,0 +1,38 @@ +# reccalc - recursive calculator with Flex and Bison + +In this example the generated parser is pure and reentrant: it can be used +concurrently in different threads, or recursively. As a proof of this +reentrancy, expressions in parenthesis are tokenized as strings, and then +recursively parsed from the parser: + +``` +exp: STR + { + result r = parse_string ($1); + free ($1); + if (r.nerrs) + { + res->nerrs += r.nerrs; + YYERROR; + } + else + $$ = r.value; + } +``` + + diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/reccalc/parse.y b/platform/dbops/binaries/build/share/doc/bison/examples/c/reccalc/parse.y new file mode 100644 index 0000000000000000000000000000000000000000..81d5bca0e21e65dfcedbf15a4cd81e104b3caec0 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/reccalc/parse.y @@ -0,0 +1,220 @@ +/* Parser for reccalc. -*- C -*- + + Copyright (C) 2019-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +// Prologue (directives). +%expect 0 + +// Emitted in the header file, before the definition of YYSTYPE. +%code requires +{ + #ifndef YY_TYPEDEF_YY_SCANNER_T + # define YY_TYPEDEF_YY_SCANNER_T + typedef void* yyscan_t; + #endif + + typedef struct + { + // Whether to print the intermediate results. + int verbose; + // Value of the last computation. + int value; + // Number of errors. + int nerrs; + } result; +} + +// Emitted in the header file, after the definition of YYSTYPE. +%code provides +{ + // Tell Flex the expected prototype of yylex. + // The scanner argument must be named yyscanner. +#define YY_DECL \ + yytoken_kind_t yylex (YYSTYPE* yylval, yyscan_t yyscanner, result *res) + YY_DECL; + + void yyerror (yyscan_t scanner, result *res, const char *msg, ...); +} + +// Emitted on top of the implementation file. +%code top +{ +#include // va_list. +#include // printf. +#include // getenv. +} + +%code +{ + result parse_string (const char* cp); + result parse (void); +} + +// Include the header in the implementation rather than duplicating it. +%define api.header.include {"parse.h"} + +// Don't share global variables between the scanner and the parser. +%define api.pure full + +// To avoid name clashes (e.g., with C's EOF) prefix token definitions +// with TOK_ (e.g., TOK_EOF). +%define api.token.prefix {TOK_} + +// Generate YYSTYPE from the types assigned to symbols. +%define api.value.type union + +// Error messages with "unexpected XXX, expected XXX...". +%define parse.error detailed + +// Enable run-time traces (yydebug). +%define parse.trace + +// Generate the parser description file (parse.output). +%verbose + +// Scanner and error count are exchanged between main, yyparse and yylex. 
+%param {yyscan_t scanner}{result *res} + +%token + PLUS "+" + MINUS "-" + STAR "*" + SLASH "/" + EOL "end-of-line" + EOF 0 "end-of-file" +; + +%token NUM "number" +%type exp +%printer { fprintf (yyo, "%d", $$); } + +%token STR "string" +%printer { fprintf (yyo, "\"%s\"", $$); } +%destructor { free ($$); } + +// Precedence (from lowest to highest) and associativity. +%left "+" "-" +%left "*" "/" +%precedence UNARY + +%% +// Rules. +input: + line +| input line +; + +line: + exp eol + { + res->value = $exp; + if (res->verbose) + printf ("%d\n", $exp); + } +| error eol + { + yyerrok; + } +; + +eol: + EOF +| EOL +; + +exp: + NUM { $$ = $1; } +| exp "+" exp { $$ = $1 + $3; } +| exp "-" exp { $$ = $1 - $3; } +| exp "*" exp { $$ = $1 * $3; } +| exp "/" exp + { + if ($3 == 0) + { + yyerror (scanner, res, "invalid division by zero"); + YYERROR; + } + else + $$ = $1 / $3; + } +| "+" exp %prec UNARY { $$ = + $2; } +| "-" exp %prec UNARY { $$ = - $2; } +| STR + { + result r = parse_string ($1); + free ($1); + if (r.nerrs) + { + res->nerrs += r.nerrs; + YYERROR; + } + else + $$ = r.value; + } +; + +%% +// Epilogue (C code). +#include "scan.h" + +result +parse (void) +{ + yyscan_t scanner; + yylex_init (&scanner); + result res = {1, 0, 0}; + yyparse (scanner, &res); + yylex_destroy (scanner); + return res; +} + +result +parse_string (const char *str) +{ + yyscan_t scanner; + yylex_init (&scanner); + YY_BUFFER_STATE buf = yy_scan_string (str ? str : "", scanner); + result res = {0, 0, 0}; + yyparse (scanner, &res); + yy_delete_buffer (buf, scanner); + yylex_destroy (scanner); + return res; +} + +void +yyerror (yyscan_t scanner, result *res, + const char *msg, ...) +{ + (void) scanner; + va_list args; + va_start (args, msg); + vfprintf (stderr, msg, args); + va_end (args); + fputc ('\n', stderr); + res->nerrs += 1; +} + +int +main (void) +{ + // Possibly enable parser runtime debugging. 
+ yydebug = !!getenv ("YYDEBUG"); + result res = parse (); + // Exit on failure if there were errors. + return !!res.nerrs; +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/reccalc/scan.l b/platform/dbops/binaries/build/share/doc/bison/examples/c/reccalc/scan.l new file mode 100644 index 0000000000000000000000000000000000000000..a80bf504ab46c57297e0de578281d56b8900f311 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/reccalc/scan.l @@ -0,0 +1,108 @@ +/* Scanner for reccalc. -*- C -*- + + Copyright (C) 2019-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +/* Prologue (directives). -*- C -*- */ + +/* Disable Flex features we don't need, to avoid warnings. */ +%option nodefault noinput nounput noyywrap + +%option reentrant + +%{ +#include +#include /* INT_MIN */ +#include /* strtol */ + +#include "parse.h" +%} + +%x SC_STRING + +%% +%{ + // Number of opened parentheses. + int nesting = 0; + // A buffer storing the text inside the outer parentheses. + char *str = NULL; + // Its allocated size. + int capacity = 0; + // Its used size. + int size = 0; +#define STR_APPEND() \ + do { \ + if (capacity < size + yyleng + 1) \ + { \ + do \ + capacity = capacity ? 
2 * capacity : 128; \ + while (capacity < size + yyleng + 1); \ + str = realloc (str, (size_t) capacity); \ + } \ + memcpy (str + size, yytext, (size_t) yyleng); \ + size += yyleng; \ + assert (size < capacity); \ + } while (0) +%} + + // Rules. + +"+" return TOK_PLUS; +"-" return TOK_MINUS; +"*" return TOK_STAR; +"/" return TOK_SLASH; + +"(" nesting += 1; BEGIN SC_STRING; + + /* Scan an integer. */ +[0-9]+ { + errno = 0; + long n = strtol (yytext, NULL, 10); + if (! (INT_MIN <= n && n <= INT_MAX && errno != ERANGE)) + yyerror (yyscanner, res, "integer is out of range"); + yylval->TOK_NUM = (int) n; + return TOK_NUM; +} + + /* Ignore white spaces. */ +[ \t]+ continue; + +"\n" return TOK_EOL; + +. yyerror (yyscanner, res, "syntax error, invalid character: %c", yytext[0]); + + +{ + "("+ nesting += yyleng; STR_APPEND (); + ")" { + if (!--nesting) + { + BEGIN INITIAL; + if (str) + str[size] = 0; + yylval->TOK_STR = str; + return TOK_STR; + } + else + STR_APPEND (); + } + [^()]+ STR_APPEND (); +} + +<> return TOK_EOF; +%% +/* Epilogue (C code). */ diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/rpcalc/Makefile b/platform/dbops/binaries/build/share/doc/bison/examples/c/rpcalc/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..23ea410cb67f9df2d947c9a6a6700d43346cb5e6 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/rpcalc/Makefile @@ -0,0 +1,20 @@ +# This Makefile is designed to be simple and readable. It does not +# aim at portability. It requires GNU Make. + +BASE = rpcalc +BISON = bison + +all: $(BASE) + +%.c %.html %.gv: %.y + $(BISON) $(BISONFLAGS) --html --graph -o $*.c $< + +%: %.c + $(CC) $(CFLAGS) -o $@ $< + +run: $(BASE) + @echo "Type arithmetic expressions in Reverse Polish Notation. Quit with ctrl-d." 
+ ./$< + +clean: + rm -f $(BASE) $(BASE).c $(BASE).html $(BASE).xml $(BASE).gv diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/c/rpcalc/rpcalc.y b/platform/dbops/binaries/build/share/doc/bison/examples/c/rpcalc/rpcalc.y new file mode 100644 index 0000000000000000000000000000000000000000..bc67c58c150cc654aa01b15fb0a1d8c46c1f7499 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/c/rpcalc/rpcalc.y @@ -0,0 +1,100 @@ +/* Parser for rpcalc. -*- C -*- + + Copyright (C) 1988-1993, 1995, 1998-2015, 2018-2021 Free Software + Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +/* Reverse Polish Notation calculator. */ + +%{ + #include + #include + int yylex (void); + void yyerror (char const *); +%} + +%define api.value.type {double} +%token NUM + +%% /* Grammar rules and actions follow. */ + +input: + %empty +| input line +; + +line: + '\n' +| exp '\n' { printf ("%.10g\n", $1); } +; + +exp: + NUM +| exp exp '+' { $$ = $1 + $2; } +| exp exp '-' { $$ = $1 - $2; } +| exp exp '*' { $$ = $1 * $2; } +| exp exp '/' { $$ = $1 / $2; } +| exp exp '^' { $$ = pow ($1, $2); } /* Exponentiation */ +| exp 'n' { $$ = -$1; } /* Unary minus */ +; +%% + +/* The lexical analyzer returns a double floating point + number on the stack and the token NUM, or the numeric code + of the character read if not a number. 
It skips all blanks + and tabs, and returns 0 for end-of-input. */ + +#include +#include + +int +yylex (void) +{ + int c = getchar (); + /* Skip white space. */ + while (c == ' ' || c == '\t') + c = getchar (); + /* Process numbers. */ + if (c == '.' || isdigit (c)) + { + ungetc (c, stdin); + if (scanf ("%lf", &yylval) != 1) + abort (); + return NUM; + } + /* Return end-of-input. */ + else if (c == EOF) + return YYEOF; + /* Return a single char. */ + else + return c; +} + +int +main (void) +{ + return yyparse (); +} + +#include + +/* Called by yyparse on error. */ +void +yyerror (char const *s) +{ + fprintf (stderr, "%s\n", s); +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/d/README.md b/platform/dbops/binaries/build/share/doc/bison/examples/d/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b060648efd6aa459a8b16300044ad9b40528fa78 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/d/README.md @@ -0,0 +1,31 @@ +# Examples in D + +This directory contains examples of Bison grammar files in D. + +You can run `make` to compile these examples. And `make clean` to tidy +afterwards. + +## d/simple.y +The usual calculator. + +## d/calc.y +A richer implementation of the calculator, with location tracking. + + diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/d/calc/Makefile b/platform/dbops/binaries/build/share/doc/bison/examples/d/calc/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..e95960b36386e619792622213e3e646af6dbe134 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/d/calc/Makefile @@ -0,0 +1,20 @@ +# This Makefile is designed to be simple and readable. It does not +# aim at portability. It requires GNU Make. + +BISON = bison +DC = dmd + +all: calc + +%.d %.html %.gv: %.y + $(BISON) $(BISONFLAGS) --html --graph -o $*.d $< + +%: %.d + $(DC) $(DCFLAGS) $< + +run: calc + @echo "Type arithmetic expressions. 
Quit with ctrl-d." + ./$< + +clean: + rm -f calc calc.d calc.xml calc.gv calc.html *.o diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/d/calc/calc.y b/platform/dbops/binaries/build/share/doc/bison/examples/d/calc/calc.y new file mode 100644 index 0000000000000000000000000000000000000000..f2b0e8b2d6e8cbcdcdb3870bef962e7f0310f302 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/d/calc/calc.y @@ -0,0 +1,188 @@ +/* Parser and scanner for calc in D. -*- D -*- + + Copyright (C) 2018-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . 
*/ + +%language "D" + +%define api.parser.class {Calc} +%define api.push-pull push +%define api.token.constructor +%define api.value.type union +%define parse.error detailed +%define parse.trace + +%locations + +/* Bison Declarations */ +%token PLUS "+" + MINUS "-" + STAR "*" + SLASH "/" + LPAR "(" + RPAR ")" + EOL "end of line" +%token NUM "number" +%type exp +%printer { yyo.write($$); } + +%left "-" "+" +%left "*" "/" +%precedence UNARY /* unary operators */ + +/* Grammar follows */ +%% +input: + line +| input line +; + +line: + EOL +| exp EOL { writeln ($exp); } +| error EOL { yyerrok(); } +; + +exp: + NUM { $$ = $1; } +| exp "+" exp { $$ = $1 + $3; } +| exp "-" exp { $$ = $1 - $3; } +| exp "*" exp { $$ = $1 * $3; } +| exp "/" exp { $$ = $1 / $3; } +| "+" exp %prec UNARY { $$ = $2; } +| "-" exp %prec UNARY { $$ = -$2; } +| "(" exp ")" { $$ = $2; } +; + +%% +import std.range.primitives; +import std.stdio; + +auto calcLexer(R)(R range) +if (isInputRange!R && is(ElementType!R : dchar)) +{ + return new CalcLexer!R(range); +} + +auto calcLexer(File f) +{ + import std.algorithm : map, joiner; + import std.utf : byDchar; + + return f.byChunk(1024) // avoid making a syscall roundtrip per char + .map!(chunk => cast(char[]) chunk) // because byChunk returns ubyte[] + .joiner // combine chunks into a single virtual range of char + .calcLexer; // forward to other overload +} + +class CalcLexer(R) : Lexer +if (isInputRange!R && is(ElementType!R : dchar)) +{ + R input; + + this(R r) { input = r; } + + Location location; + + // Should be a local in main, shared with %parse-param. 
+ int exit_status = 0; + + void yyerror(const Location loc, string s) + { + exit_status = 1; + stderr.writeln(loc.toString(), ": ", s); + } + + Symbol yylex() + { + import std.uni : isWhite, isNumber; + + // Skip initial spaces + while (!input.empty && input.front != '\n' && isWhite(input.front)) + { + location.end.column++; + input.popFront; + } + location.step(); + + if (input.empty) + return Symbol.YYEOF(location); + + // Numbers. + if (input.front.isNumber) + { + import std.compiler : version_minor; + static if (version_minor >= 95) + { + // from Dlang v2.095.0 onwards std.conv.parse reports + // the number of consumed characters + import std.typecons : Flag, Yes; + import std.conv : parse; + auto parsed = parse!(int, R, Yes.doCount)(input); + int ival = parsed.data; + location.end.column += cast(int) parsed.count; + } + else + { + auto copy = input; + import std.conv : parse; + int ival = input.parse!int; + while (!input.empty && copy.front != input.front) + { + location.end.column++; + copy.popFront; + } + } + return Symbol.NUM(ival, location); + } + + // Individual characters + auto ch = input.front; + input.popFront; + location.end.column++; + switch (ch) + { + case '+': return Symbol.PLUS(location); + case '-': return Symbol.MINUS(location); + case '*': return Symbol.STAR(location); + case '/': return Symbol.SLASH(location); + case '(': return Symbol.LPAR(location); + case ')': return Symbol.RPAR(location); + case '\n': + { + location.end.line++; + location.end.column = 1; + return Symbol.EOL(location); + } + default: assert(0); + } + } +} + +int main() +{ + auto l = calcLexer(stdin); + auto p = new Calc(l); + import core.stdc.stdlib : getenv; + if (getenv("YYDEBUG")) + p.setDebugLevel(1); + int status; + do { + status = p.pushParse(l.yylex()); + } while (status == PUSH_MORE); + return l.exit_status; +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/d/simple/Makefile 
b/platform/dbops/binaries/build/share/doc/bison/examples/d/simple/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..e95960b36386e619792622213e3e646af6dbe134 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/d/simple/Makefile @@ -0,0 +1,20 @@ +# This Makefile is designed to be simple and readable. It does not +# aim at portability. It requires GNU Make. + +BISON = bison +DC = dmd + +all: calc + +%.d %.html %.gv: %.y + $(BISON) $(BISONFLAGS) --html --graph -o $*.d $< + +%: %.d + $(DC) $(DCFLAGS) $< + +run: calc + @echo "Type arithmetic expressions. Quit with ctrl-d." + ./$< + +clean: + rm -f calc calc.d calc.xml calc.gv calc.html *.o diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/d/simple/calc.y b/platform/dbops/binaries/build/share/doc/bison/examples/d/simple/calc.y new file mode 100644 index 0000000000000000000000000000000000000000..09c0626308dc252e2e37a57fe20ff0c384b69d89 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/d/simple/calc.y @@ -0,0 +1,146 @@ +/* Parser and scanner for calc in D. -*- D -*- + + Copyright (C) 2018-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . 
*/ + +%language "D" + +%define api.parser.class {Calc} +%define parse.error detailed + +%union { + int ival; +} + +/* Bison Declarations */ +%token PLUS "+" + MINUS "-" + STAR "*" + SLASH "/" + LPAR "(" + RPAR ")" + EOL "end of line" +%token NUM "number" +%type exp + +%left "-" "+" +%left "*" "/" +%precedence UNARY /* unary operators */ + +/* Grammar follows */ +%% +input: + line +| input line +; + +line: + EOL +| exp EOL { writeln ($exp); } +| error EOL { yyerrok(); } +; + +exp: + NUM { $$ = $1; } +| exp "+" exp { $$ = $1 + $3; } +| exp "-" exp { $$ = $1 - $3; } +| exp "*" exp { $$ = $1 * $3; } +| exp "/" exp { $$ = $1 / $3; } +| "+" exp %prec UNARY { $$ = $2; } +| "-" exp %prec UNARY { $$ = -$2; } +| "(" exp ")" { $$ = $2; } +; + +%% +import std.range.primitives; +import std.stdio; + +auto calcLexer(R)(R range) +if (isInputRange!R && is(ElementType!R : dchar)) +{ + return new CalcLexer!R(range); +} + +auto calcLexer(File f) +{ + import std.algorithm : map, joiner; + import std.utf : byDchar; + + return f.byChunk(1024) // avoid making a syscall roundtrip per char + .map!(chunk => cast(char[]) chunk) // because byChunk returns ubyte[] + .joiner // combine chunks into a single virtual range of char + .calcLexer; // forward to other overload +} + +class CalcLexer(R) : Lexer +if (isInputRange!R && is(ElementType!R : dchar)) +{ + R input; + + this(R r) { input = r; } + + // Should be a local in main, shared with %parse-param. + int exit_status = 0; + + public void yyerror(string s) + { + exit_status = 1; + stderr.writeln(s); + } + + Symbol yylex() + { + import std.uni : isWhite, isNumber; + + // Skip initial spaces + while (!input.empty && input.front != '\n' && isWhite(input.front)) + input.popFront; + + if (input.empty) + return Symbol(TokenKind.YYEOF); + + // Numbers. 
+ if (input.front.isNumber) + { + import std.conv : parse; + return Symbol(TokenKind.NUM, input.parse!int); + } + + // Individual characters + auto ch = input.front; + input.popFront; + switch (ch) + { + case '+': return Symbol(TokenKind.PLUS); + case '-': return Symbol(TokenKind.MINUS); + case '*': return Symbol(TokenKind.STAR); + case '/': return Symbol(TokenKind.SLASH); + case '(': return Symbol(TokenKind.LPAR); + case ')': return Symbol(TokenKind.RPAR); + case '\n': return Symbol(TokenKind.EOL); + default: assert(0); + } + } +} + +int main() +{ + auto l = calcLexer(stdin); + auto p = new Calc(l); + p.parse(); + return l.exit_status; +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/java/README.md b/platform/dbops/binaries/build/share/doc/bison/examples/java/README.md new file mode 100644 index 0000000000000000000000000000000000000000..57b4a6ba2b4a9a3a940b25bff5bd35a21a6080b2 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/java/README.md @@ -0,0 +1,30 @@ +# Examples in Java + +This directory contains examples of Bison grammar files in Java. + +You can run `make` to compile these examples. And `make clean` to tidy +afterwards. + +## simple/Calc.y +The usual calculator, a very simple version. + +## calc/Calc.y +The calculator, but with location tracking, debug traces, and a push parser. + + diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/java/calc/Calc.y b/platform/dbops/binaries/build/share/doc/bison/examples/java/calc/Calc.y new file mode 100644 index 0000000000000000000000000000000000000000..18bd03828d4545d0ee6e29888e9076c2097e80c1 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/java/calc/Calc.y @@ -0,0 +1,317 @@ +/* Parser and scanner for calc in Java. -*- Java -*- + + Copyright (C) 2018-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. 
+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +%language "Java" + +%define api.parser.class {Calc} +%define api.parser.public +%define api.push-pull push + +// Customized syntax error messages (see reportSyntaxError)... +%define parse.error custom + +// ... with locations... +%locations + +// ... and accurate list of expected tokens. +%define parse.lac full + +%define parse.trace + +%code imports { + import java.io.BufferedReader; + import java.io.IOException; + import java.io.InputStream; + import java.io.InputStreamReader; + import java.io.Reader; + import java.io.StreamTokenizer; + import java.nio.CharBuffer; +} + +%code { + public static void main(String[] args) throws IOException { + CalcLexer scanner = new CalcLexer(System.in); + Calc parser = new Calc(scanner); + for (String arg : args) + if (arg.equals("-p")) + parser.setDebugLevel(1); + int status; + do { + int token = scanner.getToken(); + Object lval = scanner.getValue(); + Calc.Location yyloc = scanner.getLocation(); + status = parser.push_parse(token, lval, yyloc); + } while (status == Calc.YYPUSH_MORE); + if (status != Calc.YYACCEPT) + System.exit(1); + } + + static String i18n(String s) { + return s; + } +} + +/* Bison Declarations */ +%token + BANG "!" 
+ PLUS "+" + MINUS "-" + STAR "*" + SLASH "/" + CARET "^" + LPAREN "(" + RPAREN ")" + EQUAL "=" + EOL _("end of line") + + NUM _("number") +%type exp + +%nonassoc "=" /* comparison */ +%left "-" "+" +%left "*" "/" +%precedence NEG /* negation--unary minus */ +%right "^" /* exponentiation */ + +/* Grammar follows */ +%% +input: + line +| input line +; + +line: + EOL +| exp EOL { System.out.println($exp); } +| error EOL +; + +exp: + NUM { $$ = $1; } +| exp "=" exp + { + if ($1.intValue() != $3.intValue()) + yyerror(@$, "calc: error: " + $1 + " != " + $3); + } +| exp "+" exp { $$ = $1 + $3; } +| exp "-" exp { $$ = $1 - $3; } +| exp "*" exp { $$ = $1 * $3; } +| exp "/" exp { $$ = $1 / $3; } +| "-" exp %prec NEG { $$ = -$2; } +| exp "^" exp { $$ = (int) Math.pow($1, $3); } +| "(" exp ")" { $$ = $2; } +| "(" error ")" { $$ = 1111; } +| "!" { $$ = 0; return YYERROR; } +| "-" error { $$ = 0; return YYERROR; } +; + +%% +class CalcLexer implements Calc.Lexer { + + StreamTokenizer st; + PositionReader reader; + + public CalcLexer(InputStream is) { + reader = new PositionReader(new InputStreamReader(is)); + st = new StreamTokenizer(reader); + st.resetSyntax(); + st.eolIsSignificant(true); + st.wordChars('0', '9'); + } + + Position start = new Position(1, 0); + Position end = new Position(1, 0); + + /** + * The location of the last token read. + * Implemented with getStartPos and getEndPos in pull parsers. + */ + public Calc.Location getLocation() { + return new Calc.Location(new Position(start), new Position(end)); + } + + /** + * Build and emit a syntax error message. + */ + public void reportSyntaxError(Calc.Context ctx) { + System.err.print(ctx.getLocation() + ": syntax error"); + { + final int TOKENMAX = 10; + Calc.SymbolKind[] arg = new Calc.SymbolKind[TOKENMAX]; + int n = ctx.getExpectedTokens(arg, TOKENMAX); + for (int i = 0; i < n; ++i) + System.err.print((i == 0 ? 
": expected " : " or ") + + arg[i].getName()); + } + { + Calc.SymbolKind lookahead = ctx.getToken(); + if (lookahead != null) + System.err.print(" before " + lookahead.getName()); + } + System.err.println(""); + } + + /** + * Emit an error referring to the given location in a user-defined way. + * + * @@param loc The location of the element to which the + * error message is related. + * @@param msg The string for the error message. + */ + public void yyerror(Calc.Location loc, String msg) { + if (loc == null) + System.err.println(msg); + else + System.err.println(loc + ": " + msg); + } + + Integer yylval; + + /** + * The value of the last token read. Called getLVal in pull parsers. + */ + public Object getValue() { + return yylval; + } + + /** + * Fetch the next token. Called yylex in pull parsers. + */ + public int getToken() throws IOException { + start.set(reader.getPosition()); + int ttype = st.nextToken(); + end.set(reader.getPosition()); + switch (ttype) { + case StreamTokenizer.TT_EOF: + return YYEOF; + case StreamTokenizer.TT_EOL: + end.line += 1; + end.column = 0; + return EOL; + case StreamTokenizer.TT_WORD: + yylval = Integer.parseInt(st.sval); + end.set(reader.getPreviousPosition()); + return NUM; + case ' ': case '\t': + return getToken(); + case '!': + return BANG; + case '+': + return PLUS; + case '-': + return MINUS; + case '*': + return STAR; + case '/': + return SLASH; + case '^': + return CARET; + case '(': + return LPAREN; + case ')': + return RPAREN; + case '=': + return EQUAL; + default: + throw new AssertionError("invalid character: " + ttype); + } + } +} + +/** + * A class defining a point in the input. 
+ */ +class Position { + public int line = 1; + public int column = 1; + + public Position() { + line = 1; + column = 1; + } + + public Position(int l, int t) { + line = l; + column = t; + } + + public Position(Position p) { + line = p.line; + column = p.column; + } + + public void set(Position p) { + line = p.line; + column = p.column; + } + + public boolean equals(Position l) { + return l.line == line && l.column == column; + } + + public String toString() { + return Integer.toString(line) + "." + Integer.toString(column); + } + + public int line() { + return line; + } + + public int column() { + return column; + } +} + +/** + * A Stream reader that keeps track of the current Position. + */ +class PositionReader extends BufferedReader { + + private Position position = new Position(); + // Position before the latest call to "read", i.e. position + // of the last character of the current token. + private Position previousPosition = new Position(); + + public PositionReader(Reader reader) { + super(reader); + } + + public int read() throws IOException { + previousPosition.set(position); + int res = super.read(); + if (res > -1) { + char c = (char) res; + if (c == '\r' || c == '\n') { + position.line += 1; + position.column = 1; + } else { + position.column += 1; + } + } + return res; + } + + public Position getPosition() { + return position; + } + + public Position getPreviousPosition() { + return previousPosition; + } +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/java/calc/Makefile b/platform/dbops/binaries/build/share/doc/bison/examples/java/calc/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..9c7bf1ef9b74204ed1009ff50d772666c065dfed --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/java/calc/Makefile @@ -0,0 +1,21 @@ +# This Makefile is designed to be simple and readable. It does not +# aim at portability. It requires GNU Make. 
+ +BISON = bison +JAVAC = javac +JAVA = java + +all: Calc.class + +%.java %.html %.gv: %.y + $(BISON) $(BISONFLAGS) --html --graph -o $*.java $< + +%.class: %.java + $(JAVAC) $(JAVACFLAGS) $< + +run: Calc.class + @echo "Type arithmetic expressions. Quit with ctrl-d." + $(JAVA) $(JAVAFLAGS) Calc + +clean: + rm -f *.class Calc.java Calc.html Calc.xml Calc.gv diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/java/simple/Calc.y b/platform/dbops/binaries/build/share/doc/bison/examples/java/simple/Calc.y new file mode 100644 index 0000000000000000000000000000000000000000..51699edb7e248d78c4b7e9ad66c52175ff85e84d --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/java/simple/Calc.y @@ -0,0 +1,125 @@ +/* Simple parser and scanner in Java. -*- Java -*- + + Copyright (C) 2018-2021 Free Software Foundation, Inc. + + This file is part of Bison, the GNU Compiler Compiler. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . 
*/ + +%language "Java" + +%define api.parser.class {Calc} +%define api.parser.public + +%define parse.error verbose + +%code imports { + import java.io.IOException; + import java.io.InputStream; + import java.io.InputStreamReader; + import java.io.Reader; + import java.io.StreamTokenizer; +} + +%code { + public static void main(String[] args) throws IOException { + CalcLexer l = new CalcLexer(System.in); + Calc p = new Calc(l); + if (!p.parse()) + System.exit(1); + } +} + +/* Bison Declarations */ +%token NUM "number" +%type exp + +%nonassoc '=' /* comparison */ +%left '-' '+' +%left '*' '/' +%precedence NEG /* negation--unary minus */ +%right '^' /* exponentiation */ + +/* Grammar follows */ +%% +input: + line +| input line +; + +line: + '\n' +| exp '\n' { System.out.println($exp); } +| error '\n' +; + +exp: + NUM { $$ = $1; } +| exp '=' exp + { + if ($1.intValue() != $3.intValue()) + yyerror("calc: error: " + $1 + " != " + $3); + } +| exp '+' exp { $$ = $1 + $3; } +| exp '-' exp { $$ = $1 - $3; } +| exp '*' exp { $$ = $1 * $3; } +| exp '/' exp { $$ = $1 / $3; } +| '-' exp %prec NEG { $$ = -$2; } +| exp '^' exp { $$ = (int) Math.pow($1, $3); } +| '(' exp ')' { $$ = $2; } +| '(' error ')' { $$ = 1111; } +| '!' 
{ $$ = 0; return YYERROR; } +| '-' error { $$ = 0; return YYERROR; } +; + + +%% +class CalcLexer implements Calc.Lexer { + + StreamTokenizer st; + + public CalcLexer(InputStream is) { + st = new StreamTokenizer(new InputStreamReader(is)); + st.resetSyntax(); + st.eolIsSignificant(true); + st.whitespaceChars('\t', '\t'); + st.whitespaceChars(' ', ' '); + st.wordChars('0', '9'); + } + + public void yyerror(String s) { + System.err.println(s); + } + + Integer yylval; + + public Object getLVal() { + return yylval; + } + + public int yylex() throws IOException { + int ttype = st.nextToken(); + switch (ttype) { + case StreamTokenizer.TT_EOF: + return YYEOF; + case StreamTokenizer.TT_EOL: + return (int) '\n'; + case StreamTokenizer.TT_WORD: + yylval = Integer.parseInt(st.sval); + return NUM; + default: + return ttype; + } + } +} diff --git a/platform/dbops/binaries/build/share/doc/bison/examples/java/simple/Makefile b/platform/dbops/binaries/build/share/doc/bison/examples/java/simple/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..9c7bf1ef9b74204ed1009ff50d772666c065dfed --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/bison/examples/java/simple/Makefile @@ -0,0 +1,21 @@ +# This Makefile is designed to be simple and readable. It does not +# aim at portability. It requires GNU Make. + +BISON = bison +JAVAC = javac +JAVA = java + +all: Calc.class + +%.java %.html %.gv: %.y + $(BISON) $(BISONFLAGS) --html --graph -o $*.java $< + +%.class: %.java + $(JAVAC) $(JAVACFLAGS) $< + +run: Calc.class + @echo "Type arithmetic expressions. Quit with ctrl-d." 
+ $(JAVA) $(JAVAFLAGS) Calc + +clean: + rm -f *.class Calc.java Calc.html Calc.xml Calc.gv diff --git a/platform/dbops/binaries/build/share/doc/flex/AUTHORS b/platform/dbops/binaries/build/share/doc/flex/AUTHORS new file mode 100644 index 0000000000000000000000000000000000000000..93b35287a263df5fc1291cab2f3d3a7ccf1b7bbd --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/flex/AUTHORS @@ -0,0 +1,16 @@ + +In 2001, Will Estes took over as maintainer of flex. + +John Millaway is a co-author of the current version of flex. He has +contributed a large number of new features, fixed a large number of +outstanding bugs and has made significant contributions to the flex +documentation. + +Aaron Stone has contributed several bug fixes to the flex codebase. + +Vern Paxson wrote flex with the help of many ideas and much +inspiration from Van Jacobson. Original version by Jef Poskanzer. + +The fast table representation is a partial implementation of a design +done by Van Jacobson. The implementation was done by Kevin Gong and +Vern Paxson. diff --git a/platform/dbops/binaries/build/share/doc/flex/COPYING b/platform/dbops/binaries/build/share/doc/flex/COPYING new file mode 100644 index 0000000000000000000000000000000000000000..684b011026dd719137ef01764f002c2c27a2bb52 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/flex/COPYING @@ -0,0 +1,42 @@ +Flex carries the copyright used for BSD software, slightly modified +because it originated at the Lawrence Berkeley (not Livermore!) Laboratory, +which operates under a contract with the Department of Energy: + +Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 The Flex Project. + +Copyright (c) 1990, 1997 The Regents of the University of California. +All rights reserved. + +This code is derived from software contributed to Berkeley by +Vern Paxson. + +The United States Government has rights in this work pursuant +to contract no. 
DE-AC03-76SF00098 between the United States +Department of Energy and the University of California. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. + +This basically says "do whatever you please with this software except +remove this notice or take advantage of the University's (or the flex +authors') name". + +Note that the "flex.skl" scanner skeleton carries no copyright notice. +You are free to do whatever you please with scanners generated using flex; +for them, you are not even bound by the above copyright. diff --git a/platform/dbops/binaries/build/share/doc/flex/NEWS b/platform/dbops/binaries/build/share/doc/flex/NEWS new file mode 100644 index 0000000000000000000000000000000000000000..85d900b5362cf792cfbdad044b7606471f963619 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/flex/NEWS @@ -0,0 +1,746 @@ +flex NEWS + +* Noteworthy changes in release 2.6.4 (2017-05-06) [stable] + +** build + +*** The indent target now knows about flex's new (as of 2.6.0) + layout. The indent rules it would apply are not correct and do + need to be fixed. 
+ +*** The files included in the flex distribution are now built by the + version of flex that is included in the distribution. + +*** The configure script has a better idea of which headers are + required to build flex. It will also error when missing functions + are detected. + +*** We have lowered the versions of automake and gettext that + configure.ac lists as required for building flex. In autogen.sh, + we now check for how to call libtoolize and use what we find in + the rest of the script. + +*** Since files in lib/ are picked up as needed by src/, we no longer + generate a Makefile for that directory. + +*** Flex can be cross compiled. + +** documentation + +*** Some typos were removed from the manual. + +** scanner + +*** Some minor performance enhancements. + +*** We honor user defined yy_* macros again. We are also more careful + to not leak macro definitions into header files. + +*** A number of portability fixes were introduced so building flex is + more reliable on more platforms. Additionally, outdated function + calls were removed. + +*** When building the flex executable itself, %# comments from + flex.skl are removed when generating the C source code array. This + reduces the size of flex. + +** test suite + +*** All scripts in the test suite are now run by $(SHELL) and the + needed portability fixes have been included. + +*** Test suite dependencies are handled much better. This only matters + if you are actively developing flex or its test suite. + +*** Tests that depend on platform dependent features now properly skip + when those platforms are not present. + +*** When running "make check", you can now pas V=0 to silence more of + the build. This is useful when you're less connncerned about the + details of building and linking the test programs themselves. 
+ +* Noteworthy changes in release 2.6.3 (2016-12-30) [stable] + +** scanner + +*** several bug fixes resolved problems introduced in recent flex + versions regarding processing of comments, literals and various + quoting scenarios. + +*** If the path to m4 was sufficiently long, a buffer overflow could + occur. This has been resolved. The fix also removes dependence on + the constant PATH_MAX. + +** build + +*** A new configure option --disable-bootstrap changes the behavior of + the build system when building flex. The default + "--enable-bootstrap" behavior is to build flex, then to use that + flex to build flex again. With --disable-bootstrap, the scanner is + simply built by sedding the scanner source. This is friendlier to + cross compilation. + +*** The compatibility functions in lib/ are no longer built as a + library. Instead, they are built as $(LIBOBJ) objects. This is + simpler and friendlier to cross compilation. + +*** It is now possible to build flex without building the accompanying + libfl. This is friendlier to cross compilation. See the + --disable-libfl option to configure. Resolves #99. + +*** the PIC version of libfl was not correctly built. It is no longer + included in the build/installation targets of flex since it was + unused. + +*** the distributed man page is only rebuilt when the relevant source + files change or when the binary doesn't exist. In particular, this + is friendlier to cross compilation. Resolves #108 + +** test + +*** the shell scripts in the test suite are more portable across different shell implementations. 
+ +* version 2.6.2 released 2016-10-24 + +** flex internals + +*** a segfalt involving yyrestart(NULL) has been fixed + +*** flex should now handle quoting when mixed with m4 processing correctly + +*** flex handles `[[' and `]]' correctly + +*** flex no longer generates non-ANSI code + +*** more compilation warnings were squashed in generated scanners + +*** prevented a buffer overflow that could occur when input buffers were the exact wrong size + +** test suite + +*** input filenames on MSWindows are now calculated correctly + +*** general code cleanups in a number of tests now make the test suite compile much more cleanly + +** build system + +*** the xz archive has been replaced with an lzip archive + +*** a new option to configure --enable-warnings to encapsulate passing + of warning-related flags which is useful in testing flex + +*** make indent now works for out of source builds + +*** Portability warnings when generating Makefile.in files are now suppressed; they were just noise and the use of GNU extensions in Makefile.{am,in,} was intentional and well known. + +** bugs + +*** resolved gh#67 + +** new sv translation from the translation project + +* version 2.6.1 released 2016-03-01 + +** flex resources + +*** The flex project is now hosted at github. Consider this a "period of transition". In particular, you should start at https://github.com/westes/flex for the flex codebase, issue tracking and pull requests. + +*** New releases of flex are to be found at https://github.com/westes/flex/releases. + +** flex internals + +*** Flex now uses more modern and more standard names for variable types. There's more work to be done on that front yet, though. + +*** A number of compiler warnings have been remedied. + +*** Line directives should now work as expected and be absent when that is expected. 
+ +** test suite + +*** When running the test suite, c++ files are compiled with the c++ header inside the flex distribution, rather than relying on the build system's flex header , which might not be installed yet or which might be out of date with respect to what flex tests expect. + +*** Some portability fixes in the test suite such as opening files for reading in binary mode + +** Building flex + +*** The file src/scan.c asdistributed with flex source is now built with the current version of flex. Occasionally this had to be done manually to pick up new flex features. It's now just a part of flex's build system. + +*** The pdf version of the manual is no longer distributed with flex, although if you have the texinfo package installed, you can still build it. + +*** lots of general build system cleanup + +*** the build system tries a bit harder to find libtoolize and texi2dvi. + +*** When help2man and texi2dvi are missing, the error messages are now much more helpful. + +** bug fixes + +*** resolved github issues #53, #54, #55, #61. + +*** Resolved sf bugs #128, #129, #155, #160, #184, #187, #195. + +* version 2.6.0 released 2015-11-17 + +** User Visible Changes + +*** C++ scanners now use references instead of pointers. See the manual for details. + +*** A number of compiler warnings were addressed, so flex generated scanners should be quieter under compiler warning scenarios. + +*** Allow error reporting routines to accept varying number of arguments + +*** Removed deprecated 'register' storage class specifier + +*** Changeed output formats from octal to hexadecimal + +*** check limits before using array index cclp; resolves sf-166 + +*** Suppress clang warning about empty @param paragraph; resolves sf#158 + +*** Fixed malloc/realloc replacement, resolves sf bug#151. + +*** Adjusted buffer sizes on ia64. + +*** various documentation and code clean up fixes: resolves sf bugs #167, #168, among other patches. 
+ +** Flex Internals + +*** flex is now organized into subdirectories. This keeps the tree neater at the top level and puts like things near each other and unlike things away from each other. + +*** The test suite has been reorganized and is now run with the parallel test suite harness from automake. + +*** Cleaned up the automake parts of the build system to better reflect what automake does on its own. Also added a call to libtoolize in autogen.sh because autoreconf gets confused without a prior run of libtoolize. + +*** po/Makefile now includes a rule to fetch the latest translations from the translation project. "make -f po/Makefile getpo" from the top level of the flex tree will fetch the files. + +*** New da translation from the translation project + +* flex version 2.5.39 released 2014-03-26 + +** no user visible changes in this release + +* version 2.5.38 released 2014-02-14 + +** internationalization + +*** add sr translation from the translation project + +*** update da, es, ko, nl, pt_BR, ro, ru, sv, tr, vi, zh_CN translations from the translation project + +*** rename zh_tw to its proper zh_TW name + +* version 2.5.37 released 2012-08-03 + +** Import flex into git. See + git://flex.git.sourceforge.net/gitroot/flex/flex. 
+ +** Fix make install target to not fail when the flex++ program is + already installed + +** New translations from the translation project: de, fi, pl, vi + +* version 2.5.36 released 2012-07-20 + +** various portability fixes that quiet compiler warnings on 64-bit + hosts + +** various manual fixes, including correcting the name of a %option and + updating some simple examples to use ANSI C syntax + +** various bug fixes that prevent certain error conditions from + persisting when they should not persist + +** improvements to the test suite so it behaves better when linking + compiled files + +** new translations from the translation project: ca, da, es, fi, fr, + ga, ko, pt_br, ro, ru, sv, tr, zh_cn + +** the flex distribution is now built with automake 1.10.1 and automake + 2.61 + +* version 2.5.35 released 2008-02-26 + +** fixed bug that prevented flex from accepting certain comments in the + scanner file (resolves bugs #1849809 and #1849805) + +** fix bug that prevented headers for all functions from being generated + (resolves bug #1628314) + +** change yy_size_t to be size_t (resolves bug #1849812) + +** new de, nl, pl, pt_br, vi translations from the translation project + +* version 2.5.34 released 2007-12-12 + +** introduce yylex_init_extra; see the manual for details + +** introduce %option extra-type="your_type *" (resolves bug #1744505) + +** The flex program now parses multiple short concatenated options (resolves bug + #1619820). Thanks to Petr Machata of Red Hat on this issue. + +** better checking after yyalloc/yyrealloc (resolves bug #1595967) + +** flex now provides for a libfl_pic.a compiled with position + independent code. Particularly useful when including a flex scanner + in a shared library and with more recent versions of gcc. Thanks to the Debian project for the idea. + +** SourceForge feature request #1658379: Expose YY_BUF_SIZE in the + header file. 
+ +** flex better escapes filenames with special characters in them + (resolves bug #1623600) + +** a memory leak was plugged(resolves bug #1601111) + +** pattern language expanded; see the manual for details on the below + highlights + +*** pattern options added to specify patterns as case-insensitive or + case-sensitive + +*** pattern options to specify whether the "." character should match + the newline character + +*** pattern options added to allow ignoring of whitespace in patterns + +*** POSIX character classes may be negated in patterns + +*** patterns may now use set difference, union operators + +** the manual now contains an appendix listing various common patterns + which may be useful when writing scanners + +** some memory leaks were removed from the C++ scanner (but the C++ + scanner is still experimental and may change radically without + notice) + +** c++ scanners can now use yywrap + +** added new unit test for c++ and yywrap + +** portability fixes to some unit tests + +** flex man page and flex manual in pdf now distributed in the flex +distribution + +** new ca, vi, ga, nl translations from the translation project + +** flex no longer comes with an rpm spec file + +** flex development now happens with automake 1.9.6 + +* version 2.5.33 released 2006-2-20 + +** all flex resources are now to be found from the website at + http://flex.sourceforge.net/ + +** there was no release 2.5.32 published + +** numerous bug and security fixes + +** new nl, vi, sv, ro, po, ga, ca, fr, tr translations from the translation project + +** upgrade to use gettext 0.12 (this now makes the "pdf" and "ps" + targets in the build system able to be run successfully) + +* version 2.5.31 released 2003-4-1 + +** remove --enable-maintainer-mode configure option; none of the + Makefiles were using it and it can be unduely confusing + +* version 2.5.30 released 2003-4-1 + +** yylineno is per-buffer in reentrant scanners + +** added %top directive for placing code at the top 
of the generated + scanner; see manual for details + +** flex now uses m4 to generate scanners; while this means that + scanners are more readable, it means that flex requires m4 to be + installed; see manual for details + +* version 2.5.29 released 2003-3-5 + +** Automatic stack management for multiple input buffers in C and C++ scanners + +** moved the flex documentation to a new doc/ subdirectory + +** cleanups to the yy namespace + +* version 2.5.28 released 2003-2-12 + +** flex is now hosted at sourceforge + +** Fixed trailing slash bug in YY_INPUT macro def + +** Flex now warns if always-interactive is specified with fast or full + +* version 2.5.27 released 2003-1-21 + +** flex now works with recent bison versions + +** new pt_br translation from the translation project + +* version 2.5.26 released 2003-1-14 + +** Fixed table deserialization bug on big-endian archs. Patch sent from Bryce Nichols + +** yyleng has proper declarations now; this caused flex to generate + unusable scanners for some programs + +** the flex distribution now includes a spec file suitable for use + with rpm + +** some more c++ fixes + +** new es translation from the translation project + +** slight tweeks to the flex_int*_t types + +** flex now warns about pattern ranges that might be ambiguous when + generating a case-insensitive scanner + + +* version 2.5.25 released 2002-12-2 + +** flex now uses flex_int*_t types. 
For C99 systems, they are just the + int*_t types; for non-C99 systems, we just make some typedefs + +** new pt_br translation from the translation project + +* version 2.5.24 released 2002-11-25 + +* more portability fixes + +** the manual continues to be updated and edited, but it's still got a + ways to go + +** it is possible to have multiple c++ scanners in the same program again + +** new turkish translation from the translation project + +* version 2.5.23 released 2002-10-21 + +** more portability fixes + +** the manual includes a title page and a table-of-contents when printed + +** the test suite can be run with "make check" from the top-level + directory + +** configure now accepts the --enable-maintainer-mode option + +** gettext functionality is now only available externally + +** the constant FLEX_BETA is defined if flex is a beta release + +** the script create-test was not included in the distribution and it + should have been + +* version 2.5.22 released 2002-10-10 + +** more portability fixes around how we get ahold of the integral + types; there is a constant FLEX_NEED_INTEGRAL_TYPE_DEFINITIONS + which you should define if you don't have the header + file (after you complain to your C vendor for not providing a + reasonable C environment) + +** more test suite cleanups; in particular, the test suite should run + correctly when build from a different directory + +** upgraded automake to 1.7 and consequently autoconf to 2.54; this + means, among other things, that there is some support for +formatting the manual in postscript and pdf in the distributed + Makefile.in (and therefore in the Makefile built by configure) + +** the flex.1 manpage is generated by help2man; (this has been true + for quite a while but was not listed here) + +** flex now includes three defined constants to indicate which version + of flex generated a scanner (YY_FLEX_{MAJOR,MINOR,SUBMINOR}_VERSION) + +** flex tries its best to output only the relevant portions of the + 
skeleton when generating a scanner, thus avoiding as much + conditional compilation as possible + +* version 2.5.21 released 2002-9-17 + +** one of the tests in the test suite broke the dist target + +* version 2.5.20 released 2002-9-16 + +** A flex scanner has the ability to save the DFA tables to a file, + and load them at runtime when needed; see the manual for details + +** Added %option bison-bridge (--bison-bridge) + +** Removed %option reentrant-bison/--reentrant-bison/-Rb + +** yylineno is present in all scanners; Modified nasty performance + penalty warning with yylineno in documentation + +** test-table-opts is now run last in the test suite because it's so fat + +** flex can, to some extent, diagnose where internal problems occur + +** new translations from the translation project: fr, ca, de, ru, sv + +**Flex generates C99 defs now; see YY_TRADITIONAL_FUNC_DEFS in the + manual if that's not a good thing for you + +* version 2.5.19 released 2002-9-5 + +** prevent segfault on input lines which are longer than the allocated + space (problem report from Manoj Srivastava + ) + +** Changed option 'header' to 'header-file' + +* version 2.5.18 released 2002-9-4 + +** portability fixes for integer constants and in the way the test + suite reports its results + +** the test for bison was reporting bison missing when it was, in + fact, found + +** if we don't find GNU indent, we're more careful when we're not + finding it + +* version 2.5.17 released 2002-8-29 + +** more portability fixes + +** updated config.sub and config.guess + +** flex is indented by GNU indent (this was done earlier but not + explicitly documented) + +* version 2.5.16 released 2002-8-28 + +** c++ scanners compile again + +** there is now an indent target in the top-level Makefile; configure + checks for GNU indent which is required for proper operation of the + indent target + +** some more portability fixes were made + +** %options and invocation sections of manual merged + +** a c++ test 
was added to the test suite + +** we're trying to clean up more files in the test suite's make clean + targets + +* version 2.5.15 released 2002-8-21 + +** reject-state buffer is now dynamically allocated and REJECT buffer + variables are reentrant-safe + +** manual now discusses memory usage + +** skeleton now processed by m4 before mkskel.sh; (this only matters + if you want to change the skeleton or if you're doing flex development) + +** zh_cn translation added from translation project + +** a bug that caused a segfault has now been fixed + +** the test suite now respects the usual CFLAGS, etc. variables + +** removed some warnings which some tests trigggered with the -s option + +** the flex-generated header file now tries to be smarter about + conditionally including start conditions + +** tables code omitted from generated scanner when not used + +* version 2.5.14 released 2002-8-15 + +** the tests using the reentrant c scanner as c++ were reworked + slightly to be sure that the c++ was enforced + +** de translation now included in the distribution + +** various portability fixes regarding nls support, c++ include + headers, etc. + +* version 2.5.13 released 2002-8-15 + +** the header file output with %option header is now much smaller + +** Fixed type mismatch in printf in scanner skeleton + +** yylex_init now reports errors + +* version 2.5.12 released 2002-8-8 + +** updated gettext support to 0.11.5 + +** new fr translation from the translation project + +** bison is no longer needed to build flex; If you are building flex + from a release (i.e., not from a cvs snapshot), then you don't need + to have a pre-built lex around either (unless you modify scan.l, of + course); (This has been true for some time, but was not mentioned + here.) 
+ +* version 2.5.11 released 2002-7-31 + +** Fixed bug where yyless did not consider yylineno + +** the yylineno performance hit is now gone + +** fixed some typos in the manual and we now include texinfo.tex in + the distribution + +** traditional prototypes output for C scanners, controlled by a + preprocessor symbol; see documentation for details + +* version 2.5.10 released 2002-7-24 + +** yy_globals renamed to yyscanner and yy_globals_t renamed to + yy_guts_t + +** added dist-bzip2 option to Makefile.am so we now produce a bzip2'd + archive in addition to the standard gzip archive + +* version 2.5.9 + +** new tests in test suite: test-mem-{nr,r}, test-posix, + test-posixly-correct, test-debug-{nr,r} + +** made changes to work with gcc-3.2 development code + +** ability to choose which memory functions are used in flex + +** new yylex_destroy() function for the non-reentrant scanner + +** new handling of POSIXLY_CORRECT environment variable + +** the test suite now has its copyrights explicitly described + +** new ca, de, fr, ru, sv, tr translations + +* version 2.5.8 + +** a new --posix option generates scanners with posix-style abc{1,3} + compatible parsing, see manual for the screwy details + +* version 2.5.7 + +** configure.in now includes a call to AC_PREREQ to enforce the + requirement for autoconf at least 2.50 (This only effects you if + you're doing flex development.) 
+ +** configure now uses autoconf's versioning information and configure + --help reports the bug-reporting address for flex + +** test suite now only reports success versus failure; reporting + skipped is problematic under the current setup + +** compilation with --disable-nls now works + +** flex can now be built in a separate directory + +* version 2.5.6 + +** gettext support added (from gettext 0.11) + +*** translations for ca, da, de, es, fr, ko, ru, sv, tr included + +** distribution now built under automake 1.6 and autoconf 2.53 + +** command-line option parsing happens differently now: + +*** Added long option parsing + +*** Options -n and -c, previously deprecated, now simply do nothing + +*** Options are now parsed left to right + +** added a number of new options + +*** All positive %options are now accessible from the command line + +*** Added option -D, to define a preprocessor symbol + +*** Added option --header=FILE to specify a C .h file to generate + +*** added option --yywrap to call yywrap on EOF + +*** added option --yylineno to track line count in yylineno + +*** --yyclass=NAME name of C++ class when generating c++ scanners + +*** for long option names which are associated with existing short +options, see accompanying documentation + +*** new %option nounistd or command-line --nounistd added to prevent + flex from generating #include on systems that don't + have that include file + +** Support for reentrant C scanners has been added + +*** Updated the manual with the new reentrant API + +*** Two new options %option reentrant (-R) and +%option reentrant-bison (-Rb) + +*** All globals optionally placed into struct yyglobals_t + +*** All access to globals replaced by macro invocations + +*** All functions optionally take one additional +argument, yy_globals + +*** New style for invoking reentrant scanner: +yylex_init(void** scanner ); +yylex( scanner ); +yylex_destroy( scanner ); + +*** Added get/set functions for members of struct yy_globals_t 
+e.g., yyget_text, yyget_leng, etc + +*** Prefix substitution added for new functions + +*** Macro shortcuts to the lengthy get/set functions +provided for use in actions, e.g., yytext, yyleng, etc + +*** Arbitrary, user-defined data, "yyextra", may be added to scanner + +** %option nomain no longer implies %option yywrap +But the inverse is still true + +** Developer test suite added + +*** TESTS/ directory has been added. Users can +'make test' in the TESTS directory to execute the test suite + +** Support for bison variables yylval and yylloc added + +** automake support for the build process + +** manual is now in texinfo/info format + +*** flex.1 removed from distribution + +** flex no longer generates C-language scanners with C++-style + comments + +** flex now generates scanners in c++ which are compatible with + recent c++ compilers + +** flex input scanner now recognizes '\r' as an EOL character + +See the file ONEWS for changes in earlier releases. + +See the file COPYING for copying conditions. + +Local Variables: +mode: text +mode: outline-minor +end: diff --git a/platform/dbops/binaries/build/share/doc/flex/ONEWS b/platform/dbops/binaries/build/share/doc/flex/ONEWS new file mode 100644 index 0000000000000000000000000000000000000000..33415772185b3f48b8016348cb9ab0b071d66896 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/flex/ONEWS @@ -0,0 +1,1233 @@ +Changes between release 2.5.4 (11Sep96) and release 2.5.3: + + - Fixed a bug introduced in 2.5.3 that blew it when a call + to input() occurred at the end of an input file. + + - Fixed scanner skeleton so the example in the man page of + scanning strings using exclusive start conditions works. + + - Minor Makefile tweaks. + + +Changes between release 2.5.3 (29May96) and release 2.5.2: + + - Some serious bugs in yymore() have been fixed. In particular, + when using AT&T-lex-compatibility or %array, you can intermix + calls to input(), unput(), and yymore(). 
(This still doesn't + work for %pointer, and isn't likely to in the future.) + + - A bug in handling NUL's in the input stream of scanners using + REJECT has been fixed. + + - The default main() in libfl.a now repeatedly calls yylex() until + it returns 0, rather than just calling it once. + + - Minor tweak for Windows NT Makefile, MISC/NT/Makefile. + + +Changes between release 2.5.2 (25Apr95) and release 2.5.1: + + - The --prefix configuration option now works. + + - A bug that completely broke the "-Cf" table compression + option has been fixed. + + - A major headache involving "const" declarators and Solaris + systems has been fixed. + + - An octal escape sequence in a flex regular expression must + now contain only the digits 0-7. + + - You can now use "--" on the flex command line to mark the + end of flex options. + + - You can now specify the filename '-' as a synonym for stdin. + + - By default, the scanners generated by flex no longer + statically initialize yyin and yyout to stdin and stdout. + This change is necessary because in some ANSI environments, + stdin and stdout are not compile-time constant. You can + force the initialization using "%option stdinit" in the first + section of your flex input. + + - "%option nounput" now correctly omits the unput() routine + from the output. + + - "make clean" now removes config.log, config.cache, and the + flex binary. The fact that it removes the flex binary means + you should take care if making changes to scan.l, to make + sure you don't wind up in a bootstrap problem. + + - In general, the Makefile has been reworked somewhat (thanks + to Francois Pinard) for added flexibility - more changes will + follow in subsequent releases. + + - The .texi and .info files in MISC/texinfo/ have been updated, + thanks also to Francois Pinard. + + - The FlexLexer::yylex(istream* new_in, ostream* new_out) method + now does not have a default for the first argument, to disambiguate + it from FlexLexer::yylex(). 
+ + - A bug in destructing a FlexLexer object before doing any scanning + with it has been fixed. + + - A problem with including FlexLexer.h multiple times has been fixed. + + - The alloca() chud necessary to accommodate bison has grown + even uglier, but hopefully more correct. + + - A portability tweak has been added to accommodate compilers that + use char* generic pointers. + + - EBCDIC contact information in the file MISC/EBCDIC has been updated. + + - An OS/2 Makefile and config.h for flex 2.5 is now available in + MISC/OS2/, contributed by Kai Uwe Rommel. + + - The descrip.mms file for building flex under VMS has been updated, + thanks to Pat Rankin. + + - The notes on building flex for the Amiga have been updated for + flex 2.5, contributed by Andreas Scherer. + + +Changes between release 2.5.1 (28Mar95) and release 2.4.7: + + - A new concept of "start condition" scope has been introduced. + A start condition scope is begun with: + + <SCs>{ + + where SCs is a list of one or more start conditions. Inside + the start condition scope, every rule automatically has the + prefix <SCs> applied to it, until a '}' which matches the + initial '{'. So, for example: + + <ESC>{ + "\\n" return '\n'; + "\\r" return '\r'; + "\\f" return '\f'; + "\\0" return '\0'; + } + + is equivalent to: + + <ESC>"\\n" return '\n'; + <ESC>"\\r" return '\r'; + <ESC>"\\f" return '\f'; + <ESC>"\\0" return '\0'; + + As indicated in this example, rules inside start condition scopes + (and any rule, actually, other than the first) can be indented, + to better show the extent of the scope. + + Start condition scopes may be nested. + + - The new %option directive can be used in the first section of + a flex scanner to control scanner-generation options. Most + options are given simply as names, optionally preceded by the + word "no" (with no intervening whitespace) to negate their + meaning. 
Some are equivalent to flex flags, so putting them + in your scanner source is equivalent to always specifying + the flag (%option's take precedence over flags): + + 7bit -7 option + 8bit -8 option + align -Ca option + backup -b option + batch -B option + c++ -+ option + caseful opposite of -i option (caseful is the default); + case-sensitive same as above + caseless -i option; + case-insensitive same as above + debug -d option + default opposite of -s option + ecs -Ce option + fast -F option + full -f option + interactive -I option + lex-compat -l option + meta-ecs -Cm option + perf-report -p option + read -Cr option + stdout -t option + verbose -v option + warn opposite of -w option (so use "%option nowarn" for -w) + + array equivalent to "%array" + pointer equivalent to "%pointer" (default) + + Some provide new features: + + always-interactive generate a scanner which always + considers its input "interactive" (no call to isatty() + will be made when the scanner runs) + main supply a main program for the scanner, which + simply calls yylex(). Implies %option noyywrap. + never-interactive generate a scanner which never + considers its input "interactive" (no call to isatty() + will be made when the scanner runs) + stack if set, enable start condition stacks (see below) + stdinit if unset ("%option nostdinit"), initialize yyin + and yyout statically to nil FILE* pointers, instead + of stdin and stdout + yylineno if set, keep track of the current line + number in global yylineno (this option is expensive + in terms of performance). The line number is available + to C++ scanning objects via the new member function + lineno(). + yywrap if unset ("%option noyywrap"), scanner does not + call yywrap() upon EOF but simply assumes there + are no more files to scan + + Flex scans your rule actions to determine whether you use the + REJECT or yymore features (this is not new). 
Two %options can be + used to override its decision, either by setting them to indicate + the feature is indeed used, or unsetting them to indicate it + actually is not used: + + reject + yymore + + Three %option's take string-delimited values, offset with '=': + + outfile="<name>" equivalent to -o<name> + prefix="<name>" equivalent to -P<name> + yyclass="<name>" set the name of the C++ scanning class + (see below) + + A number of %option's are available for lint purists who + want to suppress the appearance of unneeded routines in + the generated scanner. Each of the following, if unset, + results in the corresponding routine not appearing in the + generated scanner: + + input, unput + yy_push_state, yy_pop_state, yy_top_state + yy_scan_buffer, yy_scan_bytes, yy_scan_string + + You can specify multiple options with a single %option directive, + and multiple directives in the first section of your flex input file. + + - The new function: + + YY_BUFFER_STATE yy_scan_string( const char *str ) + + returns a YY_BUFFER_STATE (which also becomes the current input + buffer) for scanning the given string, which occurs starting + with the next call to yylex(). The string must be NUL-terminated. + A related function: + + YY_BUFFER_STATE yy_scan_bytes( const char *bytes, int len ) + + creates a buffer for scanning "len" bytes (including possibly NUL's) + starting at location "bytes". + + Note that both of these functions create and scan a *copy* of + the string/bytes. (This may be desirable, since yylex() modifies + the contents of the buffer it is scanning.) You can avoid the + copy by using: + + YY_BUFFER_STATE yy_scan_buffer( char *base, yy_size_t size ) + + which scans in place the buffer starting at "base", consisting + of "size" bytes, the last two bytes of which *must* be + YY_END_OF_BUFFER_CHAR (these bytes are not scanned; thus, scanning + consists of base[0] through base[size-2], inclusive). 
If you + fail to set up "base" in this manner, yy_scan_buffer returns a + nil pointer instead of creating a new input buffer. + + The type yy_size_t is an integral type to which you can cast + an integer expression reflecting the size of the buffer. + + - Three new routines are available for manipulating stacks of + start conditions: + + void yy_push_state( int new_state ) + + pushes the current start condition onto the top of the stack + and BEGIN's "new_state" (recall that start condition names are + also integers). + + void yy_pop_state() + + pops the top of the stack and BEGIN's to it, and + + int yy_top_state() + + returns the top of the stack without altering the stack's + contents. + + The start condition stack grows dynamically and so has no built-in + size limitation. If memory is exhausted, program execution + is aborted. + + To use start condition stacks, your scanner must include + a "%option stack" directive. + + - flex now supports POSIX character class expressions. These + are expressions enclosed inside "[:" and ":]" delimiters (which + themselves must appear between the '[' and ']' of a character + class; other elements may occur inside the character class, too). + The expressions flex recognizes are: + + [:alnum:] [:alpha:] [:blank:] [:cntrl:] [:digit:] [:graph:] + [:lower:] [:print:] [:punct:] [:space:] [:upper:] [:xdigit:] + + These expressions all designate a set of characters equivalent to + the corresponding isXXX function (for example, [:alnum:] designates + those characters for which isalnum() returns true - i.e., any + alphabetic or numeric). Some systems don't provide isblank(), + so flex defines [:blank:] as a blank or a tab. + + For example, the following character classes are all equivalent: + + [[:alnum:]] + [[:alpha:][:digit:]] + [[:alpha:]0-9] + [a-zA-Z0-9] + + If your scanner is case-insensitive (-i flag), then [:upper:] + and [:lower:] are equivalent to [:alpha:]. 
+ + - The promised rewrite of the C++ FlexLexer class has not yet + been done. Support for FlexLexer is limited at the moment to + fixing show-stopper bugs, so, for example, the new functions + yy_scan_string() & friends are not available to FlexLexer + objects. + + - The new macro + + yy_set_interactive(is_interactive) + + can be used to control whether the current buffer is considered + "interactive". An interactive buffer is processed more slowly, + but must be used when the scanner's input source is indeed + interactive to avoid problems due to waiting to fill buffers + (see the discussion of the -I flag in flex.1). A non-zero value + in the macro invocation marks the buffer as interactive, a zero + value as non-interactive. Note that use of this macro overrides + "%option always-interactive" or "%option never-interactive". + + yy_set_interactive() must be invoked prior to beginning to + scan the buffer. + + - The new macro + + yy_set_bol(at_bol) + + can be used to control whether the current buffer's scanning + context for the next token match is done as though at the + beginning of a line (non-zero macro argument; makes '^' anchored + rules active) or not at the beginning of a line (zero argument, + '^' rules inactive). + + - Related to this change, the mechanism for determining when a scan is + starting at the beginning of a line has changed. It used to be + that '^' was active iff the character prior to that at which the + scan started was a newline. The mechanism now is that '^' is + active iff the last token ended in a newline (or the last call to + input() returned a newline). For most users, the difference in + mechanisms is negligible. Where it will make a difference, + however, is if unput() or yyless() is used to alter the input + stream. When in doubt, use yy_set_bol(). + + - The new beginning-of-line mechanism involved changing some fairly + twisted code, so it may have introduced bugs - beware ... 
+ + - The macro YY_AT_BOL() returns true if the next token scanned from + the current buffer will have '^' rules active, false otherwise. + + - The new function + + void yy_flush_buffer( struct yy_buffer_state* b ) + + flushes the contents of the current buffer (i.e., next time + the scanner attempts to match a token using b as the current + buffer, it will begin by invoking YY_INPUT to fill the buffer). + This routine is also available to C++ scanners (unlike some + of the other new routines). + + The related macro + + YY_FLUSH_BUFFER + + flushes the contents of the current buffer. + + - A new "-ooutput" option writes the generated scanner to "output". + If used with -t, the scanner is still written to stdout, but + its internal #line directives (see previous item) use "output". + + - Flex now generates #line directives relating the code it + produces to the output file; this means that error messages + in the flex-generated code should be correctly pinpointed. + + - When generating #line directives, filenames with embedded '\'s + have those characters escaped (i.e., turned into '\\'). This + feature helps with reporting filenames for some MS-DOS and OS/2 + systems. + + - The FlexLexer class includes two new public member functions: + + virtual void switch_streams( istream* new_in = 0, + ostream* new_out = 0 ) + + reassigns yyin to new_in (if non-nil) and yyout to new_out + (ditto), deleting the previous input buffer if yyin is + reassigned. It is used by: + + int yylex( istream* new_in = 0, ostream* new_out = 0 ) + + which first calls switch_streams() and then returns the value + of calling yylex(). + + - C++ scanners now have yy_flex_debug as a member variable of + FlexLexer rather than a global, and member functions for testing + and setting it. 
+ + - When generating a C++ scanning class, you can now use + + %option yyclass="foo" + + to inform flex that you have derived "foo" as a subclass of + yyFlexLexer, so flex will place your actions in the member + function foo::yylex() instead of yyFlexLexer::yylex(). It also + generates a yyFlexLexer::yylex() member function that generates a + run-time error if called (by invoking yyFlexLexer::LexerError()). + This feature is necessary if your subclass "foo" introduces some + additional member functions or variables that you need to access + from yylex(). + + - Current texinfo files in MISC/texinfo, contributed by Francois + Pinard. + + - You can now change the name "flex" to something else (e.g., "lex") + by redefining $(FLEX) in the Makefile. + + - Two bugs (one serious) that could cause "bigcheck" to fail have + been fixed. + + - A number of portability/configuration changes have been made + for easier portability. + + - You can use "YYSTATE" in your scanner as an alias for YY_START + (for AT&T lex compatibility). + + - input() now maintains yylineno. + + - input() no longer trashes yytext. + + - interactive scanners now read characters in YY_INPUT up to a + newline, a large performance gain. + + - C++ scanner objects now work with the -P option. You include <FlexLexer.h> + once per scanner - see comments in <FlexLexer.h> + (or flex.1) for details. + + - C++ FlexLexer objects now use the "cerr" stream to report -d output + instead of stdio. + + - The -c flag now has its full glorious POSIX interpretation (do + nothing), rather than being interpreted as an old-style -C flag. + + - Scanners generated by flex now include two #define's giving + the major and minor version numbers (YY_FLEX_MAJOR_VERSION, + YY_FLEX_MINOR_VERSION). These can then be tested to see + whether certain flex features are available. + + - Scanners generated using -l lex compatibility now have the symbol + YY_FLEX_LEX_COMPAT #define'd. 
+ + - When initializing (i.e., yy_init is non-zero on entry to yylex()), + generated scanners now set yy_init to zero before executing + YY_USER_INIT. This means that you can set yy_init back to a + non-zero value in YY_USER_INIT if you need the scanner to be + reinitialized on the next call. + + - You can now use "#line" directives in the first section of your + scanner specification. + + - When generating full-table scanners (-Cf), flex now puts braces + around each row of the 2-d array initialization, to silence warnings + on over-zealous compilers. + + - Improved support for MS-DOS. The flex sources have been successfully + built, unmodified, for Borland 4.02 (all that's required is a + Borland Makefile and config.h file, which are supplied in + MISC/Borland - contributed by Terrence O Kane). + + - Improved support for Macintosh using Think C - the sources should + build for this platform "out of the box". Contributed by Scott + Hofmann. + + - Improved support for VMS, in MISC/VMS/, contributed by Pat Rankin. + + - Support for the Amiga, in MISC/Amiga/, contributed by Andreas + Scherer. Note that the contributed files were developed for + flex 2.4 and have not been tested with flex 2.5. + + - Some notes on support for the NeXT, in MISC/NeXT, contributed + by Raf Schietekat. + + - The MISC/ directory now includes a preformatted version of flex.1 + in flex.man, and pre-yacc'd versions of parse.y in parse.{c,h}. + + - The flex.1 and flexdoc.1 manual pages have been merged. There + is now just one document, flex.1, which includes an overview + at the beginning to help you find the section you need. + + - Documentation now clarifies that start conditions persist across + switches to new input files or different input buffers. If you + want to e.g., return to INITIAL, you must explicitly do so. + + - The "Performance Considerations" section of the manual has been + updated. 
+ + - Documented the "yy_act" variable, which when YY_USER_ACTION is + invoked holds the number of the matched rule, and added an + example of using yy_act to profile how often each rule is matched. + + - Added YY_NUM_RULES, a definition that gives the total number + of rules in the file, including the default rule (even if you + use -s). + + - Documentation now clarifies that you can pass a nil FILE* pointer + to yy_create_buffer() or yyrestart() if you've arrange YY_INPUT + to not need yyin. + + - Documentation now clarifies that YY_BUFFER_STATE is a pointer to + an opaque "struct yy_buffer_state". + + - Documentation now stresses that you gain the benefits of removing + backing-up states only if you remove *all* of them. + + - Documentation now points out that traditional lex allows you + to put the action on a separate line from the rule pattern if + the pattern has trailing whitespace (ugh!), but flex doesn't + support this. + + - A broken example in documentation of the difference between + inclusive and exclusive start conditions is now fixed. + + - Usage (-h) report now goes to stdout. + + - Version (-V) info now goes to stdout. + + - More #ifdef chud has been added to the parser in attempt to + deal with bison's use of alloca(). + + - "make clean" no longer deletes emacs backup files (*~). + + - Some memory leaks have been fixed. + + - A bug was fixed in which dynamically-expanded buffers were + reallocated a couple of bytes too small. + + - A bug was fixed which could cause flex to read and write beyond + the end of the input buffer. + + - -S will not be going away. + + +Changes between release 2.4.7 (03Aug94) and release 2.4.6: + + - Fixed serious bug in reading multiple files. + + - Fixed bug in scanning NUL's. + + - Fixed bug in input() returning 8-bit characters. + + - Fixed bug in matching text with embedded NUL's when + using %array or lex compatibility. + + - Fixed multiple invocations of YY_USER_ACTION when using '|' + continuation action. 
+ + - Minor prototyping fixes. + +Changes between release 2.4.6 (04Jan94) and release 2.4.5: + + - Linking with -lfl no longer required if your program includes + its own yywrap() and main() functions. (This change will cause + problems if you have a non-ANSI compiler on a system for which + sizeof(int) != sizeof(void*) or sizeof(int) != sizeof(size_t).) + + - The use of 'extern "C++"' in FlexLexer.h has been modified to + get around an incompatibility with g++'s header files. + +Changes between release 2.4.5 (11Dec93) and release 2.4.4: + + - Fixed bug breaking C++ scanners that use REJECT or variable + trailing context. + + - Fixed serious input problem for interactive scanners on + systems for which char is unsigned. + + - Fixed bug in incorrectly treating '$' operator as variable + trailing context. + + - Fixed bug in -CF table representation that could lead to + corrupt tables. + + - Fixed fairly benign memory leak. + + - Added `extern "C++"' wrapper to FlexLexer.h header. This + should overcome the g++ 2.5.X problems mentioned in the + NEWS for release 2.4.3. + + - Changed #include of FlexLexer.h to use <> instead of "". + + - Added feature to control whether the scanner attempts to + refill the input buffer once it's exhausted. This feature + will be documented in the 2.5 release. + + +Changes between release 2.4.4 (07Dec93) and release 2.4.3: + + - Fixed two serious bugs in scanning 8-bit characters. + + - Fixed bug in YY_USER_ACTION that caused it to be executed + inappropriately (on the scanner's own internal actions, and + with incorrect yytext/yyleng values). + + - Fixed bug in pointing yyin at a new file and resuming scanning. + + - Portability fix regarding min/max/abs macros conflicting with + function definitions in standard header files. + + - Added a virtual LexerError() method to the C++ yyFlexLexer class + for reporting error messages instead of always using cerr. 
+ + - Added warning in flexdoc that the C++ scanning class is presently + experimental and subject to considerable change between major + releases. + + +Changes between release 2.4.3 (03Dec93) and release 2.4.2: + + - Fixed bug causing fatal scanner messages to fail to print. + + - Fixed things so FlexLexer.h can be included in other C++ + sources. One side-effect of this change is that -+ and -CF + are now incompatible. + + - libfl.a now supplies private versions of the the / + string routines needed by flex and the scanners + it generates, to enhance portability to some BSD systems. + + - More robust solution to 2.4.2's flexfatal() bug fix. + + - Added ranlib of installed libfl.a. + + - Some lint tweaks. + + - NOTE: problems have been encountered attempting to build flex + C++ scanners using g++ version 2.5.X. The problem is due to an + unfortunate heuristic in g++ 2.5.X that attempts to discern between + C and C++ headers. Because FlexLexer.h is installed (by default) + in /usr/local/include and not /usr/local/lib/g++-include, g++ 2.5.X + decides that it's a C header :-(. So if you have problems, install + the header in /usr/local/lib/g++-include instead. + + +Changes between release 2.4.2 (01Dec93) and release 2.4.1: + + - Fixed bug in libfl.a referring to non-existent "flexfatal" function. + + - Modified to produce both compress'd and gzip'd tar files for + distributions (you probably don't care about this change!). + + +Changes between release 2.4.1 (30Nov93) and release 2.3.8: + + - The new '-+' flag instructs flex to generate a C++ scanner class + (thanks to Kent Williams). flex writes an implementation of the + class defined in FlexLexer.h to lex.yy.cc. You may include + multiple scanner classes in your program using the -P flag. Note + that the scanner class also provides a mechanism for creating + reentrant scanners. The scanner class uses C++ streams for I/O + instead of FILE*'s (thanks to Tom Epperly). 
If the flex executable's + name ends in '+' then the '-+' flag is automatically on, so creating + a symlink or copy of "flex" to "flex++" results in a version of + flex that can be used exclusively for C++ scanners. + + Note that without the '-+' flag, flex-generated scanners can still + be compiled using C++ compilers, though they use FILE*'s for I/O + instead of streams. + + See the "GENERATING C++ SCANNERS" section of flexdoc for details. + + - The new '-l' flag turns on maximum AT&T lex compatibility. In + particular, -l includes support for "yylineno" and makes yytext + be an array instead of a pointer. It does not, however, do away + with all incompatibilities. See the "INCOMPATIBILITIES WITH LEX + AND POSIX" section of flexdoc for details. + + - The new '-P' option specifies a prefix to use other than "yy" + for the scanner's globally-visible variables, and for the + "lex.yy.c" filename. Using -P you can link together multiple + flex scanners in the same executable. + + - The distribution includes a "texinfo" version of flexdoc.1, + contributed by Roland Pesch (thanks also to Marq Kole, who + contributed another version). It has not been brought up to + date, but reflects version 2.3. See MISC/flex.texinfo. + + The flex distribution will soon include G.T. Nicol's flex + manual; he is presently bringing it up-to-date for version 2.4. + + - yywrap() is now a function, and you now *must* link flex scanners + with libfl.a. + + - Site-configuration is now done via an autoconf-generated + "configure" script contributed by Francois Pinard. + + - Scanners now use fread() (or getc(), if interactive) and not + read() for input. A new "table compression" option, -Cr, + overrides this change and causes the scanner to use read() + (because read() is a bit faster than fread()). -f and -F + are now equivalent to -Cfr and -CFr; i.e., they imply the + -Cr option. 
+ + - In the blessed name of POSIX compliance, flex supports "%array" + and "%pointer" directives in the definitions (first) section of + the scanner specification. The former specifies that yytext + should be an array (of size YYLMAX), the latter, that it should + be a pointer. The array version of yytext is universally slower + than the pointer version, but has the advantage that its contents + remain unmodified across calls to input() and unput() (the pointer + version of yytext is, still, trashed by such calls). + + "%array" cannot be used with the '-+' C++ scanner class option. + + - The new '-Ca' option directs flex to trade off memory for + natural alignment when generating a scanner's tables. In + particular, table entries that would otherwise be "short" + become "long". + + - The new '-h' option produces a summary of the flex flags. + + - The new '-V' option reports the flex version number and exits. + + - The new scanner macro YY_START returns an integer value + corresponding to the current start condition. You can return + to that start condition by passing the value to a subsequent + "BEGIN" action. You also can implement "start condition stacks" + by storing the values in an integer stack. + + - You can now redefine macros such as YY_INPUT by just #define'ing + them to some other value in the first section of the flex input; + no need to first #undef them. + + - flex now generates warnings for rules that can't be matched. + These warnings can be turned off using the new '-w' flag. If + your scanner uses REJECT then you will not get these warnings. + + - If you specify the '-s' flag but the default rule can be matched, + flex now generates a warning. + + - "yyleng" is now a global, and may be modified by the user (though + doing so and then using yymore() will yield weird results). + + - Name definitions in the first section of a scanner specification + can now include a leading '^' or trailing '$' operator. 
In this + case, the definition is *not* pushed back inside of parentheses. + + - Scanners with compressed tables are now "interactive" (-I option) + by default. You can suppress this attribute (which makes them + run slightly slower) using the new '-B' flag. + + - Flex now generates 8-bit scanners by default, unless you use the + -Cf or -CF compression options (-Cfe and -CFe result in 8-bit + scanners). You can force it to generate a 7-bit scanner using + the new '-7' flag. You can build flex to generate 8-bit scanners + for -Cf and -CF, too, by adding -DDEFAULT_CSIZE=256 to CFLAGS + in the Makefile. + + - You no longer need to call the scanner routine yyrestart() to + inform the scanner that you have switched to a new file after + having seen an EOF on the current input file. Instead, just + point yyin at the new file and continue scanning. + + - You no longer need to invoke YY_NEW_FILE in an <<EOF>> action + to indicate you wish to continue scanning. Simply point yyin + at a new file. + + - A leading '#' no longer introduces a comment in a flex input. + + - flex no longer considers formfeed ('\f') a whitespace character. + + - %t, I'm happy to report, has been nuked. + + - The '-p' option may be given twice ('-pp') to instruct flex to + report minor performance problems as well as major ones. + + - The '-v' verbose output no longer includes start/finish time + information. + + - Newlines in flex inputs can optionally include leading or + trailing carriage-returns ('\r'), in support of several PC/Mac + run-time libraries that automatically include these. + + - A start condition of the form "<*>" makes the following rule + active in every start condition, whether exclusive or inclusive. + + - The following items have been corrected in the flex documentation: + + - '-C' table compression options *are* cumulative. + + - You may modify yytext but not lengthen it by appending + characters to the end. 
Modifying its final character + will affect '^' anchoring for the next rule matched + if the character is changed to or from a newline. + + - The term "backtracking" has been renamed "backing up", + since it is a one-time repositioning and not a repeated + search. What used to be the "lex.backtrack" file is now + "lex.backup". + + - Unindented "/* ... */" comments are allowed in the first + flex input section, but not in the second. + + - yyless() can only be used in the flex input source, not + externally. + + - You can use "yyrestart(yyin)" to throw away the + current contents of the input buffer. + + - To write high-speed scanners, attempt to match as much + text as possible with each rule. See MISC/fastwc/README + for more information. + + - Using the beginning-of-line operator ('^') is fairly + cheap. Using unput() is expensive. Using yyless() is + cheap. + + - An example of scanning strings with embedded escape + sequences has been added. + + - The example of backing-up in flexdoc was erroneous; it + has been corrected. + + - A flex scanner's internal buffer now dynamically grows if needed + to match large tokens. Note that growing the buffer presently + requires rescanning the (large) token, so consuming a lot of + text this way is a slow process. Also note that presently the + buffer does *not* grow if you unput() more text than can fit + into the buffer. + + - The MISC/ directory has been reorganized; see MISC/README for + details. + + - yyless() can now be used in the third (user action) section + of a scanner specification, thanks to Ceriel Jacobs. yyless() + remains a macro and cannot be used outside of the scanner source. + + - The skeleton file is no longer opened at run-time, but instead + compiled into a large string array (thanks to John Gilmore and + friends at Cygnus). You can still use the -S flag to point flex + at a different skeleton file. + + - flex no longer uses a temporary file to store the scanner's + actions. 
+ + - A number of changes have been made to decrease porting headaches. + In particular, flex no longer uses memset() or ctime(), and + provides a single simple mechanism for dealing with C compilers + that still define malloc() as returning char* instead of void*. + + - Flex now detects if the scanner specification requires the -8 flag + but the flag was not given or on by default. + + - A number of table-expansion fencepost bugs have been fixed, + making flex more robust for generating large scanners. + + - flex more consistently identifies the location of errors in + its input. + + - YY_USER_ACTION is now invoked only for "real" actions, not for + internal actions used by the scanner for things like filling + the buffer or handling EOF. + + - The rule "[^]]" now matches any character other than a ']'; + formerly it matched any character at all followed by a ']'. + This change was made for compatibility with AT&T lex. + + - A large number of miscellaneous bugs have been found and fixed + thanks to Gerhard Wilhelms. + + - The source code has been heavily reformatted, making patches + relative to previous flex releases no longer accurate. + + +Changes between 2.3 Patch #8 (21Feb93) and 2.3 Patch #7: + + - Fixed bugs in dynamic memory allocation leading to grievous + fencepost problems when generating large scanners. + - Fixed bug causing infinite loops on character classes with 8-bit + characters in them. + - Fixed bug in matching repetitions with a lower bound of 0. + - Fixed bug in scanning NUL characters using an "interactive" scanner. + - Fixed bug in using yymore() at the end of a file. + - Fixed bug in misrecognizing rules with variable trailing context. + - Fixed bug compiling flex on Suns using gcc 2. + - Fixed bug in not recognizing that input files with the character + ASCII 128 in them require the -8 flag. + - Fixed bug that could cause an infinite loop writing out + error messages. 
+ - Fixed bug in not recognizing old-style lex % declarations if + followed by a tab instead of a space. + - Fixed potential crash when flex terminated early (usually due + to a bad flag) and the -v flag had been given. + - Added some missing declarations of void functions. + - Changed to only use '\a' for __STDC__ compilers. + - Updated mailing addresses. + + +Changes between 2.3 Patch #7 (28Mar91) and 2.3 Patch #6: + + - Fixed out-of-bounds array access that caused bad tables + to be produced on machines where the bad reference happened + to yield a 1. This caused problems installing or running + flex on some Suns, in particular. + + +Changes between 2.3 Patch #6 (29Aug90) and 2.3 Patch #5: + + - Fixed a serious bug in yymore() which basically made it + completely broken. Thanks goes to Jean Christophe of + the Nethack development team for finding the problem + and passing along the fix. + + +Changes between 2.3 Patch #5 (16Aug90) and 2.3 Patch #4: + + - An up-to-date version of initscan.c so "make test" will + work after applying the previous patches + + +Changes between 2.3 Patch #4 (14Aug90) and 2.3 Patch #3: + + - Fixed bug in hexadecimal escapes which allowed only digits, + not letters, in escapes + - Fixed bug in previous "Changes" file! + + +Changes between 2.3 Patch #3 (03Aug90) and 2.3 Patch #2: + + - Correction to patch #2 for gcc compilation; thanks goes to + Paul Eggert for catching this. + + +Changes between 2.3 Patch #2 (02Aug90) and original 2.3 release: + + - Fixed (hopefully) headaches involving declaring malloc() + and free() for gcc, which defines __STDC__ but (often) doesn't + come with the standard include files such as <stdlib.h>. + Reordered #ifdef maze in the scanner skeleton in the hope of + getting the declarations right for cfront and g++, too. + + - Note that this patch supersedes patch #1 for release 2.3, + which was never announced but was available briefly for + anonymous ftp. 
+ + +Changes between 2.3 (full) release of 28Jun90 and 2.2 (alpha) release: + +User-visible: + + - A lone <> rule (that is, one which is not qualified with + a list of start conditions) now specifies the EOF action for + *all* start conditions which haven't already had <> actions + given. To specify an end-of-file action for just the initial + state, use <>. + + - -d debug output is now contigent on the global yy_flex_debug + being set to a non-zero value, which it is by default. + + - A new macro, YY_USER_INIT, is provided for the user to specify + initialization action to be taken on the first call to the + scanner. This action is done before the scanner does its + own initialization. + + - yy_new_buffer() has been added as an alias for yy_create_buffer() + + - Comments beginning with '#' and extending to the end of the line + now work, but have been deprecated (in anticipation of making + flex recognize #line directives). + + - The funky restrictions on when semi-colons could follow the + YY_NEW_FILE and yyless macros have been removed. They now + behave identically to functions. + + - A bug in the sample redefinition of YY_INPUT in the documentation + has been corrected. + + - A bug in the sample simple tokener in the documentation has + been corrected. + + - The documentation on the incompatibilities between flex and + lex has been reordered so that the discussion of yylineno + and input() come first, as it's anticipated that these will + be the most common source of headaches. + + +Things which didn't used to be documented but now are: + + - flex interprets "^foo|bar" differently from lex. flex interprets + it as "match either a 'foo' or a 'bar', providing it comes at the + beginning of a line", whereas lex interprets it as "match either + a 'foo' at the beginning of a line, or a 'bar' anywhere". + + - flex initializes the global "yyin" on the first call to the + scanner, while lex initializes it at compile-time. 
+ + - yy_switch_to_buffer() can be used in the yywrap() macro/routine. + + - flex scanners do not use stdio for their input, and hence when + writing an interactive scanner one must explictly call fflush() + after writing out a prompt. + + - flex scanner can be made reentrant (after a fashion) by using + "yyrestart( yyin );". This is useful for interactive scanners + which have interrupt handlers that long-jump out of the scanner. + + - a defense of why yylineno is not supported is included, along + with a suggestion on how to convert scanners which rely on it. + + +Other changes: + + - Prototypes and proper declarations of void routines have + been added to the flex source code, courtesy of Kevin B. Kenny. + + - Routines dealing with memory allocation now use void* pointers + instead of char* - see Makefile for porting implications. + + - Error-checking is now done when flex closes a file. + + - Various lint tweaks were added to reduce the number of gripes. + + - Makefile has been further parameterized to aid in porting. + + - Support for SCO Unix added. + + - Flex now sports the latest & greatest UC copyright notice + (which is only slightly different from the previous one). + + - A note has been added to flexdoc.1 mentioning work in progress + on modifying flex to generate straight C code rather than a + table-driven automaton, with an email address of whom to contact + if you are working along similar lines. + + +Changes between 2.2 Patch #3 (30Mar90) and 2.2 Patch #2: + + - fixed bug which caused -I scanners to bomb + + +Changes between 2.2 Patch #2 (27Mar90) and 2.2 Patch #1: + + - fixed bug writing past end of input buffer in yyunput() + - fixed bug detecting NUL's at the end of a buffer + + +Changes between 2.2 Patch #1 (23Mar90) and 2.2 (alpha) release: + + - Makefile fixes: definition of MAKE variable for systems + which don't have it; installation of flexdoc.1 along with + flex.1; fixed two bugs which could cause "bigtest" to fail. 
+ + - flex.skel fix for compiling with g++. + + - README and flexdoc.1 no longer list an out-of-date BITNET address + for contacting me. + + - minor typos and formatting changes to flex.1 and flexdoc.1. + + +Changes between 2.2 (alpha) release of March '90 and previous release: + +User-visible: + + - Full user documentation now available. + + - Support for 8-bit scanners. + + - Scanners now accept NUL's. + + - A facility has been added for dealing with multiple + input buffers. + + - Two manual entries now. One which fully describes flex + (rather than just its differences from lex), and the + other for quick(er) reference. + + - A number of changes to bring flex closer into compliance + with the latest POSIX lex draft: + + %t support + flex now accepts multiple input files and concatenates + them together to form its input + previous -c (compress) flag renamed -C + do-nothing -c and -n flags added + Any indented code or code within %{}'s in section 2 is + now copied to the output + + - yyleng is now a bona fide global integer. + + - -d debug information now gives the line number of the + matched rule instead of which number rule it was from + the beginning of the file. + + - -v output now includes a summary of the flags used to generate + the scanner. + + - unput() and yyrestart() are now globally callable. + + - yyrestart() no longer closes the previous value of yyin. + + - C++ support; generated scanners can be compiled with C++ compiler. + + - Primitive -lfl library added, containing default main() + which calls yylex(). A number of routines currently living + in the scanner skeleton will probably migrate to here + in the future (in particular, yywrap() will probably cease + to be a macro and instead be a function in the -lfl library). + + - Hexadecimal (\x) escape sequences added. + + - Support for MS-DOS, VMS, and Turbo-C integrated. + + - The %used/%unused operators have been deprecated. They + may go away soon. 
+ + +Other changes: + + - Makefile enhanced for easier testing and installation. + - The parser has been tweaked to detect some erroneous + constructions which previously were missed. + - Scanner input buffer overflow is now detected. + - Bugs with missing "const" declarations fixed. + - Out-of-date Minix/Atari patches provided. + - Scanners no longer require printf() unless FLEX_DEBUG is being used. + - A subtle input() bug has been fixed. + - Line numbers for "continued action" rules (those following + the special '|' action) are now correct. + - unput() bug fixed; had been causing problems porting flex to VMS. + - yymore() handling rewritten to fix bug with interaction + between yymore() and trailing context. + - EOF in actions now generates an error message. + - Bug involving -CFe and generating equivalence classes fixed. + - Bug which made -CF be treated as -Cf fixed. + - Support for SysV tmpnam() added. + - Unused #define's for scanner no longer generated. + - Error messages which are associated with a particular input + line are now all identified with their input line in standard + format. + - % directives which are valid to lex but not to flex are + now ignored instead of generating warnings. + - -DSYS_V flag can now also be specified -DUSG for System V + compilation. + + +Changes between 2.1 beta-test release of June '89 and previous release: + +User-visible: + + - -p flag generates a performance report to stderr. The report + consists of comments regarding features of the scanner rules + which result in slower scanners. + + - -b flag generates backtracking information to lex.backtrack. + This is a list of scanner states which require backtracking + and the characters on which they do so. By adding rules + one can remove backtracking states. If all backtracking states + are eliminated, the generated scanner will run faster. + Backtracking is not yet documented in the manual entry. 
+ + - Variable trailing context now works, i.e., one can have + rules like "(foo)*/[ \t]*bletch". Some trailing context + patterns still cannot be properly matched and generate + error messages. These are patterns where the ending of the + first part of the rule matches the beginning of the second + part, such as "zx*/xy*", where the 'x*' matches the 'x' at + the beginning of the trailing context. Lex won't get these + patterns right either. + + - Faster scanners. + + - End-of-file rules. The special rule "<>" indicates + actions which are to be taken when an end-of-file is + encountered and yywrap() returns non-zero (i.e., indicates + no further files to process). See manual entry for example. + + - The -r (reject used) flag is gone. flex now scans the input + for occurrences of the string "REJECT" to determine if the + action is needed. It tries to be intelligent about this but + can be fooled. One can force the presence or absence of + REJECT by adding a line in the first section of the form + "%used REJECT" or "%unused REJECT". + + - yymore() has been implemented. Similarly to REJECT, flex + detects the use of yymore(), which can be overridden using + "%used" or "%unused". + + - Patterns like "x{0,3}" now work (i.e., with lower-limit == 0). + + - Removed '\^x' for ctrl-x misfeature. + + - Added '\a' and '\v' escape sequences. + + - \ now works for octal escape sequences; previously + \0 was required. + + - Better error reporting; line numbers are associated with rules. + + - yyleng is a macro; it cannot be accessed outside of the + scanner source file. + + - yytext and yyleng should not be modified within a flex action. + + - Generated scanners #define the name FLEX_SCANNER. + + - Rules are internally separated by YY_BREAK in lex.yy.c rather + than break, to allow redefinition. + + - The macro YY_USER_ACTION can be redefined to provide an action + which is always executed prior to the matched rule's action. 
+ + - yyrestart() is a new action which can be used to restart + the scanner after it has seen an end-of-file (a "real" one, + that is, one for which yywrap() returned non-zero). It takes + a FILE* argument indicating a new file to scan and sets + things up so that a subsequent call to yylex() will start + scanning that file. + + - Internal scanner names all preceded by "yy_" + + - lex.yy.c is deleted if errors are encountered during processing. + + - Comments may be put in the first section of the input by preceding + them with '#'. + + + +Other changes: + + - Some portability-related bugs fixed, in particular for machines + with unsigned characters or sizeof( int* ) != sizeof( int ). + Also, tweaks for VMS and Microsoft C (MS-DOS), and identifiers all + trimmed to be 31 or fewer characters. Shortened file names + for dinosaur OS's. Checks for allocating > 64K memory + on 16 bit'ers. Amiga tweaks. Compiles using gcc on a Sun-3. + - Compressed and fast scanner skeletons merged. + - Skeleton header files done away with. + - Generated scanner uses prototypes and "const" for __STDC__. + - -DSV flag is now -DSYS_V for System V compilation. + - Removed all references to FTL language. + - Software now covered by BSD Copyright. + - flex will replace lex in subsequent BSD releases. diff --git a/platform/dbops/binaries/build/share/doc/flex/README.md b/platform/dbops/binaries/build/share/doc/flex/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b979b01062c122933f71e94c1f5ff109c1973336 --- /dev/null +++ b/platform/dbops/binaries/build/share/doc/flex/README.md @@ -0,0 +1,109 @@ +This is flex, the fast lexical analyzer generator. + +flex is a tool for generating scanners: programs which recognize +lexical patterns in text. 
+ +The flex codebase is kept in +[Git on GitHub.](https://github.com/westes/flex) + +Use GitHub's [issues](https://github.com/westes/flex/issues) and +[pull request](https://github.com/westes/flex) features to file bugs +and submit patches. + +There are several mailing lists available as well: + +* flex-announce@lists.sourceforge.net - where posts will be made + announcing new releases of flex. +* flex-help@lists.sourceforge.net - where you can post questions about + using flex +* flex-devel@lists.sourceforge.net - where you can discuss development + of flex itself + +Find information on subscribing to the mailing lists at: + +http://sourceforge.net/mail/?group_id=97492 + +The flex distribution contains the following files which may be of +interest: + +* README - This file. +* NEWS - current version number and list of user-visible changes. +* INSTALL - basic installation information. +* ABOUT-NLS - description of internationalization support in flex. +* COPYING - flex's copyright and license. +* doc/ - user documentation. +* examples/ - containing examples of some possible flex scanners and a + few other things. See the file examples/README for more + details. +* tests/ - regression tests. See TESTS/README for details. +* po/ - internationalization support files. + +You need the following tools to build flex from the maintainer's +repository: + +* compiler suite - flex is built with gcc +* bash, or a good Bourne-style shell +* m4 - m4 -p needs to work; GNU m4 and a few others are suitable +* GNU bison; to generate parse.c from parse.y +* autoconf; for handling the build system +* automake; for Makefile generation +* gettext; for i18n support +* help2man; to generate the flex man page +* tar, gzip, lzip, etc.; for packaging of the source distribution +* GNU texinfo; to build and test the flex manual. 
Note that if you want + to build the dvi/ps/pdf versions of the documentation you will need + texi2dvi and related programs, along with a sufficiently powerful + implementation of TeX to process them. See your operating system + documentation for how to achieve this. The printable versions of the + manual are not built unless specifically requested, but the targets + are included by automake. +* GNU indent; for indenting the flex source the way we want it done + +In cases where the versions of the above tools matter, the file +configure.ac will specify the minimum required versions. + +Once you have all the necessary tools installed, life becomes +simple. To prepare the flex tree for building, run the script: + +```bash +./autogen.sh +``` + +in the top level of the flex source tree. + +This script calls the various tools needed to get flex ready for the +GNU-style configure script to be able to work. + +From this point on, building flex follows the usual routine: + +```bash +configure && make && make install +``` + +This file is part of flex. + +This code is derived from software contributed to Berkeley by +Vern Paxson. + +The United States Government has rights in this work pursuant +to contract no. DE-AC03-76SF00098 between the United States +Department of Energy and the University of California. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. diff --git a/platform/dbops/binaries/build/share/locale/en@boldquot/LC_MESSAGES/flex.mo b/platform/dbops/binaries/build/share/locale/en@boldquot/LC_MESSAGES/flex.mo new file mode 100644 index 0000000000000000000000000000000000000000..692a49ab32dd8b010dc02c91566b6f8cbfa69093 Binary files /dev/null and b/platform/dbops/binaries/build/share/locale/en@boldquot/LC_MESSAGES/flex.mo differ diff --git a/platform/dbops/binaries/build/share/locale/en@quot/LC_MESSAGES/flex.mo b/platform/dbops/binaries/build/share/locale/en@quot/LC_MESSAGES/flex.mo new file mode 100644 index 0000000000000000000000000000000000000000..d32b1b59d37537d25613138f13f01545c34d7e85 Binary files /dev/null and b/platform/dbops/binaries/build/share/locale/en@quot/LC_MESSAGES/flex.mo differ diff --git a/platform/dbops/binaries/weaviate-src/.github/CODEOWNERS b/platform/dbops/binaries/weaviate-src/.github/CODEOWNERS new file mode 100644 index 0000000000000000000000000000000000000000..1431c466d927e266b087d00f87ee8415b613868c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/.github/CODEOWNERS @@ -0,0 +1,3 @@ +# Ci related folders +/.github/ @weaviate/core +/ci/ @weaviate/core diff --git a/platform/dbops/binaries/weaviate-src/.github/ISSUE_TEMPLATE/config.yml b/platform/dbops/binaries/weaviate-src/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..9a23a694cbdc89e1042ba6dcfec6a61015cc7f62 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,9 @@ +blank_issues_enabled: true +contact_links: + - name: Need Help? Go to the community forum + url: https://forum.weaviate.io + about: "Community-powered knowledge repository: search, ask questions, give feedback" + + - name: Want to chat? 
Go to the community slack + url: https://weaviate.io/slack + about: "Chat with the community about issues, questions, networking and feedback" diff --git a/platform/dbops/binaries/weaviate-src/.github/ISSUE_TEMPLATE/create_issue.yml b/platform/dbops/binaries/weaviate-src/.github/ISSUE_TEMPLATE/create_issue.yml new file mode 100644 index 0000000000000000000000000000000000000000..9bc7b160adb0d56895d179bba5d7b0b765d37204 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/.github/ISSUE_TEMPLATE/create_issue.yml @@ -0,0 +1,78 @@ +name: Found a bug? +description: Report your bug here. +labels: ["bug", "triage"] +body: + + - type: markdown + attributes: + value: | + #### Before you get started + * Check to make sure someone hasn't already opened a similar [issue](https://github.com/weaviate/weaviate-io/issues). + * Check this example of a [good bug report](https://github.com/weaviate/weaviate/issues/3762). + * Read the [Contributor Guide](https://weaviate.io/developers/contributor-guide) and [Code of Conduct](https://weaviate.io/service/code-of-conduct). + + - type: textarea + id: how_to_reproduce + attributes: + label: How to reproduce this bug? + description: Specify the steps here in order to reproduce this bug. + validations: + required: true + + - type: textarea + id: expected_behavior + attributes: + label: What is the expected behavior? + validations: + required: true + + - type: textarea + id: actual_behavior + attributes: + label: What is the actual behavior? + validations: + required: true + + - type: textarea + id: suporting_information + attributes: + label: Supporting information + description: Please, paste any logs, context information (client version? environment variables?) or other details in here. + validations: + required: false + + - type: input + id: server_version + attributes: + label: Server Version + description: What Weaviate Version are you running? 
+ validations: + required: true + + - type: dropdown + id: setup + attributes: + label: Weaviate Setup + description: What type of Weaviate setup are you using? + options: + - Single Node + - Multi-Node Cluster + validations: + required: true + + - type: input + id: setup_node_count + attributes: + label: Nodes count + description: How many nodes ? + validations: + required: false + + - type: checkboxes + id: terms + attributes: + label: Code of Conduct + description: This project has a Code of Conduct. All participants are expected to understand and follow the CoC. + options: + - label: I have read and agree to the Weaviate's [Contributor Guide](https://weaviate.io/developers/contributor-guide) and [Code of Conduct](https://weaviate.io/service/code-of-conduct) + required: true diff --git a/platform/dbops/binaries/weaviate-src/.github/ISSUE_TEMPLATE/feature_request.yml b/platform/dbops/binaries/weaviate-src/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 0000000000000000000000000000000000000000..7f08b54b97d95bc32674e173c1b31ca43dc2b26c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,29 @@ +name: Missing a Feature? +description: Suggest it here. +labels: ["feature request", "triage"] +body: + - type: markdown + attributes: + value: | + #### Before you get started + * Is this feature already in our [roadmap](https://weaviate.io/developers/weaviate/roadmap)? + * Check to make sure someone hasn't already [requested this feature](https://github.com/weaviate/weaviate/issues?q=is:issue+label:"feature+request"). if that's the case, give it a thumbs up 👍 + * Read the [Contributor Guide](https://weaviate.io/developers/contributor-guide) and [Code of Conduct](https://weaviate.io/service/code-of-conduct). + + - type: textarea + id: feature_request_body + attributes: + label: Describe your feature request + description: | + - Describe here your requested feature. 
Provide all necessary context, use cases and the reasoning on why this feature is a good one. + validations: + required: true + + - type: checkboxes + id: terms + attributes: + label: Code of Conduct + description: This project has a Code of Conduct that all participants are expected to understand and follow. + options: + - label: I have read and agree to the Weaviate's [Contributor Guide](https://weaviate.io/developers/contributor-guide) and [Code of Conduct](https://weaviate.io/service/code-of-conduct) + required: true diff --git a/platform/dbops/binaries/weaviate-src/.github/PULL_REQUEST_TEMPLATE.md b/platform/dbops/binaries/weaviate-src/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000000000000000000000000000000000..58406c6bd776ddd953daaf1d83cb08cf8e5f2568 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,24 @@ +### What's being changed: + + +### Review checklist + +- [ ] Documentation has been updated, if necessary. Link to changed documentation: +- [ ] Chaos pipeline run or not necessary. Link to pipeline: +- [ ] All new code is covered by tests where it is reasonable. +- [ ] Performance tests have been run or not necessary. 
+ + diff --git a/platform/dbops/binaries/weaviate-src/.github/dependabot.yml b/platform/dbops/binaries/weaviate-src/.github/dependabot.yml new file mode 100644 index 0000000000000000000000000000000000000000..cae66046e3fd663f8284c2826733b599d9742c4c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/.github/dependabot.yml @@ -0,0 +1,19 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + # Check for updates to GitHub Actions every week + interval: "weekly" + - package-ecosystem: "gomod" + # Enable Dependabot to monitor and automatically update Go module dependencies for + # security vulnerabilities and version updates + directories: + - "/" + - "/test/acceptance_with_go_client" + - "/test/benchmark_bm25" + labels: ["security-update", "dependencies"] + schedule: + interval: "monthly" + commit-message: + prefix: "dependencies(update)" diff --git a/platform/dbops/binaries/weaviate-src/.github/stale.yml b/platform/dbops/binaries/weaviate-src/.github/stale.yml new file mode 100644 index 0000000000000000000000000000000000000000..348e51e354118415e93aec7eb2dbb8bbfc6d23c9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/.github/stale.yml @@ -0,0 +1,33 @@ +# Number of days of inactivity before an issue becomes stale +daysUntilStale: 60 +# Number of days of inactivity before a stale issue is closed +daysUntilClose: 7 +# Issues with these labels will never be considered stale +exemptLabels: + - pinned + - security + - bug + - backlog + - needs-investigation +# Label to use when marking an issue as stale +staleLabel: autoclosed +# Comment to post when marking an issue as stale. Set to `false` to disable +markComment: > + Thank you for your contribution to Weaviate. This issue has not received any + activity in a while and has therefore been marked as stale. Stale issues will + eventually be autoclosed. 
This does not mean that we are ruling out to work + on this issue, but it most likely has not been prioritized high enough in the + last months. + + If you believe that this issue should remain open, please leave a short reply. + This lets us know that the issue is not abandoned and acts as a reminder for our + team to consider prioritizing this again. + + Please also consider if you can make a contribution to help with the solution + of this issue. If you are willing to contribute, but don't know where to start, + please leave a quick message and we'll try to help you. + + Thank you, + The Weaviate Team +# Comment to post when closing a stale issue. Set to `false` to disable +closeComment: false diff --git a/platform/dbops/binaries/weaviate-src/.github/workflows/cleanup.yaml b/platform/dbops/binaries/weaviate-src/.github/workflows/cleanup.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c2239de38da24b3ae1b625cf37e998325df72aad --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/.github/workflows/cleanup.yaml @@ -0,0 +1,29 @@ +name: cleanup caches by a branch +on: + pull_request: + types: + - closed + +jobs: + cleanup: + runs-on: ubuntu-latest + steps: + - name: Cleanup + run: | + gh extension install actions/gh-actions-cache + + echo "Fetching list of cache key" + cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH -L 100 | cut -f 1 ) + + ## Setting this to not fail the workflow while deleting cache keys. + set +e + echo "Deleting caches..." 
+ for cacheKey in $cacheKeysForPR + do + gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm + done + echo "Done" + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + REPO: ${{ github.repository }} + BRANCH: refs/pull/${{ github.event.pull_request.number }}/merge \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/.github/workflows/create-cross-functional-issues.yml b/platform/dbops/binaries/weaviate-src/.github/workflows/create-cross-functional-issues.yml new file mode 100644 index 0000000000000000000000000000000000000000..a2e982deb800d82d8fc5b5f6919455647540bc0f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/.github/workflows/create-cross-functional-issues.yml @@ -0,0 +1,88 @@ +name: Create Cross-Functional Issues + +on: + pull_request: + types: [opened] + workflow_dispatch: + inputs: + pr_body: + description: 'PR body content for testing' + required: true + default: '- [x] This change requires public documentation (weaviate-io) to be updated.' + +jobs: + create-cross-functional-issues: + runs-on: ubuntu-latest + if: ${{ !github.event.pull_request.head.repo.fork }} + steps: + - uses: actions/checkout@v5 + + - name: Check github token existence + env: + GH_TOKEN: ${{secrets.CROSS_REPO_ISSUE_WRITER_TOKEN}} + run: | + if [[ "$GH_TOKEN" == "" ]]; then + echo "gh_token_exists=false" >> $GITHUB_ENV + else + echo "gh_token_exists=true" >> $GITHUB_ENV + fi + + - name: Check PR body + id: check_pr + if: ${{ env.gh_token_exists == 'true' }} + uses: actions/github-script@v7 + with: + github-token: ${{secrets.CROSS_REPO_ISSUE_WRITER_TOKEN}} + script: | + const pr = context.payload.pull_request ? 
+ context.payload.pull_request : + await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: context.issue.number + }).then(res => res.data); + + const body = pr.body || ''; + + const checkboxes = [ + { repo: 'weaviate-io', regex: /- \[x\] This change requires public documentation \(weaviate-io\) to be updated/ }, + { repo: 'weaviate-python-client', regex: /- \[x\] Python \(weaviate-python-client\)/ }, + { repo: 'typescript-client', regex: /- \[x\] JavaScript\/TypeScript \(typescript-client\)/ }, + { repo: 'weaviate-go-client', regex: /- \[x\] Go \(weaviate-go-client\)/ }, + { repo: 'java-client', regex: /- \[x\] Java \(java-client\)/ } + ]; + + const results = checkboxes.map(checkbox => ({ + repo: checkbox.repo, + checked: checkbox.regex.test(body) + })); + + console.log('Checkbox results:', JSON.stringify(results)); + return results; + + - name: Create issues in respective repos + if: ${{ env.gh_token_exists == 'true' }} + uses: actions/github-script@v7 + env: + RESULTS: ${{ steps.check_pr.outputs.result }} + with: + github-token: ${{secrets.CROSS_REPO_ISSUE_WRITER_TOKEN}} + script: | + const results = JSON.parse(process.env.RESULTS); + const pr = context.payload.pull_request; + + for (const result of results) { + if (result.checked) { + const issueTitle = `Update ${result.repo} for PR #${pr.number}`; + const issueBody = `A change in [PR #${pr.number}](${pr.html_url}) requires updates in the ${result.repo} repository.`; + + await github.rest.issues.create({ + owner: context.repo.owner, + repo: result.repo, + title: issueTitle, + body: issueBody + }); + + console.log(`Created issue in ${result.repo}`); + } + } diff --git a/platform/dbops/binaries/weaviate-src/.github/workflows/find-duplicates.yaml b/platform/dbops/binaries/weaviate-src/.github/workflows/find-duplicates.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fea399e22ce0603d23ee02972dedaa995008e965 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/.github/workflows/find-duplicates.yaml @@ -0,0 +1,34 @@ +name: Find Issue Duplicates + +on: + issues: + types: [opened] + +permissions: + issues: write + +jobs: + triage: + runs-on: ubuntu-latest + # Only run if issue has "bug" label or no labels + if: contains(github.event.issue.labels.*.name, 'bug') || toJSON(github.event.issue.labels) == '[]' + steps: + - name: Check for similar issues + id: similarity-check + uses: weaviate/weaviate-github-issue-triage@v1 + with: + issue_body: ${{ github.event.issue.body }} + issue_number: ${{ github.event.issue.number }} + weaviate_url: ${{ secrets.WEAVIATE_URL }} + weaviate_api_key: ${{ secrets.WEAVIATE_API_KEY }} # optional + github_token: ${{ secrets.GITHUB_TOKEN }} + collection_name: GitHubIssuesWeaviate + + - name: Log results + if: always() + run: | + echo "Found similar issue: ${{ steps.similarity-check.outputs.found_similar_issue }}" + echo "Comment posted: ${{ steps.similarity-check.outputs.comment_posted }}" + if [ "${{ steps.similarity-check.outputs.error_message }}" != "" ]; then + echo "Error: ${{ steps.similarity-check.outputs.error_message }}" + fi diff --git a/platform/dbops/binaries/weaviate-src/.github/workflows/licence_updater.yaml b/platform/dbops/binaries/weaviate-src/.github/workflows/licence_updater.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2f5b5398ba432d77e526372a3e8d0b8d5db01237 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/.github/workflows/licence_updater.yaml @@ -0,0 +1,32 @@ +name: License Header Updater + +on: + schedule: + - cron: '0 0 1 1 *' # Every January 1st + workflow_dispatch: + +permissions: + contents: write + pull-requests: write + +jobs: + update-headers: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - name: Get current year + id: date + run: echo "year=$(date +'%Y')" >> "$GITHUB_OUTPUT" + - name: Install Go 1.24 + uses: actions/setup-go@v5 + with: + go-version: '1.24' + - name: Run 
license header update (go run) + run: go run ./tools/license_headers/main.go + - name: Create PR + uses: peter-evans/create-pull-request@v7 + with: + commit-message: "chore: update license headers for ${{ steps.date.outputs.year }}" + title: "Update license headers for ${{ steps.date.outputs.year }}" + body: "Automatically updated license headers for year ${{ steps.date.outputs.year }}." + labels: "chore" diff --git a/platform/dbops/binaries/weaviate-src/.github/workflows/linter.yml b/platform/dbops/binaries/weaviate-src/.github/workflows/linter.yml new file mode 100644 index 0000000000000000000000000000000000000000..5c8126b1483bf8122b213e74b0c2ccabe55c4fa0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/.github/workflows/linter.yml @@ -0,0 +1,42 @@ +name: golangci-lint +on: + push: + branches: + - main + tags: + - '**' + pull_request: +jobs: + golangci: + name: golangci + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v5 + with: + go-version: '1.24' + - uses: actions/checkout@v5 + - name: golangci-lint + uses: golangci/golangci-lint-action@v8 + protolint: + name: protolint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - uses: yoheimuta/action-protolint@v1 + python: + name: python checks + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - uses: psf/black@stable + with: + version: "24.2.0" + custom: + name: custom checks + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - name: Error groups checks + run: ./tools/linter_error_groups.sh + - name: goroutine checks + run: ./tools/linter_go_routines.sh diff --git a/platform/dbops/binaries/weaviate-src/.github/workflows/pull_requests.yaml b/platform/dbops/binaries/weaviate-src/.github/workflows/pull_requests.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d882effc47b4dc5adbb6408cac1aa5aa05d3b332 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/.github/workflows/pull_requests.yaml @@ -0,0 +1,611 @@ +name: Tests + +on: 
+ push: + branches: + - main + - 'stable/v*' + tags: + - '**' + pull_request: + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + Buf-Checks: + name: buf-checks + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - uses: bufbuild/buf-setup-action@v1 + with: + github_token: ${{ github.token }} + - uses: bufbuild/buf-lint-action@v1 + with: + input: "cluster/proto" + - uses: bufbuild/buf-breaking-action@v1 + if: github.event_name == 'pull_request' + with: + input: "cluster/proto" + against: "https://github.com/${GITHUB_REPOSITORY}.git#branch=${{ github.event.pull_request.base.ref }},subdir=cluster/proto" + Run-Swagger: + name: run-swagger + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + cache: true + - name: Run Swagger + run: ./tools/gen-code-from-swagger.sh + - name: Error on change + run: | + # check if anything is different + CHANGED=$(git status -s | wc -l) + if [ "$CHANGED" -gt 0 ]; then + echo "Please run ./tools/gen-code-from-swagger.sh script and commit changes:" + git status -s + exit 1 + else + exit 0 + fi + Vulnerability-Scanning: + name: vulnerability-scanning + runs-on: ubuntu-latest + env: + PROJECT_KEY: ${{secrets.ORCA_PROJECT_KEY}} + if: ${{ !github.event.pull_request.head.repo.fork }} # no PRs from fork + steps: + - uses: actions/checkout@v5 + - name: Run locally Docker build + run: docker build -t weaviate:${{ github.sha }} . 
+ - name: Run Orca Container Image Scan + id: orcasecurity_container_image_scan + uses: orcasecurity/shiftleft-container-image-action@v1 + with: + api_token: ${{ secrets.ORCA_SECURITY_API_TOKEN }} + project_key: ${{ env.PROJECT_KEY }} + image: 'weaviate:${{ github.sha }}' + console_output: "table" + format: "sarif" + output: "results/" + - name: Upload SARIF file + uses: github/codeql-action/upload-sarif@v3 + if: ${{ always() && steps.orcasecurity_container_image_scan.outputs.exit_code != 1 }} + with: + sarif_file: results/image.sarif + SAST-Scanning: + name: sast-scanning + runs-on: ubuntu-latest + env: + PROJECT_KEY: ${{secrets.ORCA_PROJECT_KEY}} + if: ${{ !github.event.pull_request.head.repo.fork }} # no PRs from fork + steps: + - uses: actions/checkout@v5 + - name: Run Orca SAST Scan + id: orcasecurity_container_sast_action + uses: orcasecurity/shiftleft-sast-action@v1 + with: + api_token: ${{ secrets.ORCA_SECURITY_API_TOKEN }} + project_key: ${{ env.PROJECT_KEY }} + # diff scans against the entire git history, so should only scan changes in this PR + fetch-depth: 0 + path: + # scanning the entire repository + "." 
+ Unit-Tests: + name: unit-tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + cache: true + - name: Unit test + run: ./test/run.sh --unit-only + - name: Archive code coverage results + uses: actions/upload-artifact@v4 + with: + name: coverage-report-unit + path: coverage-unit.txt + Integration-Tests: + name: integration-tests + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + test: [ + "--integration-vector-package-only", + "--integration-without-vector-package" + ] + steps: + - uses: actions/checkout@v5 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + cache: true + - name: Integration test + run: ./test/run.sh ${{ matrix.test }} + - name: Archive code coverage results + uses: actions/upload-artifact@v4 + with: + name: coverage-report-integration${{ matrix.test }} + path: coverage-integration.txt + Modules-Acceptance-Tests: + name: modules-acceptance-tests + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + test: [ + "--only-module-backup-azure", + "--only-module-backup-filesystem", + "--only-module-backup-gcs", + "--only-module-backup-s3", + "--only-module-offload-s3", + "--only-module-text2vec-transformers", + "--only-module-text2vec-ollama", + "--only-module-generative-ollama", + "--only-module-text2vec-model2vec" + ] + steps: + - uses: actions/checkout@v5 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + cache: true + - name: Login to Docker Hub + if: ${{ !github.event.pull_request.head.repo.fork && github.triggering_actor != 'dependabot[bot]' }} + uses: docker/login-action@v3 + with: + username: ${{secrets.DOCKER_USERNAME}} + password: ${{secrets.DOCKER_PASSWORD}} + - name: Acceptance tests (modules) + uses: nick-fields/retry@v3 + with: + # 15 Minute is a large enough timeout for most of our tests + timeout_minutes: 15 + max_attempts: 2 + command: ./test/run.sh ${{ 
matrix.test }} + on_retry_command: ./test/run.sh --cleanup + Modules-On-Demand-Tests-Check: + name: modules-on-demand-tests-check + runs-on: ubuntu-latest + if: ${{ !github.event.pull_request.head.repo.fork }} # no PRs from fork + outputs: + run_pipeline: ${{ steps.check.outputs.run_pipeline }} + steps: + - uses: actions/checkout@v5 + with: + fetch-depth: 2 + - id: check + name: check + env: + COMMIT_SHA: ${{ github.event.pull_request.head.sha }} + run: | + commit_message=$(git log -1 --format=%B $COMMIT_SHA) + if [[ "$commit_message" == *"[api]"* || "$commit_message" == "prepare release"* || "$commit_message" == *"[test]"* ]]; then + echo "Run pipeline" + echo "run_pipeline=true" >> $GITHUB_OUTPUT + else + echo "Skip pipeline. In order to run the pipeline commit title must contain: [api]" + echo "run_pipeline=false" >> $GITHUB_OUTPUT + fi + Modules-Acceptance-Tests-large: + name: modules-acceptance-tests-large + runs-on: DB-ubuntu-24.04-4-cores + needs: [Modules-On-Demand-Tests-Check] + if: ${{ needs.Modules-On-Demand-Tests-Check.outputs.run_pipeline == 'true' }} # no PRs from fork + strategy: + fail-fast: false + matrix: + test: [ + "--only-module-qna-transformers", + "--only-module-sum-transformers", + "--only-module-multi2vec-clip", + "--only-module-reranker-transformers" + ] + steps: + - uses: actions/checkout@v5 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + cache: true + - name: Login to Docker Hub + uses: docker/login-action@v3 + if: ${{ !github.event.pull_request.head.repo.fork && github.triggering_actor != 'dependabot[bot]' }} + with: + username: ${{secrets.DOCKER_USERNAME}} + password: ${{secrets.DOCKER_PASSWORD}} + - name: Acceptance tests Large (modules) + uses: nick-fields/retry@v3 + with: + # 15 Minute is a large enough timeout for most of our tests + timeout_minutes: 15 + max_attempts: 2 + command: ./test/run.sh ${{ matrix.test }} + on_retry_command: ./test/run.sh --cleanup + Modules-Acceptance-Tests-light: + name: 
modules-acceptance-tests-light + runs-on: ubuntu-latest + needs: [Modules-On-Demand-Tests-Check] + if: ${{ needs.Modules-On-Demand-Tests-Check.outputs.run_pipeline == 'true' }} # no PRs from fork + strategy: + fail-fast: false + matrix: + test: [ + "--only-module-text2vec-contextionary", + "--only-module-img2vec-neural", + "--only-module-ref2vec-centroid", + "--only-module-many-modules", + "--only-module-many-generative" + ] + steps: + - uses: actions/checkout@v5 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + cache: true + - name: Login to Docker Hub + uses: docker/login-action@v3 + if: ${{ !github.event.pull_request.head.repo.fork && github.triggering_actor != 'dependabot[bot]' }} + with: + username: ${{secrets.DOCKER_USERNAME}} + password: ${{secrets.DOCKER_PASSWORD}} + - name: Acceptance tests Large (modules) + uses: nick-fields/retry@v3 + with: + # 15 Minute is a large enough timeout for most of our tests + timeout_minutes: 15 + max_attempts: 2 + command: ./test/run.sh ${{ matrix.test }} + on_retry_command: ./test/run.sh --cleanup + Modules-Acceptance-Tests-api: + name: modules-acceptance-tests-api + runs-on: ubuntu-latest + needs: [Modules-On-Demand-Tests-Check] + if: ${{ needs.Modules-On-Demand-Tests-Check.outputs.run_pipeline == 'true' && !github.event.pull_request.head.repo.fork }} # no PRs from fork + strategy: + fail-fast: false + matrix: + test: [ + "--only-module-multi2vec-cohere", + "--only-module-multi2vec-google", + "--only-module-generative-aws", + "--only-module-generative-cohere", + "--only-module-generative-google", + "--only-module-generative-openai", + "--only-module-text2vec-google", + "--only-module-text2vec-aws", + "--only-module-text2vec-jinaai", + "--only-module-multi2vec-jinaai", + "--only-module-text2multivec-jinaai", + "--only-module-multi2multivec-jinaai", + ] + steps: + - uses: actions/checkout@v5 + with: + fetch-depth: 2 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + 
cache: true + - name: check + env: + COMMIT_SHA: ${{ github.event.pull_request.head.sha }} + run: | + commit_message=$(git log -1 --format=%B $COMMIT_SHA) + if [[ "$commit_message" == *"[api]"* || "$commit_message" == "prepare release"* ]]; then + echo "Run pipeline" + echo "run_pipeline=true" >> $GITHUB_ENV + else + echo "Skip pipeline. In order to run the pipeline commit title must contain: [api]" + echo "run_pipeline=false" >> $GITHUB_ENV + fi + - name: configure gcp + if: ${{ env.run_pipeline == 'true' || startsWith(github.ref, 'refs/tags') }} + id: creds-gcp + env: + GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} + GCP_PROJECT: ${{ secrets.GCP_PROJECT }} + run: | + ./tools/ci/gcloud.sh + google_apikey=$(gcloud auth print-access-token) + echo "::add-mask::$google_apikey" + echo "google_apikey=$google_apikey" >> "$GITHUB_OUTPUT" + - name: configure aws + if: ${{ env.run_pipeline == 'true' || startsWith(github.ref, 'refs/tags') }} + id: creds-aws + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ secrets.AWS_REGION }} + role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }} + role-external-id: ${{ secrets.AWS_ROLE_EXTERNAL_ID }} + role-skip-session-tagging: true + output-credentials: true + - name: Acceptance tests (modules) + if: ${{ env.run_pipeline == 'true' || startsWith(github.ref, 'refs/tags') }} + env: + GCP_PROJECT: ${{ secrets.GCP_PROJECT }} + GOOGLE_APIKEY: ${{ steps.creds-gcp.outputs.google_apikey }} + AWS_REGION: ${{ secrets.AWS_REGION }} + AWS_ACCESS_KEY_ID: ${{ steps.creds-aws.outputs.aws-access-key-id }} + AWS_SECRET_ACCESS_KEY: ${{ steps.creds-aws.outputs.aws-secret-access-key }} + AWS_SESSION_TOKEN: ${{ steps.creds-aws.outputs.aws-session-token }} + OPENAI_APIKEY: ${{ secrets.OPENAI_APIKEY }} + OPENAI_ORGANIZATION: ${{ secrets.OPENAI_ORGANIZATION }} + COHERE_APIKEY: ${{ secrets.COHERE_APIKEY }} + ANTHROPIC_APIKEY: 
${{ secrets.ANTHROPIC_APIKEY }} + JINAAI_APIKEY: ${{ secrets.JINAAI_APIKEY }} + run: ./test/run.sh ${{ matrix.test }} + Acceptance-Tests: + name: acceptance-tests + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + test: [ + "--acceptance-only-fast", + "--acceptance-only-graphql", + "--acceptance-only-authz", + "--acceptance-only-replication", + "--acceptance-only-async-replication", + "--acceptance-only-replica-replication-fast", + "--acceptance-only-replica-replication-slow", + "--acceptance-go-client-only-fast", + "--acceptance-only-python", + ] + steps: + - uses: actions/checkout@v5 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + cache: true + - name: Login to Docker Hub + uses: docker/login-action@v3 + if: ${{ !github.event.pull_request.head.repo.fork && github.triggering_actor != 'dependabot[bot]' }} + with: + username: ${{secrets.DOCKER_USERNAME}} + password: ${{secrets.DOCKER_PASSWORD}} + - name: Acceptance tests + env: + WCS_DUMMY_CI_PW: ${{ secrets.WCS_DUMMY_CI_PW }} + WCS_DUMMY_CI_PW_2: ${{ secrets.WCS_DUMMY_CI_PW_2 }} + run: ./test/run.sh ${{ matrix.test }} + Acceptance-Tests-large: + name: acceptance-tests-large + runs-on: DB-ubuntu-24.04-4-cores + strategy: + fail-fast: false + matrix: + test: ["--acceptance-go-client-named-vectors-single-node", "--acceptance-go-client-named-vectors-cluster", "--acceptance-lsmkv"] + steps: + - uses: actions/checkout@v5 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + cache: true + - name: Login to Docker Hub + uses: docker/login-action@v3 + if: ${{ !github.event.pull_request.head.repo.fork && github.triggering_actor != 'dependabot[bot]' }} + with: + username: ${{secrets.DOCKER_USERNAME}} + password: ${{secrets.DOCKER_PASSWORD}} + - name: Determine retry max_attempts value + env: + MATRIX_TEST: ${{ matrix.test }} + run: | + if [[ "$MATRIX_TEST" == *"lsmkv"* ]]; then + echo "retry_max_attempts=2" >> $GITHUB_ENV + else + echo 
"retry_max_attempts=1" >> $GITHUB_ENV + fi + - name: Acceptance tests Large + uses: nick-fields/retry@v3 + with: + # 15 Minute is a large enough timeout for most of our tests + timeout_minutes: 15 + max_attempts: ${{ env.retry_max_attempts }} + command: ./test/run.sh ${{ matrix.test }} + on_retry_command: ./test/run.sh --cleanup + Codecov: + needs: [Unit-Tests, Integration-Tests] + name: codecov + runs-on: ubuntu-latest + if: ${{ (github.ref_type == 'branch') && (github.ref_name != 'main') }} + steps: + - uses: actions/checkout@v5 + - name: Download coverage artifacts integration + uses: actions/download-artifact@v5 + with: + name: coverage-report-unit + - name: Download coverage integration without vector package + uses: actions/download-artifact@v5 + with: + name: coverage-report-integration--integration-without-vector-package + path: coverage-integration-without-vector-package.txt + - name: Download coverage integration vector package only + uses: actions/download-artifact@v5 + with: + name: coverage-report-integration--integration-vector-package-only + path: coverage-integration-vector-package-only.txt + - name: Codecov + uses: codecov/codecov-action@v5 + with: + fail_ci_if_error: false + files: ./coverage-integration-without-vector-package.txt, ./coverage-integration-vector-package-only.txt, ./coverage-unit.txt + verbose: true + Compile-and-upload-binaries: + name: compile-and-upload-binaries + runs-on: DB-ubuntu-24.04-4-cores + steps: + - uses: actions/checkout@v5 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + cache: true + - name: Install GoReleaser + uses: goreleaser/goreleaser-action@v6 + with: + install-only: true + - name: goreleaser + run: | + GIT_REVISION=$(git rev-parse --short HEAD) GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD) BUILD_USER=ci BUILD_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ") goreleaser build --clean --snapshot + - name: Upload macos + uses: actions/upload-artifact@v4 + with: + name: binaries-macos-unsigned + 
path: dist/weaviate_darwin_all + - name: Upload windows + uses: actions/upload-artifact@v4 + with: + name: binaries-windows-amd64 + path: dist/weaviate_windows_amd64_v1 + - name: Upload windows + uses: actions/upload-artifact@v4 + with: + name: binaries-windows-arm64 + path: dist/weaviate_windows_arm64 + - name: Upload linux amd64 + uses: actions/upload-artifact@v4 + with: + name: binaries-linux-amd64 + path: dist/weaviate_linux_amd64_v1 + - name: Upload linux arm64 + uses: actions/upload-artifact@v4 + with: + name: binaries-linux-arm64 + path: dist/weaviate_linux_arm64 + + + Acceptance-Tests-windows: + name: acceptance-tests-windows + needs: Compile-and-upload-binaries + runs-on: windows-latest + env: + AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: true + PERSISTENCE_DATA_PATH: /tmp + QUERY_DEFAULTS_LIMIT: 20 + CLUSTER_HOSTNAME: node1 + RAFT_BOOTSTRAP_EXPECT: 1 + RAFT_JOIN: node1 + GRPC_PORT: 50052 + steps: + - uses: actions/checkout@v5 + - name: Download binaries + uses: actions/download-artifact@v5 + with: + name: binaries-windows-amd64 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + cache: true + - name: start weaviate + shell: bash + # Weaviate is started without a Vectorizer as running text2vec-contextionary on GH actions is difficult: + # - docker on GHA only supports windows container - which we currently are not build + # - building those containers without a windows machine is difficult to figure out + run: ./weaviate.exe --scheme http --port 8080 & + - name: run acceptance tests + shell: bash + run: go test -count 1 -race test/acceptance/actions/*.go # tests that don't need a Vectorizer + + Push-Docker: + if: ${{ !github.event.pull_request.head.repo.fork && github.triggering_actor != 'dependabot[bot]' && (startsWith(github.ref, 'refs/tags') || startsWith(github.head_ref, 'build') || github.head_ref == '') }} # no PRs from fork + needs: [Unit-Tests, Run-Swagger, Vulnerability-Scanning, SAST-Scanning, Integration-Tests] + name: 
push-docker + timeout-minutes: 30 + runs-on: DB-ubuntu-24.04-8-cores + steps: + - uses: actions/checkout@v5 + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{secrets.DOCKER_USERNAME}} + password: ${{secrets.DOCKER_PASSWORD}} + - name: Push container + id: push-container + run: ./ci/push_docker.sh + env: + PR_TITLE: "${{ github.event.pull_request.title }}" + - name: Generate Report + env: + PREVIEW_TAG: "${{ steps.push-container.outputs.PREVIEW_TAG }}" + PREVIEW_SEMVER_TAG: "${{ steps.push-container.outputs.PREVIEW_SEMVER_TAG }}" + run: ./ci/generate_docker_report.sh + Push-Docker-Fast: + if: ${{ !github.event.pull_request.head.repo.fork && github.triggering_actor != 'dependabot[bot]' && !startsWith(github.ref, 'refs/tags') && !startsWith(github.head_ref, 'build') && !(github.head_ref == '') }} # no PRs from fork + name: push-docker-fast + timeout-minutes: 30 + strategy: + matrix: + include: + - arch: amd64 + runner: ubuntu-latest + - arch: arm64 + runner: ubuntu-24.04-arm + runs-on: ${{ matrix.runner }} + outputs: + preview-tags-amd64: ${{ steps.set-outputs.outputs.preview-tags-amd64 }} + semver-tags-amd64: ${{ steps.set-outputs.outputs.semver-tags-amd64 }} + preview-tags-arm64: ${{ steps.set-outputs.outputs.preview-tags-arm64 }} + semver-tags-arm64: ${{ steps.set-outputs.outputs.semver-tags-arm64 }} + steps: + - uses: actions/checkout@v5 + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{secrets.DOCKER_USERNAME}} + password: ${{secrets.DOCKER_PASSWORD}} + - name: Push container + id: push-container + run: ./ci/push_docker.sh --${{ matrix.arch }}-only + env: + PR_TITLE: "${{ github.event.pull_request.title }}" + - name: Set architecture-specific outputs + id: set-outputs + run: | + # Unique output names per architecture + echo "preview-tags-${{ matrix.arch }}=${{ steps.push-container.outputs.PREVIEW_TAG }}" >> $GITHUB_OUTPUT + echo "semver-tags-${{ matrix.arch }}=${{ 
steps.push-container.outputs.PREVIEW_SEMVER_TAG }}" >> $GITHUB_OUTPUT + Generate-Docker-Report: + needs: [Push-Docker-Fast] + name: generate-docker-report + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - name: Combine tags + run: | + # Collect all architecture-specific outputs + ALL_PREVIEW_TAGS="${{ needs.Push-Docker-Fast.outputs.preview-tags-amd64 }}" + ALL_PREVIEW_TAGS+="\n${{ needs.Push-Docker-Fast.outputs.preview-tags-arm64 }}" + + ALL_SEMVER_TAGS="${{ needs.Push-Docker-Fast.outputs.semver-tags-amd64 }}" + ALL_SEMVER_TAGS+="\n${{ needs.Push-Docker-Fast.outputs.semver-tags-arm64 }}" + + # Write to environment variables + echo "PREVIEW_TAG<> $GITHUB_ENV + echo -e "$ALL_PREVIEW_TAGS" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + + echo "PREVIEW_SEMVER_TAG<> $GITHUB_ENV + echo -e "$ALL_SEMVER_TAGS" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + - run: ./ci/generate_docker_report.sh diff --git a/platform/dbops/binaries/weaviate-src/.github/workflows/release.yaml b/platform/dbops/binaries/weaviate-src/.github/workflows/release.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1d4fb97de3f9e73bbf763adc3a3c12ccf966c447 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/.github/workflows/release.yaml @@ -0,0 +1,32 @@ +name: Generate release assets + +on: + release: + types: [published] + +env: + CGO_ENABLED: 0 + +permissions: + contents: write + +jobs: + releases-matrix: + name: Release precompiled binaries + runs-on: ubuntu-latest + strategy: + matrix: + goos: [linux] + goarch: [amd64, arm64] + steps: + - uses: actions/checkout@v5 + - uses: wangyoucao577/go-release-action@v1.53 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + goos: ${{ matrix.goos }} + goarch: ${{ matrix.goarch }} + goversion: "1.24" + project_path: "./cmd/weaviate-server" + extra_files: LICENSE README.md + ldflags: -w -extldflags "-static" -X github.com/weaviate/weaviate/usecases/config.GitHash='"$GITHASH"' + sha256sum: true diff --git 
a/platform/dbops/binaries/weaviate-src/.github/workflows/vectorize-issues.yaml b/platform/dbops/binaries/weaviate-src/.github/workflows/vectorize-issues.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fe409777307a182728c40eabb1b71c4fad40c7b9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/.github/workflows/vectorize-issues.yaml @@ -0,0 +1,18 @@ +name: Vectorize Weaviate's GitHub Issues + +on: + schedule: + - cron: '0 0 * * *' # Run daily at midnight + workflow_dispatch: # Allow manual triggering + +jobs: + vectorize-weaviate-issues: + runs-on: ubuntu-latest + steps: + - name: Vectorize GitHub Issues + uses: weaviate/github-issues-to-weaviate@v1 + with: + weaviate_url: ${{ secrets.WEAVIATE_URL }} + weaviate_api_key: ${{ secrets.WEAVIATE_API_KEY }} + github_token: ${{ secrets.GITHUB_TOKEN }} + collection_name: GitHubIssuesWeaviate diff --git a/platform/dbops/binaries/weaviate-src/adapters/clients/client.go b/platform/dbops/binaries/weaviate-src/adapters/clients/client.go new file mode 100644 index 0000000000000000000000000000000000000000..2cfbe1af617be3c3c864e3ceb37f79a284167b9f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/clients/client.go @@ -0,0 +1,131 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package clients + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "time" +) + +type retryClient struct { + client *http.Client + *retryer +} + +func (c *retryClient) doWithCustomMarshaller(timeout time.Duration, + req *http.Request, data []byte, decode func([]byte) error, success func(code int) bool, numRetries int, +) (err error) { + ctx, cancel := context.WithTimeout(req.Context(), timeout) + defer cancel() + req = req.WithContext(ctx) + try := func(ctx context.Context) (b bool, e error) { + if data != nil { + req.Body = io.NopCloser(bytes.NewReader(data)) + } + res, err := c.client.Do(req) + if err != nil { + return false, fmt.Errorf("connect: %w", err) + } + defer res.Body.Close() + + respBody, err := io.ReadAll(res.Body) + if err != nil { + return shouldRetry(res.StatusCode), fmt.Errorf("read response: %w", err) + } + + if code := res.StatusCode; !success(code) { + return shouldRetry(code), fmt.Errorf("status code: %v, error: %s", code, respBody) + } + + if err := decode(respBody); err != nil { + return false, fmt.Errorf("unmarshal response: %w", err) + } + + return false, nil + } + return c.retry(ctx, numRetries, try) +} + +func (c *retryClient) do(timeout time.Duration, req *http.Request, body []byte, resp interface{}, success func(code int) bool) (code int, err error) { + ctx, cancel := context.WithTimeout(req.Context(), timeout) + defer cancel() + req = req.WithContext(ctx) + try := func(ctx context.Context) (bool, error) { + if body != nil { + req.Body = io.NopCloser(bytes.NewReader(body)) + } + res, err := c.client.Do(req) + if err != nil { + return false, fmt.Errorf("connect: %w", err) + } + defer res.Body.Close() + + if code = res.StatusCode; !success(code) { + b, _ := io.ReadAll(res.Body) + return shouldRetry(code), fmt.Errorf("status code: %v, error: %s", code, b) + } + if resp != nil { + if err := json.NewDecoder(res.Body).Decode(resp); err != nil { + return false, 
fmt.Errorf("decode response: %w", err) + } + } + return false, nil + } + return code, c.retry(ctx, 9, try) +} + +type retryer struct { + minBackOff time.Duration + maxBackOff time.Duration + timeoutUnit time.Duration +} + +func newRetryer() *retryer { + return &retryer{ + minBackOff: time.Millisecond * 250, + maxBackOff: time.Second * 30, + timeoutUnit: time.Second, // used by unit tests + } +} + +// n is the number of retries, work will always be called at least once. +func (r *retryer) retry(ctx context.Context, n int, work func(context.Context) (bool, error)) error { + delay := r.minBackOff + for { + keepTrying, err := work(ctx) + if !keepTrying || n < 1 || err == nil { + return err + } + + n-- + if delay = backOff(delay); delay > r.maxBackOff { + delay = r.maxBackOff + } + timer := time.NewTimer(delay) + select { + case <-ctx.Done(): + timer.Stop() + return fmt.Errorf("%w: %w", err, ctx.Err()) + case <-timer.C: + } + timer.Stop() + } +} + +func successCode(code int) bool { + return code >= http.StatusOK && code <= http.StatusIMUsed +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/clients/cluster_backups.go b/platform/dbops/binaries/weaviate-src/adapters/clients/cluster_backups.go new file mode 100644 index 0000000000000000000000000000000000000000..269990fd1857d2443cc9805d7b1e5726a0fac901 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/clients/cluster_backups.go @@ -0,0 +1,178 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package clients + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/weaviate/weaviate/usecases/backup" +) + +const ( + pathCanCommit = "/backups/can-commit" + pathCommit = "/backups/commit" + pathStatus = "/backups/status" + pathAbort = "/backups/abort" +) + +type ClusterBackups struct { + client *http.Client +} + +func NewClusterBackups(client *http.Client) *ClusterBackups { + return &ClusterBackups{client: client} +} + +func (c *ClusterBackups) CanCommit(ctx context.Context, + host string, req *backup.Request, +) (*backup.CanCommitResponse, error) { + url := url.URL{Scheme: "http", Host: host, Path: pathCanCommit} + + b, err := json.Marshal(req) + if err != nil { + return nil, fmt.Errorf("marshal can-commit request: %w", err) + } + + httpReq, err := http.NewRequest(http.MethodPost, url.String(), bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("new can-commit request: %w", err) + } + + respBody, statusCode, err := c.do(httpReq) + if err != nil { + return nil, fmt.Errorf("can-commit request: %w", err) + } + + if statusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected status code %d (%s)", + statusCode, respBody) + } + + var resp backup.CanCommitResponse + err = json.Unmarshal(respBody, &resp) + if err != nil { + return nil, fmt.Errorf("unmarshal can-commit response: %w", err) + } + + return &resp, nil +} + +func (c *ClusterBackups) Commit(ctx context.Context, + host string, req *backup.StatusRequest, +) error { + url := url.URL{Scheme: "http", Host: host, Path: pathCommit} + + b, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal commit request: %w", err) + } + + httpReq, err := http.NewRequest(http.MethodPost, url.String(), bytes.NewReader(b)) + if err != nil { + return fmt.Errorf("new commit request: %w", err) + } + + respBody, statusCode, err := c.do(httpReq) + if err != nil { + return fmt.Errorf("commit request: %w", 
err) + } + + if statusCode != http.StatusCreated { + return fmt.Errorf("unexpected status code %d (%s)", + statusCode, respBody) + } + + return nil +} + +func (c *ClusterBackups) Status(ctx context.Context, + host string, req *backup.StatusRequest, +) (*backup.StatusResponse, error) { + url := url.URL{Scheme: "http", Host: host, Path: pathStatus} + + b, err := json.Marshal(req) + if err != nil { + return nil, fmt.Errorf("marshal status request: %w", err) + } + + httpReq, err := http.NewRequest(http.MethodPost, url.String(), bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("new status request: %w", err) + } + + respBody, statusCode, err := c.do(httpReq) + if err != nil { + return nil, fmt.Errorf("status request: %w", err) + } + + if statusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected status code %d (%s)", + statusCode, respBody) + } + + var resp backup.StatusResponse + err = json.Unmarshal(respBody, &resp) + if err != nil { + return nil, fmt.Errorf("unmarshal status response: %w", err) + } + + return &resp, nil +} + +func (c *ClusterBackups) Abort(_ context.Context, + host string, req *backup.AbortRequest, +) error { + url := url.URL{Scheme: "http", Host: host, Path: pathAbort} + + b, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal abort request: %w", err) + } + + httpReq, err := http.NewRequest(http.MethodPost, url.String(), bytes.NewReader(b)) + if err != nil { + return fmt.Errorf("new abort request: %w", err) + } + + respBody, statusCode, err := c.do(httpReq) + if err != nil { + return fmt.Errorf("abort request: %w", err) + } + + if statusCode != http.StatusNoContent { + return fmt.Errorf("unexpected status code %d (%s)", + statusCode, respBody) + } + + return nil +} + +func (c *ClusterBackups) do(req *http.Request) (body []byte, statusCode int, err error) { + httpResp, err := c.client.Do(req) + if err != nil { + return nil, 0, fmt.Errorf("make request: %w", err) + } + + body, err = io.ReadAll(httpResp.Body) + 
if err != nil { + return nil, httpResp.StatusCode, fmt.Errorf("read response: %w", err) + } + defer httpResp.Body.Close() + + return body, httpResp.StatusCode, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/clients/cluster_classifications.go b/platform/dbops/binaries/weaviate-src/adapters/clients/cluster_classifications.go new file mode 100644 index 0000000000000000000000000000000000000000..5d947f11b1f8361754d0d222cd665efe26655fcb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/clients/cluster_classifications.go @@ -0,0 +1,134 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package clients + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "net/url" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/usecases/cluster" +) + +type txPayload struct { + Type cluster.TransactionType `json:"type"` + ID string `json:"id"` + Payload interface{} `json:"payload"` + DeadlineMilli int64 `json:"deadlineMilli"` +} + +type ClusterClassifications struct { + client *http.Client +} + +func NewClusterClassifications(httpClient *http.Client) *ClusterClassifications { + return &ClusterClassifications{client: httpClient} +} + +func (c *ClusterClassifications) OpenTransaction(ctx context.Context, host string, + tx *cluster.Transaction, +) error { + path := "/classifications/transactions/" + method := http.MethodPost + url := url.URL{Scheme: "http", Host: host, Path: path} + + pl := txPayload{ + Type: tx.Type, + ID: tx.ID, + Payload: tx.Payload, + } + + jsonBytes, err := json.Marshal(pl) + if err != nil { + return errors.Wrap(err, "marshal transaction payload") + } + + req, err := http.NewRequestWithContext(ctx, method, url.String(), + bytes.NewReader(jsonBytes)) + if err != nil { + 
return errors.Wrap(err, "open http request") + } + + req.Header.Set("content-type", "application/json") + + res, err := c.client.Do(req) + if err != nil { + return errors.Wrap(err, "send http request") + } + + defer res.Body.Close() + if res.StatusCode != http.StatusCreated { + if res.StatusCode == http.StatusConflict { + return cluster.ErrConcurrentTransaction + } + + body, _ := io.ReadAll(res.Body) + return errors.Errorf("unexpected status code %d (%s)", res.StatusCode, + body) + } + + return nil +} + +func (c *ClusterClassifications) AbortTransaction(ctx context.Context, host string, + tx *cluster.Transaction, +) error { + path := "/classifications/transactions/" + tx.ID + method := http.MethodDelete + url := url.URL{Scheme: "http", Host: host, Path: path} + + req, err := http.NewRequestWithContext(ctx, method, url.String(), nil) + if err != nil { + return errors.Wrap(err, "open http request") + } + + res, err := c.client.Do(req) + if err != nil { + return errors.Wrap(err, "send http request") + } + + defer res.Body.Close() + if res.StatusCode != http.StatusNoContent { + return errors.Errorf("unexpected status code %d", res.StatusCode) + } + + return nil +} + +func (c *ClusterClassifications) CommitTransaction(ctx context.Context, host string, + tx *cluster.Transaction, +) error { + path := "/classifications/transactions/" + tx.ID + "/commit" + method := http.MethodPut + url := url.URL{Scheme: "http", Host: host, Path: path} + + req, err := http.NewRequestWithContext(ctx, method, url.String(), nil) + if err != nil { + return errors.Wrap(err, "open http request") + } + + res, err := c.client.Do(req) + if err != nil { + return errors.Wrap(err, "send http request") + } + + defer res.Body.Close() + if res.StatusCode != http.StatusNoContent { + return errors.Errorf("unexpected status code %d", res.StatusCode) + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/clients/cluster_node.go 
b/platform/dbops/binaries/weaviate-src/adapters/clients/cluster_node.go new file mode 100644 index 0000000000000000000000000000000000000000..5e594ba7474d80f118e35e39cd03f36bfc18d400 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/clients/cluster_node.go @@ -0,0 +1,99 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package clients + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/url" + "path" + + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" +) + +type RemoteNode struct { + client *http.Client +} + +func NewRemoteNode(httpClient *http.Client) *RemoteNode { + return &RemoteNode{client: httpClient} +} + +func (c *RemoteNode) GetNodeStatus(ctx context.Context, hostName, className, shardName, output string) (*models.NodeStatus, error) { + p := "/nodes/status" + if className != "" { + p = path.Join(p, className) + } + method := http.MethodGet + params := url.Values{"output": []string{output}} + if shardName != "" { + params.Add("shard", shardName) + } + url := url.URL{Scheme: "http", Host: hostName, Path: p, RawQuery: params.Encode()} + + req, err := http.NewRequestWithContext(ctx, method, url.String(), nil) + if err != nil { + return nil, enterrors.NewErrOpenHttpRequest(err) + } + + res, err := c.client.Do(req) + if err != nil { + return nil, enterrors.NewErrSendHttpRequest(err) + } + + defer res.Body.Close() + body, _ := io.ReadAll(res.Body) + if res.StatusCode != http.StatusOK { + return nil, enterrors.NewErrUnexpectedStatusCode(res.StatusCode, body) + } + + var nodeStatus models.NodeStatus + err = json.Unmarshal(body, &nodeStatus) + if err != nil { + return nil, enterrors.NewErrUnmarshalBody(err) + } + + return &nodeStatus, nil +} 
+ +func (c *RemoteNode) GetStatistics(ctx context.Context, hostName string) (*models.Statistics, error) { + p := "/nodes/statistics" + method := http.MethodGet + url := url.URL{Scheme: "http", Host: hostName, Path: p} + + req, err := http.NewRequestWithContext(ctx, method, url.String(), nil) + if err != nil { + return nil, enterrors.NewErrOpenHttpRequest(err) + } + + res, err := c.client.Do(req) + if err != nil { + return nil, enterrors.NewErrSendHttpRequest(err) + } + + defer res.Body.Close() + body, _ := io.ReadAll(res.Body) + if res.StatusCode != http.StatusOK { + return nil, enterrors.NewErrUnexpectedStatusCode(res.StatusCode, body) + } + + var statistics models.Statistics + err = json.Unmarshal(body, &statistics) + if err != nil { + return nil, enterrors.NewErrUnmarshalBody(err) + } + + return &statistics, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/clients/db_users.go b/platform/dbops/binaries/weaviate-src/adapters/clients/db_users.go new file mode 100644 index 0000000000000000000000000000000000000000..755660787fa19607bd064ebb29548d72c40e34a8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/clients/db_users.go @@ -0,0 +1,78 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package clients + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "time" + + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" +) + +type RemoteUser struct { + client *http.Client + nodeResolver nodeResolver +} + +type nodeResolver interface { + NodeHostname(nodeName string) (string, bool) +} + +func NewRemoteUser(httpClient *http.Client, nodeResolver nodeResolver) *RemoteUser { + return &RemoteUser{client: httpClient, nodeResolver: nodeResolver} +} + +func (c *RemoteUser) GetAndUpdateLastUsedTime(ctx context.Context, nodeName string, users map[string]time.Time, returnStatus bool) (*apikey.UserStatusResponse, error) { + p := "/cluster/users/db/lastUsedTime" + method := http.MethodPost + hostName, found := c.nodeResolver.NodeHostname(nodeName) + if !found { + return nil, fmt.Errorf("unable to resolve hostname for %s", nodeName) + } + url := url.URL{Scheme: "http", Host: hostName, Path: p} + + jsonBody, err := json.Marshal(apikey.UserStatusRequest{Users: users, ReturnStatus: returnStatus}) + if err != nil { + return nil, err + } + + req, err := http.NewRequestWithContext(ctx, method, url.String(), bytes.NewBuffer(jsonBody)) + if err != nil { + return nil, enterrors.NewErrOpenHttpRequest(err) + } + + res, err := c.client.Do(req) + if err != nil { + return nil, enterrors.NewErrSendHttpRequest(err) + } + + defer res.Body.Close() + body, _ := io.ReadAll(res.Body) + if res.StatusCode != http.StatusOK { + return nil, enterrors.NewErrUnexpectedStatusCode(res.StatusCode, body) + } + + var userStatus apikey.UserStatusResponse + err = json.Unmarshal(body, &userStatus) + if err != nil { + return nil, enterrors.NewErrUnmarshalBody(err) + } + + return &userStatus, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/clients/remote_index.go 
b/platform/dbops/binaries/weaviate-src/adapters/clients/remote_index.go new file mode 100644 index 0000000000000000000000000000000000000000..6c87dd6ca2253771fb1f2715075437e71c35325a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/clients/remote_index.go @@ -0,0 +1,1008 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package clients + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/handlers/rest/clusterapi" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/file" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/replica" +) + +const ( + asyncReplicationTargetNodeEndpointPattern = "/indices/%s/shards/%s/async-replication-target-node" +) + +func AsyncReplicationTargetNodeEndpoint(indexName, shardName string) string { + return fmt.Sprintf(asyncReplicationTargetNodeEndpointPattern, indexName, shardName) +} + +type RemoteIndex struct { + retryClient +} + +func NewRemoteIndex(httpClient *http.Client) *RemoteIndex { + return &RemoteIndex{retryClient: retryClient{ + client: httpClient, + retryer: newRetryer(), + }} +} + +func (c *RemoteIndex) PutObject(ctx context.Context, host, index, + shard string, obj 
*storobj.Object, schemaVersion uint64, +) error { + value := []string{strconv.FormatUint(schemaVersion, 10)} + + body, err := clusterapi.IndicesPayloads.SingleObject.Marshal(obj) + if err != nil { + return fmt.Errorf("encode request: %w", err) + } + + req, err := setupRequest(ctx, http.MethodPost, host, + fmt.Sprintf("/indices/%s/shards/%s/objects", index, shard), + url.Values{replica.SchemaVersionKey: value}.Encode(), + bytes.NewReader(body)) + if err != nil { + return fmt.Errorf("create http request: %w", err) + } + + clusterapi.IndicesPayloads.SingleObject.SetContentTypeHeaderReq(req) + _, err = c.do(c.timeoutUnit*60, req, body, nil, successCode) + return err +} + +func duplicateErr(in error, count int) []error { + out := make([]error, count) + for i := range out { + out[i] = in + } + return out +} + +func (c *RemoteIndex) BatchPutObjects(ctx context.Context, host, index, + shard string, objs []*storobj.Object, _ *additional.ReplicationProperties, schemaVersion uint64, +) []error { + value := []string{strconv.FormatUint(schemaVersion, 10)} + body, err := clusterapi.IndicesPayloads.ObjectList.Marshal(objs) + if err != nil { + return duplicateErr(fmt.Errorf("encode request: %w", err), len(objs)) + } + + req, err := setupRequest(ctx, http.MethodPost, host, + fmt.Sprintf("/indices/%s/shards/%s/objects", index, shard), + url.Values{replica.SchemaVersionKey: value}.Encode(), + bytes.NewReader(body)) + if err != nil { + return duplicateErr(fmt.Errorf("create http request: %w", err), len(objs)) + } + clusterapi.IndicesPayloads.ObjectList.SetContentTypeHeaderReq(req) + + var resp []error + decode := func(data []byte) error { + resp = clusterapi.IndicesPayloads.ErrorList.Unmarshal(data) + return nil + } + + if err = c.doWithCustomMarshaller(c.timeoutUnit*60, req, body, decode, successCode, 9); err != nil { + return duplicateErr(err, len(objs)) + } + + return resp +} + +func (c *RemoteIndex) BatchAddReferences(ctx context.Context, hostName, indexName, + shardName string, 
refs objects.BatchReferences, schemaVersion uint64, +) []error { + value := []string{strconv.FormatUint(schemaVersion, 10)} + marshalled, err := clusterapi.IndicesPayloads.ReferenceList.Marshal(refs) + if err != nil { + return duplicateErr(errors.Wrap(err, "marshal payload"), len(refs)) + } + + req, err := setupRequest(ctx, http.MethodPost, hostName, + fmt.Sprintf("/indices/%s/shards/%s/references", indexName, shardName), + url.Values{replica.SchemaVersionKey: value}.Encode(), + bytes.NewReader(marshalled)) + if err != nil { + return duplicateErr(errors.Wrap(err, "open http request"), len(refs)) + } + + clusterapi.IndicesPayloads.ReferenceList.SetContentTypeHeaderReq(req) + + res, err := c.client.Do(req) + if err != nil { + return duplicateErr(errors.Wrap(err, "send http request"), len(refs)) + } + + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return duplicateErr(errors.Errorf("unexpected status code %d (%s)", + res.StatusCode, body), len(refs)) + } + + if ct, ok := clusterapi.IndicesPayloads.ErrorList. 
+ CheckContentTypeHeader(res); !ok { + return duplicateErr(errors.Errorf("unexpected content type: %s", + ct), len(refs)) + } + + resBytes, err := io.ReadAll(res.Body) + if err != nil { + return duplicateErr(errors.Wrap(err, "ready body"), len(refs)) + } + + return clusterapi.IndicesPayloads.ErrorList.Unmarshal(resBytes) +} + +func (c *RemoteIndex) GetObject(ctx context.Context, hostName, indexName, + shardName string, id strfmt.UUID, selectProps search.SelectProperties, + additional additional.Properties, +) (*storobj.Object, error) { + selectPropsBytes, err := json.Marshal(selectProps) + if err != nil { + return nil, errors.Wrap(err, "marshal selectProps props") + } + + additionalBytes, err := json.Marshal(additional) + if err != nil { + return nil, errors.Wrap(err, "marshal additional props") + } + + selectPropsEncoded := base64.StdEncoding.EncodeToString(selectPropsBytes) + additionalEncoded := base64.StdEncoding.EncodeToString(additionalBytes) + + req, err := setupRequest(ctx, http.MethodGet, hostName, + fmt.Sprintf("/indices/%s/shards/%s/objects/%s", indexName, shardName, id), + url.Values{ + "additional": []string{additionalEncoded}, + "selectProperties": []string{selectPropsEncoded}, + }.Encode(), + nil) + if err != nil { + return nil, errors.Wrap(err, "open http request") + } + + res, err := c.client.Do(req) + if err != nil { + return nil, errors.Wrap(err, "send http request") + } + + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + // this is a legitimate case - the requested ID doesn't exist, don't try + // to unmarshal anything + return nil, nil + } + + if res.StatusCode != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return nil, errors.Errorf("unexpected status code %d (%s)", res.StatusCode, + body) + } + + ct, ok := clusterapi.IndicesPayloads.SingleObject.CheckContentTypeHeader(res) + if !ok { + return nil, errors.Errorf("unknown content type %s", ct) + } + + objBytes, err := io.ReadAll(res.Body) + if err != nil { + return 
nil, errors.Wrap(err, "read body") + } + + obj, err := clusterapi.IndicesPayloads.SingleObject.Unmarshal(objBytes) + if err != nil { + return nil, errors.Wrap(err, "unmarshal body") + } + + return obj, nil +} + +func (c *RemoteIndex) Exists(ctx context.Context, hostName, indexName, + shardName string, id strfmt.UUID, +) (bool, error) { + req, err := setupRequest(ctx, http.MethodGet, hostName, + fmt.Sprintf("/indices/%s/shards/%s/objects/%s", indexName, shardName, id), + url.Values{"check_exists": []string{"true"}}.Encode(), + nil) + if err != nil { + return false, fmt.Errorf("create http request: %w", err) + } + ok := func(code int) bool { return code == http.StatusNotFound || code == http.StatusNoContent } + code, err := c.do(c.timeoutUnit*20, req, nil, nil, ok) + return code != http.StatusNotFound, err +} + +func (c *RemoteIndex) DeleteObject(ctx context.Context, hostName, indexName, + shardName string, id strfmt.UUID, deletionTime time.Time, schemaVersion uint64, +) error { + value := []string{strconv.FormatUint(schemaVersion, 10)} + req, err := setupRequest(ctx, http.MethodDelete, hostName, + fmt.Sprintf("/indices/%s/shards/%s/objects/%s/%d", indexName, shardName, id, deletionTime.UnixMilli()), + url.Values{replica.SchemaVersionKey: value}.Encode(), + nil) + if err != nil { + return errors.Wrap(err, "open http request") + } + + res, err := c.client.Do(req) + if err != nil { + return errors.Wrap(err, "send http request") + } + + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + // this is a legitimate case - the requested ID doesn't exist, don't try + // to unmarshal anything, we can assume it was already deleted + return nil + } + + if res.StatusCode != http.StatusNoContent { + body, _ := io.ReadAll(res.Body) + return errors.Errorf("unexpected status code %d (%s)", res.StatusCode, + body) + } + + return nil +} + +func (c *RemoteIndex) MergeObject(ctx context.Context, hostName, indexName, + shardName string, mergeDoc objects.MergeDocument, 
schemaVersion uint64, +) error { + value := []string{strconv.FormatUint(schemaVersion, 10)} + marshalled, err := clusterapi.IndicesPayloads.MergeDoc.Marshal(mergeDoc) + if err != nil { + return errors.Wrap(err, "marshal payload") + } + req, err := setupRequest(ctx, http.MethodPatch, hostName, + fmt.Sprintf("/indices/%s/shards/%s/objects/%s", indexName, shardName, + mergeDoc.ID), + url.Values{replica.SchemaVersionKey: value}.Encode(), + bytes.NewReader(marshalled)) + if err != nil { + return errors.Wrap(err, "open http request") + } + + clusterapi.IndicesPayloads.MergeDoc.SetContentTypeHeaderReq(req) + res, err := c.client.Do(req) + if err != nil { + return errors.Wrap(err, "send http request") + } + + defer res.Body.Close() + if res.StatusCode != http.StatusNoContent { + body, _ := io.ReadAll(res.Body) + return errors.Errorf("unexpected status code %d (%s)", res.StatusCode, + body) + } + + return nil +} + +func (c *RemoteIndex) MultiGetObjects(ctx context.Context, hostName, indexName, + shardName string, ids []strfmt.UUID, +) ([]*storobj.Object, error) { + idsBytes, err := json.Marshal(ids) + if err != nil { + return nil, errors.Wrap(err, "marshal selectProps props") + } + idsEncoded := base64.StdEncoding.EncodeToString(idsBytes) + req, err := setupRequest(ctx, http.MethodGet, hostName, + fmt.Sprintf("/indices/%s/shards/%s/objects", indexName, shardName), + url.Values{"ids": []string{idsEncoded}}.Encode(), + nil) + if err != nil { + return nil, errors.Wrap(err, "open http request") + } + + res, err := c.client.Do(req) + if err != nil { + return nil, errors.Wrap(err, "send http request") + } + + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + // this is a legitimate case - the requested ID doesn't exist, don't try + // to unmarshal anything + return nil, nil + } + + if res.StatusCode != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return nil, errors.Errorf("unexpected status code %d (%s)", res.StatusCode, + body) + } + + ct, ok := 
clusterapi.IndicesPayloads.ObjectList.CheckContentTypeHeader(res) + if !ok { + return nil, errors.Errorf("unexpected content type: %s", ct) + } + + bodyBytes, err := io.ReadAll(res.Body) + if err != nil { + return nil, errors.Wrap(err, "read response body") + } + + objs, err := clusterapi.IndicesPayloads.ObjectList.Unmarshal(bodyBytes) + if err != nil { + return nil, errors.Wrap(err, "unmarshal objects") + } + + return objs, nil +} + +func (c *RemoteIndex) SearchShard(ctx context.Context, host, index, shard string, + vector []models.Vector, + targetVector []string, + distance float32, + limit int, + filters *filters.LocalFilter, + keywordRanking *searchparams.KeywordRanking, + sort []filters.Sort, + cursor *filters.Cursor, + groupBy *searchparams.GroupBy, + additional additional.Properties, + targetCombination *dto.TargetCombination, + properties []string, +) ([]*storobj.Object, []float32, error) { + // new request + body, err := clusterapi.IndicesPayloads.SearchParams. + Marshal(vector, targetVector, distance, limit, filters, keywordRanking, sort, cursor, groupBy, additional, targetCombination, properties) + if err != nil { + return nil, nil, fmt.Errorf("marshal request payload: %w", err) + } + req, err := setupRequest(ctx, http.MethodPost, host, + fmt.Sprintf("/indices/%s/shards/%s/objects/_search", index, shard), + "", bytes.NewReader(body)) + if err != nil { + return nil, nil, fmt.Errorf("create http request: %w", err) + } + clusterapi.IndicesPayloads.SearchParams.SetContentTypeHeaderReq(req) + + // send request + resp := &searchShardResp{} + err = c.doWithCustomMarshaller(c.timeoutUnit*20, req, body, resp.decode, successCode, 9) + return resp.Objects, resp.Distributions, err +} + +type searchShardResp struct { + Objects []*storobj.Object + Distributions []float32 +} + +func (r *searchShardResp) decode(data []byte) (err error) { + r.Objects, r.Distributions, err = clusterapi.IndicesPayloads.SearchResults.Unmarshal(data) + return +} + +type aggregateResp struct 
{ + Result *aggregation.Result +} + +func (r *aggregateResp) decode(data []byte) (err error) { + r.Result, err = clusterapi.IndicesPayloads.AggregationResult.Unmarshal(data) + return +} + +func (c *RemoteIndex) Aggregate(ctx context.Context, hostName, index, + shard string, params aggregation.Params, +) (*aggregation.Result, error) { + // create new request + body, err := clusterapi.IndicesPayloads.AggregationParams.Marshal(params) + if err != nil { + return nil, fmt.Errorf("marshal request payload: %w", err) + } + req, err := setupRequest(ctx, http.MethodPost, hostName, + fmt.Sprintf("/indices/%s/shards/%s/objects/_aggregations", index, shard), + "", bytes.NewReader(body)) + if err != nil { + return nil, fmt.Errorf("create http request: %w", err) + } + clusterapi.IndicesPayloads.AggregationParams.SetContentTypeHeaderReq(req) + + // send request + resp := &aggregateResp{} + err = c.doWithCustomMarshaller(c.timeoutUnit*20, req, body, resp.decode, successCode, 9) + return resp.Result, err +} + +func (c *RemoteIndex) FindUUIDs(ctx context.Context, hostName, indexName, + shardName string, filters *filters.LocalFilter, +) ([]strfmt.UUID, error) { + paramsBytes, err := clusterapi.IndicesPayloads.FindUUIDsParams.Marshal(filters) + if err != nil { + return nil, errors.Wrap(err, "marshal request payload") + } + req, err := setupRequest(ctx, http.MethodPost, hostName, + fmt.Sprintf("/indices/%s/shards/%s/objects/_find", indexName, shardName), + "", bytes.NewReader(paramsBytes)) + if err != nil { + return nil, errors.Wrap(err, "open http request") + } + + clusterapi.IndicesPayloads.FindUUIDsParams.SetContentTypeHeaderReq(req) + res, err := c.client.Do(req) + if err != nil { + return nil, errors.Wrap(err, "send http request") + } + + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return nil, errors.Errorf("unexpected status code %d (%s)", res.StatusCode, + body) + } + + resBytes, err := io.ReadAll(res.Body) + if err != nil { + 
return nil, errors.Wrap(err, "read body") + } + + ct, ok := clusterapi.IndicesPayloads.FindUUIDsResults.CheckContentTypeHeader(res) + if !ok { + return nil, errors.Errorf("unexpected content type: %s", ct) + } + + uuids, err := clusterapi.IndicesPayloads.FindUUIDsResults.Unmarshal(resBytes) + if err != nil { + return nil, errors.Wrap(err, "unmarshal body") + } + return uuids, nil +} + +func (c *RemoteIndex) DeleteObjectBatch(ctx context.Context, hostName, indexName, shardName string, + uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64, +) objects.BatchSimpleObjects { + value := []string{strconv.FormatUint(schemaVersion, 10)} + marshalled, err := clusterapi.IndicesPayloads.BatchDeleteParams.Marshal(uuids, deletionTime, dryRun) + if err != nil { + err := errors.Wrap(err, "marshal payload") + return objects.BatchSimpleObjects{objects.BatchSimpleObject{Err: err}} + } + req, err := setupRequest(ctx, http.MethodDelete, hostName, + fmt.Sprintf("/indices/%s/shards/%s/objects", indexName, shardName), + url.Values{replica.SchemaVersionKey: value}.Encode(), + bytes.NewReader(marshalled)) + if err != nil { + err := errors.Wrap(err, "open http request") + return objects.BatchSimpleObjects{objects.BatchSimpleObject{Err: err}} + } + + clusterapi.IndicesPayloads.BatchDeleteParams.SetContentTypeHeaderReq(req) + + res, err := c.client.Do(req) + if err != nil { + err := errors.Wrap(err, "send http request") + return objects.BatchSimpleObjects{objects.BatchSimpleObject{Err: err}} + } + + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + body, _ := io.ReadAll(res.Body) + err := errors.Errorf("unexpected status code %d (%s)", res.StatusCode, body) + return objects.BatchSimpleObjects{objects.BatchSimpleObject{Err: err}} + } + + if ct, ok := clusterapi.IndicesPayloads.BatchDeleteResults. 
+ CheckContentTypeHeader(res); !ok { + err := errors.Errorf("unexpected content type: %s", ct) + return objects.BatchSimpleObjects{objects.BatchSimpleObject{Err: err}} + } + + resBytes, err := io.ReadAll(res.Body) + if err != nil { + err := errors.Wrap(err, "ready body") + return objects.BatchSimpleObjects{objects.BatchSimpleObject{Err: err}} + } + + batchDeleteResults, err := clusterapi.IndicesPayloads.BatchDeleteResults.Unmarshal(resBytes) + if err != nil { + err := errors.Wrap(err, "unmarshal body") + return objects.BatchSimpleObjects{objects.BatchSimpleObject{Err: err}} + } + + return batchDeleteResults +} + +func (c *RemoteIndex) GetShardQueueSize(ctx context.Context, + hostName, indexName, shardName string, +) (int64, error) { + req, err := setupRequest(ctx, http.MethodGet, hostName, + fmt.Sprintf("/indices/%s/shards/%s/queuesize", indexName, shardName), + "", nil) + if err != nil { + return 0, errors.Wrap(err, "open http request") + } + var size int64 + clusterapi.IndicesPayloads.GetShardQueueSizeParams.SetContentTypeHeaderReq(req) + try := func(ctx context.Context) (bool, error) { + res, err := c.client.Do(req) + if err != nil { + return ctx.Err() == nil, fmt.Errorf("connect: %w", err) + } + defer res.Body.Close() + + if code := res.StatusCode; code != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return shouldRetry(code), fmt.Errorf("status code: %v body: (%s)", code, body) + } + resBytes, err := io.ReadAll(res.Body) + if err != nil { + return false, errors.Wrap(err, "read body") + } + + ct, ok := clusterapi.IndicesPayloads.GetShardQueueSizeResults.CheckContentTypeHeader(res) + if !ok { + return false, errors.Errorf("unexpected content type: %s", ct) + } + + size, err = clusterapi.IndicesPayloads.GetShardQueueSizeResults.Unmarshal(resBytes) + if err != nil { + return false, errors.Wrap(err, "unmarshal body") + } + return false, nil + } + return size, c.retry(ctx, 9, try) +} + +func (c *RemoteIndex) GetShardStatus(ctx context.Context, + hostName, 
indexName, shardName string, +) (string, error) { + req, err := setupRequest(ctx, http.MethodGet, hostName, + fmt.Sprintf("/indices/%s/shards/%s/status", indexName, shardName), + "", nil) + if err != nil { + return "", errors.Wrap(err, "open http request") + } + var status string + clusterapi.IndicesPayloads.GetShardStatusParams.SetContentTypeHeaderReq(req) + try := func(ctx context.Context) (bool, error) { + res, err := c.client.Do(req) + if err != nil { + return ctx.Err() == nil, fmt.Errorf("connect: %w", err) + } + defer res.Body.Close() + + if code := res.StatusCode; code != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return shouldRetry(code), fmt.Errorf("status code: %v body: (%s)", code, body) + } + resBytes, err := io.ReadAll(res.Body) + if err != nil { + return false, errors.Wrap(err, "read body") + } + + ct, ok := clusterapi.IndicesPayloads.GetShardStatusResults.CheckContentTypeHeader(res) + if !ok { + return false, errors.Errorf("unexpected content type: %s", ct) + } + + status, err = clusterapi.IndicesPayloads.GetShardStatusResults.Unmarshal(resBytes) + if err != nil { + return false, errors.Wrap(err, "unmarshal body") + } + return false, nil + } + return status, c.retry(ctx, 9, try) +} + +func (c *RemoteIndex) UpdateShardStatus(ctx context.Context, hostName, indexName, shardName, + targetStatus string, schemaVersion uint64, +) error { + paramsBytes, err := clusterapi.IndicesPayloads.UpdateShardStatusParams.Marshal(targetStatus) + if err != nil { + return errors.Wrap(err, "marshal request payload") + } + value := []string{strconv.FormatUint(schemaVersion, 10)} + + try := func(ctx context.Context) (bool, error) { + req, err := setupRequest(ctx, http.MethodPost, hostName, + fmt.Sprintf("/indices/%s/shards/%s/status", indexName, shardName), + url.Values{replica.SchemaVersionKey: value}.Encode(), + bytes.NewReader(paramsBytes)) + if err != nil { + return false, fmt.Errorf("create http request: %w", err) + } + 
clusterapi.IndicesPayloads.UpdateShardStatusParams.SetContentTypeHeaderReq(req) + + res, err := c.client.Do(req) + if err != nil { + return ctx.Err() == nil, fmt.Errorf("connect: %w", err) + } + defer res.Body.Close() + + if code := res.StatusCode; code != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return shouldRetry(code), fmt.Errorf("status code: %v body: (%s)", code, body) + } + + return false, nil + } + + return c.retry(ctx, 9, try) +} + +func (c *RemoteIndex) PutFile(ctx context.Context, hostName, indexName, + shardName, fileName string, payload io.ReadSeekCloser, +) error { + defer payload.Close() + + try := func(ctx context.Context) (bool, error) { + req, err := setupRequest(ctx, http.MethodPost, hostName, + fmt.Sprintf("/indices/%s/shards/%s/files/%s", indexName, shardName, fileName), + "", payload) + if err != nil { + return false, fmt.Errorf("create http request: %w", err) + } + clusterapi.IndicesPayloads.ShardFiles.SetContentTypeHeaderReq(req) + res, err := c.client.Do(req) + if err != nil { + return ctx.Err() == nil, fmt.Errorf("connect: %w", err) + } + defer res.Body.Close() + + if code := res.StatusCode; code != http.StatusNoContent { + shouldRetry := shouldRetry(code) + if shouldRetry { + _, err := payload.Seek(0, 0) + shouldRetry = (err == nil) + } + body, _ := io.ReadAll(res.Body) + return shouldRetry, fmt.Errorf("status code: %v body: (%s)", code, body) + } + return false, nil + } + + return c.retry(ctx, 12, try) +} + +func (c *RemoteIndex) CreateShard(ctx context.Context, + hostName, indexName, shardName string, +) error { + req, err := setupRequest(ctx, http.MethodPost, hostName, + fmt.Sprintf("/indices/%s/shards/%s", indexName, shardName), + "", nil) + if err != nil { + return fmt.Errorf("create http request: %w", err) + } + try := func(ctx context.Context) (bool, error) { + res, err := c.client.Do(req) + if err != nil { + return ctx.Err() == nil, fmt.Errorf("connect: %w", err) + } + defer res.Body.Close() + + if code := 
res.StatusCode; code != http.StatusCreated { + body, _ := io.ReadAll(res.Body) + return shouldRetry(code), fmt.Errorf("status code: %v body: (%s)", code, body) + } + return false, nil + } + + return c.retry(ctx, 9, try) +} + +func (c *RemoteIndex) ReInitShard(ctx context.Context, + hostName, indexName, shardName string, +) error { + req, err := setupRequest(ctx, http.MethodPut, hostName, + fmt.Sprintf("/indices/%s/shards/%s:reinit", indexName, shardName), + "", nil) + if err != nil { + return fmt.Errorf("create http request: %w", err) + } + try := func(ctx context.Context) (bool, error) { + res, err := c.client.Do(req) + if err != nil { + return ctx.Err() == nil, fmt.Errorf("connect: %w", err) + } + defer res.Body.Close() + + if code := res.StatusCode; code != http.StatusNoContent { + body, _ := io.ReadAll(res.Body) + return shouldRetry(code), fmt.Errorf("status code: %v body: (%s)", code, body) + + } + return false, nil + } + + return c.retry(ctx, 9, try) +} + +// PauseFileActivity pauses the collection's shard replica background processes on the specified +// host. You should explicitly resume the background processes once you're done with the +// files. 
+func (c *RemoteIndex) PauseFileActivity(ctx context.Context, + hostName, indexName, shardName string, schemaVersion uint64, +) error { + value := []string{strconv.FormatUint(schemaVersion, 10)} + req, err := setupRequest(ctx, http.MethodPost, hostName, + fmt.Sprintf("/indices/%s/shards/%s/background:pause", indexName, shardName), + url.Values{replica.SchemaVersionKey: value}.Encode(), + nil, + ) + if err != nil { + return fmt.Errorf("create http request: %w", err) + } + + try := func(ctx context.Context) (bool, error) { + res, err := c.client.Do(req) + if err != nil { + return ctx.Err() == nil, fmt.Errorf("connect: %w", err) + } + defer res.Body.Close() + + if code := res.StatusCode; code != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return shouldRetry(code), fmt.Errorf("status code: %v body: (%s)", code, body) + } + return false, nil + } + return c.retry(ctx, 9, try) +} + +// ResumeFileActivity resumes the collection's shard replica background processes on the specified host +func (c *RemoteIndex) ResumeFileActivity(ctx context.Context, + hostName, indexName, shardName string, +) error { + req, err := setupRequest(ctx, http.MethodPost, hostName, + fmt.Sprintf("/indices/%s/shards/%s/background:resume", indexName, shardName), + "", nil) + if err != nil { + return fmt.Errorf("create http request: %w", err) + } + + try := func(ctx context.Context) (bool, error) { + res, err := c.client.Do(req) + if err != nil { + return ctx.Err() == nil, fmt.Errorf("connect: %w", err) + } + defer res.Body.Close() + + if code := res.StatusCode; code != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return shouldRetry(code), fmt.Errorf("status code: %v body: (%s)", code, body) + } + return false, nil + } + return c.retry(ctx, 9, try) +} + +// ListFiles returns a list of files that can be used to get the shard data at the time the pause +// was requested. The returned relative file paths are relative to the shard's root directory. +// indexName is the collection name. 
+func (c *RemoteIndex) ListFiles(ctx context.Context, + hostName, indexName, shardName string, +) ([]string, error) { + req, err := setupRequest(ctx, http.MethodPost, hostName, + fmt.Sprintf("/indices/%s/shards/%s/background:list", indexName, shardName), + "", nil) + if err != nil { + return []string{}, fmt.Errorf("create http request: %w", err) + } + + var relativeFilePaths []string + clusterapi.IndicesPayloads.ShardFilesResults.SetContentTypeHeaderReq(req) + try := func(ctx context.Context) (bool, error) { + res, err := c.client.Do(req) + if err != nil { + return ctx.Err() == nil, fmt.Errorf("connect: %w", err) + } + defer res.Body.Close() + + if code := res.StatusCode; code != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return shouldRetry(code), fmt.Errorf("status code: %v body: (%s)", code, body) + } + resBytes, err := io.ReadAll(res.Body) + if err != nil { + return false, errors.Wrap(err, "read body") + } + + relativeFilePaths, err = clusterapi.IndicesPayloads.ShardFilesResults.Unmarshal(resBytes) + if err != nil { + return false, errors.Wrap(err, "unmarshal body") + } + return false, nil + } + return relativeFilePaths, c.retry(ctx, 9, try) +} + +// GetFileMetadata returns file info to the file relative to the +// shard's root directory. 
+func (c *RemoteIndex) GetFileMetadata(ctx context.Context, hostName, indexName, + shardName, relativeFilePath string, +) (file.FileMetadata, error) { + req, err := setupRequest(ctx, http.MethodGet, hostName, + fmt.Sprintf("/indices/%s/shards/%s/files:metadata/%s", indexName, shardName, relativeFilePath), + "", nil) + if err != nil { + return file.FileMetadata{}, fmt.Errorf("create http request: %w", err) + } + + clusterapi.IndicesPayloads.ShardFiles.SetContentTypeHeaderReq(req) + + var md file.FileMetadata + + try := func(ctx context.Context) (bool, error) { + res, err := c.client.Do(req) + if err != nil { + return ctx.Err() == nil, fmt.Errorf("connect: %w", err) + } + + if res.StatusCode != http.StatusOK { + defer res.Body.Close() + body, _ := io.ReadAll(res.Body) + return shouldRetry(res.StatusCode), fmt.Errorf( + "unexpected status code %d (%s)", res.StatusCode, body) + } + + resBytes, err := io.ReadAll(res.Body) + if err != nil { + return false, errors.Wrap(err, "read body") + } + + md, err = clusterapi.IndicesPayloads.ShardFileMetadataResults.Unmarshal(resBytes) + if err != nil { + return false, errors.Wrap(err, "unmarshal body") + } + + return false, nil + } + return md, c.retry(ctx, 9, try) +} + +// GetFile caller must close the returned io.ReadCloser if no error is returned. +// indexName is the collection name. relativeFilePath is the path to the file relative to the +// shard's root directory. 
+func (c *RemoteIndex) GetFile(ctx context.Context, hostName, indexName, + shardName, relativeFilePath string, +) (io.ReadCloser, error) { + req, err := setupRequest(ctx, http.MethodGet, hostName, + fmt.Sprintf("/indices/%s/shards/%s/files/%s", indexName, shardName, relativeFilePath), + "", nil) + if err != nil { + return nil, fmt.Errorf("create http request: %w", err) + } + clusterapi.IndicesPayloads.ShardFiles.SetContentTypeHeaderReq(req) + var file io.ReadCloser + try := func(ctx context.Context) (bool, error) { + res, err := c.client.Do(req) + if err != nil { + return ctx.Err() == nil, fmt.Errorf("connect: %w", err) + } + + if res.StatusCode != http.StatusOK { + defer res.Body.Close() + body, _ := io.ReadAll(res.Body) + return shouldRetry(res.StatusCode), fmt.Errorf( + "unexpected status code %d (%s)", res.StatusCode, body) + } + + file = res.Body + return false, nil + } + return file, c.retry(ctx, 9, try) +} + +// AddAsyncReplicationTargetNode configures and starts async replication for the given +// host with the specified override. 
+func (c *RemoteIndex) AddAsyncReplicationTargetNode( + ctx context.Context, + hostName, indexName, shardName string, + targetNodeOverride additional.AsyncReplicationTargetNodeOverride, + schemaVersion uint64, +) error { + body, err := clusterapi.IndicesPayloads.AsyncReplicationTargetNode.Marshal(targetNodeOverride) + if err != nil { + return fmt.Errorf("marshal target node override: %w", err) + } + value := []string{strconv.FormatUint(schemaVersion, 10)} + req, err := setupRequest(ctx, http.MethodPost, hostName, + AsyncReplicationTargetNodeEndpoint(indexName, shardName), + url.Values{replica.SchemaVersionKey: value}.Encode(), + bytes.NewReader(body), + ) + if err != nil { + return fmt.Errorf("create http request: %w", err) + } + clusterapi.IndicesPayloads.AsyncReplicationTargetNode.SetContentTypeHeaderReq(req) + + try := func(ctx context.Context) (bool, error) { + res, err := c.client.Do(req) + if err != nil { + return ctx.Err() == nil, fmt.Errorf("connect: %w", err) + } + defer res.Body.Close() + + if code := res.StatusCode; code != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return shouldRetry(code), fmt.Errorf("status code: %v body: (%s)", code, body) + } + return false, nil + } + return c.retry(ctx, 9, try) +} + +// RemoveAsyncReplicationTargetNode removes the given target node override for async replication. 
+func (c *RemoteIndex) RemoveAsyncReplicationTargetNode( + ctx context.Context, + hostName, indexName, shardName string, + targetNodeOverride additional.AsyncReplicationTargetNodeOverride, +) error { + body, err := clusterapi.IndicesPayloads.AsyncReplicationTargetNode.Marshal(targetNodeOverride) + if err != nil { + return fmt.Errorf("marshal target node override: %w", err) + } + + req, err := setupRequest(ctx, http.MethodDelete, hostName, + AsyncReplicationTargetNodeEndpoint(indexName, shardName), + "", bytes.NewReader(body)) + if err != nil { + return fmt.Errorf("create http request: %w", err) + } + clusterapi.IndicesPayloads.AsyncReplicationTargetNode.SetContentTypeHeaderReq(req) + + try := func(ctx context.Context) (bool, error) { + res, err := c.client.Do(req) + if err != nil { + return ctx.Err() == nil, fmt.Errorf("connect: %w", err) + } + defer res.Body.Close() + + if code := res.StatusCode; code != http.StatusNoContent { + body, _ := io.ReadAll(res.Body) + return shouldRetry(code), fmt.Errorf("status code: %v body: (%s)", code, body) + } + return false, nil + } + return c.retry(ctx, 9, try) +} + +// setupRequest is a simple helper to create a new http request with the given method, host, path, +// query, and body. Note that you can leave the query empty if you don't need it and the body can +// be nil. This does not send the request, just creates the request object. 
+func setupRequest( + ctx context.Context, + method, host, path, query string, + body io.Reader, +) (*http.Request, error) { + url := url.URL{Scheme: "http", Host: host, Path: path, RawQuery: query} + req, err := http.NewRequestWithContext(ctx, method, url.String(), body) + if err != nil { + return nil, fmt.Errorf("create http request: %w", err) + } + return req, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/clients/remote_index_test.go b/platform/dbops/binaries/weaviate-src/adapters/clients/remote_index_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f6e8b63f9b3cdb1c2b5df7c278ea12b641521af4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/clients/remote_index_test.go @@ -0,0 +1,372 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// _ _ +// +// __ _____ __ ___ ___ __ _| |_ ___ +// +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2022 SeMI Technologies B.V. All rights reserved. 
//
// CONTACT: hello@semi.technology
package clients

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"

	"github.com/weaviate/weaviate/entities/additional"

	"github.com/stretchr/testify/assert"
	"github.com/weaviate/weaviate/adapters/handlers/rest/clusterapi"
)

// TestRemoteIndexReInitShardIn exercises ReInitShard: a connection error on an
// empty host, then success after the fake server fails twice with retryable
// statuses (500, 429) before answering 204.
func TestRemoteIndexReInitShardIn(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	path := "/indices/C1/shards/S1:reinit"
	fs := newFakeRemoteIndexServer(t, http.MethodPut, path)
	ts := fs.server(t)
	defer ts.Close()
	client := newRemoteIndex(ts.Client())
	t.Run("ConnectionError", func(t *testing.T) {
		err := client.ReInitShard(ctx, "", "C1", "S1")
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), "connect")
	})
	// n counts requests so the handler can fail the first two attempts.
	n := 0
	fs.doAfter = func(w http.ResponseWriter, r *http.Request) {
		switch n {
		case 0:
			w.WriteHeader(http.StatusInternalServerError)
		case 1:
			w.WriteHeader(http.StatusTooManyRequests)
		default:
			w.WriteHeader(http.StatusNoContent)
		}
		n++
	}
	t.Run("Success", func(t *testing.T) {
		err := client.ReInitShard(ctx, fs.host, "C1", "S1")
		assert.Nil(t, err)
	})
}

// TestRemoteIndexCreateShard mirrors the ReInitShard test for CreateShard,
// with 201 Created as the terminal success status.
func TestRemoteIndexCreateShard(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	path := "/indices/C1/shards/S1"
	fs := newFakeRemoteIndexServer(t, http.MethodPost, path)
	ts := fs.server(t)
	defer ts.Close()
	client := newRemoteIndex(ts.Client())
	t.Run("ConnectionError", func(t *testing.T) {
		err := client.CreateShard(ctx, "", "C1", "S1")
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), "connect")
	})
	n := 0
	fs.doAfter = func(w http.ResponseWriter, r *http.Request) {
		switch n {
		case 0:
			w.WriteHeader(http.StatusInternalServerError)
		case 1:
			w.WriteHeader(http.StatusTooManyRequests)
		default:
			w.WriteHeader(http.StatusCreated)
		}
		n++
	}
	t.Run("Success", func(t *testing.T) {
		err := client.CreateShard(ctx, fs.host, "C1", "S1")
		assert.Nil(t, err)
	})
}

// TestRemoteIndexUpdateShardStatus: two retryable failures, then an implicit
// 200 (the handler writes nothing, so net/http defaults to StatusOK).
func TestRemoteIndexUpdateShardStatus(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	path := "/indices/C1/shards/S1/status"
	fs := newFakeRemoteIndexServer(t, http.MethodPost, path)
	ts := fs.server(t)
	defer ts.Close()
	client := newRemoteIndex(ts.Client())
	t.Run("ConnectionError", func(t *testing.T) {
		err := client.UpdateShardStatus(ctx, "", "C1", "S1", "NewStatus", 0)
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), "connect")
	})
	n := 0
	fs.doAfter = func(w http.ResponseWriter, r *http.Request) {
		switch n {
		case 0:
			w.WriteHeader(http.StatusInternalServerError)
		case 1:
			w.WriteHeader(http.StatusTooManyRequests)
		default:
			// do nothing
		}
		n++
	}
	t.Run("Success", func(t *testing.T) {
		err := client.UpdateShardStatus(ctx, fs.host, "C1", "S1", "NewStatus", 0)
		assert.Nil(t, err)
	})
}

// TestRemoteIndexShardStatus walks GetShardStatus through retryable statuses,
// a wrong content type, a well-typed but empty body, and finally success.
// NOTE(review): the subtests depend on the shared counter n advancing in
// order, so they must not be reordered.
func TestRemoteIndexShardStatus(t *testing.T) {
	t.Parallel()
	var (
		ctx    = context.Background()
		path   = "/indices/C1/shards/S1/status"
		fs     = newFakeRemoteIndexServer(t, http.MethodGet, path)
		Status = "READONLY"
	)
	ts := fs.server(t)
	defer ts.Close()
	client := newRemoteIndex(ts.Client())
	t.Run("ConnectionError", func(t *testing.T) {
		_, err := client.GetShardStatus(ctx, "", "C1", "S1")
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), "connect")
	})
	n := 0
	fs.doAfter = func(w http.ResponseWriter, r *http.Request) {
		switch n {
		case 0:
			w.WriteHeader(http.StatusInternalServerError)
		case 1:
			w.WriteHeader(http.StatusTooManyRequests)
		case 2:
			w.Header().Set("content-type", "any")
		case 3:
			clusterapi.IndicesPayloads.GetShardStatusResults.SetContentTypeHeader(w)
		default:
			clusterapi.IndicesPayloads.GetShardStatusResults.SetContentTypeHeader(w)
			bytes, _ := clusterapi.IndicesPayloads.GetShardStatusResults.Marshal(Status)
			w.Write(bytes)
		}
		n++
	}

	t.Run("ContentType", func(t *testing.T) {
		_, err := client.GetShardStatus(ctx, fs.host, "C1", "S1")
		assert.NotNil(t, err)
	})

	t.Run("Status", func(t *testing.T) {
		_, err := client.GetShardStatus(ctx, fs.host, "C1", "S1")
		assert.NotNil(t, err)
	})
	t.Run("Success", func(t *testing.T) {
		st, err := client.GetShardStatus(ctx, fs.host, "C1", "S1")
		assert.Nil(t, err)
		assert.Equal(t, "READONLY", st)
	})
}

// TestRemoteIndexPutFile uploads a small in-memory payload through PutFile,
// again failing twice with retryable statuses before a 204 success.
func TestRemoteIndexPutFile(t *testing.T) {
	t.Parallel()
	var (
		ctx  = context.Background()
		path = "/indices/C1/shards/S1/files/file1"
		fs   = newFakeRemoteIndexServer(t, http.MethodPost, path)
	)
	ts := fs.server(t)
	defer ts.Close()
	client := newRemoteIndex(ts.Client())

	// rsc glues a strings.Reader to a no-op Closer to satisfy io.ReadSeekCloser.
	rsc := struct {
		*strings.Reader
		io.Closer
	}{
		strings.NewReader("hello, world"),
		io.NopCloser(nil),
	}
	t.Run("ConnectionError", func(t *testing.T) {
		err := client.PutFile(ctx, "", "C1", "S1", "file1", rsc)
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), "connect")
	})
	n := 0
	fs.doAfter = func(w http.ResponseWriter, r *http.Request) {
		switch n {
		case 0:
			w.WriteHeader(http.StatusInternalServerError)
		case 1:
			w.WriteHeader(http.StatusTooManyRequests)
		default:
			w.WriteHeader(http.StatusNoContent)
		}
		n++
	}

	t.Run("Success", func(t *testing.T) {
		err := client.PutFile(ctx, fs.host, "C1", "S1", "file1", rsc)
		assert.Nil(t, err)
	})
}

// newRemoteIndex returns a RemoteIndex with very short backoffs/timeouts so
// the retry paths complete quickly under test.
func newRemoteIndex(httpClient *http.Client) *RemoteIndex {
	ri := NewRemoteIndex(httpClient)
	ri.minBackOff = time.Millisecond * 1
	ri.maxBackOff = time.Millisecond * 10
	ri.timeoutUnit = time.Millisecond * 20
	return ri
}

// fakeRemoteIndexServer is a scriptable test double: doBefore validates the
// incoming request (method/path), doAfter produces the scripted response.
type fakeRemoteIndexServer struct {
	method   string
	path     string
	host     string
	doBefore func(w http.ResponseWriter, r *http.Request) error
	doAfter  func(w http.ResponseWriter, r *http.Request)
}

func newFakeRemoteIndexServer(t *testing.T, method, path string) *fakeRemoteIndexServer {
	t.Helper()

	f := &fakeRemoteIndexServer{
		method: method,
		path:   path,
	}
	f.doBefore = func(w http.ResponseWriter, r *http.Request) error {
		if r.Method != f.method {
			w.WriteHeader(http.StatusBadRequest)
			return fmt.Errorf("method want %s got %s", method, r.Method)
		}
		if f.path != r.URL.Path {
			w.WriteHeader(http.StatusBadRequest)
			return fmt.Errorf("path want %s got %s", path, r.URL.Path)
		}
		return nil
	}
	return f
}

func (f *fakeRemoteIndexServer) server(t *testing.T) *httptest.Server {
	// Defensive default, mirroring the validation set in newFakeRemoteIndexServer.
	if f.doBefore == nil {
		f.doBefore = func(w http.ResponseWriter, r *http.Request) error {
			if r.Method != f.method {
				w.WriteHeader(http.StatusBadRequest)
				return fmt.Errorf("method want %s got %s", f.method, r.Method)
			}
			if f.path != r.URL.Path {
				w.WriteHeader(http.StatusBadRequest)
				return fmt.Errorf("path want %s got %s", f.path, r.URL.Path)
			}
			return nil
		}
	}
	handler := func(w http.ResponseWriter, r *http.Request) {
		if err := f.doBefore(w, r); err != nil {
			t.Error(err)
			return
		}
		if f.doAfter != nil {
			f.doAfter(w, r)
		}
	}
	serv := httptest.NewServer(http.HandlerFunc(handler))
	// Strip the leading "http://" so callers can pass a bare host:port.
	f.host = serv.URL[7:]
	return serv
}

func TestRemoteIndexAddAsyncReplicationTargetNode(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	indexName := "C1"
	shardName := "S1"
	endpoint := AsyncReplicationTargetNodeEndpoint(indexName, shardName)

	fs := newFakeRemoteIndexServer(t, http.MethodPost, endpoint)
	ts := fs.server(t)
	defer ts.Close()

	client := newRemoteIndex(ts.Client())
	override := additional.AsyncReplicationTargetNodeOverride{}

	t.Run("ConnectionError", func(t *testing.T) {
		err := client.AddAsyncReplicationTargetNode(ctx, "", indexName, shardName, override, 0)
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), "connect")
	})

	n := 0
	fs.doAfter = func(w http.ResponseWriter, r *http.Request) {
		switch n {
		case 0:
			w.WriteHeader(http.StatusInternalServerError)
		case 1:
			w.WriteHeader(http.StatusTooManyRequests)
		default:
			w.WriteHeader(http.StatusOK)
		}
		n++
	}

	t.Run("Success", func(t *testing.T) {
		err := client.AddAsyncReplicationTargetNode(ctx, fs.host, indexName, shardName, override, 0)
		assert.Nil(t, err)
	})
}

func TestRemoteIndexRemoveAsyncReplicationTargetNode(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	indexName := "C1"
	shardName := "S1"
	endpoint := AsyncReplicationTargetNodeEndpoint(indexName, shardName)

	fs := newFakeRemoteIndexServer(t, http.MethodDelete, endpoint)
	ts := fs.server(t)
	defer ts.Close()

	client := newRemoteIndex(ts.Client())
	override := additional.AsyncReplicationTargetNodeOverride{}

	t.Run("ConnectionError", func(t *testing.T) {
		err := client.RemoveAsyncReplicationTargetNode(ctx, "", indexName, shardName, override)
		assert.NotNil(t, err)
		assert.Contains(t, err.Error(), "connect")
	})

	n := 0
	fs.doAfter = func(w http.ResponseWriter, r *http.Request) {
		switch n {
		case 0:
			w.WriteHeader(http.StatusInternalServerError)
		case 1:
			w.WriteHeader(http.StatusTooManyRequests)
		default:
			w.WriteHeader(http.StatusNoContent)
		}
		n++
	}

	t.Run("Success", func(t *testing.T) {
		err := client.RemoveAsyncReplicationTargetNode(ctx, fs.host, indexName, shardName, override)
		assert.Nil(t, err)
	})
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/clients/replication.go b/platform/dbops/binaries/weaviate-src/adapters/clients/replication.go
new file mode 100644
index 0000000000000000000000000000000000000000..80e0a1ed35d10aa34b82d934cf03e4b70c6cf05f
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/clients/replication.go
@@ -0,0 +1,410 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package clients + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "math/rand" + "net/http" + "net/url" + "time" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/handlers/rest/clusterapi" + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/replica" + "github.com/weaviate/weaviate/usecases/replica/hashtree" +) + +// ReplicationClient is to coordinate operations among replicas + +type replicationClient retryClient + +func NewReplicationClient(httpClient *http.Client) replica.Client { + return &replicationClient{ + client: httpClient, + retryer: newRetryer(), + } +} + +// FetchObject fetches one object it exits +func (c *replicationClient) FetchObject(ctx context.Context, host, index, + shard string, id strfmt.UUID, selectProps search.SelectProperties, + additional additional.Properties, numRetries int, +) (replica.Replica, error) { + resp := replica.Replica{} + req, err := newHttpReplicaRequest(ctx, http.MethodGet, host, index, shard, "", id.String(), nil, 0) + if err != nil { + return resp, fmt.Errorf("create http request: %w", err) + } + err = c.doCustomUnmarshal(c.timeoutUnit*20, req, nil, resp.UnmarshalBinary, numRetries) + return resp, err +} + +func (c *replicationClient) DigestObjects(ctx context.Context, + host, index, shard string, ids []strfmt.UUID, numRetries int, +) (result []types.RepairResponse, err error) { + var resp []types.RepairResponse + body, err := json.Marshal(ids) + if err != nil { + return nil, fmt.Errorf("marshal digest objects input: %w", err) + } + req, err := newHttpReplicaRequest( + ctx, http.MethodGet, host, index, 
shard, + "", "_digest", bytes.NewReader(body), 0) + if err != nil { + return resp, fmt.Errorf("create http request: %w", err) + } + err = c.do(c.timeoutUnit*20, req, body, &resp, numRetries) + return resp, err +} + +func (c *replicationClient) DigestObjectsInRange(ctx context.Context, + host, index, shard string, initialUUID, finalUUID strfmt.UUID, limit int, +) (result []types.RepairResponse, err error) { + body, err := json.Marshal(replica.DigestObjectsInRangeReq{ + InitialUUID: initialUUID, + FinalUUID: finalUUID, + Limit: limit, + }) + if err != nil { + return nil, fmt.Errorf("marshal digest objects in range input: %w", err) + } + + req, err := newHttpReplicaRequest( + ctx, http.MethodPost, host, index, shard, + "", "digestsInRange", bytes.NewReader(body), 0) + if err != nil { + return nil, fmt.Errorf("create http request: %w", err) + } + + var resp replica.DigestObjectsInRangeResp + err = c.do(c.timeoutUnit*20, req, body, &resp, 9) + return resp.Digests, err +} + +func (c *replicationClient) HashTreeLevel(ctx context.Context, + host, index, shard string, level int, discriminant *hashtree.Bitset, +) (digests []hashtree.Digest, err error) { + var resp []hashtree.Digest + body, err := discriminant.Marshal() + if err != nil { + return nil, fmt.Errorf("marshal hashtree level input: %w", err) + } + req, err := newHttpReplicaRequest( + ctx, http.MethodPost, host, index, shard, + "", fmt.Sprintf("hashtree/%d", level), bytes.NewReader(body), 0) + if err != nil { + return resp, fmt.Errorf("create http request: %w", err) + } + err = c.do(c.timeoutUnit*20, req, body, &resp, 9) + return resp, err +} + +func (c *replicationClient) OverwriteObjects(ctx context.Context, + host, index, shard string, vobjects []*objects.VObject, +) ([]types.RepairResponse, error) { + var resp []types.RepairResponse + body, err := clusterapi.IndicesPayloads.VersionedObjectList.Marshal(vobjects) + if err != nil { + return nil, fmt.Errorf("encode request: %w", err) + } + req, err := 
newHttpReplicaRequest( + ctx, http.MethodPut, host, index, shard, + "", "_overwrite", bytes.NewReader(body), 0) + if err != nil { + return resp, fmt.Errorf("create http request: %w", err) + } + err = c.do(c.timeoutUnit*90, req, body, &resp, 9) + return resp, err +} + +func (c *replicationClient) FetchObjects(ctx context.Context, host, + index, shard string, ids []strfmt.UUID, +) ([]replica.Replica, error) { + resp := make(replica.Replicas, len(ids)) + idsBytes, err := json.Marshal(ids) + if err != nil { + return nil, fmt.Errorf("marshal ids: %w", err) + } + + idsEncoded := base64.StdEncoding.EncodeToString(idsBytes) + + req, err := newHttpReplicaRequest(ctx, http.MethodGet, host, index, shard, "", "", nil, 0) + if err != nil { + return nil, fmt.Errorf("create http request: %w", err) + } + + req.URL.RawQuery = url.Values{"ids": []string{idsEncoded}}.Encode() + err = c.doCustomUnmarshal(c.timeoutUnit*90, req, nil, resp.UnmarshalBinary, 9) + return resp, err +} + +func (c *replicationClient) PutObject(ctx context.Context, host, index, + shard, requestID string, obj *storobj.Object, schemaVersion uint64, +) (replica.SimpleResponse, error) { + var resp replica.SimpleResponse + body, err := clusterapi.IndicesPayloads.SingleObject.Marshal(obj) + if err != nil { + return resp, fmt.Errorf("encode request: %w", err) + } + + req, err := newHttpReplicaRequest(ctx, http.MethodPost, host, index, shard, requestID, "", nil, schemaVersion) + if err != nil { + return resp, fmt.Errorf("create http request: %w", err) + } + + clusterapi.IndicesPayloads.SingleObject.SetContentTypeHeaderReq(req) + err = c.do(c.timeoutUnit*90, req, body, &resp, 9) + return resp, err +} + +func (c *replicationClient) DeleteObject(ctx context.Context, host, index, + shard, requestID string, uuid strfmt.UUID, deletionTime time.Time, schemaVersion uint64, +) (replica.SimpleResponse, error) { + var resp replica.SimpleResponse + uuidTs := fmt.Sprintf("%s/%d", uuid.String(), deletionTime.UnixMilli()) + req, err 
:= newHttpReplicaRequest(ctx, http.MethodDelete, host, index, shard, requestID, uuidTs, nil, schemaVersion) + if err != nil { + return resp, fmt.Errorf("create http request: %w", err) + } + + err = c.do(c.timeoutUnit*90, req, nil, &resp, 9) + return resp, err +} + +func (c *replicationClient) PutObjects(ctx context.Context, host, index, + shard, requestID string, objects []*storobj.Object, schemaVersion uint64, +) (replica.SimpleResponse, error) { + var resp replica.SimpleResponse + body, err := clusterapi.IndicesPayloads.ObjectList.Marshal(objects) + if err != nil { + return resp, fmt.Errorf("encode request: %w", err) + } + req, err := newHttpReplicaRequest(ctx, http.MethodPost, host, index, shard, requestID, "", nil, schemaVersion) + if err != nil { + return resp, fmt.Errorf("create http request: %w", err) + } + + clusterapi.IndicesPayloads.ObjectList.SetContentTypeHeaderReq(req) + err = c.do(c.timeoutUnit*90, req, body, &resp, 9) + return resp, err +} + +func (c *replicationClient) MergeObject(ctx context.Context, host, index, shard, requestID string, + doc *objects.MergeDocument, schemaVersion uint64, +) (replica.SimpleResponse, error) { + var resp replica.SimpleResponse + body, err := clusterapi.IndicesPayloads.MergeDoc.Marshal(*doc) + if err != nil { + return resp, fmt.Errorf("encode request: %w", err) + } + + req, err := newHttpReplicaRequest(ctx, http.MethodPatch, host, index, shard, + requestID, doc.ID.String(), nil, schemaVersion) + if err != nil { + return resp, fmt.Errorf("create http request: %w", err) + } + + clusterapi.IndicesPayloads.MergeDoc.SetContentTypeHeaderReq(req) + err = c.do(c.timeoutUnit*90, req, body, &resp, 9) + return resp, err +} + +func (c *replicationClient) AddReferences(ctx context.Context, host, index, + shard, requestID string, refs []objects.BatchReference, schemaVersion uint64, +) (replica.SimpleResponse, error) { + var resp replica.SimpleResponse + body, err := clusterapi.IndicesPayloads.ReferenceList.Marshal(refs) + if err != 
nil { + return resp, fmt.Errorf("encode request: %w", err) + } + req, err := newHttpReplicaRequest(ctx, http.MethodPost, host, index, shard, + requestID, "references", nil, schemaVersion) + if err != nil { + return resp, fmt.Errorf("create http request: %w", err) + } + + clusterapi.IndicesPayloads.ReferenceList.SetContentTypeHeaderReq(req) + err = c.do(c.timeoutUnit*90, req, body, &resp, 9) + return resp, err +} + +func (c *replicationClient) DeleteObjects(ctx context.Context, host, index, shard, requestID string, + uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64, +) (resp replica.SimpleResponse, err error) { + body, err := clusterapi.IndicesPayloads.BatchDeleteParams.Marshal(uuids, deletionTime, dryRun) + if err != nil { + return resp, fmt.Errorf("encode request: %w", err) + } + req, err := newHttpReplicaRequest(ctx, http.MethodDelete, host, index, shard, requestID, "", nil, schemaVersion) + if err != nil { + return resp, fmt.Errorf("create http request: %w", err) + } + + clusterapi.IndicesPayloads.BatchDeleteParams.SetContentTypeHeaderReq(req) + err = c.do(c.timeoutUnit*90, req, body, &resp, 9) + return resp, err +} + +func (c *replicationClient) FindUUIDs(ctx context.Context, hostName, indexName, + shardName string, filters *filters.LocalFilter, +) ([]strfmt.UUID, error) { + paramsBytes, err := clusterapi.IndicesPayloads.FindUUIDsParams.Marshal(filters) + if err != nil { + return nil, errors.Wrap(err, "marshal request payload") + } + + path := fmt.Sprintf("/indices/%s/shards/%s/objects/_find", indexName, shardName) + method := http.MethodPost + url := url.URL{Scheme: "http", Host: hostName, Path: path} + + req, err := http.NewRequestWithContext(ctx, method, url.String(), + bytes.NewReader(paramsBytes)) + if err != nil { + return nil, errors.Wrap(err, "open http request") + } + + clusterapi.IndicesPayloads.FindUUIDsParams.SetContentTypeHeaderReq(req) + res, err := c.client.Do(req) + if err != nil { + return nil, errors.Wrap(err, 
"send http request") + } + + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return nil, errors.Errorf("unexpected status code %d (%s)", res.StatusCode, + body) + } + + resBytes, err := io.ReadAll(res.Body) + if err != nil { + return nil, errors.Wrap(err, "read body") + } + + ct, ok := clusterapi.IndicesPayloads.FindUUIDsResults.CheckContentTypeHeader(res) + if !ok { + return nil, errors.Errorf("unexpected content type: %s", ct) + } + + uuids, err := clusterapi.IndicesPayloads.FindUUIDsResults.Unmarshal(resBytes) + if err != nil { + return nil, errors.Wrap(err, "unmarshal body") + } + return uuids, nil +} + +// Commit asks a host to commit and stores the response in the value pointed to by resp +func (c *replicationClient) Commit(ctx context.Context, host, index, shard string, requestID string, resp interface{}) error { + req, err := newHttpReplicaCMD(host, "commit", index, shard, requestID, nil) + if err != nil { + return fmt.Errorf("create http request: %w", err) + } + + return c.do(c.timeoutUnit*90, req, nil, resp, 9) +} + +func (c *replicationClient) Abort(ctx context.Context, host, index, shard, requestID string) ( + resp replica.SimpleResponse, err error, +) { + req, err := newHttpReplicaCMD(host, "abort", index, shard, requestID, nil) + if err != nil { + return resp, fmt.Errorf("create http request: %w", err) + } + + err = c.do(c.timeoutUnit*5, req, nil, &resp, 9) + return resp, err +} + +func newHttpReplicaRequest(ctx context.Context, method, host, index, shard, requestId, suffix string, body io.Reader, schemaVersion uint64) (*http.Request, error) { + path := fmt.Sprintf("/replicas/indices/%s/shards/%s/objects", index, shard) + if suffix != "" { + path = fmt.Sprintf("%s/%s", path, suffix) + } + u := url.URL{ + Scheme: "http", + Host: host, + Path: path, + } + + urlValues := url.Values{} + urlValues[replica.SchemaVersionKey] = []string{fmt.Sprint(schemaVersion)} + if requestId != "" { + 
urlValues[replica.RequestKey] = []string{requestId} + } + u.RawQuery = urlValues.Encode() + + return http.NewRequestWithContext(ctx, method, u.String(), body) +} + +func newHttpReplicaCMD(host, cmd, index, shard, requestId string, body io.Reader) (*http.Request, error) { + path := fmt.Sprintf("/replicas/indices/%s/shards/%s:%s", index, shard, cmd) + q := url.Values{replica.RequestKey: []string{requestId}}.Encode() + url := url.URL{Scheme: "http", Host: host, Path: path, RawQuery: q} + return http.NewRequest(http.MethodPost, url.String(), body) +} + +func (c *replicationClient) do(timeout time.Duration, req *http.Request, body []byte, resp interface{}, numRetries int) (err error) { + ctx, cancel := context.WithTimeout(req.Context(), timeout) + defer cancel() + req = req.WithContext(ctx) + try := func(ctx context.Context) (bool, error) { + if body != nil { + req.Body = io.NopCloser(bytes.NewReader(body)) + } + res, err := c.client.Do(req) + if err != nil { + return false, fmt.Errorf("connect: %w", err) + } + defer res.Body.Close() + + if code := res.StatusCode; code != http.StatusOK { + b, _ := io.ReadAll(res.Body) + return shouldRetry(code), fmt.Errorf("status code: %v, error: %s", code, b) + } + if err := json.NewDecoder(res.Body).Decode(resp); err != nil { + return false, fmt.Errorf("decode response: %w", err) + } + return false, nil + } + return c.retry(ctx, numRetries, try) +} + +func (c *replicationClient) doCustomUnmarshal(timeout time.Duration, + req *http.Request, body []byte, decode func([]byte) error, numRetries int, +) (err error) { + return (*retryClient)(c).doWithCustomMarshaller(timeout, req, body, decode, successCode, numRetries) +} + +// backOff return a new random duration in the interval [d, 3d]. +// It implements truncated exponential back-off with introduced jitter. 
func backOff(d time.Duration) time.Duration {
	// 2d scaled by a uniform factor in [0.5, 1.5) yields a value in [d, 3d).
	return time.Duration(float64(d.Nanoseconds()*2) * (0.5 + rand.Float64()))
}

// shouldRetry reports whether an HTTP status code is considered transient.
func shouldRetry(code int) bool {
	return code == http.StatusInternalServerError ||
		code == http.StatusTooManyRequests ||
		code == http.StatusServiceUnavailable
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/clients/replication_test.go b/platform/dbops/binaries/weaviate-src/adapters/clients/replication_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..9b2ece8228987cc6f7bb347274ab98a97b8cb435
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/clients/replication_test.go
@@ -0,0 +1,609 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package clients

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/go-openapi/strfmt"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/weaviate/weaviate/cluster/router/types"
	"github.com/weaviate/weaviate/entities/additional"
	"github.com/weaviate/weaviate/entities/models"
	"github.com/weaviate/weaviate/entities/storobj"
	"github.com/weaviate/weaviate/usecases/objects"
	"github.com/weaviate/weaviate/usecases/replica"
)

// Request ids that script the fake server's behavior (see fakeServer.server).
const (
	RequestError             = "RIDNotFound"
	RequestSuccess           = "RIDSuccess"
	RequestInternalError     = "RIDInternal"
	RequestMalFormedResponse = "RIDMalFormed"
)

const (
	UUID1 = strfmt.UUID("73f2eb5f-5abf-447a-81ca-74b1dd168241")
	UUID2 = strfmt.UUID("73f2eb5f-5abf-447a-81ca-74b1dd168242")
)

// fakeServer validates method/path/schema-version and replies according to the
// request id carried in the query string.
type fakeServer struct {
	method                string
	path                  string
	RequestError          replica.SimpleResponse
	RequestSuccess        replica.SimpleResponse
	host                  string
	ExpectedSchemaVersion string
}

+func newFakeReplicationServer(t *testing.T, method, path string, schemaVersion uint64) *fakeServer { + t.Helper() + return &fakeServer{ + method: method, + path: path, + RequestError: replica.SimpleResponse{Errors: []replica.Error{{Msg: "error"}}}, + RequestSuccess: replica.SimpleResponse{}, + ExpectedSchemaVersion: fmt.Sprint(schemaVersion), + } +} + +func (f *fakeServer) server(t *testing.T) *httptest.Server { + t.Helper() + handler := func(w http.ResponseWriter, r *http.Request) { + if r.Method != f.method { + t.Errorf("method want %s got %s", f.method, r.Method) + w.WriteHeader(http.StatusBadRequest) + return + } + if f.path != r.URL.Path { + t.Errorf("path want %s got %s", f.path, r.URL.Path) + w.WriteHeader(http.StatusBadRequest) + return + } + schemaVersion := r.URL.Query().Get(replica.SchemaVersionKey) + if f.ExpectedSchemaVersion != "0" && schemaVersion != f.ExpectedSchemaVersion { + t.Errorf("schemaVersion want %s got %s", f.ExpectedSchemaVersion, schemaVersion) + w.WriteHeader(http.StatusBadRequest) + return + } + + requestID := r.URL.Query().Get(replica.RequestKey) + switch requestID { + case RequestInternalError: + w.WriteHeader(http.StatusInternalServerError) + case RequestError: + bytes, _ := json.Marshal(&f.RequestError) + w.Write(bytes) + case RequestSuccess: + bytes, _ := json.Marshal(&replica.SimpleResponse{}) + w.Write(bytes) + case RequestMalFormedResponse: + w.Write([]byte(`mal formed`)) + } + } + serv := httptest.NewServer(http.HandlerFunc(handler)) + f.host = serv.URL[7:] + return serv +} + +func anyObject(uuid strfmt.UUID) models.Object { + return models.Object{ + Class: "C1", + CreationTimeUnix: 900000000001, + LastUpdateTimeUnix: 900000000002, + ID: uuid, + Properties: map[string]interface{}{ + "stringProp": "string", + "textProp": "text", + "datePropArray": []string{"1980-01-01T00:00:00+02:00"}, + }, + } +} + +func TestReplicationPutObject(t *testing.T) { + t.Parallel() + + ctx := context.Background() + f := newFakeReplicationServer(t, 
http.MethodPost, "/replicas/indices/C1/shards/S1/objects", 0) + ts := f.server(t) + defer ts.Close() + + client := newReplicationClient(ts.Client()) + t.Run("EncodeRequest", func(t *testing.T) { + obj := &storobj.Object{} + _, err := client.PutObject(ctx, "Node1", "C1", "S1", "RID", obj, 0) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "encode") + }) + + obj := &storobj.Object{MarshallerVersion: 1, Object: anyObject(UUID1)} + t.Run("ConnectionError", func(t *testing.T) { + _, err := client.PutObject(ctx, "", "C1", "S1", "", obj, 0) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "connect") + }) + + t.Run("Error", func(t *testing.T) { + resp, err := client.PutObject(ctx, f.host, "C1", "S1", RequestError, obj, 0) + assert.Nil(t, err) + assert.Equal(t, replica.SimpleResponse{Errors: f.RequestError.Errors}, resp) + }) + + t.Run("DecodeResponse", func(t *testing.T) { + _, err := client.PutObject(ctx, f.host, "C1", "S1", RequestMalFormedResponse, obj, 0) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "decode response") + }) + + t.Run("ServerInternalError", func(t *testing.T) { + _, err := client.PutObject(ctx, f.host, "C1", "S1", RequestInternalError, obj, 0) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "status code") + }) +} + +func TestReplicationDeleteObject(t *testing.T) { + t.Parallel() + + ctx := context.Background() + uuid := UUID1 + deletionTime := time.Now() + path := fmt.Sprintf("/replicas/indices/C1/shards/S1/objects/%s/%d", uuid.String(), deletionTime.UnixMilli()) + fs := newFakeReplicationServer(t, http.MethodDelete, path, 0) + ts := fs.server(t) + defer ts.Close() + + client := newReplicationClient(ts.Client()) + t.Run("ConnectionError", func(t *testing.T) { + _, err := client.DeleteObject(ctx, "", "C1", "S1", "", uuid, deletionTime, 0) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "connect") + }) + + t.Run("Error", func(t *testing.T) { + resp, err := client.DeleteObject(ctx, fs.host, "C1", "S1", 
RequestError, uuid, deletionTime, 0) + assert.Nil(t, err) + assert.Equal(t, replica.SimpleResponse{Errors: fs.RequestError.Errors}, resp) + }) + + t.Run("DecodeResponse", func(t *testing.T) { + _, err := client.DeleteObject(ctx, fs.host, "C1", "S1", RequestMalFormedResponse, uuid, deletionTime, 0) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "decode response") + }) + + t.Run("ServerInternalError", func(t *testing.T) { + _, err := client.DeleteObject(ctx, fs.host, "C1", "S1", RequestInternalError, uuid, deletionTime, 0) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "status code") + }) +} + +func TestReplicationPutObjects(t *testing.T) { + t.Parallel() + ctx := context.Background() + fs := newFakeReplicationServer(t, http.MethodPost, "/replicas/indices/C1/shards/S1/objects", 123) + fs.RequestError.Errors = append(fs.RequestError.Errors, replica.Error{Msg: "error2"}) + ts := fs.server(t) + defer ts.Close() + + client := newReplicationClient(ts.Client()) + t.Run("EncodeRequest", func(t *testing.T) { + objs := []*storobj.Object{{}} + _, err := client.PutObjects(ctx, "Node1", "C1", "S1", "RID", objs, 123) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "encode") + }) + + objects := []*storobj.Object{ + {MarshallerVersion: 1, Object: anyObject(UUID1)}, + {MarshallerVersion: 1, Object: anyObject(UUID2)}, + } + + t.Run("ConnectionError", func(t *testing.T) { + _, err := client.PutObjects(ctx, "", "C1", "S1", "", objects, 123) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "connect") + }) + + t.Run("Error", func(t *testing.T) { + resp, err := client.PutObjects(ctx, fs.host, "C1", "S1", RequestError, objects, 123) + assert.Nil(t, err) + assert.Equal(t, replica.SimpleResponse{Errors: fs.RequestError.Errors}, resp) + }) + + t.Run("DecodeResponse", func(t *testing.T) { + _, err := client.PutObjects(ctx, fs.host, "C1", "S1", RequestMalFormedResponse, objects, 123) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "decode 
response") + }) + + t.Run("ServerInternalError", func(t *testing.T) { + _, err := client.PutObjects(ctx, fs.host, "C1", "S1", RequestInternalError, objects, 123) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "status code") + }) +} + +func TestReplicationMergeObject(t *testing.T) { + t.Parallel() + ctx := context.Background() + uuid := UUID1 + f := newFakeReplicationServer(t, http.MethodPatch, "/replicas/indices/C1/shards/S1/objects/"+uuid.String(), 0) + ts := f.server(t) + defer ts.Close() + + client := newReplicationClient(ts.Client()) + doc := &objects.MergeDocument{ID: uuid} + t.Run("ConnectionError", func(t *testing.T) { + _, err := client.MergeObject(ctx, "", "C1", "S1", "", doc, 0) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "connect") + }) + + t.Run("Error", func(t *testing.T) { + resp, err := client.MergeObject(ctx, f.host, "C1", "S1", RequestError, doc, 0) + assert.Nil(t, err) + assert.Equal(t, replica.SimpleResponse{Errors: f.RequestError.Errors}, resp) + }) + + t.Run("DecodeResponse", func(t *testing.T) { + _, err := client.MergeObject(ctx, f.host, "C1", "S1", RequestMalFormedResponse, doc, 0) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "decode response") + }) + + t.Run("ServerInternalError", func(t *testing.T) { + _, err := client.MergeObject(ctx, f.host, "C1", "S1", RequestInternalError, doc, 0) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "status code") + }) +} + +func TestReplicationAddReferences(t *testing.T) { + t.Parallel() + + ctx := context.Background() + fs := newFakeReplicationServer(t, http.MethodPost, "/replicas/indices/C1/shards/S1/objects/references", 0) + fs.RequestError.Errors = append(fs.RequestError.Errors, replica.Error{Msg: "error2"}) + ts := fs.server(t) + defer ts.Close() + + client := newReplicationClient(ts.Client()) + refs := []objects.BatchReference{{OriginalIndex: 1}, {OriginalIndex: 2}} + t.Run("ConnectionError", func(t *testing.T) { + _, err := client.AddReferences(ctx, 
"", "C1", "S1", "", refs, 0) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "connect") + }) + + t.Run("Error", func(t *testing.T) { + resp, err := client.AddReferences(ctx, fs.host, "C1", "S1", RequestError, refs, 0) + assert.Nil(t, err) + assert.Equal(t, replica.SimpleResponse{Errors: fs.RequestError.Errors}, resp) + }) + + t.Run("DecodeResponse", func(t *testing.T) { + _, err := client.AddReferences(ctx, fs.host, "C1", "S1", RequestMalFormedResponse, refs, 0) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "decode response") + }) + + t.Run("ServerInternalError", func(t *testing.T) { + _, err := client.AddReferences(ctx, fs.host, "C1", "S1", RequestInternalError, refs, 0) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "status code") + }) +} + +func TestReplicationDeleteObjects(t *testing.T) { + t.Parallel() + + ctx := context.Background() + fs := newFakeReplicationServer(t, http.MethodDelete, "/replicas/indices/C1/shards/S1/objects", 0) + fs.RequestError.Errors = append(fs.RequestError.Errors, replica.Error{Msg: "error2"}) + ts := fs.server(t) + defer ts.Close() + client := newReplicationClient(ts.Client()) + + uuids := []strfmt.UUID{strfmt.UUID("1"), strfmt.UUID("2")} + deletionTime := time.Now() + + t.Run("ConnectionError", func(t *testing.T) { + _, err := client.DeleteObjects(ctx, "", "C1", "S1", "", uuids, deletionTime, false, 123) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "connect") + }) + + t.Run("Error", func(t *testing.T) { + resp, err := client.DeleteObjects(ctx, fs.host, "C1", "S1", RequestError, uuids, deletionTime, false, 123) + assert.Nil(t, err) + assert.Equal(t, replica.SimpleResponse{Errors: fs.RequestError.Errors}, resp) + }) + + t.Run("DecodeResponse", func(t *testing.T) { + _, err := client.DeleteObjects(ctx, fs.host, "C1", "S1", RequestMalFormedResponse, uuids, deletionTime, false, 123) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "decode response") + }) + + 
t.Run("ServerInternalError", func(t *testing.T) { + _, err := client.DeleteObjects(ctx, fs.host, "C1", "S1", RequestInternalError, uuids, deletionTime, false, 123) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "status code") + }) +} + +func TestReplicationAbort(t *testing.T) { + t.Parallel() + + ctx := context.Background() + path := "/replicas/indices/C1/shards/S1:abort" + fs := newFakeReplicationServer(t, http.MethodPost, path, 0) + ts := fs.server(t) + defer ts.Close() + client := newReplicationClient(ts.Client()) + + t.Run("ConnectionError", func(t *testing.T) { + client := newReplicationClient(ts.Client()) + client.maxBackOff = client.timeoutUnit * 20 + _, err := client.Abort(ctx, "", "C1", "S1", "") + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "connect") + }) + + t.Run("Error", func(t *testing.T) { + resp, err := client.Abort(ctx, fs.host, "C1", "S1", RequestError) + assert.Nil(t, err) + assert.Equal(t, replica.SimpleResponse{Errors: fs.RequestError.Errors}, resp) + }) + + t.Run("DecodeResponse", func(t *testing.T) { + _, err := client.Abort(ctx, fs.host, "C1", "S1", RequestMalFormedResponse) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "decode response") + }) + client.timeoutUnit = client.maxBackOff * 3 + t.Run("ServerInternalError", func(t *testing.T) { + _, err := client.Abort(ctx, fs.host, "C1", "S1", RequestInternalError) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "status code") + }) +} + +func TestReplicationCommit(t *testing.T) { + t.Parallel() + + ctx := context.Background() + path := "/replicas/indices/C1/shards/S1:commit" + fs := newFakeReplicationServer(t, http.MethodPost, path, 0) + ts := fs.server(t) + defer ts.Close() + resp := replica.SimpleResponse{} + client := newReplicationClient(ts.Client()) + + t.Run("ConnectionError", func(t *testing.T) { + err := client.Commit(ctx, "", "C1", "S1", "", &resp) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "connect") + }) + + t.Run("Error", 
func(t *testing.T) { + err := client.Commit(ctx, fs.host, "C1", "S1", RequestError, &resp) + assert.Nil(t, err) + assert.Equal(t, replica.SimpleResponse{Errors: fs.RequestError.Errors}, resp) + }) + + t.Run("DecodeResponse", func(t *testing.T) { + err := client.Commit(ctx, fs.host, "C1", "S1", RequestMalFormedResponse, &resp) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "decode response") + }) + + t.Run("ServerInternalError", func(t *testing.T) { + err := client.Commit(ctx, fs.host, "C1", "S1", RequestInternalError, &resp) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "status code") + }) +} + +func TestReplicationFetchObject(t *testing.T) { + t.Parallel() + + expected := replica.Replica{ + ID: UUID1, + Object: &storobj.Object{ + MarshallerVersion: 1, + Object: models.Object{ + ID: UUID1, + Properties: map[string]interface{}{ + "stringProp": "abc", + }, + }, + Vector: []float32{1, 2, 3, 4, 5}, + VectorLen: 5, + }, + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + b, _ := expected.MarshalBinary() + w.Write(b) + })) + + c := newReplicationClient(server.Client()) + resp, err := c.FetchObject(context.Background(), server.URL[7:], + "C1", "S1", expected.ID, nil, additional.Properties{}, 9) + require.Nil(t, err) + assert.Equal(t, expected.ID, resp.ID) + assert.Equal(t, expected.Deleted, resp.Deleted) + assert.EqualValues(t, expected.Object, resp.Object) +} + +func TestReplicationFetchObjects(t *testing.T) { + t.Parallel() + expected := replica.Replicas{ + { + ID: UUID1, + Object: &storobj.Object{ + MarshallerVersion: 1, + Object: models.Object{ + ID: UUID1, + Properties: map[string]interface{}{ + "stringProp": "abc", + }, + }, + Vector: []float32{1, 2, 3, 4, 5}, + VectorLen: 5, + }, + }, + { + ID: UUID2, + Object: &storobj.Object{ + MarshallerVersion: 1, + Object: models.Object{ + ID: UUID2, + Properties: map[string]interface{}{ + "floatProp": float64(123), + }, + }, + Vector: []float32{10, 20, 
30, 40, 50}, + VectorLen: 5, + }, + }, + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + b, _ := expected.MarshalBinary() + w.Write(b) + })) + + c := newReplicationClient(server.Client()) + resp, err := c.FetchObjects(context.Background(), server.URL[7:], "C1", "S1", []strfmt.UUID{expected[0].ID}) + require.Nil(t, err) + require.Len(t, resp, 2) + assert.Equal(t, expected[0].ID, resp[0].ID) + assert.Equal(t, expected[0].Deleted, resp[0].Deleted) + assert.EqualValues(t, expected[0].Object, resp[0].Object) + assert.Equal(t, expected[1].ID, resp[1].ID) + assert.Equal(t, expected[1].Deleted, resp[1].Deleted) + assert.EqualValues(t, expected[1].Object, resp[1].Object) +} + +func TestReplicationDigestObjects(t *testing.T) { + t.Parallel() + + now := time.Now() + expected := []types.RepairResponse{ + { + ID: UUID1.String(), + UpdateTime: now.UnixMilli(), + Version: 1, + }, + { + ID: UUID2.String(), + Deleted: true, + UpdateTime: now.UnixMilli(), + Version: 1, + }, + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + b, _ := json.Marshal(expected) + w.Write(b) + })) + + c := newReplicationClient(server.Client()) + resp, err := c.DigestObjects(context.Background(), server.URL[7:], "C1", "S1", []strfmt.UUID{ + strfmt.UUID(expected[0].ID), + strfmt.UUID(expected[1].ID), + }, 9) + require.Nil(t, err) + require.Len(t, resp, 2) + assert.Equal(t, expected[0].ID, resp[0].ID) + assert.Equal(t, expected[0].Deleted, resp[0].Deleted) + assert.Equal(t, expected[0].UpdateTime, resp[0].UpdateTime) + assert.Equal(t, expected[0].Version, resp[0].Version) + assert.Equal(t, expected[1].ID, resp[1].ID) + assert.Equal(t, expected[1].Deleted, resp[1].Deleted) + assert.Equal(t, expected[1].UpdateTime, resp[1].UpdateTime) + assert.Equal(t, expected[1].Version, resp[1].Version) +} + +func TestReplicationOverwriteObjects(t *testing.T) { + t.Parallel() + + now := time.Now() + input := 
[]*objects.VObject{ + { + LatestObject: &models.Object{ + ID: UUID1, + Class: "C1", + CreationTimeUnix: now.UnixMilli(), + LastUpdateTimeUnix: now.Add(time.Hour).UnixMilli(), + Properties: map[string]interface{}{ + "stringProp": "abc", + }, + Vector: []float32{1, 2, 3, 4, 5}, + }, + StaleUpdateTime: now.UnixMilli(), + Version: 0, + }, + } + expected := []types.RepairResponse{ + { + ID: UUID1.String(), + Version: 1, + UpdateTime: now.Add(time.Hour).UnixMilli(), + }, + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + b, _ := json.Marshal(expected) + w.Write(b) + })) + + c := newReplicationClient(server.Client()) + resp, err := c.OverwriteObjects(context.Background(), server.URL[7:], "C1", "S1", input) + require.Nil(t, err) + require.Len(t, resp, 1) + assert.Equal(t, expected[0].ID, resp[0].ID) + assert.Equal(t, expected[0].Version, resp[0].Version) + assert.Equal(t, expected[0].UpdateTime, resp[0].UpdateTime) +} + +func TestExpBackOff(t *testing.T) { + N := 200 + av := time.Duration(0) + delay := time.Nanosecond * 20 + for i := 0; i < N; i++ { + av += backOff(delay) + } + av /= time.Duration(N) + if av < time.Nanosecond*30 || av > time.Nanosecond*50 { + t.Errorf("average time got %v", av) + } +} + +func newReplicationClient(httpClient *http.Client) *replicationClient { + c := NewReplicationClient(httpClient).(*replicationClient) + c.minBackOff = time.Millisecond * 1 + c.maxBackOff = time.Millisecond * 8 + c.timeoutUnit = time.Millisecond * 20 + return c +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/common/fetch/filter.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/common/fetch/filter.go new file mode 100644 index 0000000000000000000000000000000000000000..8808d101e398283b6167c0fcf44f62448596bf1b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/common/fetch/filter.go @@ -0,0 +1,146 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ 
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package fetch + +import ( + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" +) + +// FilterBuilder can build where filters for both local and +type FilterBuilder struct { + prefix string +} + +// NewFilterBuilder with kind and prefix +func NewFilterBuilder(prefix string) *FilterBuilder { + return &FilterBuilder{ + prefix: prefix, + } +} + +// Build a where filter ArgumentConfig +func (b *FilterBuilder) Build() *graphql.ArgumentConfig { + return &graphql.ArgumentConfig{ + Description: descriptions.FetchWhereFilterFields, + Type: graphql.NewNonNull(graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sFetchObjectWhereInpObj", b.prefix), + Fields: b.fields(), + Description: descriptions.FetchWhereFilterFieldsInpObj, + }, + )), + } +} + +func (b *FilterBuilder) fields() graphql.InputObjectConfigFieldMap { + return graphql.InputObjectConfigFieldMap{ + "class": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(b.class()), + Description: descriptions.WhereClass, + }, + "properties": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(graphql.NewList(b.properties())), + Description: descriptions.WhereProperties, + }, + "first": &graphql.InputObjectFieldConfig{ + Type: graphql.Int, + Description: descriptions.First, + }, + } +} + +func (b *FilterBuilder) properties() *graphql.InputObject { + elements := common_filters.BuildNew(fmt.Sprintf("%sFetchObject", b.prefix)) + + // Remove path and operands fields as they are not required here + delete(elements, "path") + delete(elements, "operands") + + // make operator required + elements["operator"].Type = 
graphql.NewNonNull(elements["operator"].Type) + + elements["certainty"] = &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(graphql.Float), + Description: descriptions.WhereCertainty, + } + elements["name"] = &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(graphql.String), + Description: descriptions.WhereName, + } + elements["keywords"] = &graphql.InputObjectFieldConfig{ + Type: graphql.NewList(b.keywordInpObj(fmt.Sprintf("%sFetchObjectWhereProperties", b.prefix))), + Description: descriptions.WhereKeywords, + } + + networkFetchWhereInpObjPropertiesObj := graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sFetchObjectWhereInpObjProperties", b.prefix), + Fields: elements, + Description: descriptions.WhereProperties, + }, + ) + + return networkFetchWhereInpObjPropertiesObj +} + +func (b *FilterBuilder) keywordInpObj(prefix string) *graphql.InputObject { + return graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sKeywordsInpObj", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "value": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: descriptions.WhereKeywordsValue, + }, + "weight": &graphql.InputObjectFieldConfig{ + Type: graphql.Float, + Description: descriptions.WhereKeywordsWeight, + }, + }, + Description: descriptions.WhereKeywordsInpObj, + }, + ) +} + +func (b *FilterBuilder) class() *graphql.InputObject { + filterClassElements := graphql.InputObjectConfigFieldMap{ + "name": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: descriptions.WhereName, + }, + "certainty": &graphql.InputObjectFieldConfig{ + Type: graphql.Float, + Description: descriptions.WhereCertainty, + }, + "keywords": &graphql.InputObjectFieldConfig{ + Type: graphql.NewList(b.keywordInpObj(fmt.Sprintf("%sFetchObjectWhereClass", b.prefix))), + Description: descriptions.WhereKeywords, + }, + "first": &graphql.InputObjectFieldConfig{ + Type: graphql.Int, + Description: 
descriptions.First, + }, + } + + networkFetchWhereInpObjClassInpObj := graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sFetchObjectWhereInpObjClassInpObj", b.prefix), + Fields: filterClassElements, + Description: descriptions.WhereClass, + }, + ) + return networkFetchWhereInpObjClassInpObj +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/common/json_number.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/common/json_number.go new file mode 100644 index 0000000000000000000000000000000000000000..5b0a7ed623d0d840407a8b99f8529a93d57d82ee --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/common/json_number.go @@ -0,0 +1,60 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common + +import ( + "encoding/json" + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/entities/aggregation" +) + +// JSONNumberResolver turns json.Number types into number types usable by graphQL +func JSONNumberResolver(p graphql.ResolveParams) (interface{}, error) { + switch v := p.Source.(type) { + case map[string]interface{}: + field, ok := v[p.Info.FieldName] + if !ok { + return nil, nil + } + + switch n := field.(type) { + case json.Number: + return n.Float64() + case int64: + return float64(n), nil + case int: + return float64(n), nil + case float64: + return n, nil + } + + return nil, fmt.Errorf("unknown number type for %t", field) + + case map[string]float64: + return v[p.Info.FieldName], nil + + case aggregation.Text: + switch p.Info.FieldName { + // case "count": + // // TODO gh-974: Support Count in text aggregations + // return nil, nil + + default: + return nil, fmt.Errorf("fieldName '%s' does not match text 
aggregation", p.Info.FieldName) + } + + default: + return nil, fmt.Errorf("json number resolver: unusable type %T", p.Source) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/common/json_number_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/common/json_number_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f1611c255a5df00a319ac6488aa90ccfa9ea9257 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/common/json_number_test.go @@ -0,0 +1,79 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tailor-inc/graphql" +) + +type testCase struct { + input interface{} + expectedOutput float64 +} + +func TestJSONNumberResolver(t *testing.T) { + tests := []testCase{ + { + input: json.Number("10"), + expectedOutput: 10.0, + }, + { + input: int(10), + expectedOutput: 10.0, + }, + { + input: int64(10), + expectedOutput: 10.0, + }, + { + input: float64(10), + expectedOutput: 10.0, + }, + } + + for _, test := range tests { + name := fmt.Sprintf("%#v -> %#v", test.input, test.expectedOutput) + t.Run(name, func(t *testing.T) { + result, err := JSONNumberResolver(resolveParams(test.input)) + assert.Nil(t, err, "should not error") + assert.Equal(t, test.expectedOutput, result) + }) + } +} + +func resolveParams(input interface{}) graphql.ResolveParams { + return graphql.ResolveParams{ + Source: map[string]interface{}{ + "myField": input, + }, + Info: graphql.ResolveInfo{FieldName: "myField"}, + } +} + +func TestNumberFieldNotPresent(t *testing.T) { + // shouldn't return anything, but also not error. 
This can otherwise lead to + // odd behavior when no entries are present, yet we asked for int props and + // type, see https://github.com/weaviate/weaviate/issues/775 + params := graphql.ResolveParams{ + Source: map[string]interface{}{}, + Info: graphql.ResolveInfo{FieldName: "myField"}, + } + + result, err := JSONNumberResolver(params) + assert.Nil(t, err) + assert.Equal(t, nil, result) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/aggregate.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/aggregate.go new file mode 100644 index 0000000000000000000000000000000000000000..139edd48851ad8543f240fa3dac0a7c644307c32 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/aggregate.go @@ -0,0 +1,83 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Package descriptions provides the descriptions as used by the graphql endpoint for Weaviate +package descriptions + +// AGGREGATE +const ( + AggregateProperty = "Aggregate this property" + AggregateObjects = "Aggregate Objects on a local Weaviate" +) + +const GroupBy = "Specify which properties to group by" + +const ( + AggregatePropertyObject = "An object containing Aggregation information about this property" +) + +const AggregateObjectsObj = "An object allowing Aggregation of %ss on a local Weaviate" + +const ( + AggregateMean = "Aggregate on the mean of numeric property values" + AggregateSum = "Aggregate on the sum of numeric property values" + AggregateMedian = "Aggregate on the median of numeric property values" + AggregateMode = "Aggregate on the mode of numeric property values" + AggregateMin = "Aggregate on the minimum of numeric property values" + AggregateMax = "Aggregate on the maximum of numeric property values" + AggregateCount = "Aggregate on the total amount of found property values" + AggregateGroupedBy = "Indicates the group of returned data" +) + +const AggregateNumericObj = "An object containing the %s of numeric properties" + +const AggregateCountObj = "An object containing countable properties" + +const AggregateGroupedByObj = "An object containing the path and value of the grouped property" + +const ( + AggregateGroupedByGroupedByPath = "The path of the grouped property" + AggregateGroupedByGroupedByValue = "The value of the grouped property" +) + +// NETWORK +const NetworkAggregateWeaviateObj = "An object containing Get Objects fields for network Weaviate instance: " + +const NetworkAggregate = "Perform Aggregation of Objects" + +const ( + NetworkAggregateObj = "An object allowing Aggregation of Objects" + NetworkAggregatePropertyObject = "An object containing Aggregation information about this property" +) + +const NetworkAggregateThingsActionsObj = "An object allowing Aggregation of %ss on a 
network Weaviate" + +const ( + NetworkAggregateMean = "Aggregate on the mean of numeric property values" + NetworkAggregateSum = "Aggregate on the sum of numeric property values" + NetworkAggregateMedian = "Aggregate on the median of numeric property values" + NetworkAggregateMode = "Aggregate on the mode of numeric property values" + NetworkAggregateMin = "Aggregate on the minimum of numeric property values" + NetworkAggregateMax = "Aggregate on the maximum of numeric property values" + NetworkAggregateCount = "Aggregate on the total amount of found property values" + NetworkAggregateGroupedBy = "Indicates the group of returned data" +) + +const NetworkAggregateNumericObj = "An object containing the %s of numeric properties" + +const NetworkAggregateCountObj = "An object containing countable properties" + +const NetworkAggregateGroupedByObj = "An object containing the path and value of the grouped property" + +const ( + NetworkAggregateGroupedByGroupedByPath = "The path of the grouped property" + NetworkAggregateGroupedByGroupedByValue = "The value of the grouped property" +) diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/explore.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/explore.go new file mode 100644 index 0000000000000000000000000000000000000000..d78af4327865ff394b7c679c0c1b20bdec36474e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/explore.go @@ -0,0 +1,31 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Package descriptions provides the descriptions as used by the graphql endpoint for Weaviate +package descriptions + +const ( + LocalExplore = "Explore Concepts on a local weaviate with vector-aided search" + LocalExploreConcepts = "Explore Concepts on a local weaviate with vector-aided serach through keyword-based search terms" + VectorMovement = "Move your search term closer to or further away from another vector described by keywords" + Keywords = "Keywords are a list of search terms. Array type, e.g. [\"keyword 1\", \"keyword 2\"]" + Network = "Set to true, if the exploration should include remote peers" + Limit = "Limit the results set (usually fewer results mean faster queries)" + Offset = "Offset of the results set (usually fewer results mean faster queries)" + Certainty = "Normalized Distance between the result item and the search vector. Normalized to be between 0 (identical vectors) and 1 (perfect opposite)." + Distance = "The required degree of similarity between an object's characteristics and the provided filter values" + Vector = "Target vector to be used in kNN search" + Force = "The force to apply for a particular movements. 
Must be between 0 and 1 where 0 is equivalent to no movement and 1 is equivalent to largest movement possible" + ClassName = "Name of the Class" + ID = "Concept identifier in the uuid format" + Beacon = "Concept identifier in the beacon format, such as weaviate:////id" + Target = "Configure how multi target searches are combined" +) diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/fetch.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/fetch.go new file mode 100644 index 0000000000000000000000000000000000000000..535f217587f258f2f406a42660c42a213a6ee99d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/fetch.go @@ -0,0 +1,55 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Package descriptions provides the descriptions as used by the graphql endpoint for Weaviate +package descriptions + +// Local +const ( + LocalFetch = "Fetch Beacons that are similar to a specified concept from the Objects subsets on a Weaviate network" + LocalFetchObj = "An object used to perform a Fuzzy Fetch to search for Objects and Actions similar to a specified concept on a Weaviate network" +) + +const ( + LocalFetchObjects = "Perform a Fuzzy Fetch to Fetch Beacons similar to a specified concept on a Weaviate network from the Objects subset" + LocalFetchFuzzy = "Perform a Fuzzy Fetch to Fetch Beacons similar to a specified concept on a Weaviate network from both the Objects subsets" +) + +const ( + LocalFetchBeacon = "A Beacon result from a local Weaviate Local Fetch query" + LocalFetchClassName = "The class name of the result from a local Weaviate Local Fetch query" + LocalFetchCertainty = "The degree of similarity on a scale of 0-1 
between the Beacon's characteristics and the provided concept" + LocalFetchActionsObj = "An object used to Fetch Beacons from the Actions subset of the dataset" +) + +const ( + LocalFetchFuzzyBeacon = "A Beacon result from a local Weaviate Fetch Fuzzy query from both the Objects subsets" + LocalFetchFuzzyClassName = "Class name of the result from a local Weaviate Fetch Fuzzy query from both the Objects subsets" + LocalFetchFuzzyCertainty = "The degree of similarity on a scale of 0-1 between the Beacon's characteristics and the provided concept" + LocalFetchFuzzyObj = "An object used to Fetch Beacons from both the Objects subsets" +) + +// NETWORK +const ( + NetworkFetch = "Fetch Beacons that are similar to a specified concept from the Objects subsets on a Weaviate network" + NetworkFetchObj = "An object used to perform a Fuzzy Fetch to search for Objects similar to a specified concept on a Weaviate network" +) + +const ( + NetworkFetchFuzzy = "Perform a Fuzzy Fetch to Fetch Beacons similar to a specified concept on a Weaviate network from both the Objects subsets" +) + +const ( + NetworkFetchFuzzyClassName = "The class name of the result from a network Weaviate Fetch Fuzzy query from both the Objects subsets" + NetworkFetchFuzzyBeacon = "A Beacon result from a network Weaviate Fetch Fuzzy query from both the Objects subsets" + NetworkFetchFuzzyCertainty = "The degree of similarity on a scale of 0-1 between the Beacon's characteristics and the provided concept" + NetworkFetchFuzzyObj = "An object used to Fetch Beacons from both the Objects subsets" +) diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/filters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/filters.go new file mode 100644 index 0000000000000000000000000000000000000000..b339dc0398147a884bce896c2bf6f90d04245ece --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/filters.go @@ -0,0 +1,154 
@@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Package descriptions provides the descriptions as used by the graphql endpoint for Weaviate +package descriptions + +// Where filter elements +const ( + GetWhere = "Filter options for a local Get query, used to convert the result to the specified filters" + GetWhereInpObj = "An object containing filter options for a local Get query, used to convert the result to the specified filters" +) + +const ( + LocalMetaWhere = "Filter options for a local Meta query, used to convert the result to the specified filters" + LocalMetaWhereInpObj = "An object containing filter options for a local Meta query, used to convert the result to the specified filters" +) + +const ( + AggregateWhere = "Filter options for a local Aggregate query, used to convert the result to the specified filters" + AggregateWhereInpObj = "An object containing filter options for a local Aggregate query, used to convert the result to the specified filters" +) + +const ( + NetworkGetWhere = "Filter options for a network Get query, used to convert the result to the specified filters" + NetworkGetWhereInpObj = "An object containing filter options for a network Get query, used to convert the result to the specified filters" +) + +const ( + NetworkMetaWhere = "Filter options for a network Meta query, used to convert the result to the specified filters" + NetworkMetaWhereInpObj = "An object containing filter options for a network Meta query, used to convert the result to the specified filters" +) + +const ( + NetworkAggregateWhere = "Filter options for a network Aggregate query, used to convert the result to the specified filters" + NetworkAggregateWhereInpObj = "An object containing filter options for a network Aggregate 
query, used to convert the result to the specified filters" +) + +const ( + WhereOperands = "Contains the Operands that can be applied to a 'where' filter" + WhereOperandsInpObj = "An object containing the Operands that can be applied to a 'where' filter" +) + +const ( + WhereOperator = "Contains the Operators that can be applied to a 'where' filter" + WhereOperatorEnum = "An object containing the Operators that can be applied to a 'where' filter" +) + +const WherePath = "Specify the path from the Objects fields to the property name (e.g. ['Things', 'City', 'population'] leads to the 'population' property of a 'City' object)" + +const ( + WhereValueInt = "Specify an Integer value that the target property will be compared to" + WhereValueNumber = "Specify a Float value that the target property will be compared to" + WhereValueBoolean = "Specify a Boolean value that the target property will be compared to" + WhereValueString = "Specify a String value that the target property will be compared to" + WhereValueRange = "Specify both geo-coordinates (latitude and longitude as decimals) and a maximum distance from the described coordinates. The search will return any result which is located less than or equal to the specified maximum distance in km away from the specified point." + WhereValueRangeGeoCoordinates = "The geoCoordinates that form the center point of the search." + WhereValueRangeGeoCoordinatesLatitude = "The latitude (in decimal format) of the geoCoordinates to search around." + WhereValueRangeGeoCoordinatesLongitude = "The longitude (in decimal format) of the geoCoordinates to search around." + WhereValueRangeDistance = "The distance from the point specified via geoCoordinates." + WhereValueRangeDistanceMax = "The maximum distance from the point specified geoCoordinates." 
+ WhereValueText = "Specify a Text value that the target property will be compared to" + WhereValueDate = "Specify a Date value that the target property will be compared to" +) + +// Properties and Classes filter elements (used by Fetch and Introspect Where filters) +const ( + WhereProperties = "Specify which properties to filter on" + WherePropertiesObj = "Specify which properties to filter on" +) + +const ( + WherePropertiesPropertyName = "Specify which property name to filter properties on" + WhereCertainty = "Specify the required degree of similarity between an object's characteristics and the provided filter values on a scale of 0-1" + WhereName = "Specify the name of the property to filter on" +) + +const ( + WhereKeywords = "Specify which keywords to filter on" + WhereKeywordsInpObj = "Specify the value and the weight of a keyword" +) + +const ( + WhereKeywordsValue = "Specify the value of the keyword" + WhereKeywordsWeight = "Specify the weight of the keyword" +) + +const ( + WhereClass = "Specify which classes to filter on" + WhereInpObj = "Specify which classes and properties to filter on" +) + +// Unique Fetch filter elements +const ( + FetchWhereFilterFields = "An object containing filter options for a network Fetch search, used to convert the result to the specified filters" + FetchWhereFilterFieldsInpObj = "Filter options for a network Fetch search, used to convert the result to the specified filters" +) + +const ( + FetchFuzzyValue = "Specify the concept that will be used to fetch Objects on the network (e.g. 
'Airplane', or 'City')" + FetchFuzzyCertainty = "Specify how much a Beacon's characteristics must match the provided concept on a scale of 0 to 1" +) + +// Unique Introspect filter elements +const ( + IntrospectWhereFilterFields = "An object containing filter options for a network Fetch search, used to convert the result to the specified filters" + IntrospectWhereFilterFieldsInpObj = "Filter options for a network Fetch search, used to convert the result to the specified filters" + IntrospectBeaconId = "The id of the Beacon" +) + +// GroupBy filter elements +const ( + GroupByGroup = "Specify the property of the class to group by" + GroupByCount = "Get the number of instances of a property in a group" + GroupBySum = "Get the sum of the values of a property in a group" + GroupByMin = "Get the minimum occurring value of a property in a group" + GroupByMax = "Get the maximum occurring value of a property in a group" + GroupByMean = "Get the mean value of a property in a group" + GroupByMedian = "Get the median of a property in a group" + GroupByMode = "Get the mode of a property in a group" +) + +// Request timeout filter elements +const NetworkTimeout = "Specify the time in seconds after which an unresolved request automatically fails" + +// Pagination filter elements +const ( + First = "Show the first x results (pagination option)" + After = "Show the results after the first x results (pagination option)" +) + +// Cursor API +const ( + AfterID = "Show the results after a given ID" +) + +const ( + SortPath = "Specify the path from the Objects fields to the property name (e.g. ['Get', 'City', 'population'] leads to the 'population' property of a 'City' object)" + SortOrder = "Specify the sort order, either ascending (asc) which is default or descending (desc)" +) + +const ( + GroupByFilter = "Specify the property of the class to group by" + GroupByPath = "Specify the path from the objects fields to the property name (e.g. 
['Things', 'City', 'population'] leads to the 'population' property of a 'City' object)" + GroupByGroups = "Specify the number of groups to be created" + GroupByObjectsPerGroup = "Specify the number of max objects in group" +) diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/get.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/get.go new file mode 100644 index 0000000000000000000000000000000000000000..d10aedadcf860eb0b6b255ac96e342f124ea2369 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/get.go @@ -0,0 +1,50 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Package descriptions provides the descriptions as used by the graphql endpoint for Weaviate +package descriptions + +// Local +const ( + GetObjects = "Get Objects on a local Weaviate" +) + +const ( + GetObj = "An object used to Get Objects on a local Weaviate" + Get = "Get Objects on a local Weaviate" +) + +const GetObjectsActionsObj = "An object used to get %ss on a local Weaviate" + +const GetClassUUID = "The UUID of a Object, assigned by its local Weaviate" + +// Network +const ( + NetworkGet = "Get Objects from a Weaviate in a network" + NetworkGetObj = "An object used to Get Objects from a Weaviate in a network" +) + +const NetworkGetWeaviateObj = "An object containing Get Objects fields for network Weaviate instance: " + +const ( + NetworkGetObjects = "Get Objects from a Weaviate in a network" +) + +const ( + NetworkGetObjectsObj = "An object containing the Objects objects on this network Weaviate instance." 
+) + +const NetworkGetClassUUID = "The UUID of a Object, assigned by the Weaviate network" // TODO check this with @lauraham + +const ConsistencyLevel = "Determines how many replicas must acknowledge a request " + + "before it is considered successful. Can be 'ONE', 'QUORUM', or 'ALL'" + +const Tenant = "The value by which a tenant is identified, specified in the class schema" diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/getMeta.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/getMeta.go new file mode 100644 index 0000000000000000000000000000000000000000..f6267e0f3ba5aa79383355579c0e10e00957b369 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/getMeta.go @@ -0,0 +1,88 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Package descriptions provides the descriptions as used by the graphql endpoint for Weaviate +package descriptions + +// Local +const ( + LocalMetaObj = "An object used to Get Meta information about Objects on a local Weaviate" + LocalMeta = "Get Meta information about Objects on a local Weaviate" +) + +const ( + MetaPropertyType = "The datatype of this property" + MetaPropertyCount = "The total amount of found instances for this property" // TODO check this with @lauraham + MetaPropertyTopOccurrences = "An object containing data about the most frequently occurring values for this property" + MetaPropertyTopOccurrencesValue = "The most frequently occurring value for this property" + MetaPropertyTopOccurrencesOccurs = "How often the most frequently occurring value for this property occurs" // TODO check this with @lauraham + MetaPropertyMinimum = "The minimum value for this property" + MetaPropertyMaximum = "The maximum value for this property" + MetaPropertyMean = "The mean of all values for this property" + MetaPropertySum = "The sum of all values for this property" + MetaPropertyObject = "An object containing meta information about this property" +) + +const ( + AggregatePropertyType = "The datatype of this property" + AggregatePropertyCount = "The total amount of found instances for this property" // TODO check this with @lauraham + AggregatePropertyTopOccurrences = "An object containing data about the most frequently occurring values for this property" + AggregatePropertyTopOccurrencesValue = "The most frequently occurring value for this property" + AggregatePropertyTopOccurrencesOccurs = "How often the most frequently occurring value for this property occurs" // TODO check this with @lauraham + AggregatePropertyMinimum = "The minimum value for this property" + AggregatePropertyMaximum = "The maximum value for this property" + AggregatePropertyMean = "The mean of all values for this property" + AggregatePropertySum = 
"The sum of all values for this property" +) + +// Network +const ( + NetworkMeta = "Get meta information about Objects from a Weaviate in a network" + NetworkMetaObj = "An object used to Get meta information about Objects from a Weaviate in a network" + NetworkMetaWeaviateObj = "An object containing the Meta Objects fields for network Weaviate instance: " +) + +const ( + MetaMetaProperty = "Meta information about the object" + MetaProperty = "Meta information about the property " +) + +const ( + MetaClassPropertyTotalTrue = "How often this boolean property's value is true in the dataset" + MetaClassPropertyPercentageTrue = "The percentage of true values for this boolean property in the dataset" +) + +const ( + MetaClassPropertyTotalFalse = "How often this boolean property's value is false in the dataset" + MetaClassPropertyPercentageFalse = "The percentage of false values for this boolean property in the dataset" +) + +const ( + MetaClassPropertyPointingTo = "The classes that this object contains a reference to" + MetaClassMetaCount = "The total amount of found instances for a class" + MetaClassMetaObj = "An object containing Meta information about a class" +) + +const ( + AggregateClassPropertyTotalTrue = "How often this boolean property's value is true in the dataset" + AggregateClassPropertyPercentageTrue = "The percentage of true values for this boolean property in the dataset" +) + +const ( + AggregateClassPropertyTotalFalse = "How often this boolean property's value is false in the dataset" + AggregateClassPropertyPercentageFalse = "The percentage of false values for this boolean property in the dataset" +) + +const ( + AggregateClassPropertyPointingTo = "The classes that this object contains a reference to" + AggregateClassAggregateCount = "The total amount of found instances for a class" + AggregateClassAggregateObj = "An object containing Aggregate information about a class" +) diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/introspect.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/introspect.go new file mode 100644 index 0000000000000000000000000000000000000000..11cc5d55c56d55f6a5601509db57ebf915bf4963 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/introspect.go @@ -0,0 +1,30 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Package descriptions provides the descriptions as used by the graphql endpoint for Weaviate +package descriptions + +// NETWORK +const ( + NetworkIntrospect = "Get Introspection information about Objects and/or Beacons in a Weaviate network" + NetworkIntrospectObj = "An object used to perform an Introspection query on a Weaviate network" +) + +const ( + NetworkIntrospectWeaviate = "The Weaviate instance that the current Object or Beacon belongs to" + NetworkIntrospectClassName = "The name of the current Object or Beacon's class" + NetworkIntrospectCertainty = "The degree of similarity between a(n) Object or Beacon and the filter input" +) + +const ( + NetworkIntrospectBeaconProperties = "The properties of a Beacon" + NetworkIntrospectBeaconPropertiesPropertyName = "The names of the properties of a Beacon" +) diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/merge.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/merge.go new file mode 100644 index 0000000000000000000000000000000000000000..c4fedce3ad85ac232be210bfe01bc5050ef75508 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/merge.go @@ -0,0 +1,31 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// 
\ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Package descriptions provides the descriptions as used by the graphql endpoint for Weaviate +package descriptions + +// Local +const ( + LocalMergeObj = "An object used to Merge Objects on a local Weaviate" + LocalMerge = "Merge Objects on a local Weaviate" +) + +const LocalMergeClassUUID = "The UUID of a Object, assigned by its local Weaviate" + +// Network +const ( + NetworkMerge = "Merge Objects from a Weaviate in a network" + NetworkMergeObj = "An object used to Merge Objects from a Weaviate in a network" +) + +const NetworkMergeWeaviateObj = "An object containing Merge Objects fields for network Weaviate instance: " + +const NetworkMergeClassUUID = "The UUID of a Thing or Action, assigned by the Weaviate network" // TODO check this with @lauraham diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/rootQuery.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/rootQuery.go new file mode 100644 index 0000000000000000000000000000000000000000..5689eee4d456550a5835feec7ebc1c4f7076baf4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/descriptions/rootQuery.go @@ -0,0 +1,28 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Package descriptions provides the descriptions as used by the graphql endpoint for Weaviate +package descriptions + +// ROOT +const ( + WeaviateObj = "The location of the root query" + WeaviateNetwork = "Query a Weaviate network" +) + +// LOCAL +const LocalObj = "A query on a local Weaviate" + +// NETWORK +const ( + NetworkWeaviate = "An object for the network Weaviate instance: " + NetworkObj = "An object used to perform queries on a Weaviate network" +) diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/graphiql/graphiql.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/graphiql/graphiql.go new file mode 100644 index 0000000000000000000000000000000000000000..d7b012555aa40729edd90b60412484043a2fbcd5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/graphiql/graphiql.go @@ -0,0 +1,234 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Based on `graphiql.go` from https://github.com/graphql-go/handler +// only made RenderGraphiQL a public function. 
+package graphiql + +import ( + "encoding/json" + "html/template" + "net/http" + "strings" +) + +// graphiqlVersion is the current version of GraphiQL +const graphiqlVersion = "0.11.11" + +// graphiqlData is the page data structure of the rendered GraphiQL page +type graphiqlData struct { + GraphiqlVersion string + QueryString string + Variables string + OperationName string + AuthKey string + AuthToken string +} + +func AddMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.URL.Path, "/v1/graphql") && r.Method == http.MethodGet { + renderGraphiQL(w, r) + } else { + next.ServeHTTP(w, r) + } + }) +} + +// renderGraphiQL renders the GraphiQL GUI +func renderGraphiQL(w http.ResponseWriter, r *http.Request) { + w.Header().Set("WWW-Authenticate", `Basic realm="Provide your key and token (as username as password respectively)"`) + + user, password, authOk := r.BasicAuth() + if !authOk { + http.Error(w, "Not authorized", http.StatusUnauthorized) + return + } + + queryParams := r.URL.Query() + + t := template.New("GraphiQL") + t, err := t.Parse(graphiqlTemplate) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // Attempt to deserialize the 'variables' query key to something reasonable. 
+ var queryVars interface{} + err = json.Unmarshal([]byte(queryParams.Get("variables")), &queryVars) + + var varsString string + if err == nil { + vars, err := json.MarshalIndent(queryVars, "", " ") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + varsString = string(vars) + if varsString == "null" { + varsString = "" + } + } + + // Create result string + d := graphiqlData{ + GraphiqlVersion: graphiqlVersion, + QueryString: queryParams.Get("query"), + Variables: varsString, + OperationName: queryParams.Get("operationName"), + AuthKey: user, + AuthToken: password, + } + err = t.ExecuteTemplate(w, "index", d) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +// tmpl is the page template to render GraphiQL +const graphiqlTemplate = ` +{{ define "index" }} + + + + + + GraphiQL + + + + + + + + + + + +
    Loading...
    + + + +{{ end }} +` diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/aggregate.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/aggregate.go new file mode 100644 index 0000000000000000000000000000000000000000..0bea1560b64e8ee7e08485d4c7c78c6888f22d3c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/aggregate.go @@ -0,0 +1,291 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package aggregate + +import ( + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + "github.com/weaviate/weaviate/adapters/handlers/graphql/utils" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/config" +) + +type ModulesProvider interface { + AggregateArguments(class *models.Class) map[string]*graphql.ArgumentConfig + ExtractSearchParams(arguments map[string]interface{}, className string) (map[string]interface{}, map[string]*dto.TargetCombination) +} + +// Build the Aggregate Kinds schema +func Build(dbSchema *schema.SchemaWithAliases, config config.Config, + modulesProvider ModulesProvider, authorizer authorization.Authorizer, +) (*graphql.Field, error) { + if len(dbSchema.Objects.Classes) == 0 { + return nil, utils.ErrEmptySchema + } + + var err error + var localAggregateObjects *graphql.Object + if len(dbSchema.Objects.Classes) > 0 { + 
localAggregateObjects, err = classFields(dbSchema, config, modulesProvider, authorizer) + if err != nil { + return nil, err + } + } + + field := graphql.Field{ + Name: "Aggregate", + Description: descriptions.AggregateWhere, + Type: localAggregateObjects, + Resolve: passThroughResolver, + } + + return &field, nil +} + +func classFields(databaseSchema *schema.SchemaWithAliases, + config config.Config, modulesProvider ModulesProvider, authorizer authorization.Authorizer, +) (*graphql.Object, error) { + fields := graphql.Fields{} + + for _, class := range databaseSchema.Objects.Classes { + field, err := classField(class, class.Description, config, modulesProvider, authorizer) + if err != nil { + return nil, err + } + + fields[class.Class] = field + } + + // Make aliases available in GQL schema + for alias, aliasedClassName := range databaseSchema.Aliases { + field, ok := fields[aliasedClassName] + if ok { + fields[alias] = field + } + } + + return graphql.NewObject(graphql.ObjectConfig{ + Name: "AggregateObjectsObj", + Fields: fields, + Description: descriptions.AggregateObjectsObj, + }), nil +} + +func classField(class *models.Class, description string, + config config.Config, modulesProvider ModulesProvider, authorizer authorization.Authorizer, +) (*graphql.Field, error) { + metaClassName := fmt.Sprintf("Aggregate%s", class.Class) + + fields := graphql.ObjectConfig{ + Name: metaClassName, + Fields: (graphql.FieldsThunk)(func() graphql.Fields { + fields, err := classPropertyFields(class) + if err != nil { + // we cannot return an error in this FieldsThunk and have to panic unfortunately + panic(fmt.Sprintf("Failed to assemble single Local Aggregate Class field: %s", err)) + } + + return fields + }), + Description: description, + } + + fieldsObject := graphql.NewObject(fields) + fieldsField := &graphql.Field{ + Type: graphql.NewList(fieldsObject), + Description: description, + Args: graphql.FieldConfigArgument{ + "limit": &graphql.ArgumentConfig{ + Description: 
descriptions.First, + Type: graphql.Int, + }, + "where": &graphql.ArgumentConfig{ + Description: descriptions.GetWhere, + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("AggregateObjects%sWhereInpObj", class.Class), + Fields: common_filters.BuildNew(fmt.Sprintf("AggregateObjects%s", class.Class)), + Description: descriptions.GetWhereInpObj, + }, + ), + }, + "groupBy": &graphql.ArgumentConfig{ + Description: descriptions.GroupBy, + Type: graphql.NewList(graphql.String), + }, + "nearVector": nearVectorArgument(class.Class), + "nearObject": nearObjectArgument(class.Class), + "objectLimit": &graphql.ArgumentConfig{ + Description: descriptions.First, + Type: graphql.Int, + }, + "hybrid": hybridArgument(fieldsObject, class, modulesProvider), + }, + Resolve: makeResolveClass(authorizer, modulesProvider, class), + } + + if modulesProvider != nil { + for name, argument := range modulesProvider.AggregateArguments(class) { + fieldsField.Args[name] = argument + } + } + + if schema.MultiTenancyEnabled(class) { + fieldsField.Args["tenant"] = tenantArgument() + } + + return fieldsField, nil +} + +func classPropertyFields(class *models.Class) (graphql.Fields, error) { + fields := graphql.Fields{} + for _, property := range class.Properties { + propertyType, err := schema.GetPropertyDataType(class, property.Name) + if err != nil { + return nil, fmt.Errorf("%s.%s: %w", class.Class, property.Name, err) + } + + convertedDataType, err := classPropertyField(*propertyType, class, property) + if err != nil { + return nil, err + } + + fields[property.Name] = convertedDataType + } + + // Special case: meta { count } appended to all regular props + fields["meta"] = &graphql.Field{ + Description: descriptions.LocalMetaObj, + Type: metaObject(fmt.Sprintf("Aggregate%s", class.Class)), + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + // pass-through + return p.Source, nil + }, + } + + // Always append Grouped By field + fields["groupedBy"] = 
&graphql.Field{ + Description: descriptions.AggregateGroupedBy, + Type: groupedByProperty(class), + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + switch typed := p.Source.(type) { + case aggregation.Group: + return typed.GroupedBy, nil + case map[string]interface{}: + return typed["groupedBy"], nil + default: + return nil, fmt.Errorf("groupedBy: unsupported type %T", p.Source) + } + }, + } + + return fields, nil +} + +func metaObject(prefix string) *graphql.Object { + return graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sMetaObject", prefix), + Fields: graphql.Fields{ + "count": &graphql.Field{ + Type: graphql.Int, + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + group, ok := p.Source.(aggregation.Group) + if !ok { + return nil, fmt.Errorf("meta count: expected aggregation.Group, got %T", p.Source) + } + + return group.Count, nil + }, + }, + }, + }) +} + +func classPropertyField(dataType schema.DataType, class *models.Class, property *models.Property) (*graphql.Field, error) { + switch dataType { + case schema.DataTypeText: + return makePropertyField(class, property, stringPropertyFields) + case schema.DataTypeInt: + return makePropertyField(class, property, numericPropertyFields) + case schema.DataTypeNumber: + return makePropertyField(class, property, numericPropertyFields) + case schema.DataTypeBoolean: + return makePropertyField(class, property, booleanPropertyFields) + case schema.DataTypeDate: + return makePropertyField(class, property, datePropertyFields) + case schema.DataTypeCRef: + return makePropertyField(class, property, referencePropertyFields) + case schema.DataTypeGeoCoordinates: + // simply skip for now, see gh-729 + return nil, nil + case schema.DataTypePhoneNumber: + // skipping for now, see gh-1088 where it was outscoped + return nil, nil + case schema.DataTypeBlob: + return makePropertyField(class, property, stringPropertyFields) + case schema.DataTypeTextArray: + return makePropertyField(class, 
property, stringPropertyFields) + case schema.DataTypeIntArray, schema.DataTypeNumberArray: + return makePropertyField(class, property, numericPropertyFields) + case schema.DataTypeBooleanArray: + return makePropertyField(class, property, booleanPropertyFields) + case schema.DataTypeDateArray: + return makePropertyField(class, property, datePropertyFields) + case schema.DataTypeUUID, schema.DataTypeUUIDArray: + // not aggregatable + return nil, nil + case schema.DataTypeObject, schema.DataTypeObjectArray: + // TODO: check if it's aggregable, skip for now + return nil, nil + default: + return nil, fmt.Errorf(schema.ErrorNoSuchDatatype+": %s", dataType) + } +} + +type propertyFieldMaker func(class *models.Class, + property *models.Property, prefix string) *graphql.Object + +func makePropertyField(class *models.Class, property *models.Property, + fieldMaker propertyFieldMaker, +) (*graphql.Field, error) { + prefix := "Aggregate" + return &graphql.Field{ + Description: fmt.Sprintf(`%s"%s"`, descriptions.AggregateProperty, property.Name), + Type: fieldMaker(class, property, prefix), + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + switch typed := p.Source.(type) { + case aggregation.Group: + res, ok := typed.Properties[property.Name] + if !ok { + return nil, fmt.Errorf("missing property '%s'", property.Name) + } + + return res, nil + + default: + return nil, fmt.Errorf("property %s, unsupported type %T", property.Name, p.Source) + } + }, + }, nil +} + +func passThroughResolver(p graphql.ResolveParams) (interface{}, error) { + // bubble up root resolver + return p.Source, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/explore_argument.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/explore_argument.go new file mode 100644 index 0000000000000000000000000000000000000000..988b9e30c047e28ce4b2bdc593b2f0ed485dc7a7 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/explore_argument.go @@ -0,0 +1,25 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package aggregate + +import ( + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" +) + +func nearVectorArgument(className string) *graphql.ArgumentConfig { + return common_filters.NearVectorArgument("AggregateObjects", className, false) +} + +func nearObjectArgument(className string) *graphql.ArgumentConfig { + return common_filters.NearObjectArgument("AggregateObjects", className, false) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/helpers_for_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/helpers_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6cc870d83b2a011186246f1080cbbf5710d21bc5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/helpers_for_test.go @@ -0,0 +1,70 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregate + +import ( + "context" + "fmt" + + testhelper "github.com/weaviate/weaviate/adapters/handlers/graphql/test/helper" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/config" +) + +type mockRequestsLog struct{} + +func (m *mockRequestsLog) Register(first string, second string) { +} + +type mockResolver struct { + testhelper.MockResolver +} + +type mockAuthorizer struct{} + +func (m *mockAuthorizer) Authorize(ctx context.Context, principal *models.Principal, action string, resource ...string) error { + return nil +} + +func (m *mockAuthorizer) AuthorizeSilent(ctx context.Context, principal *models.Principal, action string, resource ...string) error { + return nil +} + +func (m *mockAuthorizer) FilterAuthorizedResources(ctx context.Context, principal *models.Principal, verb string, resources ...string) ([]string, error) { + return resources, nil +} + +func newMockResolver(cfg config.Config) *mockResolver { + field, err := Build(&testhelper.CarSchema, cfg, nil, &mockAuthorizer{}) + if err != nil { + panic(fmt.Sprintf("could not build graphql test schema: %s", err)) + } + mockLog := &mockRequestsLog{} + mocker := &mockResolver{} + mocker.RootFieldName = "Aggregate" + mocker.RootField = field + mocker.RootObject = map[string]interface{}{ + "Resolver": Resolver(mocker), + "RequestsLog": mockLog, + "Config": cfg, + } + + return mocker +} + +func (m *mockResolver) Aggregate(ctx context.Context, principal *models.Principal, + params *aggregation.Params, +) (interface{}, error) { + args := m.Called(params) + return args.Get(0), args.Error(1) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/hybrid_search.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/hybrid_search.go new file mode 100644 index 
0000000000000000000000000000000000000000..52bc6fdeffe309ec4e9b4339e6d376830a4ece2b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/hybrid_search.go @@ -0,0 +1,217 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package aggregate + +import ( + "fmt" + "os" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + "github.com/weaviate/weaviate/entities/models" + + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" +) + +func hybridArgument(classObject *graphql.Object, + class *models.Class, modulesProvider ModulesProvider, +) *graphql.ArgumentConfig { + prefix := fmt.Sprintf("AggregateObjects%s", class.Class) + return &graphql.ArgumentConfig{ + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sHybridInpObj", prefix), + Fields: hybridOperands(classObject, class, modulesProvider), + Description: "Hybrid search", + }, + ), + } +} + +func hybridOperands(classObject *graphql.Object, + class *models.Class, modulesProvider ModulesProvider, +) graphql.InputObjectConfigFieldMap { + ss := graphql.NewInputObject(graphql.InputObjectConfig{ + Name: class.Class + "HybridSubSearch", + Fields: hybridSubSearch(classObject, class, modulesProvider), + }) + prefixName := class.Class + "HybridSubSearch" + searchesPrefixName := prefixName + "Searches" + fieldMap := graphql.InputObjectConfigFieldMap{ + "query": &graphql.InputObjectFieldConfig{ + Description: "Query string", + Type: graphql.String, + }, + "alpha": &graphql.InputObjectFieldConfig{ + Description: "Search weight", + Type: graphql.Float, + }, + "maxVectorDistance": &graphql.InputObjectFieldConfig{ + Description: "Removes all 
results that have a vector distance larger than the given value", + Type: graphql.Float, + }, + "vector": &graphql.InputObjectFieldConfig{ + Description: "Vector search", + Type: common_filters.Vector(prefixName), + }, + "targetVectors": &graphql.InputObjectFieldConfig{ + Description: "Target vectors", + Type: graphql.NewList(graphql.String), + }, + "properties": &graphql.InputObjectFieldConfig{ + Description: "Properties to search", + Type: graphql.NewList(graphql.String), + }, + "bm25SearchOperator": common_filters.GenerateBM25SearchOperatorFields(prefixName), + "searches": &graphql.InputObjectFieldConfig{ + Description: "Subsearch list", + Type: graphql.NewList(graphql.NewInputObject( + graphql.InputObjectConfig{ + Description: "Subsearch list", + Name: fmt.Sprintf("%sSearchesInpObj", searchesPrefixName), + Fields: (func() graphql.InputObjectConfigFieldMap { + subSearchFields := make(graphql.InputObjectConfigFieldMap) + fieldMap := graphql.InputObjectConfigFieldMap{ + "nearText": &graphql.InputObjectFieldConfig{ + Description: "nearText element", + + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sNearTextInpObj", searchesPrefixName), + Fields: nearTextFields(searchesPrefixName), + Description: "Near text search", + }, + ), + }, + "nearVector": &graphql.InputObjectFieldConfig{ + Description: "nearVector element", + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sNearVectorInpObj", searchesPrefixName), + Description: "Near vector search", + Fields: common_filters.NearVectorFields(searchesPrefixName, false), + }, + ), + }, + } + for key, fieldConfig := range fieldMap { + subSearchFields[key] = fieldConfig + } + return subSearchFields + })(), + }, + )), + }, + } + + if os.Getenv("ENABLE_EXPERIMENTAL_HYBRID_OPERANDS") != "" { + fieldMap["operands"] = &graphql.InputObjectFieldConfig{ + Description: "Subsearch list", + Type: graphql.NewList(ss), + } + } + + return fieldMap +} + +func 
hybridSubSearch(classObject *graphql.Object, + class *models.Class, modulesProvider ModulesProvider, +) graphql.InputObjectConfigFieldMap { + prefixName := class.Class + "SubSearch" + + return graphql.InputObjectConfigFieldMap{ + "weight": &graphql.InputObjectFieldConfig{ + Description: "weight, 0 to 1", + Type: graphql.Float, + }, + "sparseSearch": &graphql.InputObjectFieldConfig{ + Description: "Sparse Search", + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sHybridAggregateBM25InpObj", prefixName), + Fields: bm25Fields(prefixName), + Description: "BM25f search", + }, + ), + }, + } +} + +func nearTextFields(prefix string) graphql.InputObjectConfigFieldMap { + nearTextFields := graphql.InputObjectConfigFieldMap{ + "concepts": &graphql.InputObjectFieldConfig{ + // Description: descriptions.Concepts, + Type: graphql.NewNonNull(graphql.NewList(graphql.String)), + }, + "moveTo": &graphql.InputObjectFieldConfig{ + Description: descriptions.VectorMovement, + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMoveTo", prefix), + Fields: movementInp(fmt.Sprintf("%sMoveTo", prefix)), + }), + }, + "certainty": &graphql.InputObjectFieldConfig{ + Description: descriptions.Certainty, + Type: graphql.Float, + }, + "distance": &graphql.InputObjectFieldConfig{ + Description: descriptions.Distance, + Type: graphql.Float, + }, + "moveAwayFrom": &graphql.InputObjectFieldConfig{ + Description: descriptions.VectorMovement, + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMoveAwayFrom", prefix), + Fields: movementInp(fmt.Sprintf("%sMoveAwayFrom", prefix)), + }), + }, + } + return nearTextFields +} + +func movementInp(prefix string) graphql.InputObjectConfigFieldMap { + return graphql.InputObjectConfigFieldMap{ + "concepts": &graphql.InputObjectFieldConfig{ + Description: descriptions.Keywords, + Type: graphql.NewList(graphql.String), + }, + "objects": &graphql.InputObjectFieldConfig{ + 
Description: "objects", + Type: graphql.NewList(objectsInpObj(prefix)), + }, + "force": &graphql.InputObjectFieldConfig{ + Description: descriptions.Force, + Type: graphql.NewNonNull(graphql.Float), + }, + } +} + +func objectsInpObj(prefix string) *graphql.InputObject { + return graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMovementObjectsInpObj", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "id": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: "id of an object", + }, + "beacon": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: descriptions.Beacon, + }, + }, + Description: "Movement Object", + }, + ) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/multi_tenancy.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/multi_tenancy.go new file mode 100644 index 0000000000000000000000000000000000000000..aece0ccf11fc9eb2b462804335e2e4ef21029985 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/multi_tenancy.go @@ -0,0 +1,24 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregate + +import ( + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" +) + +func tenantArgument() *graphql.ArgumentConfig { + return &graphql.ArgumentConfig{ + Description: descriptions.Tenant, + Type: graphql.String, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/properties.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/properties.go new file mode 100644 index 0000000000000000000000000000000000000000..ff87ea3a67c4b40351e545866da03013842a9d44 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/properties.go @@ -0,0 +1,478 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregate + +import ( + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/models" +) + +func numericPropertyFields(class *models.Class, property *models.Property, prefix string) *graphql.Object { + getMetaIntFields := graphql.Fields{ + "sum": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sSum", prefix, class.Class, property.Name), + Description: descriptions.AggregateSum, + Type: graphql.Float, + Resolve: makeResolveNumericFieldAggregator("sum"), + }, + "minimum": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sMinimum", prefix, class.Class, property.Name), + Description: descriptions.AggregateMin, + Type: graphql.Float, + Resolve: makeResolveNumericFieldAggregator("minimum"), + }, + "maximum": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sMaximum", prefix, class.Class, property.Name), + Description: descriptions.AggregateMax, + Type: graphql.Float, + Resolve: makeResolveNumericFieldAggregator("maximum"), + }, + "mean": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sMean", prefix, class.Class, property.Name), + Description: descriptions.AggregateMean, + Type: graphql.Float, + Resolve: makeResolveNumericFieldAggregator("mean"), + }, + "mode": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sMode", prefix, class.Class, property.Name), + Description: descriptions.AggregateMode, + Type: graphql.Float, + Resolve: makeResolveNumericFieldAggregator("mode"), + }, + "median": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sMedian", prefix, class.Class, property.Name), + Description: descriptions.AggregateMedian, + Type: graphql.Float, + Resolve: makeResolveNumericFieldAggregator("median"), + }, + "count": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sCount", prefix, class.Class, property.Name), + Description: descriptions.AggregateCount, + Type: graphql.Int, + Resolve: 
makeResolveNumericFieldAggregator("count"), + }, + "type": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sType", prefix, class.Class, property.Name), + Description: descriptions.AggregateCount, + Type: graphql.String, + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + prop, ok := p.Source.(aggregation.Property) + if !ok { + return nil, fmt.Errorf("numerical: type: expected aggregation.Property, got %T", p.Source) + } + + return prop.SchemaType, nil + }, + }, + } + + return graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%s%s%sObj", prefix, class.Class, property.Name), + Fields: getMetaIntFields, + Description: descriptions.AggregatePropertyObject, + }) +} + +func datePropertyFields(class *models.Class, + property *models.Property, prefix string, +) *graphql.Object { + getMetaDateFields := graphql.Fields{ + "count": &graphql.Field{ + Name: fmt.Sprintf("%s%sCount", prefix, class.Class), + Description: descriptions.AggregateCount, + Type: graphql.Int, + Resolve: makeResolveDateFieldAggregator("count"), + }, + "minimum": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sMinimum", prefix, class.Class, property.Name), + Description: descriptions.AggregateMin, + Type: graphql.String, + Resolve: makeResolveDateFieldAggregator("minimum"), + }, + "maximum": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sMaximum", prefix, class.Class, property.Name), + Description: descriptions.AggregateMax, + Type: graphql.String, + Resolve: makeResolveDateFieldAggregator("maximum"), + }, + "mode": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sMode", prefix, class.Class, property.Name), + Description: descriptions.AggregateMode, + Type: graphql.String, + Resolve: makeResolveDateFieldAggregator("mode"), + }, + "median": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sMedian", prefix, class.Class, property.Name), + Description: descriptions.AggregateMedian, + Type: graphql.String, + Resolve: makeResolveDateFieldAggregator("median"), + }, + } + + return 
graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%s%s%sObj", prefix, class.Class, property.Name), + Fields: getMetaDateFields, + Description: descriptions.AggregatePropertyObject, + }) +} + +func referencePropertyFields(class *models.Class, + property *models.Property, prefix string, +) *graphql.Object { + getMetaPointingFields := graphql.Fields{ + "type": &graphql.Field{ + Name: fmt.Sprintf("%s%sType", prefix, class.Class), + Description: descriptions.AggregatePropertyType, + Type: graphql.String, + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + prop, ok := p.Source.(aggregation.Property) + if !ok { + return nil, fmt.Errorf("ref property type: expected aggregation.Property, got %T", + p.Source) + } + + return prop.SchemaType, nil + }, + }, + "pointingTo": &graphql.Field{ + Name: fmt.Sprintf("%s%sPointingTo", prefix, class.Class), + Description: descriptions.AggregateClassPropertyPointingTo, + Type: graphql.NewList(graphql.String), + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + ref, err := extractReferenceAggregation(p.Source) + if err != nil { + return nil, fmt.Errorf("ref property pointingTo: %w", err) + } + + return ref.PointingTo, nil + }, + DeprecationReason: "Experimental, the format will change", + }, + } + + return graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%s%s%sObj", prefix, class.Class, property.Name), + Fields: getMetaPointingFields, + Description: descriptions.AggregatePropertyObject, + }) +} + +func extractReferenceAggregation(source interface{}) (*aggregation.Reference, error) { + property, ok := source.(aggregation.Property) + if !ok { + return nil, fmt.Errorf("expected aggregation.Property, got %T", source) + } + + if property.Type != aggregation.PropertyTypeReference { + return nil, fmt.Errorf("expected property to be of type reference, got %s", property.Type) + } + + return &property.ReferenceAggregation, nil +} + +func booleanPropertyFields(class *models.Class, + property 
*models.Property, prefix string, +) *graphql.Object { + getMetaPointingFields := graphql.Fields{ + "count": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sCount", prefix, class.Class, property.Name), + Description: descriptions.AggregatePropertyCount, + Type: graphql.Int, + Resolve: booleanResolver(func(b aggregation.Boolean) interface{} { return b.Count }), + }, + "totalTrue": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sTotalTrue", prefix, class.Class, property.Name), + Description: descriptions.AggregateClassPropertyTotalTrue, + Type: graphql.Int, + Resolve: booleanResolver(func(b aggregation.Boolean) interface{} { return b.TotalTrue }), + }, + "percentageTrue": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sPercentageTrue", prefix, class.Class, property.Name), + Description: descriptions.AggregateClassPropertyPercentageTrue, + Type: graphql.Float, + Resolve: booleanResolver(func(b aggregation.Boolean) interface{} { return b.PercentageTrue }), + }, + "totalFalse": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sTotalFalse", prefix, class.Class, property.Name), + Description: descriptions.AggregateClassPropertyTotalFalse, + Type: graphql.Int, + Resolve: booleanResolver(func(b aggregation.Boolean) interface{} { return b.TotalFalse }), + }, + "percentageFalse": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sPercentageFalse", prefix, class.Class, property.Name), + Description: descriptions.AggregateClassPropertyPercentageFalse, + Type: graphql.Float, + Resolve: booleanResolver(func(b aggregation.Boolean) interface{} { return b.PercentageFalse }), + }, + "type": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sType", prefix, class.Class, property.Name), + Description: descriptions.AggregateCount, + Type: graphql.String, + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + prop, ok := p.Source.(aggregation.Property) + if !ok { + return nil, fmt.Errorf("boolean: type: expected aggregation.Property, got %T", p.Source) + } + + return prop.SchemaType, nil + }, + }, + } + + return 
graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%s%s%sObj", prefix, class.Class, property.Name), + Fields: getMetaPointingFields, + Description: descriptions.AggregatePropertyObject, + }) +} + +type booleanExtractorFunc func(aggregation.Boolean) interface{} + +func booleanResolver(extractor booleanExtractorFunc) func(p graphql.ResolveParams) (interface{}, error) { + return func(p graphql.ResolveParams) (interface{}, error) { + boolean, err := extractBooleanAggregation(p.Source) + if err != nil { + return nil, fmt.Errorf("boolean: %w", err) + } + + return extractor(*boolean), nil + } +} + +func extractBooleanAggregation(source interface{}) (*aggregation.Boolean, error) { + property, ok := source.(aggregation.Property) + if !ok { + return nil, fmt.Errorf("expected aggregation.Property, got %T", source) + } + + if property.Type != aggregation.PropertyTypeBoolean { + return nil, fmt.Errorf("expected property to be of type boolean, got %s", property.Type) + } + + return &property.BooleanAggregation, nil +} + +func stringPropertyFields(class *models.Class, + property *models.Property, prefix string, +) *graphql.Object { + getAggregatePointingFields := graphql.Fields{ + "count": &graphql.Field{ + Name: fmt.Sprintf("%s%sCount", prefix, class.Class), + Description: descriptions.AggregatePropertyCount, + Type: graphql.Int, + Resolve: textResolver(func(text aggregation.Text) (interface{}, error) { + return text.Count, nil + }), + }, + "type": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sType", prefix, class.Class, property.Name), + Description: descriptions.AggregateCount, + Type: graphql.String, + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + prop, ok := p.Source.(aggregation.Property) + if !ok { + return nil, fmt.Errorf("text type: expected aggregation.Property, got %T", p.Source) + } + + return prop.SchemaType, nil + }, + }, + "topOccurrences": &graphql.Field{ + Name: fmt.Sprintf("%s%sTopOccurrences", prefix, class.Class), + Description: 
descriptions.AggregatePropertyTopOccurrences, + Type: graphql.NewList(stringTopOccurrences(class, property, prefix)), + Resolve: textResolver(func(text aggregation.Text) (interface{}, error) { + list := make([]interface{}, len(text.Items)) + for i, to := range text.Items { + list[i] = to + } + + return list, nil + }), + Args: graphql.FieldConfigArgument{ + "limit": &graphql.ArgumentConfig{ + Description: descriptions.First, + Type: graphql.Int, + }, + }, + }, + } + + return graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%s%s%sObj", prefix, class.Class, property.Name), + Fields: getAggregatePointingFields, + Description: descriptions.AggregatePropertyObject, + }) +} + +type textExtractorFunc func(aggregation.Text) (interface{}, error) + +func textResolver(extractor textExtractorFunc) func(p graphql.ResolveParams) (interface{}, error) { + return func(p graphql.ResolveParams) (interface{}, error) { + text, err := extractTextAggregation(p.Source) + if err != nil { + return nil, fmt.Errorf("text: %w", err) + } + + return extractor(text) + } +} + +func stringTopOccurrences(class *models.Class, + property *models.Property, prefix string, +) *graphql.Object { + getAggregateAggregatePointingFields := graphql.Fields{ + "value": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sTopOccurrencesValue", prefix, class.Class, property.Name), + Description: descriptions.AggregatePropertyTopOccurrencesValue, + Type: graphql.String, + Resolve: textOccurrenceResolver(func(t aggregation.TextOccurrence) interface{} { return t.Value }), + }, + "occurs": &graphql.Field{ + Name: fmt.Sprintf("%s%s%sTopOccurrencesOccurs", prefix, class.Class, property.Name), + Description: descriptions.AggregatePropertyTopOccurrencesOccurs, + Type: graphql.Int, + Resolve: textOccurrenceResolver(func(t aggregation.TextOccurrence) interface{} { return t.Occurs }), + }, + } + + getAggregateAggregatePointing := graphql.ObjectConfig{ + Name: fmt.Sprintf("%s%s%sTopOccurrencesObj", prefix, class.Class, 
property.Name), + Fields: getAggregateAggregatePointingFields, + Description: descriptions.AggregatePropertyTopOccurrences, + } + + return graphql.NewObject(getAggregateAggregatePointing) +} + +type textOccurrenceExtractorFunc func(aggregation.TextOccurrence) interface{} + +func textOccurrenceResolver(extractor textOccurrenceExtractorFunc) func(p graphql.ResolveParams) (interface{}, error) { + return func(p graphql.ResolveParams) (interface{}, error) { + textOccurrence, ok := p.Source.(aggregation.TextOccurrence) + if !ok { + return nil, fmt.Errorf("textOccurrence: %s: expected aggregation.TextOccurrence, but got %T", + p.Info.FieldName, p.Source) + } + + return extractor(textOccurrence), nil + } +} + +func extractTextAggregation(source interface{}) (aggregation.Text, error) { + property, ok := source.(aggregation.Property) + if !ok { + return aggregation.Text{}, fmt.Errorf("expected aggregation.Property, got %T", source) + } + + if property.Type == aggregation.PropertyTypeNumerical { + // in this case we can only use count + return aggregation.Text{ + Count: property.NumericalAggregations["count"].(int), + }, nil + } + + if property.Type != aggregation.PropertyTypeText { + return aggregation.Text{}, fmt.Errorf("expected property to be of type text, got %s (%#v)", property.Type, property) + } + + return property.TextAggregation, nil +} + +func groupedByProperty(class *models.Class) *graphql.Object { + classProperties := graphql.Fields{ + "path": &graphql.Field{ + Description: descriptions.AggregateGroupedByGroupedByPath, + Type: graphql.NewList(graphql.String), + Resolve: groupedByResolver(func(g *aggregation.GroupedBy) interface{} { return g.Path }), + }, + "value": &graphql.Field{ + Description: descriptions.AggregateGroupedByGroupedByValue, + Type: graphql.String, + Resolve: groupedByResolver(func(g *aggregation.GroupedBy) interface{} { return g.Value }), + }, + } + + classPropertiesObj := graphql.NewObject(graphql.ObjectConfig{ + Name: 
fmt.Sprintf("Aggregate%sGroupedByObj", class.Class), + Fields: classProperties, + Description: descriptions.AggregateGroupedByObj, + }) + + return classPropertiesObj +} + +type groupedByExtractorFunc func(*aggregation.GroupedBy) interface{} + +func groupedByResolver(extractor groupedByExtractorFunc) func(p graphql.ResolveParams) (interface{}, error) { + return func(p graphql.ResolveParams) (interface{}, error) { + groupedBy, ok := p.Source.(*aggregation.GroupedBy) + if !ok { + return nil, fmt.Errorf("groupedBy: %s: expected aggregation.GroupedBy, but got %T", + p.Info.FieldName, p.Source) + } + + return extractor(groupedBy), nil + } +} + +func makeResolveNumericFieldAggregator(aggregator string) func(p graphql.ResolveParams) (interface{}, error) { + return func(p graphql.ResolveParams) (interface{}, error) { + num, err := extractNumericAggregation(p.Source) + if err != nil { + return nil, fmt.Errorf("numerical aggregator %s: %w", aggregator, err) + } + + return num[aggregator], nil + } +} + +func extractNumericAggregation(source interface{}) (map[string]interface{}, error) { + property, ok := source.(aggregation.Property) + if !ok { + return nil, fmt.Errorf("expected aggregation.Property, got %T", source) + } + + if property.Type != aggregation.PropertyTypeNumerical { + return nil, fmt.Errorf("expected property to be of type numerical, got %s", property.Type) + } + + return property.NumericalAggregations, nil +} + +func makeResolveDateFieldAggregator(aggregator string) func(p graphql.ResolveParams) (interface{}, error) { + return func(p graphql.ResolveParams) (interface{}, error) { + date, err := extractDateAggregation(p.Source) + if err != nil { + return nil, fmt.Errorf("date aggregator %s: %w", aggregator, err) + } + + return date[aggregator], nil + } +} + +func extractDateAggregation(source interface{}) (map[string]interface{}, error) { + property, ok := source.(aggregation.Property) + if !ok { + return nil, fmt.Errorf("expected aggregation.Property, got %T", 
source) + } + + if property.Type != aggregation.PropertyTypeDate { + return nil, fmt.Errorf("expected property to be of type date, got %s", property.Type) + } + + return property.DateAggregations, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/resolver.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/resolver.go new file mode 100644 index 0000000000000000000000000000000000000000..4627981d2035e737ebf0bd9f8de811eb0457e5bb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/resolver.go @@ -0,0 +1,330 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Package aggregate provides the local aggregate graphql endpoint for Weaviate +package aggregate + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/tailor-inc/graphql" + "github.com/tailor-inc/graphql/language/ast" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + restCtx "github.com/weaviate/weaviate/adapters/handlers/rest/context" + "github.com/weaviate/weaviate/entities/aggregation" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +// GroupedByFieldName is a special graphQL field that appears alongside the +// to-be-aggregated props, but doesn't require any processing by the connectors +// itself, as it just displays meta info about the overall aggregation. 
+const GroupedByFieldName = "groupedBy" + +// Resolver is a local interface that can be composed with other interfaces to +// form the overall GraphQL API main interface. All data-base connectors that +// want to support the Meta feature must implement this interface. +type Resolver interface { + Aggregate(ctx context.Context, principal *models.Principal, info *aggregation.Params) (interface{}, error) +} + +// RequestsLog is a local abstraction on the RequestsLog that needs to be +// provided to the graphQL API in order to log Local.Get queries. +type RequestsLog interface { + Register(requestType string, identifier string) +} + +func makeResolveClass(authorizer authorization.Authorizer, modulesProvider ModulesProvider, class *models.Class) graphql.FieldResolveFn { + return func(p graphql.ResolveParams) (interface{}, error) { + res, err := resolveAggregate(p, authorizer, modulesProvider, class) + if err != nil { + return res, enterrors.NewErrGraphQLUser(err, "Aggregate", schema.ClassName(p.Info.FieldName).String()) + } + return res, nil + } +} + +func resolveAggregate(p graphql.ResolveParams, authorizer authorization.Authorizer, modulesProvider ModulesProvider, class *models.Class) (interface{}, error) { + principal := restCtx.GetPrincipalFromContext(p.Context) + className := schema.ClassName(p.Info.FieldName) + + source, ok := p.Source.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("expected source to be a map, but was %t", p.Source) + } + + resolver, ok := source["Resolver"].(Resolver) + if !ok { + return nil, fmt.Errorf("expected source to contain a usable Resolver, but was %t", p.Source) + } + + var tenant string + if tk, ok := p.Args["tenant"]; ok { + tenant = tk.(string) + } + + if err := authorizer.Authorize(p.Context, principal, authorization.READ, authorization.ShardsData(className.String(), tenant)...); err != nil { + return nil, err + } + + // There can only be exactly one ast.Field; it is the class name. 
+ if len(p.Info.FieldASTs) != 1 { + panic("Only one Field expected here") + } + + selections := p.Info.FieldASTs[0].SelectionSet + properties, includeMeta, err := extractProperties(selections) + if err != nil { + return nil, fmt.Errorf("could not extract properties for class '%s': %w", className, err) + } + + groupBy, err := extractGroupBy(p.Args, p.Info.FieldName) + if err != nil { + return nil, fmt.Errorf("could not extract groupBy path: %w", err) + } + + limit, err := extractLimit(p.Args) + if err != nil { + return nil, fmt.Errorf("could not extract limit: %w", err) + } + + objectLimit, err := extractObjectLimit(p.Args) + if objectLimit != nil && *objectLimit <= 0 { + return nil, fmt.Errorf("objectLimit must be a positive integer") + } + if err != nil { + return nil, fmt.Errorf("could not extract objectLimit: %w", err) + } + + filters, err := common_filters.ExtractFilters(p.Args, p.Info.FieldName) + if err != nil { + return nil, fmt.Errorf("could not extract filters: %w", err) + } + if filters != nil { + if err := common_filters.AuthorizeFilters(p.Context, authorizer, filters.Root, principal); err != nil { + return nil, err + } + } + + var nearVectorParams *searchparams.NearVector + if nearVector, ok := p.Args["nearVector"]; ok { + p, _, err := common_filters.ExtractNearVector(nearVector.(map[string]interface{}), nil) + if err != nil { + return nil, fmt.Errorf("failed to extract nearVector params: %w", err) + } + nearVectorParams = &p + } + + var nearObjectParams *searchparams.NearObject + if nearObject, ok := p.Args["nearObject"]; ok { + p, _, err := common_filters.ExtractNearObject(nearObject.(map[string]interface{})) + if err != nil { + return nil, fmt.Errorf("failed to extract nearObject params: %w", err) + } + nearObjectParams = &p + } + + var moduleParams map[string]interface{} + if modulesProvider != nil { + extractedParams, _ := modulesProvider.ExtractSearchParams(p.Args, class.Class) + if len(extractedParams) > 0 { + moduleParams = extractedParams + } + 
} + + // Extract hybrid search params from the processed query + // Everything hybrid can go in another namespace AFTER modulesprovider is + // refactored + var hybridParams *searchparams.HybridSearch + if hybrid, ok := p.Args["hybrid"]; ok { + p, _, err := common_filters.ExtractHybridSearch(hybrid.(map[string]interface{}), false) + if err != nil { + return nil, fmt.Errorf("failed to extract hybrid params: %w", err) + } + hybridParams = p + } + + params := &aggregation.Params{ + Filters: filters, + ClassName: className, + Properties: properties, + GroupBy: groupBy, + IncludeMetaCount: includeMeta, + Limit: limit, + ObjectLimit: objectLimit, + NearVector: nearVectorParams, + NearObject: nearObjectParams, + ModuleParams: moduleParams, + Hybrid: hybridParams, + Tenant: tenant, + } + + // we might support objectLimit without nearMedia filters later, e.g. with sort + if params.ObjectLimit != nil && !validateObjectLimitUsage(params) { + return nil, fmt.Errorf("objectLimit can only be used with a near or hybrid filter") + } + + res, err := resolver.Aggregate(p.Context, principal, params) + if err != nil { + return nil, err + } + + switch parsed := res.(type) { + case *aggregation.Result: + return parsed.Groups, nil + default: + return res, nil + } +} + +func extractProperties(selections *ast.SelectionSet) ([]aggregation.ParamProperty, bool, error) { + properties := []aggregation.ParamProperty{} + var includeMeta bool + + for _, selection := range selections.Selections { + field := selection.(*ast.Field) + name := field.Name.Value + if name == GroupedByFieldName { + // in the graphQL API we show the "groupedBy" field alongside various + // properties, however, we don't have to include it here, as we don't + // won't to perform aggregations on it. + // If we didn't exclude it we'd run into errors down the line, because + // the connector would look for a "groupedBy" prop on the specific class + // which doesn't exist. 
+ + continue + } + + if name == "meta" { + includeMeta = true + continue + } + + if name == "__typename" { + continue + } + + name = strings.ToLower(string(name[0:1])) + string(name[1:]) + property := aggregation.ParamProperty{Name: schema.PropertyName(name)} + aggregators, err := extractAggregators(field.SelectionSet) + if err != nil { + return nil, false, err + } + + property.Aggregators = aggregators + properties = append(properties, property) + } + + return properties, includeMeta, nil +} + +func extractAggregators(selections *ast.SelectionSet) ([]aggregation.Aggregator, error) { + if selections == nil { + return nil, nil + } + analyses := []aggregation.Aggregator{} + for _, selection := range selections.Selections { + field := selection.(*ast.Field) + name := field.Name.Value + if name == "__typename" { + continue + } + property, err := aggregation.ParseAggregatorProp(name) + if err != nil { + return nil, err + } + + if property.String() == aggregation.NewTopOccurrencesAggregator(nil).String() { + // a top occurrence, so we need to check if we have a limit argument + if overwrite := extractLimitFromArgs(field.Arguments); overwrite != nil { + property.Limit = overwrite + } + } + + analyses = append(analyses, property) + } + + return analyses, nil +} + +func extractGroupBy(args map[string]interface{}, rootClass string) (*filters.Path, error) { + groupBy, ok := args["groupBy"] + if !ok { + // not set means the user is not interested in grouping (former Meta) + return nil, nil + } + + pathSegments, ok := groupBy.([]interface{}) + if !ok { + return nil, fmt.Errorf("no groupBy must be a list, instead got: %#v", groupBy) + } + + return filters.ParsePath(pathSegments, rootClass) +} + +func extractLimit(args map[string]interface{}) (*int, error) { + limit, ok := args["limit"] + if !ok { + // not set means the user is not interested and the UC should use a reasonable default + return nil, nil + } + + limitInt, ok := limit.(int) + if !ok { + return nil, fmt.Errorf("limit 
must be an int, instead got: %#v", limit) + } + + return &limitInt, nil +} + +func extractObjectLimit(args map[string]interface{}) (*int, error) { + objectLimit, ok := args["objectLimit"] + if !ok { + return nil, nil + } + + objectLimitInt, ok := objectLimit.(int) + if !ok { + return nil, fmt.Errorf("objectLimit must be an int, instead got: %#v", objectLimit) + } + + return &objectLimitInt, nil +} + +func extractLimitFromArgs(args []*ast.Argument) *int { + for _, arg := range args { + if arg.Name.Value != "limit" { + continue + } + + v, ok := arg.Value.GetValue().(string) + if ok { + asInt, _ := strconv.Atoi(v) + return &asInt + } + } + + return nil +} + +func validateObjectLimitUsage(params *aggregation.Params) bool { + return params.NearObject != nil || + params.NearVector != nil || + len(params.ModuleParams) > 0 || + params.Hybrid != nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/resolver_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/resolver_test.go new file mode 100644 index 0000000000000000000000000000000000000000..91fd6581d209e94a0d159db5aee1a193a8a1681f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/resolver_test.go @@ -0,0 +1,1020 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregate + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/usecases/config" +) + +type testCase struct { + name string + query string + expectedProps []aggregation.ParamProperty + resolverReturn interface{} + expectedResults []result + expectedGroupBy *filters.Path + expectedWhereFilter *filters.LocalFilter + expectedNearObjectFilter *searchparams.NearObject + expectedNearVectorFilter *searchparams.NearVector + expectedNearHybrid *searchparams.HybridSearch + expectedIncludeMetaCount bool + expectedLimit *int + expectedObjectLimit *int +} + +type testCases []testCase + +type result struct { + pathToField []string + expectedValue interface{} +} + +func groupCarByMadeByManufacturerName() *filters.Path { + return &filters.Path{ + Class: schema.ClassName("Car"), + Property: schema.PropertyName("madeBy"), + Child: &filters.Path{ + Class: schema.ClassName("Manufacturer"), + Property: schema.PropertyName("name"), + }, + } +} + +func Test_Resolve(t *testing.T) { + t.Parallel() + var emptySubsearch []searchparams.WeightedSearchResult + tests := testCases{ + testCase{ + name: "for gh-758 (multiple operands)", + query: ` + { + Aggregate { + Car(where:{ + operator:Or + operands:[{ + valueText:"Fast", + operator:Equal, + path:["modelName"] + }, { + valueText:"Slow", + operator:Equal, + path:["modelName"] + }] + }) { + __typename + modelName { + __typename + count + } + } + } + }`, + expectedProps: []aggregation.ParamProperty{ + { + Name: "modelName", + Aggregators: []aggregation.Aggregator{aggregation.CountAggregator}, + }, + }, + resolverReturn: []aggregation.Group{ + { + Properties: map[string]aggregation.Property{ + 
"modelName": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 20, + }, + }, + }, + }, + }, + expectedWhereFilter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName("Car"), + Property: schema.PropertyName("modelName"), + }, + Value: &filters.Value{ + Value: "Fast", + Type: schema.DataTypeText, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName("Car"), + Property: schema.PropertyName("modelName"), + }, + Value: &filters.Value{ + Value: "Slow", + Type: schema.DataTypeText, + }, + }, + }, + }, + }, + + expectedGroupBy: nil, + expectedResults: []result{{ + pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "__typename": "AggregateCar", + "modelName": map[string]interface{}{ + "count": 20, + "__typename": "AggregateCarmodelNameObj", + }, + }, + }, + }}, + }, + testCase{ + name: "without grouping prop", + query: `{ Aggregate { Car { horsepower { mean } } } }`, + expectedProps: []aggregation.ParamProperty{ + { + Name: "horsepower", + Aggregators: []aggregation.Aggregator{aggregation.MeanAggregator}, + }, + }, + resolverReturn: []aggregation.Group{ + { + Properties: map[string]aggregation.Property{ + "horsepower": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 275.7773, + }, + }, + }, + }, + }, + + expectedGroupBy: nil, + expectedResults: []result{{ + pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "horsepower": map[string]interface{}{"mean": 275.7773}, + }, + }, + }}, + }, + testCase{ + name: "setting limits overall", + query: `{ Aggregate { Car(limit:20) { horsepower { mean } } } }`, + expectedProps: []aggregation.ParamProperty{ + { + Name: "horsepower", + Aggregators: 
[]aggregation.Aggregator{aggregation.MeanAggregator}, + }, + }, + resolverReturn: []aggregation.Group{ + { + Properties: map[string]aggregation.Property{ + "horsepower": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 275.7773, + }, + }, + }, + }, + }, + + expectedGroupBy: nil, + expectedLimit: ptInt(20), + expectedResults: []result{{ + pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "horsepower": map[string]interface{}{"mean": 275.7773}, + }, + }, + }}, + }, + testCase{ + name: "with props formerly contained only in Meta", + query: `{ Aggregate { Car { + stillInProduction { type count totalTrue percentageTrue totalFalse percentageFalse } + modelName { type count topOccurrences { value occurs } } + madeBy { type pointingTo } + meta { count } + } } } `, + expectedIncludeMetaCount: true, + expectedProps: []aggregation.ParamProperty{ + { + Name: "stillInProduction", + Aggregators: []aggregation.Aggregator{ + aggregation.TypeAggregator, + aggregation.CountAggregator, + aggregation.TotalTrueAggregator, + aggregation.PercentageTrueAggregator, + aggregation.TotalFalseAggregator, + aggregation.PercentageFalseAggregator, + }, + }, + { + Name: "modelName", + Aggregators: []aggregation.Aggregator{ + aggregation.TypeAggregator, + aggregation.CountAggregator, + aggregation.NewTopOccurrencesAggregator(ptInt(5)), + }, + }, + { + Name: "madeBy", + Aggregators: []aggregation.Aggregator{ + aggregation.TypeAggregator, + aggregation.PointingToAggregator, + }, + }, + }, + resolverReturn: []aggregation.Group{ + { + Count: 10, + Properties: map[string]aggregation.Property{ + "stillInProduction": { + SchemaType: "boolean", + Type: aggregation.PropertyTypeBoolean, + BooleanAggregation: aggregation.Boolean{ + TotalTrue: 23, + TotalFalse: 17, + PercentageTrue: 60, + PercentageFalse: 40, + Count: 40, + }, + }, + "modelName": { + SchemaType: "string", + Type: 
aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 40, + Items: []aggregation.TextOccurrence{ + { + Value: "fastcar", + Occurs: 39, + }, + { + Value: "slowcar", + Occurs: 1, + }, + }, + }, + }, + "madeBy": { + SchemaType: "cref", + Type: aggregation.PropertyTypeReference, + ReferenceAggregation: aggregation.Reference{ + PointingTo: []string{"Manufacturer"}, + }, + }, + }, + }, + }, + + expectedGroupBy: nil, + expectedResults: []result{{ + pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "stillInProduction": map[string]interface{}{ + "type": "boolean", + "totalTrue": 23, + "totalFalse": 17, + "percentageTrue": 60.0, + "percentageFalse": 40.0, + "count": 40, + }, + "modelName": map[string]interface{}{ + "count": 40, + "type": "string", + "topOccurrences": []interface{}{ + map[string]interface{}{ + "value": "fastcar", + "occurs": 39, + }, + map[string]interface{}{ + "value": "slowcar", + "occurs": 1, + }, + }, + }, + "madeBy": map[string]interface{}{ + "type": "cref", + "pointingTo": []interface{}{"Manufacturer"}, + }, + "meta": map[string]interface{}{ + "count": 10, + }, + }, + }, + }}, + }, + testCase{ + name: "with custom limit in topOccurrences", + query: `{ Aggregate { Car { + modelName { topOccurrences(limit: 7) { value occurs } } + } } } `, + expectedProps: []aggregation.ParamProperty{ + { + Name: "modelName", + Aggregators: []aggregation.Aggregator{ + aggregation.NewTopOccurrencesAggregator(ptInt(7)), + }, + }, + }, + resolverReturn: []aggregation.Group{ + { + Count: 10, + Properties: map[string]aggregation.Property{ + "modelName": { + SchemaType: "string", + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Items: []aggregation.TextOccurrence{ + { + Value: "fastcar", + Occurs: 39, + }, + { + Value: "slowcar", + Occurs: 1, + }, + }, + }, + }, + }, + }, + }, + + expectedGroupBy: nil, + expectedResults: []result{{ + pathToField: []string{"Aggregate", "Car"}, + 
expectedValue: []interface{}{ + map[string]interface{}{ + "modelName": map[string]interface{}{ + "topOccurrences": []interface{}{ + map[string]interface{}{ + "value": "fastcar", + "occurs": 39, + }, + map[string]interface{}{ + "value": "slowcar", + "occurs": 1, + }, + }, + }, + }, + }, + }}, + }, + testCase{ + name: "single prop: mean (with type)", + query: `{ Aggregate { Car(groupBy:["madeBy", "Manufacturer", "name"]) { horsepower { mean type } } } }`, + expectedProps: []aggregation.ParamProperty{ + { + Name: "horsepower", + Aggregators: []aggregation.Aggregator{aggregation.MeanAggregator, aggregation.TypeAggregator}, + }, + }, + resolverReturn: []aggregation.Group{ + { + Properties: map[string]aggregation.Property{ + "horsepower": { + Type: aggregation.PropertyTypeNumerical, + SchemaType: "int", + NumericalAggregations: map[string]interface{}{ + "mean": 275.7773, + }, + }, + }, + }, + }, + + expectedGroupBy: groupCarByMadeByManufacturerName(), + expectedResults: []result{{ + pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "horsepower": map[string]interface{}{ + "mean": 275.7773, + "type": "int", + }, + }, + }, + }}, + }, + + testCase{ + name: "single prop: mean with groupedBy path/value", + query: `{ Aggregate { Car(groupBy:["madeBy", "Manufacturer", "name"]) { horsepower { mean } groupedBy { value path } } } }`, + expectedProps: []aggregation.ParamProperty{ + { + Name: "horsepower", + Aggregators: []aggregation.Aggregator{aggregation.MeanAggregator}, + }, + }, + resolverReturn: []aggregation.Group{ + { + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"madeBy", "Manufacturer", "name"}, + Value: "best-manufacturer", + }, + Properties: map[string]aggregation.Property{ + "horsepower": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 275.7773, + }, + }, + }, + }, + }, + expectedGroupBy: groupCarByMadeByManufacturerName(), + expectedResults: []result{{ + 
pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "horsepower": map[string]interface{}{"mean": 275.7773}, + "groupedBy": map[string]interface{}{ + "path": []interface{}{"madeBy", "Manufacturer", "name"}, + "value": "best-manufacturer", + }, + }, + }, + }}, + }, + testCase{ + name: "hybrid vector distance", + query: `{ + Aggregate { + Car( + hybrid: {query:"apple", maxVectorDistance: 0.5} + ) { + horsepower { + mean + } + } + } + }`, + expectedProps: []aggregation.ParamProperty{ + { + Name: "horsepower", + Aggregators: []aggregation.Aggregator{aggregation.MeanAggregator}, + }, + }, + expectedNearHybrid: &searchparams.HybridSearch{ + Distance: 0.5, + WithDistance: true, + Alpha: 0.75, + Query: "apple", + FusionAlgorithm: 1, + Type: "hybrid", + SubSearches: emptySubsearch, + }, + resolverReturn: []aggregation.Group{ + { + Properties: map[string]aggregation.Property{ + "horsepower": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 275.7773, + }, + }, + }, + }, + }, + expectedResults: []result{{ + pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "horsepower": map[string]interface{}{ + "mean": 275.7773, + }, + }, + }, + }}, + }, + testCase{ + name: "single prop: mean with a where filter", + query: `{ + Aggregate { + Car( + groupBy:["madeBy", "Manufacturer", "name"] + where: { + operator: LessThan, + valueInt: 200, + path: ["horsepower"], + } + ) { + horsepower { + mean + } + } + } + }`, + expectedProps: []aggregation.ParamProperty{ + { + Name: "horsepower", + Aggregators: []aggregation.Aggregator{aggregation.MeanAggregator}, + }, + }, + resolverReturn: []aggregation.Group{ + { + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"madeBy", "Manufacturer", "name"}, + Value: "best-manufacturer", + }, + Properties: map[string]aggregation.Property{ + "horsepower": { + Type: aggregation.PropertyTypeNumerical, + 
NumericalAggregations: map[string]interface{}{ + "mean": 275.7773, + }, + }, + }, + }, + }, + expectedGroupBy: groupCarByMadeByManufacturerName(), + expectedResults: []result{{ + pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "horsepower": map[string]interface{}{ + "mean": 275.7773, + }, + }, + }, + }}, + expectedWhereFilter: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName("Car"), + Property: schema.PropertyName("horsepower"), + }, + Value: &filters.Value{ + Value: 200, + Type: schema.DataTypeInt, + }, + Operator: filters.OperatorLessThan, + }, + }, + }, + + testCase{ + name: "all int props", + query: `{ Aggregate { Car(groupBy:["madeBy", "Manufacturer", "name"]) { horsepower { mean, median, mode, maximum, minimum, count, sum } } } }`, + expectedProps: []aggregation.ParamProperty{ + { + Name: "horsepower", + Aggregators: []aggregation.Aggregator{aggregation.MeanAggregator, aggregation.MedianAggregator, aggregation.ModeAggregator, aggregation.MaximumAggregator, aggregation.MinimumAggregator, aggregation.CountAggregator, aggregation.SumAggregator}, + }, + }, + resolverReturn: []aggregation.Group{ + { + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"madeBy", "Manufacturer", "name"}, + Value: "best-manufacturer", + }, + Properties: map[string]aggregation.Property{ + "horsepower": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "maximum": 610.0, + "minimum": 89.0, + "mean": 275.7, + "median": 289.0, + "mode": 115.0, + "count": 23, + "sum": 6343.0, + }, + }, + }, + }, + }, + expectedGroupBy: groupCarByMadeByManufacturerName(), + expectedResults: []result{ + { + pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "horsepower": map[string]interface{}{ + "maximum": 610.0, + "minimum": 89.0, + "mean": 275.7, + "median": 289.0, + "mode": 115.0, + "count": 23, + "sum": 
6343.0, + }, + }, + }, + }, + }, + }, + + testCase{ + name: "single prop: string", + query: `{ Aggregate { Car(groupBy:["madeBy", "Manufacturer", "name"]) { modelName { count } } } }`, + expectedProps: []aggregation.ParamProperty{ + { + Name: "modelName", + Aggregators: []aggregation.Aggregator{aggregation.CountAggregator}, + }, + }, + resolverReturn: []aggregation.Group{ + { + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"madeBy", "Manufacturer", "name"}, + Value: "best-manufacturer", + }, + Properties: map[string]aggregation.Property{ + "modelName": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 7, + }, + }, + }, + }, + }, + expectedGroupBy: groupCarByMadeByManufacturerName(), + expectedResults: []result{{ + pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "modelName": map[string]interface{}{ + "count": 7, + }, + }, + }, + }}, + }, + + testCase{ + name: "with objectLimit + nearObject (distance)", + query: ` + { + Aggregate{ + Car( + objectLimit: 1 + nearObject: { + id: "123" + distance: 0.3 + } + ) { + modelName { + count + } + } + } + } + `, + expectedProps: []aggregation.ParamProperty{ + { + Name: "modelName", + Aggregators: []aggregation.Aggregator{aggregation.CountAggregator}, + }, + }, + expectedObjectLimit: ptInt(1), + expectedNearObjectFilter: &searchparams.NearObject{ + ID: "123", + Beacon: "", + Distance: 0.3, + WithDistance: true, + }, + resolverReturn: []aggregation.Group{ + { + Properties: map[string]aggregation.Property{ + "modelName": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 7, + }, + }, + }, + }, + }, + expectedResults: []result{{ + pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "modelName": map[string]interface{}{ + "count": 7, + }, + }, + }, + }}, + }, + + testCase{ + name: "with objectLimit + nearObject (certainty)", + query: ` + { + Aggregate{ + 
Car( + objectLimit: 1 + nearObject: { + id: "123" + certainty: 0.7 + } + ) { + modelName { + count + } + } + } + } + `, + expectedProps: []aggregation.ParamProperty{ + { + Name: "modelName", + Aggregators: []aggregation.Aggregator{aggregation.CountAggregator}, + }, + }, + expectedObjectLimit: ptInt(1), + expectedNearObjectFilter: &searchparams.NearObject{ + ID: "123", + Beacon: "", + Certainty: 0.7, + }, + resolverReturn: []aggregation.Group{ + { + Properties: map[string]aggregation.Property{ + "modelName": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 7, + }, + }, + }, + }, + }, + expectedResults: []result{{ + pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "modelName": map[string]interface{}{ + "count": 7, + }, + }, + }, + }}, + }, + + testCase{ + name: "with objectLimit + nearVector (distance)", + query: ` + { + Aggregate{ + Car( + objectLimit: 1 + nearVector: { + vector: [1, 2, 3] + distance: 0.3 + } + ) { + modelName { + count + } + } + } + } + `, + expectedProps: []aggregation.ParamProperty{ + { + Name: "modelName", + Aggregators: []aggregation.Aggregator{aggregation.CountAggregator}, + }, + }, + expectedObjectLimit: ptInt(1), + expectedNearVectorFilter: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1, 2, 3}}, + Distance: 0.3, + WithDistance: true, + }, + resolverReturn: []aggregation.Group{ + { + Properties: map[string]aggregation.Property{ + "modelName": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 7, + }, + }, + }, + }, + }, + expectedResults: []result{{ + pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "modelName": map[string]interface{}{ + "count": 7, + }, + }, + }, + }}, + }, + + testCase{ + name: "with objectLimit + nearVector (certainty)", + query: ` + { + Aggregate{ + Car( + objectLimit: 1 + nearVector: { + vector: [1, 2, 3] + certainty: 0.7 + } 
+ ) { + modelName { + count + } + } + } + } + `, + expectedProps: []aggregation.ParamProperty{ + { + Name: "modelName", + Aggregators: []aggregation.Aggregator{aggregation.CountAggregator}, + }, + }, + expectedObjectLimit: ptInt(1), + expectedNearVectorFilter: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1, 2, 3}}, + Certainty: 0.7, + }, + resolverReturn: []aggregation.Group{ + { + Properties: map[string]aggregation.Property{ + "modelName": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 7, + }, + }, + }, + }, + }, + expectedResults: []result{{ + pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "modelName": map[string]interface{}{ + "count": 7, + }, + }, + }, + }}, + }, + testCase{ + name: "[deprecated string] for gh-758 (multiple operands)", + query: ` + { + Aggregate { + Car(where:{ + operator:Or + operands:[{ + valueString:"Fast", + operator:Equal, + path:["modelName"] + }, { + valueString:"Slow", + operator:Equal, + path:["modelName"] + }] + }) { + __typename + modelName { + __typename + count + } + } + } + }`, + expectedProps: []aggregation.ParamProperty{ + { + Name: "modelName", + Aggregators: []aggregation.Aggregator{aggregation.CountAggregator}, + }, + }, + resolverReturn: []aggregation.Group{ + { + Properties: map[string]aggregation.Property{ + "modelName": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 20, + }, + }, + }, + }, + }, + expectedWhereFilter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName("Car"), + Property: schema.PropertyName("modelName"), + }, + Value: &filters.Value{ + Value: "Fast", + Type: schema.DataTypeString, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName("Car"), + Property: 
schema.PropertyName("modelName"), + }, + Value: &filters.Value{ + Value: "Slow", + Type: schema.DataTypeString, + }, + }, + }, + }, + }, + + expectedGroupBy: nil, + expectedResults: []result{{ + pathToField: []string{"Aggregate", "Car"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "__typename": "AggregateCar", + "modelName": map[string]interface{}{ + "count": 20, + "__typename": "AggregateCarmodelNameObj", + }, + }, + }, + }}, + }, + } + + tests.AssertExtraction(t, "Car") +} + +func (tests testCases) AssertExtraction(t *testing.T, className string) { + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + resolver := newMockResolver(config.Config{}) + + expectedParams := &aggregation.Params{ + ClassName: schema.ClassName(className), + Properties: testCase.expectedProps, + GroupBy: testCase.expectedGroupBy, + Filters: testCase.expectedWhereFilter, + NearObject: testCase.expectedNearObjectFilter, + NearVector: testCase.expectedNearVectorFilter, + IncludeMetaCount: testCase.expectedIncludeMetaCount, + Limit: testCase.expectedLimit, + ObjectLimit: testCase.expectedObjectLimit, + Hybrid: testCase.expectedNearHybrid, + } + + resolver.On("Aggregate", expectedParams). 
+ Return(testCase.resolverReturn, nil).Once() + + result := resolver.AssertResolve(t, testCase.query) + + for _, expectedResult := range testCase.expectedResults { + value := result.Get(expectedResult.pathToField...).Result + + assert.Equal(t, expectedResult.expectedValue, value) + } + }) + } +} + +func ptInt(in int) *int { + return &in +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/sparse_search.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/sparse_search.go new file mode 100644 index 0000000000000000000000000000000000000000..a7743647e7ab1d1f9f9023a6b8aef12c48a5b132 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/aggregate/sparse_search.go @@ -0,0 +1,31 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregate + +import ( + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" +) + +func bm25Fields(prefix string) graphql.InputObjectConfigFieldMap { + return graphql.InputObjectConfigFieldMap{ + "query": &graphql.InputObjectFieldConfig{ + Description: "The query to search for", + Type: graphql.String, + }, + "properties": &graphql.InputObjectFieldConfig{ + Description: "The properties to search in", + Type: graphql.NewList(graphql.String), + }, + "searchOperator": common_filters.GenerateBM25SearchOperatorFields(prefix), + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/authz.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/authz.go new file mode 100644 index 0000000000000000000000000000000000000000..9dc508ba1dc442fe3366e60babf182a6febffce5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/authz.go @@ -0,0 +1,72 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "context" + "fmt" + + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +func authorizePath(ctx context.Context, authorizer authorization.Authorizer, path *filters.Path, principal *models.Principal) error { + if path == nil { + return nil + } + if err := authorizer.Authorize(ctx, principal, authorization.READ, authorization.CollectionsData(path.Class.String())...); err != nil { + return err + } + if path.Child != nil { + return authorizePath(ctx, authorizer, path.Child, principal) + } + return nil +} + +func AuthorizeFilters(ctx context.Context, authorizer authorization.Authorizer, clause *filters.Clause, principal *models.Principal) error { + if clause == nil { + return nil + } + if len(clause.Operands) == 0 { + path := clause.On + if path == nil { + return fmt.Errorf("no path found in clause: %v", clause) + } + return authorizePath(ctx, authorizer, path, principal) + } else { + for _, operand := range clause.Operands { + if err := AuthorizeFilters(ctx, authorizer, &operand, principal); err != nil { + return err + } + } + } + return nil +} + +func AuthorizeProperty(ctx context.Context, authorizer authorization.Authorizer, property *search.SelectProperty, principal *models.Principal) error { + if property == nil { + return nil + } + for _, ref := range property.Refs { + if err := authorizer.Authorize(ctx, principal, authorization.READ, authorization.CollectionsData(ref.ClassName)...); err != nil { + return err + } + for _, prop := range ref.RefProperties { + if err := AuthorizeProperty(ctx, authorizer, &prop, principal); err != nil { + return err + } + } + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/bm25.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/bm25.go new file mode 100644 index 0000000000000000000000000000000000000000..1d05c7511c76509e4b175481b82cc478d6e5210e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/bm25.go @@ -0,0 +1,91 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/entities/searchparams" +) + +var ( + SearchOperatorAnd = "OPERATOR_AND" + SearchOperatorOr = "OPERATOR_OR" +) + +func GenerateBM25SearchOperatorFields(prefixName string) *graphql.InputObjectFieldConfig { + searchesPrefixName := prefixName + "Searches" + return &graphql.InputObjectFieldConfig{ + Description: fmt.Sprintf("The search operator to use for the %s", searchesPrefixName), + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: searchesPrefixName, + Fields: graphql.InputObjectConfigFieldMap{ + "operator": &graphql.InputObjectFieldConfig{ + Description: "The search operator to use", + Type: graphql.NewEnum(graphql.EnumConfig{ // EnumConfig is a struct that defines the enum + Name: fmt.Sprintf("%sOperator", searchesPrefixName), + Values: graphql.EnumValueConfigMap{ + "And": &graphql.EnumValueConfig{ + Value: SearchOperatorAnd, + Description: "All tokens must match", + }, + "Or": &graphql.EnumValueConfig{ + Value: SearchOperatorOr, + Description: "At least one token must match", + }, + }, + }), + }, + "minimumOrTokensMatch": &graphql.InputObjectFieldConfig{ + Description: "The minimum number of tokens that should match (only for OR operator)", + Type: graphql.Int, + }, + }, + }, + ), + } +} + +// ExtractBM25 +func 
ExtractBM25(source map[string]interface{}, explainScore bool) searchparams.KeywordRanking { + var args searchparams.KeywordRanking + + p, ok := source["properties"] + if ok { + rawSlice := p.([]interface{}) + args.Properties = make([]string, len(rawSlice)) + for i, raw := range rawSlice { + args.Properties[i] = raw.(string) + } + } + + query, ok := source["query"] + if ok { + args.Query = query.(string) + } + + args.AdditionalExplanations = explainScore + args.Type = "bm25" + + operator, ok := source["searchOperator"] + if ok { + operator := operator.(map[string]interface{}) + args.SearchOperator = operator["operator"].(string) + if operator["minimumOrTokensMatch"] != nil { + args.MinimumOrTokensMatch = int(operator["minimumOrTokensMatch"].(int)) + } + } + + return args +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/extract_targets.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/extract_targets.go new file mode 100644 index 0000000000000000000000000000000000000000..28a83f43d7e15fdf71b448644bcd260f0c4ba259 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/extract_targets.go @@ -0,0 +1,119 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "fmt" + + "github.com/weaviate/weaviate/entities/dto" +) + +func ExtractTargets(source map[string]interface{}) ([]string, *dto.TargetCombination, error) { + targets, ok := source["targets"] + if ok { + targetsGql, ok := targets.(map[string]interface{}) + if !ok { + return nil, nil, fmt.Errorf("targets is not a map, got %v", targets) + } + targetVectorsGQL, ok := targetsGql["targetVectors"] + if !ok { + return nil, nil, fmt.Errorf("targetVectors is required field, got %v", targets) + } + targetVectorsArray, ok := targetVectorsGQL.([]interface{}) + if !ok { + return nil, nil, fmt.Errorf("targetVectors is not an array, got %v", targetVectorsGQL) + } + targetVectors := make([]string, len(targetVectorsArray)) + for i, value := range targetVectorsArray { + targetVectors[i], ok = value.(string) + if !ok { + return nil, nil, fmt.Errorf("target vector is not a string, got %v", value) + } + } + + combinationType, ok := targetsGql["combinationMethod"] + targetCombinationType := dto.DefaultTargetCombinationType + if ok { + targetCombinationType, ok = combinationType.(dto.TargetCombinationType) + if !ok { + return nil, nil, fmt.Errorf("combinationMethod is not a TargetCombinationType, got %v", combinationType) + } + } + + weightsGQL, ok := targetsGql["weights"] + var weightsIn map[string]interface{} + if ok { + weightsIn = weightsGQL.(map[string]interface{}) + } + + extractWeights := func(weightsIn map[string]interface{}, weightsOut []float32) error { + handled := make(map[string]struct{}) + for i, target := range targetVectors { + if _, ok := handled[target]; ok { + continue + } else { + handled[target] = struct{}{} + } + weightForTarget, ok := weightsIn[target] + if !ok { + return fmt.Errorf("weight for target %s is not provided", target) + } + if weightIn, ok := weightForTarget.(float64); ok { + weightsOut[i] = float32(weightIn) + } else if weightsIn, ok := weightForTarget.([]float64); ok { + for 
j, w := range weightsIn { + weightsOut[i+j] = float32(w) + } + } else { + return fmt.Errorf("weight for target %s is not a float or list of floats, got %v", target, weightForTarget) + } + } + return nil + } + + weights := make([]float32, len(targetVectors)) + switch targetCombinationType { + case dto.Average: + for i := range targetVectors { + weights[i] = 1.0 / float32(len(targetVectors)) + } + case dto.Sum: + for i := range targetVectors { + weights[i] = 1.0 + } + case dto.Minimum: + case dto.ManualWeights: + if err := extractWeights(weightsIn, weights); err != nil { + return nil, nil, err + } + case dto.RelativeScore: + if err := extractWeights(weightsIn, weights); err != nil { + return nil, nil, err + } + default: + return nil, nil, fmt.Errorf("unknown combination method %v", targetCombinationType) + } + return targetVectors, &dto.TargetCombination{Weights: weights, Type: targetCombinationType}, nil + + } else { + targetVectorsGQL, ok := source["targetVectors"] + if ok { + targetVectorsArray := targetVectorsGQL.([]interface{}) + targetVectors := make([]string, len(targetVectorsArray)) + for i, value := range targetVectorsArray { + targetVectors[i] = value.(string) + } + return targetVectors, &dto.TargetCombination{Type: dto.Minimum}, nil + } + } + return nil, nil, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/extract_targets_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/extract_targets_test.go new file mode 100644 index 0000000000000000000000000000000000000000..187cac5cb313b924937910c5df46a55efbec8cc1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/extract_targets_test.go @@ -0,0 +1,146 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 
Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/dto" +) + +func TestTargetExtraction(t *testing.T) { + cases := []struct { + name string + source map[string]interface{} + expectTargetVectors []string + expectCombinationType *dto.TargetCombination + wantErr bool + }{ + { + name: "two target vectors with default", + source: map[string]interface{}{"targets": map[string]interface{}{"targetVectors": []interface{}{"a", "b"}}}, + expectTargetVectors: []string{"a", "b"}, + expectCombinationType: &dto.TargetCombination{Type: dto.Minimum, Weights: []float32{0, 0}}, + }, + { + name: "two target vectors with min", + source: map[string]interface{}{ + "targets": map[string]interface{}{ + "targetVectors": []interface{}{"a", "b"}, + "combinationMethod": dto.Minimum, + }, + }, + expectTargetVectors: []string{"a", "b"}, + expectCombinationType: &dto.TargetCombination{Type: dto.Minimum, Weights: []float32{0, 0}}, + }, + { + name: "two target vectors with sum", + source: map[string]interface{}{ + "targets": map[string]interface{}{ + "targetVectors": []interface{}{"a", "b"}, + "combinationMethod": dto.Sum, + }, + }, + expectTargetVectors: []string{"a", "b"}, + expectCombinationType: &dto.TargetCombination{Type: dto.Sum, Weights: []float32{1.0, 1.0}}, + }, + { + name: "two target vectors with average", + source: map[string]interface{}{ + "targets": map[string]interface{}{ + "targetVectors": []interface{}{"a", "b"}, + "combinationMethod": dto.Average, + }, + }, + expectTargetVectors: []string{"a", "b"}, + expectCombinationType: &dto.TargetCombination{Type: dto.Average, Weights: []float32{0.5, 0.5}}, + }, + { + name: "two target vectors with manual weights", + source: map[string]interface{}{ + "targets": map[string]interface{}{ + "targetVectors": []interface{}{"a", "b"}, + "combinationMethod": dto.ManualWeights, + "weights": 
map[string]interface{}{"a": 0.5, "b": 0.25}, + }, + }, + expectTargetVectors: []string{"a", "b"}, + expectCombinationType: &dto.TargetCombination{Type: dto.ManualWeights, Weights: []float32{0.5, 0.25}}, + }, + { + name: "two target vectors with relative score", + source: map[string]interface{}{ + "targets": map[string]interface{}{ + "targetVectors": []interface{}{"a", "b"}, + "combinationMethod": dto.RelativeScore, + "weights": map[string]interface{}{"a": 0.5, "b": 0.25}, + }, + }, + expectTargetVectors: []string{"a", "b"}, + expectCombinationType: &dto.TargetCombination{Type: dto.RelativeScore, Weights: []float32{0.5, 0.25}}, + }, + { + name: "relative score, weights missmatch", + source: map[string]interface{}{"targets": map[string]interface{}{ + "targetVectors": []interface{}{"a", "b"}, + "combinationMethod": dto.RelativeScore, + "weights": map[string]interface{}{"a": 0.5}, + }}, + wantErr: true, + }, + { + name: "manual weights missmatch", + source: map[string]interface{}{"targets": map[string]interface{}{ + "targetVectors": []interface{}{"a", "b"}, + "combinationMethod": dto.ManualWeights, + "weights": map[string]interface{}{"a": 0.5}, + }}, + wantErr: true, + }, + { + name: "combination method type", + source: map[string]interface{}{"targets": map[string]interface{}{ + "targetVectors": []interface{}{"a", "b"}, + "combinationMethod": "wrong", + "weights": map[string]interface{}{"a": 0.5, "b": 0.25}, + }}, + wantErr: true, + }, + { + name: "target vector type", + source: map[string]interface{}{"targets": map[string]interface{}{ + "targetVectors": "a", + }}, + wantErr: true, + }, + { + name: "target vector entry type", + source: map[string]interface{}{"targets": map[string]interface{}{ + "targetVectors": []interface{}{"a", 5}, + }}, + wantErr: true, + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + targetVectors, combination, err := ExtractTargets(tt.source) + if tt.wantErr { + require.NotNil(t, err) + } else { + require.Nil(t, err) + 
require.Equal(t, tt.expectTargetVectors, targetVectors) + require.Equal(t, tt.expectCombinationType, combination) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/filters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/filters.go new file mode 100644 index 0000000000000000000000000000000000000000..6ec07463bdf1cc771d2232a9531e25630e5f7e62 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/filters.go @@ -0,0 +1,142 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Package common_filters provides the filters for the graphql endpoint for Weaviate +package common_filters + +import ( + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" +) + +// The filters common to Local->Get and Local->Meta queries. 
+func BuildNew(path string) graphql.InputObjectConfigFieldMap { + commonFilters := graphql.InputObjectConfigFieldMap{ + "operator": &graphql.InputObjectFieldConfig{ + Type: graphql.NewEnum(graphql.EnumConfig{ + Name: fmt.Sprintf("%sWhereOperatorEnum", path), + Values: graphql.EnumValueConfigMap{ + "And": &graphql.EnumValueConfig{}, + "Like": &graphql.EnumValueConfig{}, + "Or": &graphql.EnumValueConfig{}, + "Equal": &graphql.EnumValueConfig{}, + "Not": &graphql.EnumValueConfig{}, + "NotEqual": &graphql.EnumValueConfig{}, + "GreaterThan": &graphql.EnumValueConfig{}, + "GreaterThanEqual": &graphql.EnumValueConfig{}, + "LessThan": &graphql.EnumValueConfig{}, + "LessThanEqual": &graphql.EnumValueConfig{}, + "WithinGeoRange": &graphql.EnumValueConfig{}, + "IsNull": &graphql.EnumValueConfig{}, + "ContainsAny": &graphql.EnumValueConfig{}, + "ContainsAll": &graphql.EnumValueConfig{}, + "ContainsNone": &graphql.EnumValueConfig{}, + }, + Description: descriptions.WhereOperatorEnum, + }), + Description: descriptions.WhereOperator, + }, + "path": &graphql.InputObjectFieldConfig{ + Type: graphql.NewList(graphql.String), + Description: descriptions.WherePath, + }, + "valueInt": &graphql.InputObjectFieldConfig{ + Type: newValueIntType(path), + Description: descriptions.WhereValueInt, + }, + "valueNumber": &graphql.InputObjectFieldConfig{ + Type: newValueNumberType(path), + Description: descriptions.WhereValueNumber, + }, + "valueBoolean": &graphql.InputObjectFieldConfig{ + Type: newValueBooleanType(path), + Description: descriptions.WhereValueBoolean, + }, + "valueString": &graphql.InputObjectFieldConfig{ + Type: newValueStringType(path), + Description: descriptions.WhereValueString, + }, + "valueText": &graphql.InputObjectFieldConfig{ + Type: newValueTextType(path), + Description: descriptions.WhereValueText, + }, + "valueDate": &graphql.InputObjectFieldConfig{ + Type: newValueDateType(path), + Description: descriptions.WhereValueString, + }, + "valueGeoRange": 
&graphql.InputObjectFieldConfig{ + Type: newGeoRangeInputObject(path), + Description: descriptions.WhereValueRange, + }, + } + + // Recurse into the same time. + commonFilters["operands"] = &graphql.InputObjectFieldConfig{ + Description: descriptions.WhereOperands, + Type: graphql.NewList(graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sWhereOperandsInpObj", path), + Description: descriptions.WhereOperandsInpObj, + Fields: (graphql.InputObjectConfigFieldMapThunk)(func() graphql.InputObjectConfigFieldMap { + return commonFilters + }), + }, + )), + } + + return commonFilters +} + +func newGeoRangeInputObject(path string) *graphql.InputObject { + return graphql.NewInputObject(graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sWhereGeoRangeInpObj", path), + Fields: graphql.InputObjectConfigFieldMap{ + "geoCoordinates": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(newGeoRangeGeoCoordinatesInputObject(path)), + Description: descriptions.WhereValueRangeGeoCoordinates, + }, + "distance": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(newGeoRangeDistanceInputObject(path)), + Description: descriptions.WhereValueRangeDistance, + }, + }, + }) +} + +func newGeoRangeGeoCoordinatesInputObject(path string) *graphql.InputObject { + return graphql.NewInputObject(graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sWhereGeoRangeGeoCoordinatesInpObj", path), + Fields: graphql.InputObjectConfigFieldMap{ + "latitude": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(graphql.Float), + Description: descriptions.WhereValueRangeGeoCoordinatesLatitude, + }, + "longitude": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(graphql.Float), + Description: descriptions.WhereValueRangeGeoCoordinatesLongitude, + }, + }, + }) +} + +func newGeoRangeDistanceInputObject(path string) *graphql.InputObject { + return graphql.NewInputObject(graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sWhereGeoRangeDistanceInpObj", path), + Fields: 
graphql.InputObjectConfigFieldMap{ + "max": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(graphql.Float), + Description: descriptions.WhereValueRangeDistanceMax, + }, + }, + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/filters_types.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/filters_types.go new file mode 100644 index 0000000000000000000000000000000000000000..8d148bb530ce3f59ee2cdcd7cd016233c2dfa414 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/filters_types.go @@ -0,0 +1,148 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Package common_filters provides the filters for the graphql endpoint for Weaviate +package common_filters + +import ( + "fmt" + "strconv" + + "github.com/tailor-inc/graphql" + "github.com/tailor-inc/graphql/language/ast" +) + +func newValueTextType(path string) graphql.Input { + return graphql.NewScalar(graphql.ScalarConfig{ + Name: fmt.Sprintf("Text%v", path), + Description: "String or String[]", + Serialize: func(value interface{}) interface{} { + return graphql.String.Serialize(value) + }, + ParseValue: func(value interface{}) interface{} { + return graphql.String.ParseValue(value) + }, + ParseLiteral: func(valueAST ast.Value) interface{} { + switch valueAST := valueAST.(type) { + case *ast.StringValue: + return valueAST.Value + case *ast.ListValue: + result := make([]string, len(valueAST.Values)) + for i := range valueAST.Values { + result[i] = valueAST.Values[i].GetValue().(string) + } + return result + } + return nil + }, + }) +} + +func newValueStringType(path string) graphql.Input { + return 
newValueTextType(fmt.Sprintf("String%v", path)) +} + +func newValueDateType(path string) graphql.Input { + return newValueTextType(fmt.Sprintf("Date%v", path)) +} + +func newValueIntType(path string) graphql.Input { + return graphql.NewScalar(graphql.ScalarConfig{ + Name: fmt.Sprintf("Int%v", path), + Description: "Int or Int[]", + Serialize: func(value interface{}) interface{} { + return graphql.Int.Serialize(value) + }, + ParseValue: func(value interface{}) interface{} { + return graphql.Int.ParseValue(value) + }, + ParseLiteral: func(valueAST ast.Value) interface{} { + switch valueAST := valueAST.(type) { + case *ast.IntValue: + if intValue, err := strconv.Atoi(valueAST.Value); err == nil { + return intValue + } + case *ast.ListValue: + result := make([]int, len(valueAST.Values)) + for i := range valueAST.Values { + if intValue, err := strconv.Atoi(valueAST.Values[i].GetValue().(string)); err == nil { + result[i] = int(intValue) + } + } + return result + } + return nil + }, + }) +} + +func newValueNumberType(path string) graphql.Input { + return graphql.NewScalar(graphql.ScalarConfig{ + Name: fmt.Sprintf("Float%v", path), + Description: "Float or Float[]", + Serialize: func(value interface{}) interface{} { + return graphql.Float.Serialize(value) + }, + ParseValue: func(value interface{}) interface{} { + return graphql.Float.ParseValue(value) + }, + ParseLiteral: func(valueAST ast.Value) interface{} { + switch valueAST := valueAST.(type) { + case *ast.FloatValue: + if floatValue, err := strconv.ParseFloat(valueAST.Value, 64); err == nil { + return floatValue + } + case *ast.IntValue: + if floatValue, err := strconv.ParseFloat(valueAST.Value, 64); err == nil { + return floatValue + } + case *ast.ListValue: + result := make([]float64, len(valueAST.Values)) + for i := range valueAST.Values { + if floatValue, err := strconv.ParseFloat(valueAST.Values[i].GetValue().(string), 64); err == nil { + result[i] = floatValue + } + } + return result + } + return nil + }, + }) 
+} + +func newValueBooleanType(path string) graphql.Input { + return graphql.NewScalar(graphql.ScalarConfig{ + Name: fmt.Sprintf("Boolean%v", path), + Description: "Boolean or Boolean[]", + Serialize: func(value interface{}) interface{} { + return graphql.Boolean.Serialize(value) + }, + ParseValue: func(value interface{}) interface{} { + return graphql.Boolean.ParseValue(value) + }, + ParseLiteral: func(valueAST ast.Value) interface{} { + switch valueAST := valueAST.(type) { + case *ast.BooleanValue: + return valueAST.Value + case *ast.ListValue: + result := make([]bool, len(valueAST.Values)) + for i, val := range valueAST.Values { + switch v := val.(type) { + case *ast.BooleanValue: + result[i] = v.Value + } + } + return result + } + return nil + }, + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/graphql_types.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/graphql_types.go new file mode 100644 index 0000000000000000000000000000000000000000..a8ae5b0f6313e834c68697f7b4d3f2c48386bf8a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/graphql_types.go @@ -0,0 +1,79 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "fmt" + "strconv" + + "github.com/tailor-inc/graphql/language/ast" + + "github.com/tailor-inc/graphql" +) + +var Vector func(prefix string) *graphql.Scalar = func(prefix string) *graphql.Scalar { + return graphql.NewScalar(graphql.ScalarConfig{ + Name: fmt.Sprintf("%sNearVectorVectorScalar", prefix), + Description: "A type that can be either a regular or colbert embedding", + Serialize: func(value interface{}) interface{} { + switch v := value.(type) { + case []float32, [][]float32: + return v + default: + return nil + } + }, + ParseValue: func(value interface{}) interface{} { + return nil // do nothing, this type is meant to only serialize vectors + }, + ParseLiteral: func(valueAST ast.Value) interface{} { + switch valueAST := valueAST.(type) { + case *ast.ListValue: + var vector []float32 + var multiVector [][]float32 + for i := range valueAST.Values { + switch val := valueAST.Values[i].(type) { + case *ast.ListValue: + vec := make([]float32, len(val.Values)) + for j := range val.Values { + switch v := val.Values[j].(type) { + case *ast.FloatValue: + if floatValue, err := strconv.ParseFloat(v.Value, 64); err == nil { + vec[j] = float32(floatValue) + } + case *ast.IntValue: + if floatValue, err := strconv.ParseFloat(v.Value, 64); err == nil { + vec[j] = float32(floatValue) + } + } + } + multiVector = append(multiVector, vec) + case *ast.FloatValue: + if floatValue, err := strconv.ParseFloat(val.Value, 64); err == nil { + vector = append(vector, float32(floatValue)) + } + case *ast.IntValue: + if floatValue, err := strconv.ParseFloat(val.Value, 64); err == nil { + vector = append(vector, float32(floatValue)) + } + } + } + if len(multiVector) > 0 { + return multiVector + } + return vector + default: + return nil + } + }, + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/group_by.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/group_by.go new file mode 100644 index 0000000000000000000000000000000000000000..d657a35f851029fa2ab228ea23ebe7b25a1a0929 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/group_by.go @@ -0,0 +1,39 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import "github.com/weaviate/weaviate/entities/searchparams" + +// ExtractGroupBy +func ExtractGroupBy(source map[string]interface{}) searchparams.GroupBy { + var args searchparams.GroupBy + + p, ok := source["path"] + if ok { + rawSlice := p.([]interface{}) + if len(rawSlice) == 1 { + args.Property = rawSlice[0].(string) + } + } + + groups := source["groups"] + if groups != nil { + args.Groups = int(groups.(int)) + } + + objectsPerGroup := source["objectsPerGroup"] + if objectsPerGroup != nil { + args.ObjectsPerGroup = int(objectsPerGroup.(int)) + } + + return args +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/helper_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/helper_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ad7551d0a73f0faff9d730fa6c83b44273b8f93b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/helper_test.go @@ -0,0 +1,122 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "testing" + + "github.com/tailor-inc/graphql" + test_helper "github.com/weaviate/weaviate/adapters/handlers/graphql/test/helper" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/searchparams" +) + +type mockResolver struct { + test_helper.MockResolver +} + +type mockParams struct { + reportFilter bool + reportNearVector bool + reportNearObject bool +} + +func newMockResolver(t *testing.T, params mockParams) *mockResolver { + if params.reportNearVector && params.reportNearObject { + t.Fatal("cannot provide both nearVector and nearObject") + } + + // Build a FakeGet. + fakeGet := &graphql.Field{ + Name: "SomeAction", + Description: "Fake Some Action", + Args: graphql.FieldConfigArgument{ + "where": &graphql.ArgumentConfig{ + Description: "Filter options for the Get search, to convert the data to the filter input", + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: "GetWhereInpObj", + Fields: BuildNew("Get"), + Description: "", + }, + ), + }, + "nearVector": NearVectorArgument("Get", "SomeAction", false), + "nearObject": NearObjectArgument("Get", "SomeAction", false), + }, + Type: graphql.Int, + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + resolver := p.Source.(map[string]interface{})["Resolver"].(*mockResolver) + return resolver.ReportArgs(params, p.Args, p.Info.FieldName) + }, + } + + mocker := &mockResolver{} + mocker.RootFieldName = "SomeAction" + mocker.RootField = fakeGet + mocker.RootObject = map[string]interface{}{"Resolver": mocker} + return mocker +} + +func (m *mockResolver) ReportArgs(params mockParams, args map[string]interface{}, + fieldName string, +) (result interface{}, err error) { + if params.reportFilter { + filters, err := ExtractFilters(args, fieldName) + if err != nil { + return nil, err + } + result, err = m.ReportFilters(filters) + if err != nil { + return nil, err + } + } + + if 
params.reportNearVector { + nearVec, _, err := ExtractNearVector(args["nearVector"].(map[string]interface{}), nil) + if err != nil { + return nil, err + } + result, err = m.ReportNearVector(nearVec) + if err != nil { + return nil, err + } + } + + if params.reportNearObject { + nearObj, _, err := ExtractNearObject(args["nearObject"].(map[string]interface{})) + if err != nil { + return nil, err + } + result, err = m.ReportNearObject(nearObj) + if err != nil { + return nil, err + } + } + + return +} + +func (m *mockResolver) ReportFilters(filter *filters.LocalFilter) (interface{}, error) { + args := m.Called(filter) + return args.Get(0), args.Error(1) +} + +func (m *mockResolver) ReportNearVector(params searchparams.NearVector) (interface{}, error) { + args := m.Called(params) + return args.Get(0), args.Error(1) +} + +func (m *mockResolver) ReportNearObject(params searchparams.NearObject) (interface{}, error) { + args := m.Called(params) + return args.Get(0), args.Error(1) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/hybrid.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/hybrid.go new file mode 100644 index 0000000000000000000000000000000000000000..a16d42d02fa2ebd223771e72cba04e5ded89b723 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/hybrid.go @@ -0,0 +1,188 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "fmt" + + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + + "github.com/weaviate/weaviate/entities/searchparams" +) + +const DefaultAlpha = float64(0.75) +const ( + HybridRankedFusion = iota + HybridRelativeScoreFusion +) +const HybridFusionDefault = HybridRelativeScoreFusion + +func ExtractHybridSearch(source map[string]interface{}, explainScore bool) (*searchparams.HybridSearch, *dto.TargetCombination, error) { + var subsearches []interface{} + operandsI := source["operands"] + if operandsI != nil { + operands := operandsI.([]interface{}) + for _, operand := range operands { + operandMap := operand.(map[string]interface{}) + subsearches = append(subsearches, operandMap) + } + } + var args searchparams.HybridSearch + targetVectors, combination, err := ExtractTargets(source) + if err != nil { + return &searchparams.HybridSearch{}, nil, err + } + args.TargetVectors = targetVectors + + namedSearchesI := source["searches"] + if namedSearchesI != nil { + namedSearchess := namedSearchesI.([]interface{}) + namedSearches := namedSearchess[0].(map[string]interface{}) + // TODO: add bm25 here too + if namedSearches["nearText"] != nil { + nearText := namedSearches["nearText"].(map[string]interface{}) + arguments, _ := ExtractNearText(nearText) + + args.NearTextParams = &arguments + } + + if namedSearches["nearVector"] != nil { + nearVector := namedSearches["nearVector"].(map[string]interface{}) + arguments, _, _ := ExtractNearVector(nearVector, targetVectors) + // targetvectors need to be set in the hybrid search to be handled correctly, return an error if not set + if targetVectors == nil && arguments.TargetVectors != nil { + return nil, nil, fmt.Errorf("targetVectors need to be set in the hybrid search to be handled correctly") + } + + args.NearVectorParams = &arguments + + } + } + + var weightedSearchResults []searchparams.WeightedSearchResult + + 
for _, ss := range subsearches { + subsearch := ss.(map[string]interface{}) + switch { + case subsearch["sparseSearch"] != nil: + bm25 := subsearch["sparseSearch"].(map[string]interface{}) + arguments := ExtractBM25(bm25, explainScore) + + weightedSearchResults = append(weightedSearchResults, searchparams.WeightedSearchResult{ + SearchParams: arguments, + Weight: subsearch["weight"].(float64), + Type: "bm25", + }) + case subsearch["nearText"] != nil: + nearText := subsearch["nearText"].(map[string]interface{}) + arguments, _ := ExtractNearText(nearText) + + weightedSearchResults = append(weightedSearchResults, searchparams.WeightedSearchResult{ + SearchParams: arguments, + Weight: subsearch["weight"].(float64), + Type: "nearText", + }) + + case subsearch["nearVector"] != nil: + nearVector := subsearch["nearVector"].(map[string]interface{}) + arguments, _, _ := ExtractNearVector(nearVector, targetVectors) + + weightedSearchResults = append(weightedSearchResults, searchparams.WeightedSearchResult{ + SearchParams: arguments, + Weight: subsearch["weight"].(float64), + Type: "nearVector", + }) + + default: + return nil, nil, fmt.Errorf("unknown subsearch type: %+v", subsearch) + } + } + + args.SubSearches = weightedSearchResults + + alpha, ok := source["alpha"] + if ok { + args.Alpha = alpha.(float64) + } else { + args.Alpha = DefaultAlpha + } + if args.Alpha < 0 || args.Alpha > 1 { + return nil, nil, fmt.Errorf("alpha should be between 0.0 and 1.0") + } + + vectorDistanceCutOff, ok := source["maxVectorDistance"] + if ok { + args.Distance = float32(vectorDistanceCutOff.(float64)) + args.WithDistance = true + } else { + args.WithDistance = false + } + + query, ok := source["query"] + if ok { + args.Query = query.(string) + } + + fusionType, ok := source["fusionType"] + if ok { + args.FusionAlgorithm = fusionType.(int) + } else { + args.FusionAlgorithm = HybridFusionDefault + } + + switch vector := source["vector"].(type) { + case nil: + args.Vector = nil + case 
[]float32, [][]float32, models.C11yVector: + args.Vector = vector + case []interface{}: + v := make([]float32, len(vector)) + for i, value := range vector { + v[i] = float32(value.(float64)) + } + args.Vector = v + default: + return nil, nil, fmt.Errorf("cannot parse vector: unrecognized vector type: %T", source["vector"]) + } + + if _, ok := source["properties"]; ok { + properties := source["properties"].([]interface{}) + args.Properties = make([]string, len(properties)) + for i, value := range properties { + args.Properties[i] = value.(string) + } + } + + operator, ok := source["bm25SearchOperator"] + if ok { + operator := operator.(map[string]interface{}) + args.SearchOperator = operator["operator"].(string) + if operator["minimumOrTokensMatch"] != nil { + args.MinimumOrTokensMatch = int(operator["minimumOrTokensMatch"].(int)) + } + } + + args.Type = "hybrid" + + if args.NearTextParams != nil && args.NearVectorParams != nil { + return nil, nil, fmt.Errorf("hybrid search cannot have both nearText and nearVector parameters") + } + if args.Vector != nil && args.NearTextParams != nil { + return nil, nil, fmt.Errorf("cannot have both vector and nearTextParams") + } + if args.Vector != nil && args.NearVectorParams != nil { + return nil, nil, fmt.Errorf("cannot have both vector and nearVectorParams") + } + + return &args, combination, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/hybrid_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/hybrid_test.go new file mode 100644 index 0000000000000000000000000000000000000000..35965a4625ed8794590757026507491acda023de --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/hybrid_test.go @@ -0,0 +1,66 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| 
+// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/searchparams" +) + +func TestHybrid(t *testing.T) { + var nilweights []float32 + var ss []searchparams.WeightedSearchResult + cases := []struct { + input map[string]interface{} + output *searchparams.HybridSearch + outputCombination *dto.TargetCombination + error bool + }{ + { + input: map[string]interface{}{"vector": []float32{1.0, 2.0, 3.0}}, + output: &searchparams.HybridSearch{Vector: []float32{1.0, 2.0, 3.0}, SubSearches: ss, Type: "hybrid", Alpha: 0.75, FusionAlgorithm: 1}, + outputCombination: nil, + }, + { + input: map[string]interface{}{"vector": []float32{1.0, 2.0, 3.0}, "targetVectors": []interface{}{"target1", "target2"}}, + output: &searchparams.HybridSearch{Vector: []float32{1.0, 2.0, 3.0}, TargetVectors: []string{"target1", "target2"}, SubSearches: ss, Type: "hybrid", Alpha: 0.75, FusionAlgorithm: 1}, + outputCombination: &dto.TargetCombination{Type: dto.Minimum, Weights: nilweights}, + }, + { + input: map[string]interface{}{"targetVectors": []interface{}{"target1", "target2"}, "searches": []interface{}{map[string]interface{}{"nearVector": map[string]interface{}{"vector": []float32{float32(1.0), float32(2.0), float32(3.0)}}}}}, + output: &searchparams.HybridSearch{NearVectorParams: &searchparams.NearVector{Vectors: []models.Vector{[]float32{1, 2, 3}, []float32{1, 2, 3}}}, TargetVectors: []string{"target1", "target2"}, SubSearches: ss, Type: "hybrid", Alpha: 0.75, FusionAlgorithm: 1}, + outputCombination: &dto.TargetCombination{Type: dto.Minimum, Weights: nilweights}, + }, + { + input: map[string]interface{}{"targetVectors": []interface{}{"target1", "target2"}, "searches": []interface{}{map[string]interface{}{"nearVector": 
map[string]interface{}{"vectorPerTarget": map[string]interface{}{"target1": []float32{1.0, 2.0, 3.0}, "target2": []float32{1.0, 2.0}}}}}}, + output: &searchparams.HybridSearch{NearVectorParams: &searchparams.NearVector{Vectors: []models.Vector{[]float32{1, 2, 3}, []float32{1, 2}}}, TargetVectors: []string{"target1", "target2"}, SubSearches: ss, Type: "hybrid", Alpha: 0.75, FusionAlgorithm: 1}, + outputCombination: &dto.TargetCombination{Type: dto.Minimum, Weights: nilweights}, + }, + } + + for _, tt := range cases { + t.Run("near vector", func(t *testing.T) { + hybrid, outputCombination, err := ExtractHybridSearch(tt.input, false) + if tt.error { + require.NotNil(t, err) + } else { + require.Nil(t, err) + require.Equal(t, tt.output, hybrid) + require.Equal(t, tt.outputCombination, outputCombination) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/nearText.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/nearText.go new file mode 100644 index 0000000000000000000000000000000000000000..68d71b959842b5ce3a03cec327f9ce5f839b328c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/nearText.go @@ -0,0 +1,114 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "fmt" + + "github.com/weaviate/weaviate/entities/searchparams" +) + +// ExtractNearText arguments, such as "concepts", "moveTo", "moveAwayFrom", +// "limit", etc. 
+func ExtractNearText(source map[string]interface{}) (searchparams.NearTextParams, error) { + var args searchparams.NearTextParams + + // keywords is a required argument, so we don't need to check for its existing + keywords := source["concepts"].([]interface{}) + args.Values = make([]string, len(keywords)) + for i, value := range keywords { + args.Values[i] = value.(string) + } + + // autocorrect is an optional arg, so it could be nil + autocorrect, ok := source["autocorrect"] + if ok { + args.Autocorrect = autocorrect.(bool) + if args.Autocorrect { + return searchparams.NearTextParams{}, fmt.Errorf("autocorrect is not supported for hybrid nearText") + } + } + + // limit is an optional arg, so it could be nil + limit, ok := source["limit"] + if ok { + // the type is fixed through gql config, no need to catch incorrect type + // assumption + args.Limit = limit.(int) + } + + certainty, ok := source["certainty"] + if ok { + args.Certainty = certainty.(float64) + } + + distance, ok := source["distance"] + if ok { + args.Distance = distance.(float64) + args.WithDistance = true + } + + // moveTo is an optional arg, so it could be nil + moveTo, ok := source["moveTo"] + if ok { + args.MoveTo = extractMovement(moveTo) + } + + // network is an optional arg, so it could be nil + network, ok := source["network"] + if ok { + args.Network = network.(bool) + } + + // moveAwayFrom is an optional arg, so it could be nil + moveAwayFrom, ok := source["moveAwayFrom"] + if ok { + args.MoveAwayFrom = extractMovement(moveAwayFrom) + } + + return args, nil +} + +func extractMovement(input interface{}) searchparams.ExploreMove { + // the type is fixed through gql config, no need to catch incorrect type + // assumption, all fields are required so we don't need to check for their + // presence + moveToMap := input.(map[string]interface{}) + res := searchparams.ExploreMove{} + res.Force = float32(moveToMap["force"].(float64)) + + keywords, ok := moveToMap["concepts"].([]interface{}) + if ok 
{ + res.Values = make([]string, len(keywords)) + for i, value := range keywords { + res.Values[i] = value.(string) + } + } + + objects, ok := moveToMap["objects"].([]interface{}) + if ok { + res.Objects = make([]searchparams.ObjectMove, len(objects)) + for i, value := range objects { + v, ok := value.(map[string]interface{}) + if ok { + if v["id"] != nil { + res.Objects[i].ID = v["id"].(string) + } + if v["beacon"] != nil { + res.Objects[i].Beacon = v["beacon"].(string) + } + } + } + } + + return res +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/near_filters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/near_filters.go new file mode 100644 index 0000000000000000000000000000000000000000..58afb9dd8836537bcde2de9cdc0b10684d715d16 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/near_filters.go @@ -0,0 +1,238 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "errors" + "fmt" + "strconv" + + "github.com/tailor-inc/graphql" + "github.com/tailor-inc/graphql/language/ast" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" +) + +func NearVectorArgument(argumentPrefix, className string, addTarget bool) *graphql.ArgumentConfig { + prefix := fmt.Sprintf("%s%s", argumentPrefix, className) + return &graphql.ArgumentConfig{ + // Description: descriptions.GetExplore, + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sNearVectorInpObj", prefix), + Fields: NearVectorFields(prefix, addTarget), + }, + ), + } +} + +func NearVectorFields(prefix string, addTarget bool) graphql.InputObjectConfigFieldMap { + fieldMap := graphql.InputObjectConfigFieldMap{ + "vector": &graphql.InputObjectFieldConfig{ + Description: descriptions.Vector, + Type: Vector(prefix), + }, + "vectorPerTarget": &graphql.InputObjectFieldConfig{ + Description: "Vector per target", + Type: vectorPerTarget, + }, + "certainty": &graphql.InputObjectFieldConfig{ + Description: descriptions.Certainty, + Type: graphql.Float, + }, + "distance": &graphql.InputObjectFieldConfig{ + Description: descriptions.Distance, + Type: graphql.Float, + }, + "targetVectors": &graphql.InputObjectFieldConfig{ + Description: "Target vectors", + Type: graphql.NewList(graphql.String), + }, + } + fieldMap = AddTargetArgument(fieldMap, prefix+"nearVector", addTarget) + return fieldMap +} + +func NearObjectArgument(argumentPrefix, className string, addTarget bool) *graphql.ArgumentConfig { + prefix := fmt.Sprintf("%s%s", argumentPrefix, className) + return &graphql.ArgumentConfig{ + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sNearObjectInpObj", prefix), + Fields: nearObjectFields(prefix, addTarget), + }, + ), + } +} + +func nearObjectFields(prefix string, addTarget bool) graphql.InputObjectConfigFieldMap { + fieldMap := 
graphql.InputObjectConfigFieldMap{ + "id": &graphql.InputObjectFieldConfig{ + Description: descriptions.ID, + Type: graphql.String, + }, + "beacon": &graphql.InputObjectFieldConfig{ + Description: descriptions.Beacon, + Type: graphql.String, + }, + "certainty": &graphql.InputObjectFieldConfig{ + Description: descriptions.Certainty, + Type: graphql.Float, + }, + "distance": &graphql.InputObjectFieldConfig{ + Description: descriptions.Distance, + Type: graphql.Float, + }, + "targetVectors": &graphql.InputObjectFieldConfig{ + Description: "Target vectors", + Type: graphql.NewList(graphql.String), + }, + } + fieldMap = AddTargetArgument(fieldMap, prefix+"nearObject", addTarget) + return fieldMap +} + +var vectorPerTarget = graphql.NewScalar(graphql.ScalarConfig{ + Name: "VectorPerTarget", + Description: "A custom scalar type for a map with strings as keys and list of floats or list of lists of floats as values", + Serialize: func(value interface{}) interface{} { + return value + }, + ParseValue: func(value interface{}) interface{} { + return value + }, + ParseLiteral: vectorPerTargetParseLiteral, +}) + +func vectorPerTargetParseLiteral(valueAST ast.Value) interface{} { + switch v := valueAST.(type) { + case *ast.ObjectValue: + result := make(map[string]interface{}) + for _, field := range v.Fields { + key := field.Name.Value + switch value := field.Value.(type) { + case *ast.ListValue: + if len(value.Values) > 0 { + switch value.Values[0].(type) { + case *ast.ListValue: + areMultiVectors, err := valuesAreMultiVectors(value.Values) + if err != nil { + return nil + } + if areMultiVectors { + r, err := getListOfMultiVectors(value.Values) + if err != nil { + return nil + } + result[key] = r + } else { + r, err := getListOfNormalVectors(value.Values) + if err != nil { + return nil + } + result[key] = r + } + default: + normalVector, err := getNormalVector(value.Values) + if err != nil { + return nil + } + result[key] = normalVector + } + } + default: + return nil + } + } + 
return result + default: + return nil + } +} + +func getNormalVector(values []ast.Value) ([]float32, error) { + normalVector := make([]float32, len(values)) + for i, value := range values { + vStr, ok := value.GetValue().(string) + if !ok { + return nil, fmt.Errorf("value is not a string: %T", value) + } + floatValue, err := strconv.ParseFloat(vStr, 64) + if err != nil { + return nil, err + } + normalVector[i] = float32(floatValue) + } + return normalVector, nil +} + +func getListOfNormalVectors(values []ast.Value) ([][]float32, error) { + normalVectors := make([][]float32, len(values)) + for i, value := range values { + vector, ok := value.(*ast.ListValue) + if !ok { + return nil, fmt.Errorf("value is not a list: %T", value) + } + v, err := getNormalVector(vector.Values) + if err != nil { + return nil, err + } + normalVectors[i] = v + } + return normalVectors, nil +} + +func getListOfMultiVectors(values []ast.Value) ([][][]float32, error) { + multiVectors := make([][][]float32, len(values)) + for i, value := range values { + multiVector, ok := value.(*ast.ListValue) + if !ok { + return nil, fmt.Errorf("value is not a multivector list: %T", value) + } + mv, err := getListOfNormalVectors(multiVector.Values) + if err != nil { + return nil, err + } + multiVectors[i] = mv + } + return multiVectors, nil +} + +func valuesAreMultiVectors(values []ast.Value) (bool, error) { + if len(values) == 0 { + return false, errors.New(("values are empty")) + } + firstValue := values[0] + switch firstValue.(type) { + case *ast.ListValue: + switch firstValue.(type) { + case *ast.ListValue: + vList := firstValue.(*ast.ListValue) + if len(vList.Values) == 0 { + return false, errors.New("empty list") + } + vv := vList.Values[0] + if _, vvIsList := vv.(*ast.ListValue); vvIsList { + return true, nil + } + if _, vvIsFloat := vv.(*ast.FloatValue); vvIsFloat { + return false, nil + } + if _, vvIsInt := vv.(*ast.IntValue); vvIsInt { + return false, nil + } + return false, fmt.Errorf("unknown 
type: %T", vv) + } + default: + return false, errors.New("not list value") + } + return false, errors.New("failed to determine") +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/near_object.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/near_object.go new file mode 100644 index 0000000000000000000000000000000000000000..2af6aa449744bb892c1f28ee95de16455166d3d7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/near_object.go @@ -0,0 +1,59 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "fmt" + + "github.com/weaviate/weaviate/entities/dto" + + "github.com/weaviate/weaviate/entities/searchparams" +) + +// ExtractNearObject arguments, such as "vector" and "certainty" +func ExtractNearObject(source map[string]interface{}) (searchparams.NearObject, *dto.TargetCombination, error) { + var args searchparams.NearObject + + id, ok := source["id"] + if ok { + args.ID = id.(string) + } + + beacon, ok := source["beacon"] + if ok { + args.Beacon = beacon.(string) + } + + certainty, certaintyOK := source["certainty"] + if certaintyOK { + args.Certainty = certainty.(float64) + } + + distance, distanceOK := source["distance"] + if distanceOK { + args.Distance = distance.(float64) + args.WithDistance = true + } + + if certaintyOK && distanceOK { + return searchparams.NearObject{}, nil, + fmt.Errorf("cannot provide distance and certainty") + } + + targetVectors, combination, err := ExtractTargets(source) + if err != nil { + return searchparams.NearObject{}, nil, err + } + args.TargetVectors = targetVectors + + return args, combination, nil +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/near_vector.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/near_vector.go new file mode 100644 index 0000000000000000000000000000000000000000..8f2ad000a5e7c9abe1823e5a9799a1837757db9c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/near_vector.go @@ -0,0 +1,155 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "fmt" + + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/searchparams" +) + +// ExtractNearVector arguments, such as "vector" and "distance" +func ExtractNearVector(source map[string]interface{}, targetVectorsFromOtherLevel []string) (searchparams.NearVector, *dto.TargetCombination, error) { + var args searchparams.NearVector + + vectorGQL, okVec := source["vector"] + vectorPerTarget, okVecPerTarget := source["vectorPerTarget"].(map[string]interface{}) + if (!okVec && !okVecPerTarget) || (okVec && okVecPerTarget) { + return searchparams.NearVector{}, nil, + fmt.Errorf("vector or vectorPerTarget is required field") + } + + certainty, certaintyOK := source["certainty"] + if certaintyOK { + args.Certainty = certainty.(float64) + } + + distance, distanceOK := source["distance"] + if distanceOK { + args.Distance = distance.(float64) + args.WithDistance = true + } + + if certaintyOK && distanceOK { + return searchparams.NearVector{}, nil, + fmt.Errorf("cannot provide distance and certainty") + } + + var targetVectors []string + var combination *dto.TargetCombination + if targetVectorsFromOtherLevel == nil { + var err error + 
targetVectors, combination, err = ExtractTargets(source) + if err != nil { + return searchparams.NearVector{}, nil, err + } + args.TargetVectors = targetVectors + } else { + targetVectors = targetVectorsFromOtherLevel + } + + if okVec { + if len(targetVectors) == 0 { + args.Vectors = []models.Vector{vectorGQL} + } else { + args.Vectors = make([]models.Vector, len(targetVectors)) + for i := range targetVectors { + args.Vectors[i] = vectorGQL + } + } + } + + if okVecPerTarget { + var vectors []models.Vector + // needs to handle the case of targetVectors being empty (if you only provide a near vector with targets) + if len(targetVectors) == 0 { + targets := make([]string, 0, len(vectorPerTarget)) + vectors = make([]models.Vector, 0, len(vectorPerTarget)) + + for target := range vectorPerTarget { + single, ok := vectorPerTarget[target].([]float32) + if ok { + vectors = append(vectors, single) + targets = append(targets, target) + } else { + if normalVectors, ok := vectorPerTarget[target].([][]float32); ok { + for j := range normalVectors { + vectors = append(vectors, normalVectors[j]) + targets = append(targets, target) + } + } else if multiVectors, ok := vectorPerTarget[target].([][][]float32); ok { + // NOTE the type of multiVectors is [][][]float32 (vs normalVectors which is [][]float32), + // so there are two similar loops here to handle the different types, if there is a simpler + // way to handle this, feel free to change it + for j := range multiVectors { + vectors = append(vectors, multiVectors[j]) + targets = append(targets, target) + } + } else { + return searchparams.NearVector{}, nil, + fmt.Errorf( + "vectorPerTarget should be a map with strings as keys and a normal vector, list of vectors, "+ + "or list of multi-vectors as values. 
Received %T", vectorPerTarget[target]) + } + } + } + args.TargetVectors = targets + } else { + // map provided targetVectors to the provided searchvectors + vectors = make([]models.Vector, len(targetVectors)) + handled := make(map[string]struct{}) + for i, target := range targetVectors { + if _, ok := handled[target]; ok { + continue + } else { + handled[target] = struct{}{} + } + vectorPerTargetParsed, ok := vectorPerTarget[target] + if !ok { + return searchparams.NearVector{}, nil, fmt.Errorf("vectorPerTarget for target %s is not provided", target) + } + if vectorIn, ok := vectorPerTargetParsed.([]float32); ok { + vectors[i] = vectorIn + } else if vectorsIn, ok := vectorPerTargetParsed.([][]float32); ok { + // if one target vector has multiple search vectors, the target vector needs to be repeated multiple times + for j, w := range vectorsIn { + if !targetVectorOrderMatches(i, j, targetVectors, target) { + return searchparams.NearVector{}, nil, fmt.Errorf("target %s is not in the correct order", target) + } + vectors[i+j] = w + } + } else if multiVectorsIn, ok := vectorPerTargetParsed.([][][]float32); ok { + // NOTE the type of multiVectorsIn is [][][]float32 (vs vectorsIn which is [][]float32), + // so there are two similar loops here to handle the different types, if there is a simpler + // way to handle this, feel free to change it + for j, w := range multiVectorsIn { + if !targetVectorOrderMatches(i, j, targetVectors, target) { + return searchparams.NearVector{}, nil, fmt.Errorf("multivector target %s is not in the correct order", target) + } + vectors[i+j] = w + } + } else { + return searchparams.NearVector{}, nil, fmt.Errorf("could not handle type of near vector for target %s, got %v", target, vectorPerTargetParsed) + } + } + } + args.Vectors = vectors + } + + return args, combination, nil +} + +func targetVectorOrderMatches(i, j int, targetVectors []string, target string) bool { + return i+j < len(targetVectors) && targetVectors[i+j] == target +} diff 
--git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/parse_filters_into_ast.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/parse_filters_into_ast.go new file mode 100644 index 0000000000000000000000000000000000000000..74f71a7d5d90dd2c8a0992c0a2652ffaa6785e40 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/parse_filters_into_ast.go @@ -0,0 +1,176 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "encoding/json" + "fmt" + + "github.com/weaviate/weaviate/adapters/handlers/rest/filterext" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" +) + +// Extract the filters from the arguments of a Local->Get or Local->Meta query. +func ExtractFilters(args map[string]interface{}, rootClass string) (*filters.LocalFilter, error) { + where, wherePresent := args["where"] + if !wherePresent { + // No filters; all is fine! + return nil, nil + } else { + whereMap := where.(map[string]interface{}) // guaranteed by GraphQL to be a map. 
+ filter, err := filterMapToModel(whereMap) + if err != nil { + return nil, fmt.Errorf("failed to extract filters: %w", err) + } + + return filterext.Parse(filter, rootClass) + } +} + +func filterMapToModel(m map[string]interface{}) (*models.WhereFilter, error) { + b, err := json.Marshal(m) + if err != nil { + return nil, fmt.Errorf("failed convert map to models.WhereFilter: %w", err) + } + + var filter WhereFilter + err = json.Unmarshal(b, &filter) + if err != nil { + return nil, fmt.Errorf("failed convert map to models.WhereFilter: %w", err) + } + + return newConverter().do(&filter) +} + +type converter struct{} + +func newConverter() *converter { + return &converter{} +} + +func (c *converter) do(in *WhereFilter) (*models.WhereFilter, error) { + whereFilter := &models.WhereFilter{ + Operator: in.Operator, + Path: in.Path, + } + + if in.ValueInt != nil { + switch v := in.ValueInt.(type) { + case float64: + val := int64(v) + whereFilter.ValueInt = &val + case []interface{}: + ints := make([]int64, len(v)) + for i := range v { + ints[i] = int64(v[i].(float64)) + } + whereFilter.ValueIntArray = ints + default: + return nil, fmt.Errorf("unsupported type: '%T'", in.ValueInt) + } + } + if in.ValueNumber != nil { + switch v := in.ValueNumber.(type) { + case float64: + whereFilter.ValueNumber = &v + case []interface{}: + numbers := make([]float64, len(v)) + for i := range v { + numbers[i] = v[i].(float64) + } + whereFilter.ValueNumberArray = numbers + default: + return nil, fmt.Errorf("unsupported type: '%T'", in.ValueNumber) + } + } + if in.ValueBoolean != nil { + switch v := in.ValueBoolean.(type) { + case bool: + whereFilter.ValueBoolean = &v + case []interface{}: + bools := make([]bool, len(v)) + for i := range v { + bools[i] = v[i].(bool) + } + whereFilter.ValueBooleanArray = bools + default: + return nil, fmt.Errorf("unsupported type: '%T'", in.ValueBoolean) + } + } + if in.ValueString != nil { + value, valueArray, err := c.parseString(in.ValueString) + if err != 
nil { + return nil, err + } + whereFilter.ValueString = value + whereFilter.ValueStringArray = valueArray + } + if in.ValueText != nil { + value, valueArray, err := c.parseString(in.ValueText) + if err != nil { + return nil, err + } + whereFilter.ValueText = value + whereFilter.ValueTextArray = valueArray + } + if in.ValueDate != nil { + value, valueArray, err := c.parseString(in.ValueDate) + if err != nil { + return nil, err + } + whereFilter.ValueDate = value + whereFilter.ValueDateArray = valueArray + } + if in.ValueGeoRange != nil { + whereFilter.ValueGeoRange = in.ValueGeoRange + } + + // recursively build operands + for i, op := range in.Operands { + whereFilterOp, err := c.do(op) + if err != nil { + return nil, fmt.Errorf("operands[%v]: %w", i, err) + } + whereFilter.Operands = append(whereFilter.Operands, whereFilterOp) + } + + return whereFilter, nil +} + +func (c *converter) parseString(in interface{}) (value *string, valueArray []string, err error) { + switch v := in.(type) { + case string: + value = &v + case []interface{}: + valueArray = make([]string, len(v)) + for i := range v { + valueArray[i] = v[i].(string) + } + default: + err = fmt.Errorf("unsupported type: '%T'", in) + } + return +} + +type WhereFilter struct { + Operands []*WhereFilter `json:"operands"` + Operator string `json:"operator,omitempty"` + Path []string `json:"path"` + ValueBoolean interface{} `json:"valueBoolean,omitempty"` + ValueDate interface{} `json:"valueDate,omitempty"` + ValueInt interface{} `json:"valueInt,omitempty"` + ValueNumber interface{} `json:"valueNumber,omitempty"` + ValueString interface{} `json:"valueString,omitempty"` + ValueText interface{} `json:"valueText,omitempty"` + ValueGeoRange *models.WhereFilterGeoRange `json:"valueGeoRange,omitempty"` +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/parse_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/parse_test.go new 
file mode 100644 index 0000000000000000000000000000000000000000..6131b5ec80d23f6aa90989be0052b7d9f68a4cb3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/parse_test.go @@ -0,0 +1,425 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "testing" + + "github.com/tailor-inc/graphql/gqlerrors" + "github.com/tailor-inc/graphql/language/location" + test_helper "github.com/weaviate/weaviate/adapters/handlers/graphql/test/helper" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/searchparams" +) + +// Basic test on filter +func TestExtractFilterToplevelField(t *testing.T) { + t.Parallel() + + resolver := newMockResolver(t, mockParams{reportFilter: true}) + /*localfilter is a struct containing a clause struct + type filters.Clause struct { + Operator Operator + On *filters.Path + filters.Value *filters.Value + Operands []filters.Clause + }*/ + expectedParams := &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("intField"), + }, + Value: &filters.Value{ + Value: 42, + Type: schema.DataTypeInt, + }, + }} + + resolver.On("ReportFilters", expectedParams). 
+ Return(test_helper.EmptyList(), nil).Once() + + query := `{ SomeAction(where: { path: ["intField"], operator: Equal, valueInt: 42}) }` + resolver.AssertResolve(t, query) +} + +func TestExtractFilterLike(t *testing.T) { + t.Parallel() + + t.Run("extracts with valueText", func(t *testing.T) { + resolver := newMockResolver(t, mockParams{reportFilter: true}) + expectedParams := &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorLike, + On: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("name"), + }, + Value: &filters.Value{ + Value: "Schn*el", + Type: schema.DataTypeText, + }, + }} + + resolver.On("ReportFilters", expectedParams). + Return(test_helper.EmptyList(), nil).Once() + + query := `{ SomeAction(where: { + path: ["name"], + operator: Like, + valueText: "Schn*el", + }) }` + resolver.AssertResolve(t, query) + }) + + t.Run("[deprecated string] extracts with valueString", func(t *testing.T) { + resolver := newMockResolver(t, mockParams{reportFilter: true}) + expectedParams := &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorLike, + On: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("name"), + }, + Value: &filters.Value{ + Value: "Schn*el", + Type: schema.DataTypeString, + }, + }} + + resolver.On("ReportFilters", expectedParams). 
+ Return(test_helper.EmptyList(), nil).Once() + + query := `{ SomeAction(where: { + path: ["name"], + operator: Like, + valueString: "Schn*el", + }) }` + resolver.AssertResolve(t, query) + }) +} + +func TestExtractFilterLike_ValueText(t *testing.T) { + t.Parallel() + + resolver := newMockResolver(t, mockParams{reportFilter: true}) + expectedParams := &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorLike, + On: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("name"), + }, + Value: &filters.Value{ + Value: "schn*el", + Type: schema.DataTypeText, + }, + }} + + resolver.On("ReportFilters", expectedParams). + Return(test_helper.EmptyList(), nil).Once() + + query := `{ SomeAction(where: { + path: ["name"], + operator: Like, + valueText: "schn*el", + }) }` + resolver.AssertResolve(t, query) +} + +func TestExtractFilterIsNull(t *testing.T) { + resolver := newMockResolver(t, mockParams{reportFilter: true}) + expectedParams := &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorIsNull, + On: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("name"), + }, + Value: &filters.Value{ + Value: "true", + Type: schema.DataTypeText, + }, + }} + + resolver.On("ReportFilters", expectedParams). 
+ Return(test_helper.EmptyList(), nil).Once() + + query := `{ SomeAction(where: { + path: ["name"], + operator: IsNull, + valueText: "true", + }) }` + resolver.AssertResolve(t, query) +} + +func TestExtractFilterGeoLocation(t *testing.T) { + t.Parallel() + + t.Run("with all fields set as required", func(t *testing.T) { + resolver := newMockResolver(t, mockParams{reportFilter: true}) + expectedParams := &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorWithinGeoRange, + On: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("location"), + }, + Value: &filters.Value{ + Value: filters.GeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + Latitude: ptFloat32(0.5), + Longitude: ptFloat32(0.6), + }, + Distance: 2.0, + }, + Type: schema.DataTypeGeoCoordinates, + }, + }} + + resolver.On("ReportFilters", expectedParams). + Return(test_helper.EmptyList(), nil).Once() + + query := `{ SomeAction(where: { + path: ["location"], + operator: WithinGeoRange, + valueGeoRange: {geoCoordinates: { latitude: 0.5, longitude: 0.6 }, distance: { max: 2.0 } } + }) }` + resolver.AssertResolve(t, query) + }) + + t.Run("with only some of the fields set", func(t *testing.T) { + resolver := newMockResolver(t, mockParams{reportFilter: true}) + expectedParams := &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorWithinGeoRange, + On: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("location"), + }, + Value: &filters.Value{ + Value: filters.GeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + Latitude: ptFloat32(0.5), + Longitude: ptFloat32(0.6), + }, + Distance: 2.0, + }, + Type: schema.DataTypeGeoCoordinates, + }, + }} + + resolver.On("ReportFilters", expectedParams). 
+ Return(test_helper.EmptyList(), nil).Once() + + query := `{ SomeAction(where: { + path: ["location"], + operator: WithinGeoRange, + valueGeoRange: { geoCoordinates: { latitude: 0.5 }, distance: { max: 2.0} } + }) }` + + expectedErrors := []gqlerrors.FormattedError{ + { + Message: "Argument \"where\" has invalid value {path: [\"location\"], operator: WithinGeoRange, valueGeoRange: {geoCoordinates: {latitude: 0.5}, distance: {max: 2.0}}}.\nIn field \"valueGeoRange\": In field \"geoCoordinates\": In field \"longitude\": Expected \"Float!\", found null.", + Locations: []location.SourceLocation{{Line: 1, Column: 21}}, + }, + } + resolver.AssertErrors(t, query, expectedErrors) + }) +} + +func TestExtractFilterNestedField(t *testing.T) { + t.Parallel() + + resolver := newMockResolver(t, mockParams{reportFilter: true}) + + expectedParams := &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("hasAction"), + Child: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("intField"), + }, + }, + Value: &filters.Value{ + Value: 42, + Type: schema.DataTypeInt, + }, + }} + + resolver.On("ReportFilters", expectedParams). 
+ Return(test_helper.EmptyList(), nil).Once() + + query := `{ SomeAction(where: { path: ["hasAction", "SomeAction", "intField"], operator: Equal, valueInt: 42}) }` + resolver.AssertResolve(t, query) +} + +func TestExtractOperand(t *testing.T) { + t.Parallel() + + resolver := newMockResolver(t, mockParams{reportFilter: true}) + + expectedParams := &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("intField"), + }, + Value: &filters.Value{ + Value: 42, + Type: schema.DataTypeInt, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("hasAction"), + Child: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("intField"), + }, + }, + Value: &filters.Value{ + Value: 4242, + Type: schema.DataTypeInt, + }, + }, + }, + }} + + resolver.On("ReportFilters", expectedParams). 
+ Return(test_helper.EmptyList(), nil).Once() + + query := `{ SomeAction(where: { operator: And, operands: [ + { operator: Equal, valueInt: 42, path: ["intField"]}, + { operator: Equal, valueInt: 4242, path: ["hasAction", "SomeAction", "intField"] } + ]}) }` + resolver.AssertResolve(t, query) +} + +func TestExtractCompareOpFailsIfOperandPresent(t *testing.T) { + t.Parallel() + + resolver := newMockResolver(t, mockParams{reportFilter: true}) + + query := `{ SomeAction(where: { operator: Equal, operands: []}) }` + resolver.AssertFailToResolve(t, query) +} + +func TestExtractOperandFailsIfPathPresent(t *testing.T) { + t.Parallel() + + resolver := newMockResolver(t, mockParams{reportFilter: true}) + + query := `{ SomeAction(where: { path:["should", "not", "be", "present"], operator: And })}` + resolver.AssertFailToResolve(t, query) +} + +func TestExtractNearVector(t *testing.T) { + t.Parallel() + + t.Run("with certainty provided", func(t *testing.T) { + t.Parallel() + + query := `{ SomeAction(nearVector: {vector: [1, 2, 3], certainty: 0.7})}` + expectedparams := searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1, 2, 3}}, + Certainty: 0.7, + } + + resolver := newMockResolver(t, mockParams{reportNearVector: true}) + + resolver.On("ReportNearVector", expectedparams). + Return(test_helper.EmptyList(), nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with distance provided", func(t *testing.T) { + t.Parallel() + + query := `{ SomeAction(nearVector: {vector: [1, 2, 3], distance: 0.4})}` + expectedparams := searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1, 2, 3}}, + Distance: 0.4, + WithDistance: true, + } + + resolver := newMockResolver(t, mockParams{reportNearVector: true}) + + resolver.On("ReportNearVector", expectedparams). 
+ Return(test_helper.EmptyList(), nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with distance and certainty provided", func(t *testing.T) { + t.Parallel() + + query := `{ SomeAction(nearVector: {vector: [1, 2, 3], distance: 0.4, certainty: 0.7})}` + resolver := newMockResolver(t, mockParams{reportNearVector: true}) + resolver.AssertFailToResolve(t, query) + }) +} + +func TestExtractNearObject(t *testing.T) { + t.Parallel() + + t.Run("with certainty provided", func(t *testing.T) { + t.Parallel() + + query := `{ SomeAction(nearObject: {id: "123", certainty: 0.7})}` + expectedparams := searchparams.NearObject{ + ID: "123", + Certainty: 0.7, + } + + resolver := newMockResolver(t, mockParams{reportNearObject: true}) + + resolver.On("ReportNearObject", expectedparams). + Return(test_helper.EmptyList(), nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with distance provided", func(t *testing.T) { + t.Parallel() + + query := `{ SomeAction(nearObject: {id: "123", distance: 0.4})}` + expectedparams := searchparams.NearObject{ + ID: "123", + Distance: 0.4, + WithDistance: true, + } + + resolver := newMockResolver(t, mockParams{reportNearObject: true}) + + resolver.On("ReportNearObject", expectedparams). 
+ Return(test_helper.EmptyList(), nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with distance and certainty provided", func(t *testing.T) { + t.Parallel() + + query := `{ SomeAction(nearObject: {id: "123", distance: 0.4, certainty: 0.7})}` + resolver := newMockResolver(t, mockParams{reportNearObject: true}) + resolver.AssertFailToResolve(t, query) + }) +} + +func ptFloat32(in float32) *float32 { + return &in +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/targets.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/targets.go new file mode 100644 index 0000000000000000000000000000000000000000..307cc47d8b6900f5cb164afd32bc4c3b2cd4708c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/common_filters/targets.go @@ -0,0 +1,125 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package common_filters + +import ( + "fmt" + "strconv" + + "github.com/tailor-inc/graphql" + "github.com/tailor-inc/graphql/language/ast" + "github.com/weaviate/weaviate/entities/dto" +) + +func AddTargetArgument(fieldMap graphql.InputObjectConfigFieldMap, prefix string, addTarget bool) graphql.InputObjectConfigFieldMap { + if !addTarget { // not supported by aggregate and explore + return fieldMap + } + fieldMap["targets"] = &graphql.InputObjectFieldConfig{ + Description: "Subsearch list", + Type: targetFields(prefix), + } + return fieldMap +} + +var targetCombinationEnum = graphql.NewEnum(graphql.EnumConfig{ + Name: "targetCombinationEnum", + Values: graphql.EnumValueConfigMap{ + "minimum": &graphql.EnumValueConfig{Value: dto.Minimum}, + "average": &graphql.EnumValueConfig{Value: dto.Average}, + "sum": &graphql.EnumValueConfig{Value: dto.Sum}, + "manualWeights": &graphql.EnumValueConfig{Value: dto.ManualWeights}, + "relativeScore": &graphql.EnumValueConfig{Value: dto.RelativeScore}, + }, +}) + +var WeightsScalar = graphql.NewScalar(graphql.ScalarConfig{ + Name: "Weights", + Description: "A custom scalar type for a map with strings as keys and floats as values", + Serialize: func(value interface{}) interface{} { + return value + }, + ParseValue: func(value interface{}) interface{} { + return value + }, + ParseLiteral: func(valueAST ast.Value) interface{} { + switch v := valueAST.(type) { + case *ast.ObjectValue: + result := make(map[string]interface{}) + for _, field := range v.Fields { + key := field.Name.Value + switch value := field.Value.(type) { + case *ast.FloatValue: + floatValue, err := strconv.ParseFloat(value.Value, 64) + if err != nil { + return nil + } + result[key] = floatValue + case *ast.IntValue: + intValue, err := strconv.ParseFloat(value.Value, 64) + if err != nil { + return nil + } + result[key] = intValue + case *ast.ListValue: + var list []float64 + for _, item := range value.Values { + switch item := 
item.(type) { + case *ast.FloatValue: + floatValue, err := strconv.ParseFloat(item.Value, 64) + if err != nil { + return nil + } + list = append(list, floatValue) + case *ast.IntValue: + intValue, err := strconv.ParseFloat(item.Value, 64) + if err != nil { + return nil + } + list = append(list, intValue) + default: + return nil + } + } + result[key] = list + default: + return nil + } + } + return result + default: + return nil + } + }, +}) + +func targetFields(prefix string) *graphql.InputObject { + return graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sTargetInpObj", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "targetVectors": &graphql.InputObjectFieldConfig{ + Description: "Target vectors", + Type: graphql.NewList(graphql.String), + }, + "combinationMethod": &graphql.InputObjectFieldConfig{ + Description: "Combination method", + Type: targetCombinationEnum, + }, + "weights": &graphql.InputObjectFieldConfig{ + Description: "Weights for target vectors", + Type: WeightsScalar, + }, + }, + }, + ) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/explore/concepts.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/explore/concepts.go new file mode 100644 index 0000000000000000000000000000000000000000..cda3eaafc79f8a8cfd601544c71d2a011ead24d1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/explore/concepts.go @@ -0,0 +1,196 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package explore + +import ( + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +type ModulesProvider interface { + ExploreArguments(schema *models.Schema) map[string]*graphql.ArgumentConfig + CrossClassExtractSearchParams(arguments map[string]interface{}) map[string]interface{} +} + +// Build builds the object containing the Local->Explore Fields, such as Objects +func Build(schema *models.Schema, modulesProvider ModulesProvider, authorizer authorization.Authorizer) *graphql.Field { + field := &graphql.Field{ + Name: "Explore", + Description: descriptions.LocalExplore, + Type: graphql.NewList(exploreObject()), + Resolve: newResolver(authorizer, modulesProvider).resolve, + Args: graphql.FieldConfigArgument{ + "offset": &graphql.ArgumentConfig{ + Type: graphql.Int, + Description: descriptions.Offset, + }, + "limit": &graphql.ArgumentConfig{ + Type: graphql.Int, + Description: descriptions.Limit, + }, + + "nearVector": nearVectorArgument(), + "nearObject": nearObjectArgument(), + }, + } + + if modulesProvider != nil { + for name, argument := range modulesProvider.ExploreArguments(schema) { + field.Args[name] = argument + } + } + + return field +} + +func exploreObject() *graphql.Object { + getLocalExploreFields := graphql.Fields{ + "className": &graphql.Field{ + Name: "ExploreClassName", + Description: descriptions.ClassName, + Type: graphql.String, + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + vsr, ok := p.Source.(search.Result) + if !ok { + return nil, fmt.Errorf("unknown type %T in Explore..className resolver", p.Source) + } + + return vsr.ClassName, nil + }, + }, + + "beacon": &graphql.Field{ + Name: 
"ExploreBeacon", + Description: descriptions.Beacon, + Type: graphql.String, + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + vsr, ok := p.Source.(search.Result) + if !ok { + return nil, fmt.Errorf("unknown type %T in Explore..className resolver", p.Source) + } + + return vsr.Beacon, nil + }, + }, + + "certainty": &graphql.Field{ + Name: "ExploreCertainty", + Description: descriptions.Certainty, + Type: graphql.Float, + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + vsr, ok := p.Source.(search.Result) + if !ok { + return nil, fmt.Errorf("unknown type %T in Explore..className resolver", p.Source) + } + + return vsr.Certainty, nil + }, + }, + + "distance": &graphql.Field{ + Name: "ExploreDistance", + Description: descriptions.Distance, + Type: graphql.Float, + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + vsr, ok := p.Source.(search.Result) + if !ok { + return nil, fmt.Errorf("unknown type %T in Explore..className resolver", p.Source) + } + + return vsr.Dist, nil + }, + }, + } + + getLocalExploreFieldsObject := graphql.ObjectConfig{ + Name: "ExploreObj", + Fields: getLocalExploreFields, + Description: descriptions.LocalExplore, + } + + return graphql.NewObject(getLocalExploreFieldsObject) +} + +func nearVectorArgument() *graphql.ArgumentConfig { + return &graphql.ArgumentConfig{ + // Description: descriptions.GetExplore, + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: "ExploreNearVectorInpObj", + Fields: nearVectorFields(), + }, + ), + } +} + +func nearVectorFields() graphql.InputObjectConfigFieldMap { + return graphql.InputObjectConfigFieldMap{ + "vector": &graphql.InputObjectFieldConfig{ + Description: descriptions.Certainty, + Type: graphql.NewNonNull(common_filters.Vector("ExploreNearVectorInpObj")), + }, + "certainty": &graphql.InputObjectFieldConfig{ + Description: descriptions.Certainty, + Type: graphql.Float, + }, + "distance": &graphql.InputObjectFieldConfig{ + Description: 
descriptions.Distance, + Type: graphql.Float, + }, + "targetVectors": &graphql.InputObjectFieldConfig{ + Description: "Target vectors", + Type: graphql.NewList(graphql.String), + }, + } +} + +func nearObjectArgument() *graphql.ArgumentConfig { + return &graphql.ArgumentConfig{ + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: "ExploreNearObjectInpObj", + Fields: nearObjectFields(), + }, + ), + } +} + +func nearObjectFields() graphql.InputObjectConfigFieldMap { + return graphql.InputObjectConfigFieldMap{ + "id": &graphql.InputObjectFieldConfig{ + Description: descriptions.ID, + Type: graphql.String, + }, + "beacon": &graphql.InputObjectFieldConfig{ + Description: descriptions.Beacon, + Type: graphql.String, + }, + "certainty": &graphql.InputObjectFieldConfig{ + Description: descriptions.Certainty, + Type: graphql.Float, + }, + "distance": &graphql.InputObjectFieldConfig{ + Description: descriptions.Distance, + Type: graphql.Float, + }, + "targetVectors": &graphql.InputObjectFieldConfig{ + Description: "Target vectors", + Type: graphql.NewList(graphql.String), + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/explore/concepts_resolver.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/explore/concepts_resolver.go new file mode 100644 index 0000000000000000000000000000000000000000..3f9d473d5539d64b8c0f79c650cbb46ed819234f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/explore/concepts_resolver.go @@ -0,0 +1,148 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package explore + +import ( + "context" + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/tailor-inc/graphql/language/ast" + + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + restCtx "github.com/weaviate/weaviate/adapters/handlers/rest/context" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/traverser" +) + +// Resolver is a local interface that can be composed with other interfaces to +// form the overall GraphQL API main interface. All data-base connectors that +// want to support the Meta feature must implement this interface. +type Resolver interface { + Explore(ctx context.Context, principal *models.Principal, + params traverser.ExploreParams) ([]search.Result, error) +} + +// RequestsLog is a local abstraction on the RequestsLog that needs to be +// provided to the graphQL API in order to log Local.Fetch queries. 
+type RequestsLog interface { + Register(requestType string, identifier string) +} + +type resources struct { + resolver Resolver +} + +func newResources(s interface{}) (*resources, error) { + source, ok := s.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("expected source to be a map, but was %T", source) + } + + resolver, ok := source["Resolver"].(Resolver) + if !ok { + return nil, fmt.Errorf("expected source to contain a usable Resolver, but was %#v", source) + } + + return &resources{ + resolver: resolver, + }, nil +} + +type resolver struct { + authorizer authorization.Authorizer + modulesProvider ModulesProvider +} + +func newResolver(authorizer authorization.Authorizer, modulesProvider ModulesProvider) *resolver { + return &resolver{authorizer, modulesProvider} +} + +func (r *resolver) resolve(p graphql.ResolveParams) (interface{}, error) { + result, err := r.resolveExplore(p) + if err != nil { + return result, enterrors.NewErrGraphQLUser(err, "Explore", "") + } + return result, nil +} + +func (r *resolver) resolveExplore(p graphql.ResolveParams) (interface{}, error) { + principal := restCtx.GetPrincipalFromContext(p.Context) + + err := r.authorizer.Authorize(p.Context, principal, authorization.READ, authorization.CollectionsData()...) 
+ if err != nil { + return nil, err + } + + resources, err := newResources(p.Source) + if err != nil { + return nil, err + } + + params := traverser.ExploreParams{} + + if param, ok := p.Args["nearVector"]; ok { + extracted, _, err := common_filters.ExtractNearVector(param.(map[string]interface{}), nil) + if err != nil { + return nil, fmt.Errorf("failed to extract nearVector params: %w", err) + } + params.NearVector = &extracted + } + + if param, ok := p.Args["nearObject"]; ok { + extracted, _, err := common_filters.ExtractNearObject(param.(map[string]interface{})) + if err != nil { + return nil, fmt.Errorf("failed to extract nearObject params: %w", err) + } + params.NearObject = &extracted + } + + if param, ok := p.Args["offset"]; ok { + params.Offset = param.(int) + } + + if param, ok := p.Args["limit"]; ok { + params.Limit = param.(int) + } + + if r.modulesProvider != nil { + extractedParams := r.modulesProvider.CrossClassExtractSearchParams(p.Args) + if len(extractedParams) > 0 { + params.ModuleParams = extractedParams + } + } + + if containsCertaintyProperty(p.Info) { + params.WithCertaintyProp = true + } + + return resources.resolver.Explore(p.Context, principal, params) +} + +func containsCertaintyProperty(info graphql.ResolveInfo) bool { + if len(info.FieldASTs) == 0 { + return false + } + + for _, selection := range info.FieldASTs[0].SelectionSet.Selections { + field := selection.(*ast.Field) + name := field.Name.Value + if name == "certainty" { + return true + } + } + + return false +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/explore/concepts_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/explore/concepts_test.go new file mode 100644 index 0000000000000000000000000000000000000000..228caa2ab35e29f6b4967d43518eabe16e1234c4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/explore/concepts_test.go @@ -0,0 +1,714 @@ +// _ _ +// __ _____ __ ___ ___ 
__ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package explore + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + helper "github.com/weaviate/weaviate/test/helper" + "github.com/weaviate/weaviate/usecases/traverser" +) + +type testCase struct { + name string + query string + expectedParamsToTraverser traverser.ExploreParams + resolverReturn []search.Result + expectedResults []result +} + +type testCases []testCase + +type result struct { + pathToField []string + expectedValue interface{} +} + +func Test_ResolveExplore(t *testing.T) { + t.Parallel() + + testsNearText := testCases{ + testCase{ + name: "Resolve Explore with nearCustomText", + query: ` + { + Explore(nearCustomText: {concepts: ["car", "best brand"]}) { + beacon className certainty distance + } + }`, + expectedParamsToTraverser: traverser.ExploreParams{ + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearCustomTextParam(map[string]interface{}{ + "concepts": []interface{}{"car", "best brand"}, + }), + }, + WithCertaintyProp: true, + }, + resolverReturn: []search.Result{ + { + Beacon: "weaviate://localhost/some-uuid", + ClassName: "bestClass", + Certainty: 0.7, + Dist: helper.CertaintyToDist(t, 0.7), + }, + }, + expectedResults: []result{{ + pathToField: []string{"Explore"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/some-uuid", + "className": "bestClass", + "certainty": float32(0.7), + "distance": helper.CertaintyToDist(t, 0.7), + }, + }, + }}, + }, + + testCase{ + name: "with nearCustomText with optional limit and distance 
set", + query: ` + { + Explore( + nearCustomText: {concepts: ["car", "best brand"], distance: 0.4}, limit: 17 + ){ + beacon className + } + }`, + expectedParamsToTraverser: traverser.ExploreParams{ + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearCustomTextParam(map[string]interface{}{ + "concepts": []interface{}{"car", "best brand"}, + "distance": float64(0.4), + }), + }, + Limit: 17, + }, + resolverReturn: []search.Result{ + { + Beacon: "weaviate://localhost/some-uuid", + ClassName: "bestClass", + }, + }, + expectedResults: []result{{ + pathToField: []string{"Explore"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/some-uuid", + "className": "bestClass", + }, + }, + }}, + }, + + testCase{ + name: "with nearCustomText with optional limit and certainty set", + query: ` + { + Explore( + nearCustomText: {concepts: ["car", "best brand"], certainty: 0.6}, limit: 17 + ){ + beacon className + } + }`, + expectedParamsToTraverser: traverser.ExploreParams{ + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearCustomTextParam(map[string]interface{}{ + "concepts": []interface{}{"car", "best brand"}, + "certainty": float64(0.6), + }), + }, + Limit: 17, + }, + resolverReturn: []search.Result{ + { + Beacon: "weaviate://localhost/some-uuid", + ClassName: "bestClass", + }, + }, + expectedResults: []result{{ + pathToField: []string{"Explore"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/some-uuid", + "className": "bestClass", + }, + }, + }}, + }, + + testCase{ + name: "with moveTo set", + query: ` + { + Explore( + limit: 17 + nearCustomText: { + concepts: ["car", "best brand"] + moveTo: { + concepts: ["mercedes"] + force: 0.7 + } + } + ) { + beacon className + } + }`, + expectedParamsToTraverser: traverser.ExploreParams{ + Limit: 17, + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearCustomTextParam(map[string]interface{}{ + 
"concepts": []interface{}{"car", "best brand"}, + "moveTo": map[string]interface{}{ + "concepts": []interface{}{"mercedes"}, + "force": float64(0.7), + }, + }), + }, + }, + resolverReturn: []search.Result{ + { + Beacon: "weaviate://localhost/some-uuid", + ClassName: "bestClass", + }, + }, + expectedResults: []result{{ + pathToField: []string{"Explore"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/some-uuid", + "className": "bestClass", + }, + }, + }}, + }, + + testCase{ + name: "with moveTo and moveAwayFrom set", + query: ` + { + Explore( + limit: 17 + nearCustomText: { + concepts: ["car", "best brand"] + moveTo: { + concepts: ["mercedes"] + force: 0.7 + } + moveAwayFrom: { + concepts: ["van"] + force: 0.7 + } + } + ) { + beacon className + } + }`, + expectedParamsToTraverser: traverser.ExploreParams{ + Limit: 17, + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearCustomTextParam(map[string]interface{}{ + "concepts": []interface{}{"car", "best brand"}, + "moveTo": map[string]interface{}{ + "concepts": []interface{}{"mercedes"}, + "force": float64(0.7), + }, + "moveAwayFrom": map[string]interface{}{ + "concepts": []interface{}{"van"}, + "force": float64(0.7), + }, + }), + }, + }, + resolverReturn: []search.Result{ + { + Beacon: "weaviate://localhost/some-uuid", + ClassName: "bestClass", + }, + }, + expectedResults: []result{{ + pathToField: []string{"Explore"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/some-uuid", + "className": "bestClass", + }, + }, + }}, + }, + + testCase{ + name: "with moveTo and objects set", + query: ` + { + Explore( + limit: 17 + nearCustomText: { + concepts: ["car", "best brand"] + moveTo: { + concepts: ["mercedes"] + force: 0.7 + objects: [ + {id: "moveto-uuid"}, + {beacon: "weaviate://localhost/other-moveto-uuid"}, + ] + } + } + ) { + beacon className + } + }`, + expectedParamsToTraverser: traverser.ExploreParams{ + Limit: 
17, + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearCustomTextParam(map[string]interface{}{ + "concepts": []interface{}{"car", "best brand"}, + "moveTo": map[string]interface{}{ + "concepts": []interface{}{"mercedes"}, + "force": float64(0.7), + "objects": []interface{}{ + map[string]interface{}{ + "id": "moveto-uuid", + }, + map[string]interface{}{ + "beacon": "weaviate://localhost/other-moveto-uuid", + }, + }, + }, + }), + }, + }, + resolverReturn: []search.Result{ + { + Beacon: "weaviate://localhost/some-uuid", + ClassName: "bestClass", + }, + }, + expectedResults: []result{{ + pathToField: []string{"Explore"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/some-uuid", + "className": "bestClass", + }, + }, + }}, + }, + + testCase{ + name: "with moveTo and objects set", + query: ` + { + Explore( + limit: 17 + nearCustomText: { + concepts: ["car", "best brand"] + moveTo: { + concepts: ["mercedes"] + force: 0.7 + objects: [ + {id: "moveto-uuid1"}, + {beacon: "weaviate://localhost/moveto-uuid2"}, + ] + } + moveAwayFrom: { + concepts: ["van"] + force: 0.7 + objects: [ + {id: "moveAway-uuid1"}, + {beacon: "weaviate://localhost/moveAway-uuid2"}, + {id: "moveAway-uuid3"}, + {id: "moveAway-uuid4"}, + ] + } + } + ) { + beacon className + } + }`, + expectedParamsToTraverser: traverser.ExploreParams{ + Limit: 17, + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearCustomTextParam(map[string]interface{}{ + "concepts": []interface{}{"car", "best brand"}, + "moveTo": map[string]interface{}{ + "concepts": []interface{}{"mercedes"}, + "force": float64(0.7), + "objects": []interface{}{ + map[string]interface{}{ + "id": "moveto-uuid1", + }, + map[string]interface{}{ + "beacon": "weaviate://localhost/moveto-uuid2", + }, + }, + }, + "moveAwayFrom": map[string]interface{}{ + "concepts": []interface{}{"van"}, + "force": float64(0.7), + "objects": []interface{}{ + map[string]interface{}{ + "id": 
"moveAway-uuid1", + }, + map[string]interface{}{ + "beacon": "weaviate://localhost/moveAway-uuid2", + }, + map[string]interface{}{ + "id": "moveAway-uuid3", + }, + map[string]interface{}{ + "id": "moveAway-uuid4", + }, + }, + }, + }), + }, + }, + resolverReturn: []search.Result{ + { + Beacon: "weaviate://localhost/some-uuid", + ClassName: "bestClass", + }, + }, + expectedResults: []result{{ + pathToField: []string{"Explore"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/some-uuid", + "className": "bestClass", + }, + }, + }}, + }, + } + + tests := testCases{ + testCase{ + name: "Resolve Explore with nearVector", + query: ` + { + Explore(nearVector: {vector: [0, 1, 0.8]}) { + beacon className certainty distance + } + }`, + expectedParamsToTraverser: traverser.ExploreParams{ + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0, 1, 0.8}}, + }, + WithCertaintyProp: true, + }, + resolverReturn: []search.Result{ + { + Beacon: "weaviate://localhost/some-uuid", + ClassName: "bestClass", + Certainty: 0.7, + Dist: helper.CertaintyToDist(t, 0.7), + }, + }, + expectedResults: []result{{ + pathToField: []string{"Explore"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/some-uuid", + "className": "bestClass", + "certainty": float32(0.7), + "distance": helper.CertaintyToDist(t, 0.7), + }, + }, + }}, + }, + + testCase{ + name: "with nearVector with optional limit", + query: ` + { + Explore(limit: 17, nearVector: {vector: [0, 1, 0.8]}) { + beacon className certainty distance + } + }`, + expectedParamsToTraverser: traverser.ExploreParams{ + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0, 1, 0.8}}, + }, + Limit: 17, + WithCertaintyProp: true, + }, + resolverReturn: []search.Result{ + { + Beacon: "weaviate://localhost/some-uuid", + ClassName: "bestClass", + Certainty: 0.7, + Dist: helper.CertaintyToDist(t, 0.7), + }, + }, + expectedResults: 
[]result{{ + pathToField: []string{"Explore"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/some-uuid", + "className": "bestClass", + "certainty": float32(0.7), + "distance": helper.CertaintyToDist(t, 0.7), + }, + }, + }}, + }, + + testCase{ + name: "Resolve Explore with nearObject, distance, and beacon set", + query: ` + { + Explore( + nearObject: { + beacon: "weaviate://localhost/27b5213d-e152-4fea-bd63-2063d529024d" + distance: 0.3 + }) { + beacon className certainty distance + } + }`, + expectedParamsToTraverser: traverser.ExploreParams{ + NearObject: &searchparams.NearObject{ + Beacon: "weaviate://localhost/27b5213d-e152-4fea-bd63-2063d529024d", + Distance: float64(0.3), + WithDistance: true, + }, + WithCertaintyProp: true, + }, + resolverReturn: []search.Result{ + { + Beacon: "weaviate://localhost/27b5213d-e152-4fea-bd63-2063d529024d", + ClassName: "bestClass", + Certainty: 0.7, + Dist: helper.CertaintyToDist(t, 0.7), + }, + }, + expectedResults: []result{{ + pathToField: []string{"Explore"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/27b5213d-e152-4fea-bd63-2063d529024d", + "className": "bestClass", + "certainty": float32(0.7), + "distance": helper.CertaintyToDist(t, 0.7), + }, + }, + }}, + }, + + testCase{ + name: "Resolve Explore with nearObject, certainty, and beacon set", + query: ` + { + Explore( + nearObject: { + beacon: "weaviate://localhost/27b5213d-e152-4fea-bd63-2063d529024d" + certainty: 0.7 + }) { + beacon className certainty distance + } + }`, + expectedParamsToTraverser: traverser.ExploreParams{ + NearObject: &searchparams.NearObject{ + Beacon: "weaviate://localhost/27b5213d-e152-4fea-bd63-2063d529024d", + Certainty: 0.7, + }, + WithCertaintyProp: true, + }, + resolverReturn: []search.Result{ + { + Beacon: "weaviate://localhost/27b5213d-e152-4fea-bd63-2063d529024d", + ClassName: "bestClass", + Certainty: 0.7, + Dist: helper.CertaintyToDist(t, 0.7), + 
}, + }, + expectedResults: []result{{ + pathToField: []string{"Explore"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/27b5213d-e152-4fea-bd63-2063d529024d", + "className": "bestClass", + "certainty": float32(0.7), + "distance": helper.CertaintyToDist(t, 0.7), + }, + }, + }}, + }, + + testCase{ + name: "Resolve Explore with nearObject, distance and id set", + query: ` + { + Explore( + limit: 17 + nearObject: { + id: "27b5213d-e152-4fea-bd63-2063d529024d" + distance: 0.3 + } + ) { + beacon className + } + }`, + expectedParamsToTraverser: traverser.ExploreParams{ + Limit: 17, + NearObject: &searchparams.NearObject{ + ID: "27b5213d-e152-4fea-bd63-2063d529024d", + Distance: 0.3, + WithDistance: true, + }, + }, + resolverReturn: []search.Result{ + { + Beacon: "weaviate://localhost/some-uuid", + ClassName: "bestClass", + }, + }, + expectedResults: []result{{ + pathToField: []string{"Explore"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/some-uuid", + "className": "bestClass", + }, + }, + }}, + }, + + testCase{ + name: "Resolve Explore with nearObject, certainty and id set", + query: ` + { + Explore( + limit: 17 + nearObject: { + id: "27b5213d-e152-4fea-bd63-2063d529024d" + certainty: 0.7 + } + ) { + beacon className + } + }`, + expectedParamsToTraverser: traverser.ExploreParams{ + Limit: 17, + NearObject: &searchparams.NearObject{ + ID: "27b5213d-e152-4fea-bd63-2063d529024d", + Certainty: 0.7, + }, + }, + resolverReturn: []search.Result{ + { + Beacon: "weaviate://localhost/some-uuid", + ClassName: "bestClass", + }, + }, + expectedResults: []result{{ + pathToField: []string{"Explore"}, + expectedValue: []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/some-uuid", + "className": "bestClass", + }, + }, + }}, + }, + } + + tests.AssertExtraction(t, newMockResolver()) + testsNearText.AssertExtraction(t, newMockResolver()) + tests.AssertExtraction(t, 
newMockResolverNoModules()) +} + +func Test_ExploreWithNoText2VecClasses(t *testing.T) { + t.Run("with distance", func(t *testing.T) { + resolver := newMockResolverEmptySchema() + query := ` + { + Explore( + nearCustomText: {concepts: ["car", "best brand"], distance: 0.6}, limit: 17 + ){ + beacon className + } + }` + res := resolver.Resolve(query) + require.Len(t, res.Errors, 1) + assert.Contains(t, res.Errors[0].Message, "Unknown argument \"nearCustomText\" on field \"Explore\"") + }) + + t.Run("with certainty", func(t *testing.T) { + resolver := newMockResolverEmptySchema() + query := ` + { + Explore( + nearCustomText: {concepts: ["car", "best brand"], certainty: 0.6}, limit: 17 + ){ + beacon className + } + }` + res := resolver.Resolve(query) + require.Len(t, res.Errors, 1) + assert.Contains(t, res.Errors[0].Message, "Unknown argument \"nearCustomText\" on field \"Explore\"") + }) +} + +func Test_ExploreWithNoModules(t *testing.T) { + t.Run("with distance", func(t *testing.T) { + resolver := newMockResolverNoModules() + query := ` + { + Explore( + nearCustomText: {concepts: ["car", "best brand"], distance: 0.6}, limit: 17 + ){ + beacon className + } + }` + res := resolver.Resolve(query) + require.Len(t, res.Errors, 1) + assert.Contains(t, res.Errors[0].Message, "Unknown argument \"nearCustomText\" on field \"Explore\"") + }) + + t.Run("with certainty", func(t *testing.T) { + resolver := newMockResolverNoModules() + query := ` + { + Explore( + nearCustomText: {concepts: ["car", "best brand"], certainty: 0.6}, limit: 17 + ){ + beacon className + } + }` + res := resolver.Resolve(query) + require.Len(t, res.Errors, 1) + assert.Contains(t, res.Errors[0].Message, "Unknown argument \"nearCustomText\" on field \"Explore\"") + }) +} + +func (tests testCases) AssertExtraction(t *testing.T, resolver *mockResolver) { + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + resolver.On("Explore", testCase.expectedParamsToTraverser). 
+ Return(testCase.resolverReturn, nil).Once() + + result := resolver.AssertResolve(t, testCase.query) + + for _, expectedResult := range testCase.expectedResults { + value := result.Get(expectedResult.pathToField...).Result + + assert.Equal(t, expectedResult.expectedValue, value) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/explore/helpers_for_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/explore/helpers_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..bce2717891adb9cbb2f365c90402c18e2c4ea7d4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/explore/helpers_for_test.go @@ -0,0 +1,355 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package explore + +import ( + "context" + "fmt" + + "github.com/tailor-inc/graphql" + + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" + testhelper "github.com/weaviate/weaviate/adapters/handlers/graphql/test/helper" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/moduletools" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/traverser" +) + +type mockRequestsLog struct{} + +func (m *mockRequestsLog) Register(first string, second string) { +} + +type mockResolver struct { + testhelper.MockResolver +} + +type fakeModulesProvider struct{} + +type fakeAuthorizer struct{} + +func (a *fakeAuthorizer) Authorize(ctx context.Context, principal *models.Principal, verb string, resource 
...string) error { + return nil +} + +func (a *fakeAuthorizer) AuthorizeSilent(ctx context.Context, principal *models.Principal, verb string, resource ...string) error { + return nil +} + +func (a *fakeAuthorizer) FilterAuthorizedResources(ctx context.Context, principal *models.Principal, verb string, resources ...string) ([]string, error) { + return resources, nil +} + +func getFakeAuthorizer() authorization.Authorizer { + return &fakeAuthorizer{} +} + +func (p *fakeModulesProvider) VectorFromInput(ctx context.Context, className string, input string) ([]float32, error) { + panic("not implemented") +} + +func (p *fakeModulesProvider) ExploreArguments(schema *models.Schema) map[string]*graphql.ArgumentConfig { + args := map[string]*graphql.ArgumentConfig{} + txt2vec := &nearCustomTextModule{} + for _, c := range schema.Classes { + if c.Vectorizer == txt2vec.Name() { + for name, argument := range txt2vec.Arguments() { + args[name] = argument.ExploreArgumentsFunction() + } + } + } + return args +} + +func (p *fakeModulesProvider) CrossClassExtractSearchParams(arguments map[string]interface{}) map[string]interface{} { + exractedParams := map[string]interface{}{} + if param, ok := arguments["nearCustomText"]; ok { + exractedParams["nearCustomText"] = extractNearCustomTextParam(param.(map[string]interface{})) + } + return exractedParams +} + +func extractNearCustomTextParam(param map[string]interface{}) interface{} { + nearCustomText := &nearCustomTextModule{} + argument := nearCustomText.Arguments()["nearCustomText"] + params, _, _ := argument.ExtractFunction(param) + return params +} + +func getFakeModulesProvider() ModulesProvider { + return &fakeModulesProvider{} +} + +func newMockResolver() *mockResolver { + field := Build(testhelper.SimpleSchema.Objects, getFakeModulesProvider(), getFakeAuthorizer()) + mocker := &mockResolver{} + mockLog := &mockRequestsLog{} + mocker.RootFieldName = "Explore" + mocker.RootField = field + mocker.RootObject = map[string]interface{}{ 
+ "Resolver": Resolver(mocker), + "RequestsLog": mockLog, + } + return mocker +} + +func newMockResolverNoModules() *mockResolver { + field := Build(testhelper.SimpleSchema.Objects, nil, getFakeAuthorizer()) + mocker := &mockResolver{} + mockLog := &mockRequestsLog{} + mocker.RootFieldName = "Explore" + mocker.RootField = field + mocker.RootObject = map[string]interface{}{ + "Resolver": Resolver(mocker), + "RequestsLog": mockLog, + } + return mocker +} + +func newMockResolverEmptySchema() *mockResolver { + field := Build(&models.Schema{}, getFakeModulesProvider(), getFakeAuthorizer()) + mocker := &mockResolver{} + mockLog := &mockRequestsLog{} + mocker.RootFieldName = "Explore" + mocker.RootField = field + mocker.RootObject = map[string]interface{}{ + "Resolver": Resolver(mocker), + "RequestsLog": mockLog, + } + return mocker +} + +func (m *mockResolver) Explore(ctx context.Context, + principal *models.Principal, params traverser.ExploreParams, +) ([]search.Result, error) { + args := m.Called(params) + return args.Get(0).([]search.Result), args.Error(1) +} + +type nearCustomTextParams struct { + Values []string + MoveTo nearExploreMove + MoveAwayFrom nearExploreMove + Certainty float64 + Distance float64 + WithDistance bool +} + +type nearExploreMove struct { + Values []string + Force float32 + Objects []nearObjectMove +} + +type nearObjectMove struct { + ID string + Beacon string +} + +type nearCustomTextModule struct{} + +func (m *nearCustomTextModule) Name() string { + return "text2vec-contextionary" +} + +func (m *nearCustomTextModule) Init(params moduletools.ModuleInitParams) error { + return nil +} + +func (m *nearCustomTextModule) Arguments() map[string]modulecapabilities.GraphQLArgument { + arguments := map[string]modulecapabilities.GraphQLArgument{} + // define nearCustomText argument + arguments["nearCustomText"] = modulecapabilities.GraphQLArgument{ + GetArgumentsFunction: func(classname string) *graphql.ArgumentConfig { + return 
m.getNearCustomTextArgument(classname) + }, + ExploreArgumentsFunction: func() *graphql.ArgumentConfig { + return m.getNearCustomTextArgument("") + }, + ExtractFunction: func(source map[string]interface{}) (interface{}, *dto.TargetCombination, error) { + return m.extractNearCustomTextArgument(source) + }, + ValidateFunction: func(param interface{}) error { + // all is valid + return nil + }, + } + return arguments +} + +func (m *nearCustomTextModule) getNearCustomTextArgument(classname string) *graphql.ArgumentConfig { + prefix := classname + return &graphql.ArgumentConfig{ + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sNearCustomTextInpObj", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "concepts": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(graphql.NewList(graphql.String)), + }, + "moveTo": &graphql.InputObjectFieldConfig{ + Description: descriptions.VectorMovement, + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMoveTo", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "concepts": &graphql.InputObjectFieldConfig{ + Description: descriptions.Keywords, + Type: graphql.NewList(graphql.String), + }, + "objects": &graphql.InputObjectFieldConfig{ + Description: "objects", + Type: graphql.NewList(graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMovementObjectsToInpObj", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "id": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: "id of an object", + }, + "beacon": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: descriptions.Beacon, + }, + }, + Description: "Movement Object", + }, + )), + }, + "force": &graphql.InputObjectFieldConfig{ + Description: descriptions.Force, + Type: graphql.NewNonNull(graphql.Float), + }, + }, + }), + }, + "moveAwayFrom": &graphql.InputObjectFieldConfig{ + Description: descriptions.VectorMovement, + Type: 
graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMoveAway", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "concepts": &graphql.InputObjectFieldConfig{ + Description: descriptions.Keywords, + Type: graphql.NewList(graphql.String), + }, + "objects": &graphql.InputObjectFieldConfig{ + Description: "objects", + Type: graphql.NewList(graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMovementObjectsAwayInpObj", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "id": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: "id of an object", + }, + "beacon": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: descriptions.Beacon, + }, + }, + Description: "Movement Object", + }, + )), + }, + "force": &graphql.InputObjectFieldConfig{ + Description: descriptions.Force, + Type: graphql.NewNonNull(graphql.Float), + }, + }, + }), + }, + "certainty": &graphql.InputObjectFieldConfig{ + Description: descriptions.Certainty, + Type: graphql.Float, + }, + "distance": &graphql.InputObjectFieldConfig{ + Description: descriptions.Distance, + Type: graphql.Float, + }, + }, + Description: descriptions.GetWhereInpObj, + }, + ), + } +} + +func (m *nearCustomTextModule) extractNearCustomTextArgument(source map[string]interface{}) (*nearCustomTextParams, *dto.TargetCombination, error) { + var args nearCustomTextParams + + concepts := source["concepts"].([]interface{}) + args.Values = make([]string, len(concepts)) + for i, value := range concepts { + args.Values[i] = value.(string) + } + + certainty, ok := source["certainty"] + if ok { + args.Certainty = certainty.(float64) + } + + distance, ok := source["distance"] + if ok { + args.Distance = distance.(float64) + args.WithDistance = true + } + + // moveTo is an optional arg, so it could be nil + moveTo, ok := source["moveTo"] + if ok { + moveToMap := moveTo.(map[string]interface{}) + args.MoveTo = m.parseMoveParam(moveToMap) + } + + 
moveAwayFrom, ok := source["moveAwayFrom"] + if ok { + moveAwayFromMap := moveAwayFrom.(map[string]interface{}) + args.MoveAwayFrom = m.parseMoveParam(moveAwayFromMap) + } + + return &args, nil, nil +} + +func (m *nearCustomTextModule) parseMoveParam(source map[string]interface{}) nearExploreMove { + res := nearExploreMove{} + res.Force = float32(source["force"].(float64)) + + concepts, ok := source["concepts"].([]interface{}) + if ok { + res.Values = make([]string, len(concepts)) + for i, value := range concepts { + res.Values[i] = value.(string) + } + } + + objects, ok := source["objects"].([]interface{}) + if ok { + res.Objects = make([]nearObjectMove, len(objects)) + for i, value := range objects { + v, ok := value.(map[string]interface{}) + if ok { + if v["id"] != nil { + res.Objects[i].ID = v["id"].(string) + } + if v["beacon"] != nil { + res.Objects[i].Beacon = v["beacon"].(string) + } + } + } + } + + return res +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder.go new file mode 100644 index 0000000000000000000000000000000000000000..5a3471e0160e94a8793f5f4344b26e4925797489 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder.go @@ -0,0 +1,351 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "fmt" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +type classBuilder struct { + authorizer authorization.Authorizer + schema *schema.SchemaWithAliases + knownClasses map[string]*graphql.Object + beaconClass *graphql.Object + logger logrus.FieldLogger + modulesProvider ModulesProvider +} + +func newClassBuilder(schema *schema.SchemaWithAliases, logger logrus.FieldLogger, + modulesProvider ModulesProvider, authorizer authorization.Authorizer, +) *classBuilder { + b := &classBuilder{} + + b.logger = logger + b.schema = schema + b.modulesProvider = modulesProvider + b.authorizer = authorizer + + b.initKnownClasses() + b.initBeaconClass() + + return b +} + +func (b *classBuilder) initKnownClasses() { + b.knownClasses = map[string]*graphql.Object{} +} + +func (b *classBuilder) initBeaconClass() { + b.beaconClass = graphql.NewObject(graphql.ObjectConfig{ + Name: "Beacon", + Fields: graphql.Fields{ + "beacon": &graphql.Field{ + Type: graphql.String, + }, + }, + }) +} + +func (b *classBuilder) objects() (*graphql.Object, error) { + return b.kinds(b.schema.Objects) +} + +func (b *classBuilder) kinds(kindSchema *models.Schema) (*graphql.Object, error) { + // needs to be defined outside the individual class as there can only be one definition of an enum + fusionAlgoEnum := graphql.NewEnum(graphql.EnumConfig{ + Name: "FusionEnum", + Values: graphql.EnumValueConfigMap{ + "rankedFusion": &graphql.EnumValueConfig{ + Value: common_filters.HybridRankedFusion, + }, + "relativeScoreFusion": &graphql.EnumValueConfig{ + Value: 
common_filters.HybridRelativeScoreFusion, + }, + }, + }) + + classFields := graphql.Fields{} + for _, class := range kindSchema.Classes { + classField, err := b.classField(class, fusionAlgoEnum) + if err != nil { + return nil, fmt.Errorf("could not build class for %s", class.Class) + } + classFields[class.Class] = classField + } + + // Include alias as top level class name in gql schema + for alias, aliasedClassName := range b.schema.Aliases { + field, ok := classFields[aliasedClassName] + if ok { + classFields[alias] = field + } + } + + classes := graphql.NewObject(graphql.ObjectConfig{ + Name: "GetObjectsObj", + Fields: classFields, + Description: descriptions.GetObjectsActionsObj, + }) + + return classes, nil +} + +func (b *classBuilder) classField(class *models.Class, fusionEnum *graphql.Enum) (*graphql.Field, error) { + classObject := b.classObject(class) + b.knownClasses[class.Class] = classObject + classField := buildGetClassField(classObject, class, b.modulesProvider, fusionEnum, b.authorizer) + return &classField, nil +} + +func (b *classBuilder) classObject(class *models.Class) *graphql.Object { + return graphql.NewObject(graphql.ObjectConfig{ + Name: b.getClassObjectName(class.Class), + Fields: (graphql.FieldsThunk)(func() graphql.Fields { + classProperties := graphql.Fields{} + for _, property := range class.Properties { + propertyType, err := b.schema.FindPropertyDataType(property.DataType) + if err != nil { + if errors.Is(err, schema.ErrRefToNonexistentClass) { + // This is a common case when a class which is referenced + // by another class is deleted, leaving the referencing + // class with an invalid reference property. Panicking + // is not necessary here + b.logger.WithField("action", "graphql_rebuild"). 
+ Warnf("ignoring ref prop %q on class %q, because it contains reference to nonexistent class %q", + property.Name, class.Class, property.DataType) + + continue + } else { + // We can't return an error in this FieldsThunk function, so we need to panic + panic(fmt.Sprintf("buildGetClass: wrong propertyType for %s.%s; %s", + class.Class, property.Name, err.Error())) + } + } + + if propertyType.IsPrimitive() { + classProperties[property.Name] = b.primitiveField(propertyType, property, + class.Class) + } else if propertyType.IsNested() { + classProperties[property.Name] = b.nestedField(propertyType, property, + class.Class) + } else { + classProperties[property.Name] = b.referenceField(propertyType, property, + class.Class) + } + } + + b.additionalFields(classProperties, class) + + return classProperties + }), + Description: class.Description, + }) +} + +func (b *classBuilder) getClassObjectName(name string) string { + switch name { + // GraphQL scalars have graphql names assigned the same as the name of the scalar. + // In order to avoid name clash we must override those class names. We are prepending + // underscore character before the class name as it is safe to do so + // because class names starting with "_" are not valid Weaviate class names, + // so it is safe to override such a class name with "_" prefix and use it as GraphQL name. 
+ case graphql.String.Name(), graphql.DateTime.Name(), graphql.Int.Name(), graphql.Float.Name(), + graphql.Boolean.Name(), graphql.ID.Name(), graphql.FieldSet.Name(): + return fmt.Sprintf("_%s", name) + default: + return name + } +} + +func (b *classBuilder) additionalFields(classProperties graphql.Fields, class *models.Class) { + additionalProperties := graphql.Fields{} + additionalProperties["classification"] = b.additionalClassificationField(class) + additionalProperties["certainty"] = b.additionalCertaintyField(class) + additionalProperties["distance"] = b.additionalDistanceField(class) + additionalProperties["vector"] = b.additionalVectorField(class) + additionalProperties["vectors"] = b.additionalVectorsField(class) + additionalProperties["id"] = b.additionalIDField() + additionalProperties["creationTimeUnix"] = b.additionalCreationTimeUnix() + additionalProperties["lastUpdateTimeUnix"] = b.additionalLastUpdateTimeUnix() + additionalProperties["score"] = b.additionalScoreField() + additionalProperties["explainScore"] = b.additionalExplainScoreField() + additionalProperties["group"] = b.additionalGroupField(classProperties, class) + if replicationEnabled(class) { + additionalProperties["isConsistent"] = b.isConsistentField() + } + // module specific additional properties + if b.modulesProvider != nil { + for name, field := range b.modulesProvider.GetAdditionalFields(class) { + additionalProperties[name] = field + } + } + classProperties["_additional"] = &graphql.Field{ + Type: graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditional", class.Class), + Fields: additionalProperties, + }), + } +} + +func (b *classBuilder) additionalIDField() *graphql.Field { + return &graphql.Field{ + Description: descriptions.GetClassUUID, + Type: graphql.String, + } +} + +func (b *classBuilder) additionalClassificationField(class *models.Class) *graphql.Field { + return &graphql.Field{ + Type: graphql.NewObject(graphql.ObjectConfig{ + Name: 
fmt.Sprintf("%sAdditionalClassification", class.Class), + Fields: graphql.Fields{ + "id": &graphql.Field{Type: graphql.String}, + "basedOn": &graphql.Field{Type: graphql.NewList(graphql.String)}, + "scope": &graphql.Field{Type: graphql.NewList(graphql.String)}, + "classifiedFields": &graphql.Field{Type: graphql.NewList(graphql.String)}, + "completed": &graphql.Field{Type: graphql.String}, + }, + }), + } +} + +func (b *classBuilder) additionalCertaintyField(class *models.Class) *graphql.Field { + return &graphql.Field{ + Type: graphql.Float, + } +} + +func (b *classBuilder) additionalDistanceField(class *models.Class) *graphql.Field { + return &graphql.Field{ + Type: graphql.Float, + } +} + +func (b *classBuilder) additionalVectorField(class *models.Class) *graphql.Field { + return &graphql.Field{ + Type: graphql.NewList(graphql.Float), + } +} + +func (b *classBuilder) additionalVectorsField(class *models.Class) *graphql.Field { + if len(class.VectorConfig) > 0 { + fields := graphql.Fields{} + for targetVector := range class.VectorConfig { + fields[targetVector] = &graphql.Field{ + Name: fmt.Sprintf("%sAdditionalVectors%s", class.Class, targetVector), + Type: common_filters.Vector(fmt.Sprintf("%s%s", class.Class, targetVector)), + } + } + return &graphql.Field{ + Type: graphql.NewObject( + graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalVectors", class.Class), + Fields: fields, + }, + ), + } + } + return nil +} + +func (b *classBuilder) additionalCreationTimeUnix() *graphql.Field { + return &graphql.Field{ + Type: graphql.String, + } +} + +func (b *classBuilder) additionalScoreField() *graphql.Field { + return &graphql.Field{ + Type: graphql.String, + } +} + +func (b *classBuilder) additionalExplainScoreField() *graphql.Field { + return &graphql.Field{ + Type: graphql.String, + } +} + +func (b *classBuilder) additionalLastUpdateTimeUnix() *graphql.Field { + return &graphql.Field{ + Type: graphql.String, + } +} + +func (b *classBuilder) isConsistentField() 
*graphql.Field { + return &graphql.Field{ + Type: graphql.Boolean, + } +} + +func (b *classBuilder) additionalGroupField(classProperties graphql.Fields, class *models.Class) *graphql.Field { + hitsFields := graphql.Fields{ + "_additional": &graphql.Field{ + Type: graphql.NewObject( + graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalGroupHitsAdditional", class.Class), + Fields: graphql.Fields{ + "id": &graphql.Field{Type: graphql.String}, + "vector": &graphql.Field{Type: graphql.NewList(graphql.Float)}, + "distance": &graphql.Field{Type: graphql.Float}, + }, + }, + ), + }, + } + for name, field := range classProperties { + hitsFields[name] = field + } + + return &graphql.Field{ + Type: graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalGroup", class.Class), + Fields: graphql.Fields{ + "id": &graphql.Field{Type: graphql.Int}, + "groupedBy": &graphql.Field{ + Type: graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalGroupGroupedBy", class.Class), + Fields: graphql.Fields{ + "path": &graphql.Field{ + Type: graphql.NewList(graphql.String), + }, + "value": &graphql.Field{ + Type: graphql.String, + }, + }, + }), + }, + + "minDistance": &graphql.Field{Type: graphql.Float}, + "maxDistance": &graphql.Field{Type: graphql.Float}, + "count": &graphql.Field{Type: graphql.Int}, + "hits": &graphql.Field{ + Type: graphql.NewList(graphql.NewObject( + graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalGroupHits", class.Class), + Fields: hitsFields, + }, + )), + }, + }, + }), + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder_fields.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder_fields.go new file mode 100644 index 0000000000000000000000000000000000000000..3c8b52766dc43b2fbfb146d84707cb33fc768b5a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder_fields.go @@ -0,0 +1,892 @@ +// 
_ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "errors" + "fmt" + "regexp" + "strings" + + "github.com/weaviate/weaviate/usecases/auth/authorization" + moduleadditional "github.com/weaviate/weaviate/usecases/modulecomponents/additional" + + "github.com/tailor-inc/graphql" + "github.com/tailor-inc/graphql/language/ast" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + restCtx "github.com/weaviate/weaviate/adapters/handlers/rest/context" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" +) + +func (b *classBuilder) primitiveField(propertyType schema.PropertyDataType, + property *models.Property, className string, +) *graphql.Field { + switch propertyType.AsPrimitive() { + case schema.DataTypeText: + return &graphql.Field{ + Description: property.Description, + Name: property.Name, + Type: graphql.String, + } + case schema.DataTypeInt: + return &graphql.Field{ + Description: property.Description, + Name: property.Name, + Type: graphql.Int, + } + case schema.DataTypeNumber: + return &graphql.Field{ + Description: property.Description, + Name: property.Name, + Type: graphql.Float, + } + case schema.DataTypeBoolean: + return &graphql.Field{ + Description: property.Description, + Name: 
property.Name, + Type: graphql.Boolean, + } + case schema.DataTypeDate: + return &graphql.Field{ + Description: property.Description, + Name: property.Name, + Type: graphql.String, // String since no graphql date datatype exists + } + case schema.DataTypeGeoCoordinates: + obj := newGeoCoordinatesObject(className, property.Name) + + return &graphql.Field{ + Description: property.Description, + Name: property.Name, + Type: obj, + Resolve: resolveGeoCoordinates, + } + case schema.DataTypePhoneNumber: + obj := newPhoneNumberObject(className, property.Name) + + return &graphql.Field{ + Description: property.Description, + Name: property.Name, + Type: obj, + Resolve: resolvePhoneNumber, + } + case schema.DataTypeBlob: + return &graphql.Field{ + Description: property.Description, + Name: property.Name, + Type: graphql.String, + } + case schema.DataTypeTextArray: + return &graphql.Field{ + Description: property.Description, + Name: property.Name, + Type: graphql.NewList(graphql.String), + } + case schema.DataTypeIntArray: + return &graphql.Field{ + Description: property.Description, + Name: property.Name, + Type: graphql.NewList(graphql.Int), + } + case schema.DataTypeNumberArray: + return &graphql.Field{ + Description: property.Description, + Name: property.Name, + Type: graphql.NewList(graphql.Float), + } + case schema.DataTypeBooleanArray: + return &graphql.Field{ + Description: property.Description, + Name: property.Name, + Type: graphql.NewList(graphql.Boolean), + } + case schema.DataTypeDateArray: + return &graphql.Field{ + Description: property.Description, + Name: property.Name, + Type: graphql.NewList(graphql.String), // String since no graphql date datatype exists + } + case schema.DataTypeUUIDArray: + return &graphql.Field{ + Description: property.Description, + Name: property.Name, + Type: graphql.NewList(graphql.String), // Always return UUID as string representation to the user + } + case schema.DataTypeUUID: + return &graphql.Field{ + Description: 
property.Description, + Name: property.Name, + Type: graphql.String, // Always return UUID as string representation to the user + } + default: + panic(fmt.Sprintf("buildGetClass: unknown primitive type for %s.%s; %s", + className, property.Name, propertyType.AsPrimitive())) + } +} + +func newGeoCoordinatesObject(className string, propertyName string) *graphql.Object { + return graphql.NewObject(graphql.ObjectConfig{ + Description: "GeoCoordinates as latitude and longitude in decimal form", + Name: fmt.Sprintf("%s%sGeoCoordinatesObj", className, propertyName), + Fields: graphql.Fields{ + "latitude": &graphql.Field{ + Name: "Latitude", + Description: "The Latitude of the point in decimal form.", + Type: graphql.Float, + }, + "longitude": &graphql.Field{ + Name: "Longitude", + Description: "The Longitude of the point in decimal form.", + Type: graphql.Float, + }, + }, + }) +} + +func newPhoneNumberObject(className string, propertyName string) *graphql.Object { + return graphql.NewObject(graphql.ObjectConfig{ + Description: "PhoneNumber in various parsed formats", + Name: fmt.Sprintf("%s%sPhoneNumberObj", className, propertyName), + Fields: graphql.Fields{ + "input": &graphql.Field{ + Name: "Input", + Description: "The raw phone number as put in by the user prior to parsing", + Type: graphql.String, + }, + "internationalFormatted": &graphql.Field{ + Name: "Input", + Description: "The parsed phone number in the international format", + Type: graphql.String, + }, + "nationalFormatted": &graphql.Field{ + Name: "Input", + Description: "The parsed phone number in the national format", + Type: graphql.String, + }, + "national": &graphql.Field{ + Name: "Input", + Description: "The parsed phone number in the national format", + Type: graphql.Int, + }, + "valid": &graphql.Field{ + Name: "Input", + Description: "Whether the phone number could be successfully parsed and was considered valid by the parser", + Type: graphql.Boolean, + }, + "countryCode": &graphql.Field{ + Name: 
"Input", + Description: "The parsed country code, i.e. the leading numbers identifing the country in an international format", + Type: graphql.Int, + }, + "defaultCountry": &graphql.Field{ + Name: "Input", + Description: "The defaultCountry as put in by the user. (This is used to help parse national numbers into an international format)", + Type: graphql.String, + }, + }, + }) +} + +func buildGetClassField(classObject *graphql.Object, + class *models.Class, modulesProvider ModulesProvider, fusionEnum *graphql.Enum, authorizer authorization.Authorizer, +) graphql.Field { + field := graphql.Field{ + Type: graphql.NewList(classObject), + Description: class.Description, + Args: graphql.FieldConfigArgument{ + "after": &graphql.ArgumentConfig{ + Description: descriptions.AfterID, + Type: graphql.String, + }, + "limit": &graphql.ArgumentConfig{ + Description: descriptions.Limit, + Type: graphql.Int, + }, + "offset": &graphql.ArgumentConfig{ + Description: descriptions.After, + Type: graphql.Int, + }, + "autocut": &graphql.ArgumentConfig{ + Description: "Cut off number of results after the Nth extrema. 
Off by default, negative numbers mean off.", + Type: graphql.Int, + }, + + "sort": sortArgument(class.Class), + "nearVector": nearVectorArgument(class.Class), + "nearObject": nearObjectArgument(class.Class), + "where": whereArgument(class.Class), + "group": groupArgument(class.Class), + "groupBy": groupByArgument(class.Class), + }, + Resolve: newResolver(authorizer, modulesProvider).makeResolveGetClass(class.Class), + } + + field.Args["bm25"] = bm25Argument(class.Class) + field.Args["hybrid"] = hybridArgument(classObject, class, modulesProvider, fusionEnum) + + if modulesProvider != nil { + for name, argument := range modulesProvider.GetArguments(class) { + field.Args[name] = argument + } + } + + if replicationEnabled(class) { + field.Args["consistencyLevel"] = consistencyLevelArgument(class) + } + + if schema.MultiTenancyEnabled(class) { + field.Args["tenant"] = tenantArgument() + } + + return field +} + +func resolveGeoCoordinates(p graphql.ResolveParams) (interface{}, error) { + field := p.Source.(map[string]interface{})[p.Info.FieldName] + if field == nil { + return nil, nil + } + + geo, ok := field.(*models.GeoCoordinates) + if !ok { + return nil, fmt.Errorf("expected a *models.GeoCoordinates, but got: %T", field) + } + + return map[string]interface{}{ + "latitude": geo.Latitude, + "longitude": geo.Longitude, + }, nil +} + +func resolvePhoneNumber(p graphql.ResolveParams) (interface{}, error) { + field := p.Source.(map[string]interface{})[p.Info.FieldName] + if field == nil { + return nil, nil + } + + phone, ok := field.(*models.PhoneNumber) + if !ok { + return nil, fmt.Errorf("expected a *models.PhoneNumber, but got: %T", field) + } + + return map[string]interface{}{ + "input": phone.Input, + "internationalFormatted": phone.InternationalFormatted, + "nationalFormatted": phone.NationalFormatted, + "national": phone.National, + "valid": phone.Valid, + "countryCode": phone.CountryCode, + "defaultCountry": phone.DefaultCountry, + }, nil +} + +func 
whereArgument(className string) *graphql.ArgumentConfig { + return &graphql.ArgumentConfig{ + Description: descriptions.GetWhere, + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("GetObjects%sWhereInpObj", className), + Fields: common_filters.BuildNew(fmt.Sprintf("GetObjects%s", className)), + Description: descriptions.GetWhereInpObj, + }, + ), + } +} + +type resolver struct { + authorizer authorization.Authorizer + modulesProvider ModulesProvider +} + +func newResolver(authorizer authorization.Authorizer, modulesProvider ModulesProvider) *resolver { + return &resolver{authorizer, modulesProvider} +} + +func (r *resolver) makeResolveGetClass(className string) graphql.FieldResolveFn { + return func(p graphql.ResolveParams) (interface{}, error) { + result, err := r.resolveGet(p, className) + if err != nil { + return result, enterrors.NewErrGraphQLUser(err, "Get", className) + } + return result, nil + } +} + +func (r *resolver) resolveGet(p graphql.ResolveParams, className string) (interface{}, error) { + principal := restCtx.GetPrincipalFromContext(p.Context) + + source, ok := p.Source.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("expected graphql root to be a map, but was %T", p.Source) + } + + resolver, ok := source["Resolver"].(Resolver) + if !ok { + return nil, fmt.Errorf("expected source map to have a usable Resolver, but got %#v", source["Resolver"]) + } + + var tenant string + if tk, ok := p.Args["tenant"]; ok { + tenant = tk.(string) + } + + if err := r.authorizer.Authorize(p.Context, principal, authorization.READ, authorization.ShardsData(className, tenant)...); err != nil { + return nil, err + } + + pagination, err := filters.ExtractPaginationFromArgs(p.Args) + if err != nil { + return nil, err + } + + cursor, err := filters.ExtractCursorFromArgs(p.Args) + if err != nil { + return nil, err + } + + // There can only be exactly one ast.Field; it is the class name. 
+ if len(p.Info.FieldASTs) != 1 { + panic("Only one Field expected here") + } + + selectionsOfClass := p.Info.FieldASTs[0].SelectionSet + + properties, addlProps, groupByProperties, err := extractProperties( + className, selectionsOfClass, p.Info.Fragments, r.modulesProvider) + if err != nil { + return nil, err + } + allPropsToAuthorize := append(properties, groupByProperties...) + for _, property := range allPropsToAuthorize { + if err := common_filters.AuthorizeProperty(p.Context, r.authorizer, &property, principal); err != nil { + return nil, err + } + } + + var sort []filters.Sort + if sortArg, ok := p.Args["sort"]; ok { + sort = filters.ExtractSortFromArgs(sortArg.([]interface{})) + } + + filters, err := common_filters.ExtractFilters(p.Args, p.Info.FieldName) + if err != nil { + return nil, fmt.Errorf("could not extract filters: %w", err) + } + if filters != nil { + if err = common_filters.AuthorizeFilters(p.Context, r.authorizer, filters.Root, principal); err != nil { + return nil, err + } + } + + var targetVectorCombination *dto.TargetCombination + var nearVectorParams *searchparams.NearVector + if nearVector, ok := p.Args["nearVector"]; ok { + p, targetCombination, err := common_filters.ExtractNearVector(nearVector.(map[string]interface{}), nil) + if err != nil { + return nil, fmt.Errorf("failed to extract nearVector params: %w", err) + } + nearVectorParams = &p + targetVectorCombination = targetCombination + } + + var nearObjectParams *searchparams.NearObject + if nearObject, ok := p.Args["nearObject"]; ok { + p, targetCombination, err := common_filters.ExtractNearObject(nearObject.(map[string]interface{})) + if err != nil { + return nil, fmt.Errorf("failed to extract nearObject params: %w", err) + } + nearObjectParams = &p + targetVectorCombination = targetCombination + } + + var moduleParams map[string]interface{} + if r.modulesProvider != nil { + extractedParams, extractedCombinations := r.modulesProvider.ExtractSearchParams(p.Args, className) + if 
len(extractedParams) > 0 { + moduleParams = extractedParams + } + if len(extractedCombinations) == 1 { + for _, val := range extractedCombinations { + targetVectorCombination = val + } + } + } + + // extracts bm25 (sparseSearch) from the query + var keywordRankingParams *searchparams.KeywordRanking + if bm25, ok := p.Args["bm25"]; ok { + if len(sort) > 0 { + return nil, fmt.Errorf("bm25 search is not compatible with sort") + } + p := common_filters.ExtractBM25(bm25.(map[string]interface{}), addlProps.ExplainScore) + keywordRankingParams = &p + } + + // Extract hybrid search params from the processed query + // Everything hybrid can go in another namespace AFTER modulesprovider is + // refactored + var hybridParams *searchparams.HybridSearch + if hybrid, ok := p.Args["hybrid"]; ok { + if len(sort) > 0 { + return nil, fmt.Errorf("hybrid search is not compatible with sort") + } + p, targetCombination, err := common_filters.ExtractHybridSearch(hybrid.(map[string]interface{}), addlProps.ExplainScore) + if err != nil { + return nil, fmt.Errorf("failed to extract hybrid params: %w", err) + } + hybridParams = p + targetVectorCombination = targetCombination + } + + var replProps *additional.ReplicationProperties + if cl, ok := p.Args["consistencyLevel"]; ok { + replProps = &additional.ReplicationProperties{ + ConsistencyLevel: cl.(string), + } + } + + group := extractGroup(p.Args) + + var groupByParams *searchparams.GroupBy + if groupBy, ok := p.Args["groupBy"]; ok { + p := common_filters.ExtractGroupBy(groupBy.(map[string]interface{})) + p.Properties = groupByProperties + groupByParams = &p + } + + params := dto.GetParams{ + Filters: filters, + ClassName: className, + Pagination: pagination, + Cursor: cursor, + Properties: properties, + Sort: sort, + NearVector: nearVectorParams, + NearObject: nearObjectParams, + Group: group, + ModuleParams: moduleParams, + AdditionalProperties: addlProps, + KeywordRanking: keywordRankingParams, + HybridSearch: hybridParams, + 
ReplicationProperties: replProps, + GroupBy: groupByParams, + Tenant: tenant, + TargetVectorCombination: targetVectorCombination, + } + + // need to perform vector search by distance + // under certain conditions + setLimitBasedOnVectorSearchParams(¶ms) + + return func() (interface{}, error) { + result, err := resolver.GetClass(p.Context, principal, params) + if err != nil { + return result, enterrors.NewErrGraphQLUser(err, "Get", params.ClassName) + } + return result, nil + }, nil +} + +// the limit needs to be set according to the vector search parameters. +// for example, if a certainty is provided by any of the near* options, +// and no limit was provided, weaviate will want to execute a vector +// search by distance. it knows to do this by watching for a limit +// flag, specifically filters.LimitFlagSearchByDistance +func setLimitBasedOnVectorSearchParams(params *dto.GetParams) { + setLimit := func(params *dto.GetParams) { + if params.Pagination == nil { + // limit was omitted entirely, implicitly + // indicating to do unlimited search + params.Pagination = &filters.Pagination{ + Limit: filters.LimitFlagSearchByDist, + } + } else if params.Pagination.Limit < 0 { + // a negative limit was set, explicitly + // indicating to do unlimited search + params.Pagination.Limit = filters.LimitFlagSearchByDist + } + } + + if params.NearVector != nil && + (params.NearVector.Certainty != 0 || params.NearVector.WithDistance) { + setLimit(params) + return + } + + if params.NearObject != nil && + (params.NearObject.Certainty != 0 || params.NearObject.WithDistance) { + setLimit(params) + return + } + + for _, param := range params.ModuleParams { + nearParam, ok := param.(modulecapabilities.NearParam) + if ok && nearParam.SimilarityMetricProvided() { + setLimit(params) + return + } + } +} + +func extractGroup(args map[string]interface{}) *dto.GroupParams { + group, ok := args["group"] + if !ok { + return nil + } + + asMap := group.(map[string]interface{}) // guaranteed by 
graphql + strategy := asMap["type"].(string) + force := asMap["force"].(float64) + return &dto.GroupParams{ + Strategy: strategy, + Force: float32(force), + } +} + +func isPrimitive(selectionSet *ast.SelectionSet) bool { + if selectionSet == nil { + return true + } + + // if there is a selection set it could either be a cross-ref or a map-type + // field like GeoCoordinates or PhoneNumber + for _, subSelection := range selectionSet.Selections { + if subsectionField, ok := subSelection.(*ast.Field); ok { + if fieldNameIsOfObjectButNonReferenceType(subsectionField.Name.Value) { + return true + } + } + } + + // must be a ref field + return false +} + +type additionalCheck struct { + modulesProvider ModulesProvider +} + +func (ac *additionalCheck) isAdditional(parentName, name string) bool { + if parentName == "_additional" { + if name == "classification" || name == "certainty" || + name == "distance" || name == "id" || name == "vector" || name == "vectors" || + name == "creationTimeUnix" || name == "lastUpdateTimeUnix" || + name == "score" || name == "explainScore" || name == "isConsistent" || + name == "group" { + return true + } + if ac.isModuleAdditional(name) { + return true + } + } + return false +} + +func (ac *additionalCheck) isModuleAdditional(name string) bool { + if ac.modulesProvider != nil { + if len(ac.modulesProvider.GraphQLAdditionalFieldNames()) > 0 { + for _, moduleAdditionalProperty := range ac.modulesProvider.GraphQLAdditionalFieldNames() { + if name == moduleAdditionalProperty { + return true + } + } + } + } + return false +} + +func fieldNameIsOfObjectButNonReferenceType(field string) bool { + switch field { + case "latitude", "longitude": + // must be a geo prop + return true + case "input", "internationalFormatted", "nationalFormatted", "national", + "valid", "countryCode", "defaultCountry": + // must be a phone number + return true + default: + return false + } +} + +func extractProperties(className string, selections *ast.SelectionSet, + 
fragments map[string]ast.Definition, + modulesProvider ModulesProvider, +) ([]search.SelectProperty, additional.Properties, []search.SelectProperty, error) { + var properties []search.SelectProperty + var additionalGroupHitProperties []search.SelectProperty + var additionalProps additional.Properties + additionalCheck := &additionalCheck{modulesProvider} + + for _, selection := range selections.Selections { + field := selection.(*ast.Field) + name := field.Name.Value + property := search.SelectProperty{Name: name} + + property.IsPrimitive = isPrimitive(field.SelectionSet) + if !property.IsPrimitive { + // We can interpret this property in different ways + for _, subSelection := range field.SelectionSet.Selections { + switch s := subSelection.(type) { + case *ast.Field: + // Is it a field with the name __typename? + if s.Name.Value == "__typename" { + property.IncludeTypeName = true + continue + } else if additionalCheck.isAdditional(name, s.Name.Value) { + additionalProperty := s.Name.Value + if additionalProperty == "classification" { + additionalProps.Classification = true + continue + } + if additionalProperty == "certainty" { + additionalProps.Certainty = true + continue + } + if additionalProperty == "distance" { + additionalProps.Distance = true + continue + } + if additionalProperty == "id" { + additionalProps.ID = true + continue + } + if additionalProperty == "vector" { + additionalProps.Vector = true + continue + } + if additionalProperty == "vectors" { + if s.SelectionSet != nil && len(s.SelectionSet.Selections) > 0 { + vectors := make([]string, len(s.SelectionSet.Selections)) + for i, selection := range s.SelectionSet.Selections { + if field, ok := selection.(*ast.Field); ok { + vectors[i] = field.Name.Value + } + } + additionalProps.Vectors = vectors + } + continue + } + if additionalProperty == "creationTimeUnix" { + additionalProps.CreationTimeUnix = true + continue + } + if additionalProperty == "score" { + additionalProps.Score = true + continue + 
} + if additionalProperty == "explainScore" { + additionalProps.ExplainScore = true + continue + } + if additionalProperty == "lastUpdateTimeUnix" { + additionalProps.LastUpdateTimeUnix = true + continue + } + if additionalProperty == "isConsistent" { + additionalProps.IsConsistent = true + continue + } + if additionalProperty == "group" { + additionalProps.Group = true + var err error + additionalGroupHitProperties, err = extractGroupHitProperties(className, additionalProps, subSelection, fragments, modulesProvider) + if err != nil { + return nil, additionalProps, nil, err + } + continue + } + if modulesProvider != nil { + if additionalCheck.isModuleAdditional(additionalProperty) { + additionalProps.ModuleParams = getModuleParams(additionalProps.ModuleParams) + extracted := modulesProvider.ExtractAdditionalField(className, additionalProperty, s.Arguments) + if extractor, ok := extracted.(moduleadditional.PropertyExtractor); ok { + extractedProperties := extractor.GetPropertiesToExtract() + for _, extractedProperty := range extractedProperties { + properties = append(properties, search.SelectProperty{Name: extractedProperty}) + } + } + additionalProps.ModuleParams[additionalProperty] = extracted + continue + } + } + } else { + // It's an object / object array property + continue + } + + case *ast.FragmentSpread: + ref, err := extractFragmentSpread(className, s, fragments, modulesProvider) + if err != nil { + return nil, additionalProps, nil, err + } + + property.Refs = append(property.Refs, ref) + + case *ast.InlineFragment: + ref, err := extractInlineFragment(className, s, fragments, modulesProvider) + if err != nil { + return nil, additionalProps, nil, err + } + + property.Refs = append(property.Refs, ref) + + default: + return nil, additionalProps, nil, fmt.Errorf("unrecoginzed type in subs-selection: %T", subSelection) + } + } + } + + if name == "_additional" { + continue + } + + properties = append(properties, property) + } + + return properties, 
additionalProps, additionalGroupHitProperties, nil +} + +func extractGroupHitProperties( + className string, + additionalProps additional.Properties, + subSelection ast.Selection, + fragments map[string]ast.Definition, + modulesProvider ModulesProvider, +) ([]search.SelectProperty, error) { + additionalGroupProperties := []search.SelectProperty{} + if subSelection != nil { + if selectionSet := subSelection.GetSelectionSet(); selectionSet != nil { + for _, groupSubSelection := range selectionSet.Selections { + if groupSubSelection != nil { + if groupSubSelectionField, ok := groupSubSelection.(*ast.Field); ok { + if groupSubSelectionField.Name.Value == "hits" && groupSubSelectionField.SelectionSet != nil { + for _, groupHitsSubSelection := range groupSubSelectionField.SelectionSet.Selections { + if hf, ok := groupHitsSubSelection.(*ast.Field); ok { + if hf.SelectionSet != nil { + for _, ss := range hf.SelectionSet.Selections { + if inlineFrag, ok := ss.(*ast.InlineFragment); ok { + ref, err := extractInlineFragment(className, inlineFrag, fragments, modulesProvider) + if err != nil { + return nil, err + } + + additionalGroupHitProp := search.SelectProperty{Name: hf.Name.Value} + additionalGroupHitProp.Refs = append(additionalGroupHitProp.Refs, ref) + additionalGroupProperties = append(additionalGroupProperties, additionalGroupHitProp) + } + } + } else { + additionalGroupProperties = append(additionalGroupProperties, search.SelectProperty{Name: hf.Name.Value}) + } + } + } + } + } + } + } + } + } + return additionalGroupProperties, nil +} + +func getModuleParams(moduleParams map[string]interface{}) map[string]interface{} { + if moduleParams == nil { + return map[string]interface{}{} + } + return moduleParams +} + +func extractInlineFragment(class string, fragment *ast.InlineFragment, + fragments map[string]ast.Definition, + modulesProvider ModulesProvider, +) (search.SelectClass, error) { + var className schema.ClassName + var err error + var result search.SelectClass + 
+ if strings.Contains(fragment.TypeCondition.Name.Value, "__") { + // is a helper type for a network ref + // don't validate anything as of now + className = schema.ClassName(fragment.TypeCondition.Name.Value) + } else { + className, err = schema.ValidateClassName(fragment.TypeCondition.Name.Value) + if err != nil { + return result, fmt.Errorf("the inline fragment type name '%s' is not a valid class name", fragment.TypeCondition.Name.Value) + } + } + + if className == "Beacon" { + return result, errors.New("retrieving cross-refs by beacon is not supported yet - coming soon!") + } + + subProperties, additionalProperties, _, err := extractProperties(class, fragment.SelectionSet, fragments, modulesProvider) + if err != nil { + return result, err + } + + result.ClassName = string(className) + result.RefProperties = subProperties + result.AdditionalProperties = additionalProperties + return result, nil +} + +func extractFragmentSpread(class string, spread *ast.FragmentSpread, + fragments map[string]ast.Definition, + modulesProvider ModulesProvider, +) (search.SelectClass, error) { + var result search.SelectClass + name := spread.Name.Value + + def, ok := fragments[name] + if !ok { + return result, fmt.Errorf("spread fragment '%s' refers to unknown fragment", name) + } + + className, err := hackyWorkaroundToExtractClassName(def, name) + if err != nil { + return result, err + } + + subProperties, additionalProperties, _, err := extractProperties(class, def.GetSelectionSet(), fragments, modulesProvider) + if err != nil { + return result, err + } + + result.ClassName = string(className) + result.RefProperties = subProperties + result.AdditionalProperties = additionalProperties + return result, nil +} + +// It seems there's no proper way to extract this info unfortunately: +// https://github.com/tailor-inc/graphql/issues/455 +func hackyWorkaroundToExtractClassName(def ast.Definition, name string) (string, error) { + loc := def.GetLoc() + raw := 
loc.Source.Body[loc.Start:loc.End] + r := regexp.MustCompile(fmt.Sprintf(`fragment\s*%s\s*on\s*(\w*)\s*{`, name)) + matches := r.FindSubmatch(raw) + if len(matches) < 2 { + return "", fmt.Errorf("could not extract a className from fragment") + } + + return string(matches[1]), nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder_nested.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder_nested.go new file mode 100644 index 0000000000000000000000000000000000000000..fecc9a793c5eacfbdc59e3e974f334243abe44f2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder_nested.go @@ -0,0 +1,87 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +func (b *classBuilder) nestedField(propertyType schema.PropertyDataType, + property *models.Property, className string, +) *graphql.Field { + return b.parseNestedProperties(property.NestedProperties, className, property.Name, property.DataType) +} + +func (b *classBuilder) parseNestedProperties(nestedProps []*models.NestedProperty, + className, prefix string, propDataType []string, +) *graphql.Field { + fields := graphql.Fields{} + for _, prop := range nestedProps { + if prop.NestedProperties != nil { + fields[prop.Name] = b.parseNestedProperties(prop.NestedProperties, + className, fmt.Sprintf("%s_%s", prefix, prop.Name), prop.DataType) + } else { + fields[prop.Name] = &graphql.Field{ + Name: fmt.Sprintf("%s_%s_%s_field", className, prefix, prop.Name), + Type: 
b.determinNestedPropertyType(prop.DataType, prop.Name), + } + } + } + + fieldType := graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%s_%s_object", className, prefix), + Fields: fields, + }) + + if len(propDataType) == 1 && propDataType[0] == schema.DataTypeObjectArray.String() { + return &graphql.Field{Type: graphql.NewList(fieldType)} + } + return &graphql.Field{Type: fieldType} +} + +func (b *classBuilder) determinNestedPropertyType(dataType []string, propName string) graphql.Output { + switch schema.DataType(dataType[0]) { + case schema.DataTypeText, schema.DataTypeString: + return graphql.String + case schema.DataTypeInt: + return graphql.Int + case schema.DataTypeNumber: + return graphql.Float + case schema.DataTypeBoolean: + return graphql.Boolean + case schema.DataTypeDate: + return graphql.String + case schema.DataTypeBlob: + return graphql.String + case schema.DataTypeUUID: + return graphql.String + case schema.DataTypeTextArray, schema.DataTypeStringArray: + return graphql.NewList(graphql.String) + case schema.DataTypeIntArray: + return graphql.NewList(graphql.Int) + case schema.DataTypeNumberArray: + return graphql.NewList(graphql.Float) + case schema.DataTypeBooleanArray: + return graphql.NewList(graphql.Boolean) + case schema.DataTypeDateArray: + return graphql.NewList(graphql.String) + case schema.DataTypeUUIDArray: + return graphql.NewList(graphql.String) + default: + panic(fmt.Sprintf("determinNestedPropertyType: unknown primitive type for property %s: %s", + propName, dataType[0])) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder_references.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder_references.go new file mode 100644 index 0000000000000000000000000000000000000000..cc3b9e0b4284e7fc7b6d232d6e82acf607c0c8a4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder_references.go @@ 
-0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +func (b *classBuilder) referenceField(propertyType schema.PropertyDataType, + property *models.Property, className string, +) *graphql.Field { + refClasses := propertyType.Classes() + propertyName := cases.Title(language.Und, cases.NoLower).String(property.Name) + dataTypeClasses := []*graphql.Object{} + + for _, refClassName := range refClasses { + // is a local ref + refClass, ok := b.knownClasses[string(refClassName)] + if !ok { + panic(fmt.Sprintf("buildGetClass: unknown referenced class type for %s.%s; %s", + className, property.Name, refClassName)) + } + + dataTypeClasses = append(dataTypeClasses, refClass) + } + + if (len(dataTypeClasses)) == 0 { + // this could be the case when we only have network-refs, but all network + // refs were invalid (e.g. because the peers are gone). In this case we + // must return (nil) early, otherwise graphql will error because it has a + // union field with an empty list of unions. 
+ return nil + } + + dataTypeClasses = append(dataTypeClasses, b.beaconClass) + + classUnion := graphql.NewUnion(graphql.UnionConfig{ + Name: fmt.Sprintf("%s%s%s", className, propertyName, "Obj"), + Types: dataTypeClasses, + ResolveType: makeResolveClassUnionType(&b.knownClasses), + Description: property.Description, + }) + + return &graphql.Field{ + Type: graphql.NewList(classUnion), + Description: property.Description, + Resolve: makeResolveRefField(), + } +} + +func makeResolveClassUnionType(knownClasses *map[string]*graphql.Object) graphql.ResolveTypeFn { + return func(p graphql.ResolveTypeParams) *graphql.Object { + valueMap := p.Value.(map[string]interface{}) + refType := valueMap["__refClassType"].(string) + switch refType { + case "local": + className := valueMap["__refClassName"].(string) + classObj, ok := (*knownClasses)[className] + if !ok { + panic(fmt.Errorf( + "local ref refers to class '%s', but no such kind exists in the peer network", className)) + } + return classObj + default: + panic(fmt.Sprintf("unknown ref type %#v", refType)) + } + } +} + +func makeResolveRefField() graphql.FieldResolveFn { + return func(p graphql.ResolveParams) (interface{}, error) { + if p.Source.(map[string]interface{})[p.Info.FieldName] == nil { + return nil, nil + } + + items, ok := p.Source.(map[string]interface{})[p.Info.FieldName].([]interface{}) + if !ok { + // could be a models.MultipleRef which would indicate that we found only + // unresolved references, this is the case when accepts refs to types + // ClassA and ClassB and the object only contains refs to one type (e.g. + // ClassA). Now if the user only asks for resolving all of the other type + // (i.e. ClassB), then all results would be returned unresolved (as + // models.MultipleRef). 
+ + return nil, nil + } + results := make([]interface{}, len(items)) + for i, item := range items { + switch v := item.(type) { + case search.LocalRef: + // inject some meta data so the ResolveType can determine the type + localRef := v.Fields + localRef["__refClassType"] = "local" + localRef["__refClassName"] = v.Class + results[i] = localRef + default: + return nil, fmt.Errorf("unsupported type, expected search.LocalRef or NetworkRef, got %T", v) + } + } + return results, nil + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder_references_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder_references_test.go new file mode 100644 index 0000000000000000000000000000000000000000..978db7b277635081a521f3c838a43cd80a9d6979 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/class_builder_references_test.go @@ -0,0 +1,61 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" +) + +func TestGetNoNetworkRequestIsMadeWhenUserDoesntWantNetworkRef(t *testing.T) { + t.Parallel() + + resolver := newMockResolver() + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + AdditionalProperties: additional.Properties{ + ID: true, + }, + } + + resolverResponse := []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "id": "some-uuid-for-the-local-class", + }, + }, + } + + resolver.On("GetClass", expectedParams). 
+ Return(resolverResponse, nil).Once() + + query := "{ Get { SomeThing { _additional { id } } } }" + result := resolver.AssertResolve(t, query).Result + + expectedResult := map[string]interface{}{ + "Get": map[string]interface{}{ + "SomeThing": []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "id": "some-uuid-for-the-local-class", + }, + }, + }, + }, + } + + assert.Equal(t, expectedResult, result, "should resolve the network cross-ref correctly") +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/explore_argument.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/explore_argument.go new file mode 100644 index 0000000000000000000000000000000000000000..01ae44186b342eeea2b0f492f0e44b1227594f59 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/explore_argument.go @@ -0,0 +1,99 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" +) + +func nearVectorArgument(className string) *graphql.ArgumentConfig { + return common_filters.NearVectorArgument("GetObjects", className, true) +} + +func nearObjectArgument(className string) *graphql.ArgumentConfig { + return common_filters.NearObjectArgument("GetObjects", className, true) +} + +func nearTextFields(prefix string) graphql.InputObjectConfigFieldMap { + nearTextFields := graphql.InputObjectConfigFieldMap{ + "concepts": &graphql.InputObjectFieldConfig{ + // Description: descriptions.Concepts, + Type: graphql.NewNonNull(graphql.NewList(graphql.String)), + }, + "moveTo": &graphql.InputObjectFieldConfig{ + Description: descriptions.VectorMovement, + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMoveTo", prefix), + Fields: movementInp(fmt.Sprintf("%sMoveTo", prefix)), + }), + }, + "certainty": &graphql.InputObjectFieldConfig{ + Description: descriptions.Certainty, + Type: graphql.Float, + }, + "distance": &graphql.InputObjectFieldConfig{ + Description: descriptions.Distance, + Type: graphql.Float, + }, + "moveAwayFrom": &graphql.InputObjectFieldConfig{ + Description: descriptions.VectorMovement, + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMoveAwayFrom", prefix), + Fields: movementInp(fmt.Sprintf("%sMoveAwayFrom", prefix)), + }), + }, + } + return nearTextFields +} + +func movementInp(prefix string) graphql.InputObjectConfigFieldMap { + return graphql.InputObjectConfigFieldMap{ + "concepts": &graphql.InputObjectFieldConfig{ + Description: descriptions.Keywords, + Type: graphql.NewList(graphql.String), + }, + "objects": &graphql.InputObjectFieldConfig{ + Description: "objects", + Type: 
graphql.NewList(objectsInpObj(prefix)), + }, + "force": &graphql.InputObjectFieldConfig{ + Description: descriptions.Force, + Type: graphql.NewNonNull(graphql.Float), + }, + } +} + +func objectsInpObj(prefix string) *graphql.InputObject { + return graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMovementObjectsInpObj", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "id": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: "id of an object", + }, + "beacon": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: descriptions.Beacon, + }, + }, + Description: "Movement Object", + }, + ) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/get.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/get.go new file mode 100644 index 0000000000000000000000000000000000000000..96504e6e30f82bb2643c63394c29388b3600c494 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/get.go @@ -0,0 +1,64 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "github.com/sirupsen/logrus" + "github.com/tailor-inc/graphql" + "github.com/tailor-inc/graphql/language/ast" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" + "github.com/weaviate/weaviate/adapters/handlers/graphql/utils" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +type ModulesProvider interface { + GetArguments(class *models.Class) map[string]*graphql.ArgumentConfig + ExtractSearchParams(arguments map[string]interface{}, className string) (map[string]interface{}, map[string]*dto.TargetCombination) + GetAdditionalFields(class *models.Class) map[string]*graphql.Field + ExtractAdditionalField(className, name string, params []*ast.Argument) interface{} + GraphQLAdditionalFieldNames() []string + GetAll() []modulecapabilities.Module +} + +// Build the Local.Get part of the graphql tree +func Build(schema *schema.SchemaWithAliases, logger logrus.FieldLogger, + modulesProvider ModulesProvider, authorizer authorization.Authorizer, +) (*graphql.Field, error) { + if len(schema.Objects.Classes) == 0 { + return nil, utils.ErrEmptySchema + } + + cb := newClassBuilder(schema, logger, modulesProvider, authorizer) + + var err error + var objects *graphql.Object + if len(schema.Objects.Classes) > 0 { + objects, err = cb.objects() + if err != nil { + return nil, err + } + } + + return &graphql.Field{ + Name: "Get", + Description: descriptions.GetObjects, + Type: objects, + Resolve: func(p graphql.ResolveParams) (interface{}, error) { + // Does nothing; pass through the filters + return p.Source, nil + }, + }, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/get_test.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/get_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0c4a678cc071fc11f0d200c1d7392d66a88c5e04 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/get_test.go @@ -0,0 +1,3523 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// These tests verify that the parameters to the resolver are properly extracted from a GraphQL query. + +package get + +import ( + "fmt" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tailor-inc/graphql/language/ast" + test_helper "github.com/weaviate/weaviate/adapters/handlers/graphql/test/helper" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + helper "github.com/weaviate/weaviate/test/helper" +) + +func TestSimpleFieldParamsOK(t *testing.T) { + t.Parallel() + resolver := newMockResolver() + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + } + + resolver.On("GetClass", expectedParams). 
+ Return(test_helper.EmptyList(), nil).Once() + + resolver.AssertResolve(t, "{ Get { SomeAction { intField } } }") +} + +func TestExtractIntField(t *testing.T) { + t.Parallel() + + resolver := newMockResolver() + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + } + + resolver.On("GetClass", expectedParams). + Return(test_helper.EmptyList(), nil).Once() + + query := "{ Get { SomeAction { intField } } }" + resolver.AssertResolve(t, query) +} + +func TestExtractGeoCoordinatesField(t *testing.T) { + t.Parallel() + + resolver := newMockResolver() + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "location", IsPrimitive: true}}, + } + + resolverReturn := []interface{}{ + map[string]interface{}{ + "location": &models.GeoCoordinates{Latitude: ptFloat32(0.5), Longitude: ptFloat32(0.6)}, + }, + } + + resolver.On("GetClass", expectedParams). + Return(resolverReturn, nil).Once() + + query := "{ Get { SomeAction { location { latitude longitude } } } }" + result := resolver.AssertResolve(t, query) + + expectedLocation := map[string]interface{}{ + "location": map[string]interface{}{ + "latitude": float32(0.5), + "longitude": float32(0.6), + }, + } + + assert.Equal(t, expectedLocation, result.Get("Get", "SomeAction").Result.([]interface{})[0]) +} + +func TestExtractUUIDField(t *testing.T) { + t.Parallel() + + resolver := newMockResolver() + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "uuidField", IsPrimitive: true}}, + } + + id := uuid.New() + + resolverReturn := []interface{}{ + map[string]interface{}{ + "uuidField": id, + }, + } + + resolver.On("GetClass", expectedParams). 
+ Return(resolverReturn, nil).Once() + + query := "{ Get { SomeAction { uuidField } } }" + result := resolver.AssertResolve(t, query) + + expectedProps := map[string]interface{}{ + "uuidField": id.String(), + } + + assert.Equal(t, expectedProps, result.Get("Get", "SomeAction").Result.([]interface{})[0]) +} + +func TestExtractUUIDArrayField(t *testing.T) { + t.Parallel() + + resolver := newMockResolver() + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "uuidArrayField", IsPrimitive: true}}, + } + + id1 := uuid.New() + id2 := uuid.New() + + resolverReturn := []interface{}{ + map[string]interface{}{ + "uuidArrayField": []uuid.UUID{id1, id2}, + }, + } + + resolver.On("GetClass", expectedParams). + Return(resolverReturn, nil).Once() + + query := "{ Get { SomeAction { uuidArrayField } } }" + result := resolver.AssertResolve(t, query) + + expectedProps := map[string]interface{}{ + "uuidArrayField": []any{id1.String(), id2.String()}, + } + + assert.Equal(t, expectedProps, result.Get("Get", "SomeAction").Result.([]interface{})[0]) +} + +func TestExtractPhoneNumberField(t *testing.T) { + // We need to explicitly test all cases of asking for just one sub-property + // at a time, because the AST-parsing uses known fields of known props to + // distinguish a complex primitive prop from a reference prop + // + // See "isPrimitive()" and "fieldNameIsOfObjectButNonReferenceType" in + // class_builder_fields.go for more details + + type test struct { + name string + query string + expectedParams dto.GetParams + resolverReturn interface{} + expectedResult interface{} + } + + tests := []test{ + { + name: "with only input requested", + query: "{ Get { SomeAction { phone { input } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "phone", IsPrimitive: true}}, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "phone": &models.PhoneNumber{Input: "+49 
171 1234567"}, + }, + }, + expectedResult: map[string]interface{}{ + "phone": map[string]interface{}{ + "input": "+49 171 1234567", + }, + }, + }, + { + name: "with only internationalFormatted requested", + query: "{ Get { SomeAction { phone { internationalFormatted } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "phone", IsPrimitive: true}}, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "phone": &models.PhoneNumber{InternationalFormatted: "+49 171 1234567"}, + }, + }, + expectedResult: map[string]interface{}{ + "phone": map[string]interface{}{ + "internationalFormatted": "+49 171 1234567", + }, + }, + }, + { + name: "with only nationalFormatted requested", + query: "{ Get { SomeAction { phone { nationalFormatted } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "phone", IsPrimitive: true}}, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "phone": &models.PhoneNumber{NationalFormatted: "0171 1234567"}, + }, + }, + expectedResult: map[string]interface{}{ + "phone": map[string]interface{}{ + "nationalFormatted": "0171 1234567", + }, + }, + }, + { + name: "with only national requested", + query: "{ Get { SomeAction { phone { national } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "phone", IsPrimitive: true}}, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "phone": &models.PhoneNumber{National: 0o1711234567}, + }, + }, + expectedResult: map[string]interface{}{ + "phone": map[string]interface{}{ + "national": 0o1711234567, + }, + }, + }, + { + name: "with only valid requested", + query: "{ Get { SomeAction { phone { valid } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "phone", IsPrimitive: true}}, + }, + resolverReturn: []interface{}{ + 
map[string]interface{}{ + "phone": &models.PhoneNumber{Valid: true}, + }, + }, + expectedResult: map[string]interface{}{ + "phone": map[string]interface{}{ + "valid": true, + }, + }, + }, + { + name: "with only countryCode requested", + query: "{ Get { SomeAction { phone { countryCode } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "phone", IsPrimitive: true}}, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "phone": &models.PhoneNumber{CountryCode: 49}, + }, + }, + expectedResult: map[string]interface{}{ + "phone": map[string]interface{}{ + "countryCode": 49, + }, + }, + }, + { + name: "with only defaultCountry requested", + query: "{ Get { SomeAction { phone { defaultCountry } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "phone", IsPrimitive: true}}, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "phone": &models.PhoneNumber{DefaultCountry: "DE"}, + }, + }, + expectedResult: map[string]interface{}{ + "phone": map[string]interface{}{ + "defaultCountry": "DE", + }, + }, + }, + { + name: "with multiple fields set", + query: "{ Get { SomeAction { phone { input internationalFormatted " + + "nationalFormatted defaultCountry national countryCode valid } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "phone", IsPrimitive: true}}, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "phone": &models.PhoneNumber{ + DefaultCountry: "DE", + CountryCode: 49, + NationalFormatted: "0171 123456", + InternationalFormatted: "+49 171 123456", + National: 171123456, + Input: "0171123456", + Valid: true, + }, + }, + }, + expectedResult: map[string]interface{}{ + "phone": map[string]interface{}{ + "defaultCountry": "DE", + "countryCode": 49, + "nationalFormatted": "0171 123456", + "internationalFormatted": "+49 171 123456", + "national": 
171123456, + "input": "0171123456", + "valid": true, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + resolver := newMockResolver() + + resolver.On("GetClass", test.expectedParams). + Return(test.resolverReturn, nil).Once() + result := resolver.AssertResolve(t, test.query) + assert.Equal(t, test.expectedResult, result.Get("Get", "SomeAction").Result.([]interface{})[0]) + }) + } +} + +func TestExtractAdditionalFields(t *testing.T) { + // We don't need to explicitly test every subselection as we did on + // phoneNumber as these fields have fixed keys. So we can simply check for + // the prop + + type test struct { + name string + query string + expectedParams dto.GetParams + resolverReturn interface{} + expectedResult interface{} + } + + // To facilitate testing timestamps + nowString := fmt.Sprint(time.Now().UnixNano() / int64(time.Millisecond)) + + tests := []test{ + { + name: "with _additional distance", + query: "{ Get { SomeAction { _additional { distance } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + AdditionalProperties: additional.Properties{ + Distance: true, + }, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "distance": helper.CertaintyToDist(t, 0.69), + }, + }, + }, + expectedResult: map[string]interface{}{ + "_additional": map[string]interface{}{ + "distance": helper.CertaintyToDist(t, 0.69), + }, + }, + }, + { + name: "with _additional certainty", + query: "{ Get { SomeAction { _additional { certainty } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + AdditionalProperties: additional.Properties{ + Certainty: true, + }, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "certainty": 0.69, + "distance": helper.CertaintyToDist(t, 0.69), + }, + }, + }, + expectedResult: map[string]interface{}{ + "_additional": map[string]interface{}{ + "certainty": 0.69, 
+ }, + }, + }, + { + name: "with _additional vector", + query: "{ Get { SomeAction { _additional { vector } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + AdditionalProperties: additional.Properties{ + Vector: true, + }, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "vector": []float32{0.1, -0.3}, + }, + }, + }, + expectedResult: map[string]interface{}{ + "_additional": map[string]interface{}{ + "vector": []interface{}{float32(0.1), float32(-0.3)}, + }, + }, + }, + { + name: "with _additional creationTimeUnix", + query: "{ Get { SomeAction { _additional { creationTimeUnix } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + AdditionalProperties: additional.Properties{ + CreationTimeUnix: true, + }, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "creationTimeUnix": nowString, + }, + }, + }, + expectedResult: map[string]interface{}{ + "_additional": map[string]interface{}{ + "creationTimeUnix": nowString, + }, + }, + }, + { + name: "with _additional lastUpdateTimeUnix", + query: "{ Get { SomeAction { _additional { lastUpdateTimeUnix } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + AdditionalProperties: additional.Properties{ + LastUpdateTimeUnix: true, + }, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "lastUpdateTimeUnix": nowString, + }, + }, + }, + expectedResult: map[string]interface{}{ + "_additional": map[string]interface{}{ + "lastUpdateTimeUnix": nowString, + }, + }, + }, + { + name: "with _additional classification", + query: "{ Get { SomeAction { _additional { classification { id completed classifiedFields scope basedOn } } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + AdditionalProperties: additional.Properties{ + Classification: true, + }, + }, + resolverReturn: []interface{}{ + 
map[string]interface{}{ + "_additional": models.AdditionalProperties{ + "classification": &additional.Classification{ + ID: "12345", + BasedOn: []string{"primitiveProp"}, + Scope: []string{"refprop1", "refprop2", "refprop3"}, + ClassifiedFields: []string{"refprop3"}, + Completed: timeMust(strfmt.ParseDateTime("2006-01-02T15:04:05.000Z")), + }, + }, + }, + }, + expectedResult: map[string]interface{}{ + "_additional": map[string]interface{}{ + "classification": map[string]interface{}{ + "id": strfmt.UUID("12345"), + "basedOn": []interface{}{"primitiveProp"}, + "scope": []interface{}{"refprop1", "refprop2", "refprop3"}, + "classifiedFields": []interface{}{"refprop3"}, + "completed": "2006-01-02T15:04:05.000Z", + }, + }, + }, + }, + { + name: "with _additional interpretation", + query: "{ Get { SomeAction { _additional { interpretation { source { concept weight occurrence } } } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + AdditionalProperties: additional.Properties{ + ModuleParams: map[string]interface{}{ + "interpretation": true, + }, + }, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "interpretation": &Interpretation{ + Source: []*InterpretationSource{ + { + Concept: "foo", + Weight: 0.6, + Occurrence: 1200, + }, + { + Concept: "bar", + Weight: 0.9, + Occurrence: 800, + }, + }, + }, + }, + }, + }, + expectedResult: map[string]interface{}{ + "_additional": map[string]interface{}{ + "interpretation": map[string]interface{}{ + "source": []interface{}{ + map[string]interface{}{ + "concept": "foo", + "weight": 0.6, + "occurrence": 1200, + }, + map[string]interface{}{ + "concept": "bar", + "weight": 0.9, + "occurrence": 800, + }, + }, + }, + }, + }, + }, + { + name: "with _additional nearestNeighbors", + query: "{ Get { SomeAction { _additional { nearestNeighbors { neighbors { concept distance } } } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + 
AdditionalProperties: additional.Properties{ + ModuleParams: map[string]interface{}{ + "nearestNeighbors": true, + }, + }, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "nearestNeighbors": &NearestNeighbors{ + Neighbors: []*NearestNeighbor{ + { + Concept: "foo", + Distance: 0.1, + }, + { + Concept: "bar", + Distance: 0.2, + }, + }, + }, + }, + }, + }, + expectedResult: map[string]interface{}{ + "_additional": map[string]interface{}{ + "nearestNeighbors": map[string]interface{}{ + "neighbors": []interface{}{ + map[string]interface{}{ + "concept": "foo", + "distance": float32(0.1), + }, + map[string]interface{}{ + "concept": "bar", + "distance": float32(0.2), + }, + }, + }, + }, + }, + }, + { + name: "with _additional featureProjection without any optional parameters", + query: "{ Get { SomeAction { _additional { featureProjection { vector } } } } }", + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + AdditionalProperties: additional.Properties{ + ModuleParams: map[string]interface{}{ + "featureProjection": extractAdditionalParam("featureProjection", nil), + }, + }, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "_additional": models.AdditionalProperties{ + "featureProjection": &FeatureProjection{ + Vector: []float32{0.0, 1.1, 2.2}, + }, + }, + }, + }, + expectedResult: map[string]interface{}{ + "_additional": map[string]interface{}{ + "featureProjection": map[string]interface{}{ + "vector": []interface{}{float32(0.0), float32(1.1), float32(2.2)}, + }, + }, + }, + }, + { + name: "with _additional featureProjection with optional parameters", + query: `{ Get { SomeAction { _additional { featureProjection(algorithm: "tsne", dimensions: 3, learningRate: 15, iterations: 100, perplexity: 10) { vector } } } } }`, + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + AdditionalProperties: additional.Properties{ + ModuleParams: map[string]interface{}{ + "featureProjection": 
extractAdditionalParam("featureProjection", + []*ast.Argument{ + createArg("algorithm", "tsne"), + createArg("dimensions", "3"), + createArg("iterations", "100"), + createArg("learningRate", "15"), + createArg("perplexity", "10"), + }, + ), + }, + }, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "featureProjection": &FeatureProjection{ + Vector: []float32{0.0, 1.1, 2.2}, + }, + }, + }, + }, + expectedResult: map[string]interface{}{ + "_additional": map[string]interface{}{ + "featureProjection": map[string]interface{}{ + "vector": []interface{}{float32(0.0), float32(1.1), float32(2.2)}, + }, + }, + }, + }, + { + name: "with _additional semanticPath set", + query: `{ Get { SomeAction { _additional { semanticPath { path { concept distanceToQuery distanceToResult distanceToPrevious distanceToNext } } } } } }`, + expectedParams: dto.GetParams{ + ClassName: "SomeAction", + AdditionalProperties: additional.Properties{ + ModuleParams: map[string]interface{}{ + "semanticPath": extractAdditionalParam("semanticPath", nil), + }, + }, + }, + resolverReturn: []interface{}{ + map[string]interface{}{ + "_additional": models.AdditionalProperties{ + "semanticPath": &SemanticPath{ + Path: []*SemanticPathElement{ + { + Concept: "foo", + DistanceToNext: ptFloat32(0.5), + DistanceToPrevious: nil, + DistanceToQuery: 0.1, + DistanceToResult: 0.1, + }, + { + Concept: "bar", + DistanceToPrevious: ptFloat32(0.5), + DistanceToNext: nil, + DistanceToQuery: 0.1, + DistanceToResult: 0.1, + }, + }, + }, + }, + }, + }, + expectedResult: map[string]interface{}{ + "_additional": map[string]interface{}{ + "semanticPath": map[string]interface{}{ + "path": []interface{}{ + map[string]interface{}{ + "concept": "foo", + "distanceToNext": float32(0.5), + "distanceToPrevious": nil, + "distanceToQuery": float32(0.1), + "distanceToResult": float32(0.1), + }, + map[string]interface{}{ + "concept": "bar", + "distanceToPrevious": float32(0.5), + 
"distanceToNext": nil, + "distanceToQuery": float32(0.1), + "distanceToResult": float32(0.1), + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + resolver := newMockResolverWithVectorizer("mock-custom-near-text-module") + + resolver.On("GetClass", test.expectedParams). + Return(test.resolverReturn, nil).Once() + result := resolver.AssertResolve(t, test.query) + assert.Equal(t, test.expectedResult, result.Get("Get", "SomeAction").Result.([]interface{})[0]) + }) + } +} + +func TestNearCustomTextRanker(t *testing.T) { + t.Parallel() + + resolver := newMockResolverWithVectorizer("mock-custom-near-text-module") + + t.Run("for actions", func(t *testing.T) { + query := `{ Get { SomeAction(nearCustomText: { + concepts: ["c1", "c2", "c3"], + moveTo: { + concepts:["positive"], + force: 0.5 + } + moveAwayFrom: { + concepts:["epic"] + force: 0.25 + } + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearTextParam(map[string]interface{}{ + "concepts": []interface{}{"c1", "c2", "c3"}, + "moveTo": map[string]interface{}{ + "concepts": []interface{}{"positive"}, + "force": float64(0.5), + }, + "moveAwayFrom": map[string]interface{}{ + "concepts": []interface{}{"epic"}, + "force": float64(0.25), + }, + }), + }, + } + + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for actions with targetvec", func(t *testing.T) { + query := `{ Get { SomeAction(nearCustomText: { + concepts: ["c1", "c2", "c3"], + moveTo: { + concepts:["positive"], + force: 0.5 + } + moveAwayFrom: { + concepts:["epic"] + force: 0.25 + } + targetVectors: ["epic"] + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearTextParam(map[string]interface{}{ + "concepts": []interface{}{"c1", "c2", "c3"}, + "moveTo": map[string]interface{}{ + "concepts": []interface{}{"positive"}, + "force": float64(0.5), + }, + "moveAwayFrom": map[string]interface{}{ + "concepts": []interface{}{"epic"}, + "force": float64(0.25), + }, + "targetVectors": []interface{}{"epic"}, + }), + }, + } + + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for a class that does not have a text2vec module", func(t *testing.T) { + query := `{ Get { CustomVectorClass(nearCustomText: { + concepts: ["c1", "c2", "c3"], + moveTo: { + concepts:["positive"], + force: 0.5 + } + moveAwayFrom: { + concepts:["epic"] + force: 0.25 + } + }) { intField } } }` + + res := resolver.Resolve(query) + require.Len(t, res.Errors, 1) + assert.Contains(t, res.Errors[0].Message, "Unknown argument \"nearCustomText\" on field \"CustomVectorClass\"") + }) + + t.Run("for things with optional distance set", func(t *testing.T) { + query := `{ Get { SomeThing(nearCustomText: { + concepts: ["c1", "c2", "c3"], + distance: 0.6, + moveTo: { + concepts:["positive"], + force: 0.5 + } + moveAwayFrom: { + concepts:["epic"] + force: 0.25 + } + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: 
true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearTextParam(map[string]interface{}{ + "concepts": []interface{}{"c1", "c2", "c3"}, + "distance": float64(0.6), + "moveTo": map[string]interface{}{ + "concepts": []interface{}{"positive"}, + "force": float64(0.5), + }, + "moveAwayFrom": map[string]interface{}{ + "concepts": []interface{}{"epic"}, + "force": float64(0.25), + }, + }), + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional certainty set", func(t *testing.T) { + query := `{ Get { SomeThing(nearCustomText: { + concepts: ["c1", "c2", "c3"], + certainty: 0.4, + moveTo: { + concepts:["positive"], + force: 0.5 + } + moveAwayFrom: { + concepts:["epic"] + force: 0.25 + } + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearTextParam(map[string]interface{}{ + "concepts": []interface{}{"c1", "c2", "c3"}, + "certainty": float64(0.4), + "moveTo": map[string]interface{}{ + "concepts": []interface{}{"positive"}, + "force": float64(0.5), + }, + "moveAwayFrom": map[string]interface{}{ + "concepts": []interface{}{"epic"}, + "force": float64(0.25), + }, + }), + }, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional distance and objects set", func(t *testing.T) { + query := `{ Get { SomeThing(nearCustomText: { + concepts: ["c1", "c2", "c3"], + distance: 0.4, + moveTo: { + concepts:["positive"], + force: 0.5 + objects: [ + { id: "moveTo-uuid1" } + { beacon: "weaviate://localhost/moveTo-uuid1" }, + { beacon: "weaviate://localhost/moveTo-uuid2" } + ] + } + moveAwayFrom: { + concepts:["epic"] + force: 0.25 + objects: [ + { id: "moveAway-uuid1" } + { beacon: "weaviate://localhost/moveAway-uuid2" } + ] + } + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearTextParam(map[string]interface{}{ + "concepts": []interface{}{"c1", "c2", "c3"}, + "distance": float64(0.4), + "moveTo": map[string]interface{}{ + "concepts": []interface{}{"positive"}, + "force": float64(0.5), + "objects": []interface{}{ + map[string]interface{}{ + "id": "moveTo-uuid1", + }, + map[string]interface{}{ + "beacon": "weaviate://localhost/moveTo-uuid1", + }, + map[string]interface{}{ + "beacon": "weaviate://localhost/moveTo-uuid2", + }, + }, + }, + "moveAwayFrom": map[string]interface{}{ + "concepts": []interface{}{"epic"}, + "force": float64(0.25), + "objects": []interface{}{ + map[string]interface{}{ + "id": "moveAway-uuid1", + }, + map[string]interface{}{ + "beacon": "weaviate://localhost/moveAway-uuid2", + }, + }, + }, + }), + }, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional certainty and objects set", func(t *testing.T) { + query := `{ Get { SomeThing(nearCustomText: { + concepts: ["c1", "c2", "c3"], + certainty: 0.4, + moveTo: { + concepts:["positive"], + force: 0.5 + objects: [ + { id: "moveTo-uuid1" } + { beacon: "weaviate://localhost/moveTo-uuid1" }, + { beacon: "weaviate://localhost/moveTo-uuid2" } + ] + } + moveAwayFrom: { + concepts:["epic"] + force: 0.25 + objects: [ + { id: "moveAway-uuid1" } + { beacon: "weaviate://localhost/moveAway-uuid2" } + ] + } + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearTextParam(map[string]interface{}{ + "concepts": []interface{}{"c1", "c2", "c3"}, + "certainty": float64(0.4), + "moveTo": map[string]interface{}{ + "concepts": []interface{}{"positive"}, + "force": float64(0.5), + "objects": []interface{}{ + map[string]interface{}{ + "id": "moveTo-uuid1", + }, + map[string]interface{}{ + "beacon": "weaviate://localhost/moveTo-uuid1", + }, + map[string]interface{}{ + "beacon": "weaviate://localhost/moveTo-uuid2", + }, + }, + }, + "moveAwayFrom": map[string]interface{}{ + "concepts": []interface{}{"epic"}, + "force": float64(0.25), + "objects": []interface{}{ + map[string]interface{}{ + "id": "moveAway-uuid1", + }, + map[string]interface{}{ + "beacon": "weaviate://localhost/moveAway-uuid2", + }, + }, + }, + }), + }, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional distance and limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: 6 + nearCustomText: { + concepts: ["c1", "c2", "c3"], + distance: 0.4, + moveTo: { + concepts:["positive"], + force: 0.5 + } + moveAwayFrom: { + concepts:["epic"] + force: 0.25 + } + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: 6}, + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearTextParam(map[string]interface{}{ + "concepts": []interface{}{"c1", "c2", "c3"}, + "distance": float64(0.4), + "moveTo": map[string]interface{}{ + "concepts": []interface{}{"positive"}, + "force": float64(0.5), + }, + "moveAwayFrom": map[string]interface{}{ + "concepts": []interface{}{"epic"}, + "force": float64(0.25), + }, + }), + }, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional certainty and limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: 6 + nearCustomText: { + concepts: ["c1", "c2", "c3"], + certainty: 0.4, + moveTo: { + concepts:["positive"], + force: 0.5 + } + moveAwayFrom: { + concepts:["epic"] + force: 0.25 + } + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: 6}, + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearTextParam(map[string]interface{}{ + "concepts": []interface{}{"c1", "c2", "c3"}, + "certainty": float64(0.4), + "moveTo": map[string]interface{}{ + "concepts": []interface{}{"positive"}, + "force": float64(0.5), + }, + "moveAwayFrom": map[string]interface{}{ + "concepts": []interface{}{"epic"}, + "force": float64(0.25), + }, + }), + }, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional distance and negative limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: -1 + nearCustomText: { + concepts: ["c1", "c2", "c3"], + distance: 0.4, + moveTo: { + concepts:["positive"], + force: 0.5 + } + moveAwayFrom: { + concepts:["epic"] + force: 0.25 + } + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearTextParam(map[string]interface{}{ + "concepts": []interface{}{"c1", "c2", "c3"}, + "distance": float64(0.4), + "moveTo": map[string]interface{}{ + "concepts": []interface{}{"positive"}, + "force": float64(0.5), + }, + "moveAwayFrom": map[string]interface{}{ + "concepts": []interface{}{"epic"}, + "force": float64(0.25), + }, + }), + }, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional certainty and negative limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: -1 + nearCustomText: { + concepts: ["c1", "c2", "c3"], + certainty: 0.4, + moveTo: { + concepts:["positive"], + force: 0.5 + } + moveAwayFrom: { + concepts:["epic"] + force: 0.25 + } + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + ModuleParams: map[string]interface{}{ + "nearCustomText": extractNearTextParam(map[string]interface{}{ + "concepts": []interface{}{"c1", "c2", "c3"}, + "certainty": float64(0.4), + "moveTo": map[string]interface{}{ + "concepts": []interface{}{"positive"}, + "force": float64(0.5), + }, + "moveAwayFrom": map[string]interface{}{ + "concepts": []interface{}{"epic"}, + "force": float64(0.25), + }, + }), + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) +} + +func TestNearVectorRanker(t *testing.T) { + t.Parallel() + + resolver := newMockResolver() + + t.Run("for actions", func(t *testing.T) { + query := `{ Get { SomeAction(nearVector: { + vector: [0.123, 0.984] + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}}, + }, + } + + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional distance set", func(t *testing.T) { + query := `{ Get { SomeThing(nearVector: { + vector: [0.123, 0.984] + distance: 0.4 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}}, + Distance: 0.4, + WithDistance: true, + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional certainty set", func(t *testing.T) { + query := `{ Get { SomeThing(nearVector: { + vector: [0.123, 0.984] + certainty: 0.4 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}}, + Certainty: 0.4, + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional distance and limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: 4 + nearVector: { + vector: [0.123, 0.984] + distance: 0.1 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: 4}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}}, + Distance: 0.1, + WithDistance: true, + }, + } + + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional certainty and limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: 4 + nearVector: { + vector: [0.123, 0.984] + certainty: 0.1 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: 4}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}}, + Certainty: 0.1, + }, + } + + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional distance and negative limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: -1 + nearVector: { + vector: [0.123, 0.984] + distance: 0.1 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}}, + Distance: 0.1, + WithDistance: true, + }, + } + + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional certainty and negative limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: -1 + nearVector: { + vector: [0.123, 0.984] + certainty: 0.1 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}}, + Certainty: 0.1, + }, + } + + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with targetvector", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [1, 0], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1., 0}, []float32{0, 0, 1}, []float32{0, 0, 0, 1}}, + TargetVectors: []string{"title1", "title2", "title3"}, + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with targetvector list", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [[1, 0]], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1., 0}, []float32{0, 0, 1}, []float32{0, 0, 0, 1}}, + TargetVectors: []string{"title1", "title2", "title3"}, + }, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + t.Run("with target multivector", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [[[1, 0]]], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[][]float32{{1., 0}}, []float32{0, 0, 1}, []float32{0, 0, 0, 1}}, + TargetVectors: []string{"title1", "title2", "title3"}, + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with targetvector and multiple entries for a vector", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [[1, 0], [0,1]], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1., 0}, []float32{0, 1}, []float32{0, 0, 1}, []float32{0, 0, 0, 1}}, + TargetVectors: []string{"title1", "title1", "title2", "title3"}, + }, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with target multivector and multiple entries for multivector", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [[[1, 0]], [[0,1]]], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[][]float32{{1., 0}}, [][]float32{{0, 1}}, []float32{0, 0, 1}, []float32{0, 0, 0, 1}}, + TargetVectors: []string{"title1", "title1", "title2", "title3"}, + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with targetvector and multiple entries for a vector and targets", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [[1, 0], [0,1]], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + targets: {targetVectors: ["title1", "title1", "title2", "title3"], combinationMethod: sum} + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1., 0}, []float32{0, 1}, []float32{0, 0, 1}, []float32{0, 0, 0, 1}}, + TargetVectors: []string{"title1", "title1", "title2", "title3"}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Sum, Weights: []float32{1, 1, 1, 1}}, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with target multivector and multiple entries for multivector and targets", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [[[1, 0]], [[0,1]]], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + targets: {targetVectors: ["title1", "title1", "title2", "title3"], combinationMethod: sum} + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[][]float32{{1., 0}}, [][]float32{{0, 1}}, []float32{0, 0, 1}, []float32{0, 0, 0, 1}}, + TargetVectors: []string{"title1", "title1", "title2", "title3"}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Sum, Weights: []float32{1, 1, 1, 1}}, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with targetvector and weights", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [1, 0], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + targets: { + targetVectors: ["title1", "title2", "title3"], + combinationMethod: manualWeights, + weights: {title1: 1, title2: 3, title3: 4} + } + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1., 0}, []float32{0, 0, 1}, []float32{0, 0, 0, 1}}, + TargetVectors: []string{"title1", "title2", "title3"}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.ManualWeights, Weights: []float32{1, 3, 4}}, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with target multivector and weights", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [1, 0], title2: [0, 0, 1], title3: [[[0, 0, 0, 1]]]} + targets: { + targetVectors: ["title1", "title2", "title3"], + combinationMethod: manualWeights, + weights: {title1: 1, title2: 3, title3: 4} + } + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1., 0}, []float32{0, 0, 1}, [][]float32{{0, 0, 0, 1}}}, + TargetVectors: []string{"title1", "title2", "title3"}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.ManualWeights, Weights: []float32{1, 3, 4}}, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with targetvector and multiple entries for a vector and weights", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [[1, 0], [0,1]], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + targets: { + targetVectors: ["title1", "title1", "title2", "title3"], + combinationMethod: manualWeights, + weights: {title1: [1, 2], title2: 3, title3: 4} + } + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1., 0}, []float32{0, 1}, []float32{0, 0, 1}, []float32{0, 0, 0, 1}}, + TargetVectors: []string{"title1", "title1", "title2", "title3"}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.ManualWeights, Weights: []float32{1, 2, 3, 4}}, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with target multivector and multiple entries for multivector and weights", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [[[1, 0]], [[0,1]]], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + targets: { + targetVectors: ["title1", "title1", "title2", "title3"], + combinationMethod: manualWeights, + weights: {title1: [1, 2], title2: 3, title3: 4} + } + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[][]float32{{1., 0}}, [][]float32{{0, 1}}, []float32{0, 0, 1}, []float32{0, 0, 0, 1}}, + TargetVectors: []string{"title1", "title1", "title2", "title3"}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.ManualWeights, Weights: []float32{1, 2, 3, 4}}, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("with non fitting target vectors", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [[1, 0], [0,1]], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + targets: { + targetVectors: ["title1", "title2", "title3"], + } + }) { intField } } }` + resolver.AssertFailToResolve(t, query) + }) + + t.Run("with non fitting target multivectors", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [[[1, 0]], [[0,1]]], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + targets: { + targetVectors: ["title1", "title2", "title3"], + } + }) { intField } } }` + resolver.AssertFailToResolve(t, query) + }) + + t.Run("with non fitting target vectors 2", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [0,1], title2: [0, 0, 1], title3:[[1, 0], [0,1]]} + targets: { + targetVectors: ["title1", "title2", "title3"], + } + }) { intField } } }` + resolver.AssertFailToResolve(t, query) + }) + + t.Run("with non fitting target multivectors 2", func(t *testing.T) { + query := `{ Get { SomeThing( + nearVector: { + vectorPerTarget: {title1: [0,1], title2: [0, 0, 1], title3:[[[1, 0]], [[0,1]]]} + targets: { + targetVectors: ["title1", "title2", "title3"], + } + }) { intField } } }` + resolver.AssertFailToResolve(t, query) + }) +} + +func TestExtractPagination(t *testing.T) { + t.Parallel() + + resolver := newMockResolver() + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{ + Limit: 10, + }, + } + + resolver.On("GetClass", expectedParams). 
+ Return(test_helper.EmptyList(), nil).Once() + + query := "{ Get { SomeAction(limit: 10) { intField } } }" + resolver.AssertResolve(t, query) +} + +func TestExtractPaginationWithOffset(t *testing.T) { + t.Parallel() + + resolver := newMockResolver() + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{ + Offset: 5, + Limit: 10, + }, + } + + resolver.On("GetClass", expectedParams). + Return(test_helper.EmptyList(), nil).Once() + + query := "{ Get { SomeAction(offset: 5 limit: 10) { intField } } }" + resolver.AssertResolve(t, query) +} + +func TestExtractPaginationWithOnlyOffset(t *testing.T) { + t.Parallel() + + resolver := newMockResolver() + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{ + Offset: 5, + Limit: -1, + }, + } + + resolver.On("GetClass", expectedParams). + Return(test_helper.EmptyList(), nil).Once() + + query := "{ Get { SomeAction(offset: 5) { intField } } }" + resolver.AssertResolve(t, query) +} + +func TestExtractCursor(t *testing.T) { + t.Parallel() + + resolver := newMockResolver() + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Cursor: &filters.Cursor{ + After: "8ef8d5cc-c101-4fbd-a016-84e766b93ecf", + Limit: 2, + }, + Pagination: &filters.Pagination{ + Offset: 0, + Limit: 2, + }, + } + + resolver.On("GetClass", expectedParams). 
+ Return(test_helper.EmptyList(), nil).Once() + + query := `{ Get { SomeAction(after: "8ef8d5cc-c101-4fbd-a016-84e766b93ecf" limit: 2) { intField } } }` + resolver.AssertResolve(t, query) +} + +func TestExtractGroupParams(t *testing.T) { + t.Parallel() + + resolver := newMockResolver() + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Group: &dto.GroupParams{ + Strategy: "closest", + Force: 0.3, + }, + } + + resolver.On("GetClass", expectedParams). + Return(test_helper.EmptyList(), nil).Once() + + query := "{ Get { SomeAction(group: {type: closest, force: 0.3}) { intField } } }" + resolver.AssertResolve(t, query) +} + +func TestGetRelation(t *testing.T) { + t.Parallel() + + t.Run("without using custom fragments", func(t *testing.T) { + resolver := newMockResolver() + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{ + { + Name: "hasAction", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "SomeAction", + RefProperties: []search.SelectProperty{ + { + Name: "intField", + IsPrimitive: true, + }, + { + Name: "hasAction", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "SomeAction", + RefProperties: []search.SelectProperty{ + { + Name: "intField", + IsPrimitive: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + resolver.On("GetClass", expectedParams). + Return(test_helper.EmptyList(), nil).Once() + + query := "{ Get { SomeAction { hasAction { ... on SomeAction { intField, hasAction { ... 
on SomeAction { intField } } } } } } }" + resolver.AssertResolve(t, query) + }) + + t.Run("with a custom fragment one level deep", func(t *testing.T) { + resolver := newMockResolver() + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{ + { + Name: "hasAction", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "SomeAction", + RefProperties: []search.SelectProperty{ + { + Name: "intField", + IsPrimitive: true, + }, + }, + }, + }, + }, + }, + } + + resolver.On("GetClass", expectedParams). + Return(test_helper.EmptyList(), nil).Once() + + query := "fragment actionFragment on SomeAction { intField } { Get { SomeAction { hasAction { ...actionFragment } } } }" + resolver.AssertResolve(t, query) + }) + + t.Run("with a custom fragment multiple levels deep", func(t *testing.T) { + resolver := newMockResolver() + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{ + { + Name: "hasAction", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "SomeAction", + RefProperties: []search.SelectProperty{ + { + Name: "intField", + IsPrimitive: true, + }, + { + Name: "hasAction", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "SomeAction", + RefProperties: []search.SelectProperty{ + { + Name: "intField", + IsPrimitive: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + resolver.On("GetClass", expectedParams). 
+ Return(test_helper.EmptyList(), nil).Once() + + query := ` + fragment innerFragment on SomeAction { intField } + fragment actionFragment on SomeAction { intField hasAction { ...innerFragment } } + + { Get { SomeAction { hasAction { ...actionFragment } } } }` + resolver.AssertResolve(t, query) + }) +} + +func TestGetWithAlias(t *testing.T) { + t.Parallel() + resolver := newMockResolver() + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + } + + resolver.On("GetClass", expectedParams). + Return(test_helper.EmptyList(), nil).Once() + + resolver.AssertResolve(t, "{ Get { SomeActionAlias { intField } } }") +} + +func TestNearObject(t *testing.T) { + t.Parallel() + + resolver := newMockResolver() + + t.Run("for objects with beacon", func(t *testing.T) { + query := `{ Get { SomeAction( + nearObject: { + beacon: "weaviate://localhost/some-uuid" + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + Beacon: "weaviate://localhost/some-uuid", + }, + } + + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with beacon and optional distance set", func(t *testing.T) { + query := `{ Get { SomeThing( + nearObject: { + beacon: "weaviate://localhost/some-other-uuid" + distance: 0.7 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + NearObject: &searchparams.NearObject{ + Beacon: "weaviate://localhost/some-other-uuid", + Distance: 0.7, + WithDistance: true, + }, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with beacon and optional certainty set", func(t *testing.T) { + query := `{ Get { SomeThing( + nearObject: { + beacon: "weaviate://localhost/some-other-uuid" + certainty: 0.7 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + NearObject: &searchparams.NearObject{ + Beacon: "weaviate://localhost/some-other-uuid", + Certainty: 0.7, + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with id set", func(t *testing.T) { + query := `{ Get { SomeAction( + nearObject: { + id: "some-uuid" + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + ID: "some-uuid", + }, + } + + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with id and optional distance set", func(t *testing.T) { + query := `{ Get { SomeThing( + nearObject: { + id: "some-other-uuid" + distance: 0.7 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + ID: "some-other-uuid", + Distance: 0.7, + WithDistance: true, + }, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with id and optional certainty set", func(t *testing.T) { + query := `{ Get { SomeThing( + nearObject: { + id: "some-other-uuid" + certainty: 0.7 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + ID: "some-other-uuid", + Certainty: 0.7, + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with optional distance and limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: 5 + nearObject: { + id: "some-other-uuid" + distance: 0.7 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Pagination: &filters.Pagination{Limit: 5}, + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + ID: "some-other-uuid", + Distance: 0.7, + WithDistance: true, + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with optional certainty and limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: 5 + nearObject: { + id: "some-other-uuid" + certainty: 0.7 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Pagination: &filters.Pagination{Limit: 5}, + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + ID: "some-other-uuid", + Certainty: 0.7, + }, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with optional distance and negative limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: -1 + nearObject: { + id: "some-other-uuid" + distance: 0.7 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + ID: "some-other-uuid", + Distance: 0.7, + WithDistance: true, + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with optional certainty and negative limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: -1 + nearObject: { + id: "some-other-uuid" + certainty: 0.7 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + ID: "some-other-uuid", + Certainty: 0.7, + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) +} + +func TestNearTextNoNoModules(t *testing.T) { + t.Parallel() + + resolver := newMockResolverWithNoModules() + + t.Run("for nearText that is not available", func(t *testing.T) { + query := `{ Get { SomeAction(nearText: { + concepts: ["c1", "c2", "c3"], + moveTo: { + concepts:["positive"], + force: 0.5 + }, + moveAwayFrom: { + concepts:["epic"], + force: 0.25 + } + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + } + + resolver.On("GetClass", expectedParams). 
+ Return(nil, nil).Once() + + resolver.AssertFailToResolve(t, query) + }) +} + +func TestBM25WithSort(t *testing.T) { + t.Parallel() + resolver := newMockResolverWithNoModules() + query := `{Get{SomeAction(bm25:{query:"apple",properties:["name"]},sort:[{path:["name"],order:desc}]){intField}}}` + resolver.AssertFailToResolve(t, query, "bm25 search is not compatible with sort") +} + +func TestHybridWithSort(t *testing.T) { + t.Parallel() + resolver := newMockResolverWithNoModules() + query := `{Get{SomeAction(hybrid:{query:"apple"},sort:[{path:["name"],order:desc}]){intField}}}` + resolver.AssertFailToResolve(t, query, "hybrid search is not compatible with sort") +} + +func TestHybridWithTargets(t *testing.T) { + t.Parallel() + resolver := newMockResolverWithNoModules() + var emptySubsearches []searchparams.WeightedSearchResult + + t.Run("hybrid search", func(t *testing.T) { + query := `{Get{SomeAction(hybrid:{ + query:"apple"} + ){intField}}}` + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + HybridSearch: &searchparams.HybridSearch{ + Query: "apple", + Alpha: 0.75, + Type: "hybrid", + FusionAlgorithm: 1, + SubSearches: emptySubsearches, + }, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + resolver.AssertResolve(t, query) + }) + + t.Run("hybrid search with targets", func(t *testing.T) { + query := `{Get{SomeAction(hybrid:{ + query:"apple", + targetVectors: ["title1", "title2", "title3"],} + ){intField}}}` + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + HybridSearch: &searchparams.HybridSearch{ + Query: "apple", + Alpha: 0.75, + Type: "hybrid", + FusionAlgorithm: 1, + SubSearches: emptySubsearches, + TargetVectors: []string{"title1", "title2", "title3"}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + resolver.AssertResolve(t, query) + }) + + t.Run("hybrid search with targets and vector", func(t *testing.T) { + query := `{Get{SomeAction(hybrid:{ + query:"apple", + vector: [0.123, 0.984], + targetVectors: ["title1", "title2", "title3"],} + ){intField}}}` + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + HybridSearch: &searchparams.HybridSearch{ + Query: "apple", + Alpha: 0.75, + Type: "hybrid", + FusionAlgorithm: 1, + SubSearches: emptySubsearches, + TargetVectors: []string{"title1", "title2", "title3"}, + Vector: []float32{0.123, 0.984}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + resolver.AssertResolve(t, query) + }) + + t.Run("hybrid search with targets and vector", func(t *testing.T) { + query := `{Get{SomeAction(hybrid:{ + query:"apple", + vector: [0.123, 0.984], + targetVectors: ["title1", "title2", "title3"],} + ){intField}}}` + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + HybridSearch: &searchparams.HybridSearch{ + Query: "apple", + Alpha: 0.75, + Type: "hybrid", + FusionAlgorithm: 1, + SubSearches: emptySubsearches, + TargetVectors: []string{"title1", "title2", "title3"}, + Vector: []float32{0.123, 0.984}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + resolver.AssertResolve(t, query) + }) + + t.Run("hybrid search with near vector subsearch", func(t *testing.T) { + query := `{Get{SomeAction(hybrid:{ + query:"apple", + targetVectors: ["title1", "title2", "title3"], + searches: {nearVector:{ + vector: [0.123, 0.984], + }} + }){intField}}}` + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + HybridSearch: &searchparams.HybridSearch{ + Query: "apple", + Alpha: 0.75, + Type: "hybrid", + FusionAlgorithm: 1, + SubSearches: emptySubsearches, + TargetVectors: []string{"title1", "title2", "title3"}, + NearVectorParams: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}, []float32{0.123, 0.984}, []float32{0.123, 0.984}}, + }, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + resolver.AssertResolve(t, query) + }) + + t.Run("hybrid search with near vector subsearch and multiple vectors", func(t *testing.T) { + query := `{Get{SomeAction(hybrid:{ + query:"apple", + targetVectors: ["title1", "title2", "title3"], + searches: {nearVector:{ + vectorPerTarget: {title1: [1, 0], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + }} + }){intField}}}` + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + HybridSearch: &searchparams.HybridSearch{ + Query: "apple", + Alpha: 0.75, + Type: "hybrid", + FusionAlgorithm: 1, + SubSearches: emptySubsearches, + TargetVectors: []string{"title1", "title2", "title3"}, + NearVectorParams: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1.0, 0}, []float32{0, 0, 1}, []float32{0, 0, 0, 1}}, + }, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + resolver.AssertResolve(t, query) + }) + + t.Run("hybrid search with near vector subsearch and multiple vectors with multivector", func(t *testing.T) { + query := `{Get{SomeAction(hybrid:{ + query:"apple", + targetVectors: ["title1", "title2", "title3"], + searches: {nearVector:{ + vectorPerTarget: {title1: [[[1, 0]]], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + }} + }){intField}}}` + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + HybridSearch: &searchparams.HybridSearch{ + Query: "apple", + Alpha: 0.75, + Type: "hybrid", + FusionAlgorithm: 1, + SubSearches: emptySubsearches, + TargetVectors: []string{"title1", "title2", "title3"}, + NearVectorParams: &searchparams.NearVector{ + Vectors: []models.Vector{[][]float32{{1.0, 0}}, []float32{0, 0, 1}, []float32{0, 0, 0, 1}}, + }, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + resolver.AssertResolve(t, query) + }) + + t.Run("hybrid search with near vector subsearch and wrong input", func(t *testing.T) { + query := `{Get{SomeAction(hybrid:{ + query:"apple", + targetVectors: ["title1", "title2", "title3"], + searches: {nearVector:{ + vectorPerTarget: {title1: [1, "fish"], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + }} + }){intField}}}` + resolver.AssertFailToResolve(t, query) + }) + + t.Run("hybrid search with near vector subsearch and wrong input in multivector", func(t *testing.T) { + query := `{Get{SomeAction(hybrid:{ + query:"apple", + targetVectors: ["title1", "title2", "title3"], + searches: {nearVector:{ + vectorPerTarget: {title1: [[[1, "fish"]]], title2: [0, 0, 1], title3: [0, 0, 0, 1]} + }} + }){intField}}}` + resolver.AssertFailToResolve(t, query) + }) + + t.Run("hybrid search with near vector subsearch and multi vector", func(t *testing.T) { + query := `{Get{SomeAction(hybrid:{ + query:"apple", + targetVectors: ["title1", "title2", "title2", "title3", "title3"], + searches: {nearVector:{ + vectorPerTarget: {title1: [1, 0], title2: [[0, 0, 1], [1,0,0]], title3: [[0, 0, 0, 1], [1, 0, 0, 1]]} + }} + }){intField}}}` + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + HybridSearch: &searchparams.HybridSearch{ + Query: "apple", + Alpha: 0.75, + Type: "hybrid", + FusionAlgorithm: 1, + SubSearches: emptySubsearches, + TargetVectors: []string{"title1", "title2", "title2", "title3", "title3"}, + NearVectorParams: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1.0, 0}, []float32{0, 0, 1}, []float32{1, 0, 0}, []float32{0, 0, 0, 1}, []float32{1, 0, 0, 1}}, + }, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + resolver.AssertResolve(t, query) + }) + + t.Run("hybrid search with near vector subsearch and multiple vectors with multivector2", func(t *testing.T) { + query := `{Get{SomeAction(hybrid:{ + query:"apple", + targetVectors: ["title1", "title2", "title2", "title3", "title3"], + searches: {nearVector:{ + vectorPerTarget: {title1: [1, 0], title2: [[[0, 0, 1]], [[1,0,0]]], title3: [[0, 0, 0, 1], [1, 0, 0, 1]]} + }} + }){intField}}}` + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + HybridSearch: &searchparams.HybridSearch{ + Query: "apple", + Alpha: 0.75, + Type: "hybrid", + FusionAlgorithm: 1, + SubSearches: emptySubsearches, + TargetVectors: []string{"title1", "title2", "title2", "title3", "title3"}, + NearVectorParams: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1.0, 0}, [][]float32{{0, 0, 1}}, [][]float32{{1, 0, 0}}, []float32{0, 0, 0, 1}, []float32{1, 0, 0, 1}}, + }, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + resolver.AssertResolve(t, query) + }) + + t.Run("hybrid search with near vector subsearch and multiple vectors missing target vectors", func(t *testing.T) { + query := `{Get{SomeAction(hybrid:{ + query:"apple", + searches: {nearVector:{ + vectorPerTarget: {title1: [1, 0], title2: [[0, 0, 1], [1,0,0]], title3: [[0, 0, 0, 1], [1, 0, 0, 1]]} + }} + }){intField}}}` + resolver.AssertFailToResolve(t, query) + }) + + t.Run("hybrid search with near vector subsearch and multiple vectors and multivector missing target vectors", func(t *testing.T) { + query := `{Get{SomeAction(hybrid:{ + query:"apple", + searches: {nearVector:{ + vectorPerTarget: {title1: [[[1, 0]]], title2: [[[0, 0, 1]], [[1,0,0]]], title3: [[[0, 0, 0, 1]], [[1, 0, 0, 1]]]} + }} + }){intField}}}` + resolver.AssertFailToResolve(t, query) + }) + + t.Run("hybrid search with near vector subsearch and multi vector2", func(t *testing.T) { + query := `{Get{SomeAction(hybrid:{ + query:"apple", + targetVectors: ["title1", "title2", "title2", "title3", "title3"], + searches: {nearVector:{ + vectorPerTarget: {title1: [1, 0], title2: [[0, 0, 1], [1,0,0]], title3: [[0, 0, 0, 1], [1, 0, 0, 1]]} + }} + }){intField}}}` + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + HybridSearch: &searchparams.HybridSearch{ + Query: "apple", + Alpha: 0.75, + Type: "hybrid", + FusionAlgorithm: 1, + SubSearches: emptySubsearches, + TargetVectors: []string{"title1", "title2", "title2", "title3", "title3"}, + NearVectorParams: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1.0, 0}, []float32{0, 0, 1}, []float32{1, 0, 0}, []float32{0, 0, 0, 1}, []float32{1, 0, 0, 1}}, + }, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + resolver.AssertResolve(t, query) + }) +} + +func TestHybridWithVectorDistance(t *testing.T) { + t.Parallel() + resolver := newMockResolverWithNoModules() + query := `{Get{SomeAction(hybrid:{query:"apple", maxVectorDistance: 0.5}){intField}}}` + + var emptySubsearches []searchparams.WeightedSearchResult + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + HybridSearch: &searchparams.HybridSearch{ + Query: "apple", + Distance: 0.5, + WithDistance: true, + FusionAlgorithm: 1, + Alpha: 0.75, + Type: "hybrid", + SubSearches: emptySubsearches, + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) +} + +func TestNearObjectNoModules(t *testing.T) { + t.Parallel() + + resolver := newMockResolverWithNoModules() + + t.Run("for objects with beacon", func(t *testing.T) { + query := `{ Get { SomeAction( + nearObject: { + beacon: "weaviate://localhost/some-uuid" + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + Beacon: "weaviate://localhost/some-uuid", + }, + } + + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with ID and distance set", func(t *testing.T) { + query := `{ Get { SomeThing( + nearObject: { + id: "some-uuid" + distance: 0.7 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + ID: "some-uuid", + Distance: 0.7, + WithDistance: true, + }, + } + + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with ID and certainty set", func(t *testing.T) { + query := `{ Get { SomeThing( + nearObject: { + id: "some-uuid" + certainty: 0.7 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + ID: "some-uuid", + Certainty: 0.7, + }, + } + + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with distance and limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: 12 + nearObject: { + id: "some-uuid" + distance: 0.7 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Pagination: &filters.Pagination{Limit: 12}, + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + ID: "some-uuid", + Distance: 0.7, + WithDistance: true, + }, + } + + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with certainty and limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: 12 + nearObject: { + id: "some-uuid" + certainty: 0.7 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Pagination: &filters.Pagination{Limit: 12}, + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + ID: "some-uuid", + Certainty: 0.7, + }, + } + + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with distance and negative limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: -1 + nearObject: { + id: "some-uuid" + distance: 0.7 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + ID: "some-uuid", + Distance: 0.7, + WithDistance: true, + }, + } + + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for objects with certainty and negative limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: -1 + nearObject: { + id: "some-uuid" + certainty: 0.7 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearObject: &searchparams.NearObject{ + ID: "some-uuid", + Certainty: 0.7, + }, + } + + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) +} + +func TestNearVectorNoModules(t *testing.T) { + t.Parallel() + + resolver := newMockResolverWithNoModules() + + t.Run("for actions", func(t *testing.T) { + query := `{ Get { SomeAction(nearVector: { + vector: [0.123, 0.984] + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}}, + }, + } + + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional distance set", func(t *testing.T) { + query := `{ Get { SomeThing(nearVector: { + vector: [0.123, 0.984] + distance: 0.4 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}}, + Distance: 0.4, + WithDistance: true, + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional certainty set", func(t *testing.T) { + query := `{ Get { SomeThing(nearVector: { + vector: [0.123, 0.984] + certainty: 0.4 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}}, + Certainty: 0.4, + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional certainty and limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: 4 + nearVector: { + vector: [0.123, 0.984] + certainty: 0.4 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: 4}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}}, + Certainty: 0.4, + }, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional distance and negative limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: -1 + nearVector: { + vector: [0.123, 0.984] + distance: 0.4 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}}, + Distance: 0.4, + WithDistance: true, + }, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("for things with optional certainty and negative limit set", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: -1 + nearVector: { + vector: [0.123, 0.984] + certainty: 0.4 + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}}, + Certainty: 0.4, + }, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("vector and targets", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: -1 + nearVector: { + vector: [0.123, 0.984] + targetVectors: ["test1", "test2"] + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: -1}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}, []float32{0.123, 0.984}}, + TargetVectors: []string{"test1", "test2"}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("vectorPerTarget and targets", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget:{ test1: [0.123, 0.984], test2: [0.456, 0.789]} + targetVectors: ["test1", "test2"] + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: -1}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{0.123, 0.984}, []float32{0.456, 0.789}}, + TargetVectors: []string{"test1", "test2"}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + } + resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) + + t.Run("vectorPerTarget and targets multivector", func(t *testing.T) { + query := `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget:{ test1: [[[0.123, 0.984]]], test2: [0.456, 0.789]} + targetVectors: ["test1", "test2"] + }) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: -1}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[][]float32{{0.123, 0.984}}, []float32{0.456, 0.789}}, + TargetVectors: []string{"test1", "test2"}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + } + resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + resolver.AssertResolve(t, query) + }) +} + +func TestNearVectorNoModulesMultiVector(t *testing.T) { + t.Parallel() + + resolver := newMockResolverWithNoModules() + + tt := []struct { + name string + query string + expectedParams *dto.GetParams + }{ + // single 3x2 multi-vector => valid if no `targetVectors` is given (only 1 multi-vector) + { + name: "vectorPerTarget and targets multivector", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget:{ mymultivec2d: [ + [[0.1,0.1],[0.2,0.2],[0.3,0.3]] + ]} + }) { intField } } }`, + expectedParams: &dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: -1}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{ + [][]float32{ + {0.1, 0.1}, {0.2, 0.2}, {0.3, 0.3}, + }, + }, + TargetVectors: []string{"mymultivec2d"}, + }, + }, + }, + // 4 levels of nesting is not handled, only support 3 levels right now (eg list of 2d multi-vectors) + { + name: "4 levels too much vector nesting", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + 
vectorPerTarget:{ mymultivec2d: [ + [[[0.1,0.1],[0.2,0.2],[0.3,0.3]]] + ]} + }) { intField } } }`, + // 4 levels => parse error => expect nil + expectedParams: nil, + }, + // two multi-vectors with two targetVectors => valid + { + name: "vectorPerTarget + targetVectors for multi and normal", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget:{ test1: [[[0.123, 0.984]]], test2: [0.456, 0.789]} + targetVectors: ["test1", "test2"] + }) { intField } } }`, + expectedParams: &dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: -1}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{ + [][]float32{{0.123, 0.984}}, // multi-vector for "test1" (3 levels) + []float32{0.456, 0.789}, // normal vector for "test2" (2 levels) + }, + TargetVectors: []string{"test1", "test2"}, + }, + // The original example shows a minimum combination: + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + }, + }, + // single 3x2 multi-vector => valid if no `targetVectors` is given (only 1 multi-vector) + { + name: "single 3x2 multi-vector with no targetVectors", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget: { + mymultivec2d: [ + [[0.1,0.1],[0.2,0.2],[0.3,0.3]] + ] + } + } + ) { intField } } }`, + expectedParams: &dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: -1}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{ + [][]float32{ + {0.1, 0.1}, {0.2, 0.2}, {0.3, 0.3}, + }, + }, + TargetVectors: []string{"mymultivec2d"}, + }, + }, + }, + // two 3x2 multi-vectors => shape is valid, but missing `targetVectors`, + // so final parse must fail (multiple multi-vectors with no labels). 
+ { + name: "two 3x2 multi-vectors with no targetVectors error)", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget: { + mymultivec2d: [ + [[0.1,0.1],[0.2,0.2],[0.3,0.3]], + [[0.4,0.4],[0.5,0.5],[0.6,0.6]] + ] + } + } + ) { intField } } }`, + expectedParams: nil, // fails because multiple multi-vectors require targetVectors + }, + // one 3x2 multi-vector and one 1x2 multi-vector => shape is valid, + // but no targetVectors => must fail for same reason (multiple MVs). + { + name: "2 multi-vectors with no targetVectors error", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget: { + mymultivec2d: [ + [[0.1,0.1],[0.2,0.2],[0.3,0.3]], + [[0.4,0.4]] + ] + } + } + ) { intField } } }`, + expectedParams: nil, + }, + // one 3x2 multi-vector, one 1x2 multi-vector, one 4x2 multi-vector => shape valid, + // but still multiple MVs with no targetVectors => error. + { + name: "3 multi-vectors, no targetVectors error", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget: { + mymultivec2d: [ + [[0.1,0.1],[0.2,0.2],[0.3,0.3]], + [[0.4,0.4]], + [[0.5,0.5],[0.6,0.6],[0.7,0.7],[0.8,0.8]] + ] + } + } + ) { intField } } }`, + expectedParams: nil, + }, + // This fails because it is interpreted as a list of normal vectors, which should require + // targetVectors to be specified since there are more than one. + { + name: "2 levels (multiple normal vectors), no targetVectors", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget: { + mymultivec2d: [ + [0.1,0.1],[0.2,0.2],[0.3,0.3] + ] + } + } + ) { intField } } }`, + expectedParams: nil, + }, + // 3 levels but sub-vectors have inconsistent lengths, this is not a parse error, but + // the query layer should handle this to decide if it is an error. 
+ { + name: "3 levels but sub-vector length mismatch => error", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget: { + mymultivec2d: [ + [[0.1,0.1],[0.2,0.2,0.2,0.2],[0.3]] + ] + } + } + ) { intField } } }`, + expectedParams: &dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: -1}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{ + [][]float32{ + {0.1, 0.1}, {0.2, 0.2, 0.2, 0.2}, {0.3}, + }, + }, + TargetVectors: []string{"mymultivec2d"}, + }, + }, + }, + // one multi-vector with a target label + { + name: "single multi-vector + matching label", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget: { + mymultivec2d: [ + [[0.1,0.1],[0.2,0.2],[0.3,0.3]] + ] + } + targetVectors: ["mymultivec2d"] + } + ) { intField } } }`, + expectedParams: &dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: -1}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{ + [][]float32{ + {0.1, 0.1}, {0.2, 0.2}, {0.3, 0.3}, + }, + }, + TargetVectors: []string{"mymultivec2d"}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + }, + }, + // two multi-vectors for the same target label => we have 2 items, each of shape >=3 levels. + // The doc example: + // => valid if we parse each multi-vector separately with the same label. 
+ { + name: "2 multi-vectors for same label", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget: { + mymultivec2d: [ + [[0.1,0.1],[0.2,0.2],[0.3,0.3]], + [[0.4,0.4]] + ] + } + targetVectors: ["mymultivec2d", "mymultivec2d"] + } + ) { intField } } }`, + expectedParams: &dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: -1}, + NearVector: &searchparams.NearVector{ + // two multi-vectors, each a [][]float32 + Vectors: []models.Vector{ + [][]float32{ + {0.1, 0.1}, {0.2, 0.2}, {0.3, 0.3}, + }, + [][]float32{ + {0.4, 0.4}, + }, + }, + TargetVectors: []string{"mymultivec2d", "mymultivec2d"}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + }, + }, + // two multi-vectors over two labels, no target vectors => error + { + name: "2 multi-vectors, no multi-vector labels error", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget: { + mymultivec2d: [ + [[0.1,0.1],[0.2,0.2],[0.3,0.3]], + ], + mymultivec1d: [ + [[0.5],[0.6]] + ], + } + } + ) { intField } } }`, + expectedParams: nil, + }, + // multiple multi-vectors over multiple labels, total # must match. 
+ // => 3 multi-vectors total => 3 target labels => valid + { + name: "3 multi-vectors, 3 matching labels", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget: { + mymultivec2d: [ + [[0.1,0.1],[0.2,0.2],[0.3,0.3]], + [[0.4,0.4]] + ], + mymultivec1d: [ + [[0.5],[0.6]] + ], + } + targetVectors: ["mymultivec2d","mymultivec2d","mymultivec1d"] + } + ) { intField } } }`, + expectedParams: &dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Pagination: &filters.Pagination{Limit: -1}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{ + // 3) 2x1 + [][]float32{ + {0.5}, + {0.6}, + }, + // 1) 3x2 + [][]float32{ + {0.1, 0.1}, {0.2, 0.2}, {0.3, 0.3}, + }, + // 2) 1x2 + [][]float32{ + {0.4, 0.4}, + }, + }, + TargetVectors: []string{"mymultivec1d", "mymultivec2d", "mymultivec2d"}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + }, + }, + // If multiple multi-vectors appear but `targetVectors` doesn’t match the count => error + { + name: "mismatch between multi-vectors and labels error", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget: { + mymultivec2d: [ + [[0.1,0.1],[0.2,0.2],[0.3,0.3]], + [[0.4,0.4]] + ], + mymultivec1d: [ + [[0.1],[0.2]] + ], + targetVectors: ["mymultivec2d","mymultivec1d"] + } + } + ) { intField } } }`, + expectedParams: nil, // we've got 3 multi-vectors but only 2 labels => error + }, + // Within a single target vector, you cannot mix normal vectors (2-level) and multi-vectors (>=3-level). 
+ { + name: "mix normal + multi in one target error", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget: { + mymultivec2d: [ + [[0.1,0.1],[0.2,0.2],[0.3,0.3]], + [0.4,0.5] + ], + targetVectors: ["mymultivec2d","mymultivec2d"] + } + } + ) { intField } } }`, + expectedParams: nil, + }, + // However, you *can* do multi-vectors for one target vector and normal vectors for another, as + // long as each target is “internally consistent” and the `targetVectors` count lines up. + { + name: "multi-vector in one, normal vectors in another", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget: { + mymultivec2d: [ + [[0.1,0.1],[0.2,0.2],[0.3,0.3]], + [[0.5,0.5],[0.6,0.6],[0.7,0.7],[0.8,0.8]] + ], + mymultivec3d: [ + [0.1,0.2,0.3], + [0.4,0.5,0.6], + [0.7,0.8,0.9] + ], + } + targetVectors: ["mymultivec2d","mymultivec2d","mymultivec3d","mymultivec3d","mymultivec3d"] + } + ) { intField } } }`, + expectedParams: &dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{ + {Name: "intField", IsPrimitive: true}, + }, + Pagination: &filters.Pagination{Limit: -1}, + NearVector: &searchparams.NearVector{ + // two multi-vectors, each a [][]float32: + Vectors: []models.Vector{ + // 1) the first multi-vector (3x2) + [][]float32{ + {0.1, 0.1}, {0.2, 0.2}, {0.3, 0.3}, + }, + // 2) the second multi-vector (4x2) + [][]float32{ + {0.5, 0.5}, {0.6, 0.6}, {0.7, 0.7}, {0.8, 0.8}, + }, + // Next 3 are normal vectors of dimension 3 + []float32{0.1, 0.2, 0.3}, + []float32{0.4, 0.5, 0.6}, + []float32{0.7, 0.8, 0.9}, + }, + TargetVectors: []string{ + "mymultivec2d", "mymultivec2d", + "mymultivec3d", "mymultivec3d", "mymultivec3d", + }, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + }, + }, + // multi-vector in one target, single vector in another => valid + { + name: "multi-vector in one, single vector in other", + query: `{ Get { SomeThing( + limit: -1 + nearVector: { + vectorPerTarget: { + 
mymultivec2d: [ + [[0.1,0.1],[0.2,0.2],[0.3,0.3]] + ], + mymultivec3d: [ + 0.1, 0.2, 0.3 + ] + } + targetVectors: ["mymultivec2d","mymultivec3d"] + } + ) { intField } } }`, + expectedParams: &dto.GetParams{ + ClassName: "SomeThing", + Properties: []search.SelectProperty{ + {Name: "intField", IsPrimitive: true}, + }, + Pagination: &filters.Pagination{Limit: -1}, + NearVector: &searchparams.NearVector{ + // two multi-vectors, each a [][]float32: + Vectors: []models.Vector{ + // the first multi-vector (3x2) + [][]float32{ + {0.1, 0.1}, {0.2, 0.2}, {0.3, 0.3}, + }, + // 3 dimensional normal vector + []float32{0.1, 0.2, 0.3}, + }, + TargetVectors: []string{ + "mymultivec2d", + "mymultivec3d", + }, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + }, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + if tc.expectedParams != nil { + // If we expect a successful parse, mock the resolver calls + resolver.On("GetClass", *tc.expectedParams). + Return([]interface{}{}, nil).Once() + resolver.AssertResolve(t, tc.query) + } else { + // Otherwise, we expect a parse/validation error + resolver.AssertFailToResolve(t, tc.query) + } + }) + } +} + +func TestSort(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + resolver *mockResolver + }{ + { + name: "with modules", + resolver: newMockResolver(), + }, + { + name: "with no modules", + resolver: newMockResolverWithNoModules(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Run("simple sort", func(t *testing.T) { + query := `{ Get { SomeAction(sort:[{ + path: ["path"] order: asc + }]) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Sort: []filters.Sort{{Path: []string{"path"}, Order: "asc"}}, + } + + tt.resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + tt.resolver.AssertResolve(t, query) + }) + + t.Run("simple sort with two paths", func(t *testing.T) { + query := `{ Get { SomeAction(sort:[{ + path: ["path1", "path2"] order: desc + }]) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Sort: []filters.Sort{{Path: []string{"path1", "path2"}, Order: "desc"}}, + } + + tt.resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + tt.resolver.AssertResolve(t, query) + }) + + t.Run("simple sort with two sort filters", func(t *testing.T) { + query := `{ Get { SomeAction(sort:[{ + path: ["first1", "first2", "first3", "first4"] order: asc + } { + path: ["second1"] order: desc + }]) { intField } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + Properties: []search.SelectProperty{{Name: "intField", IsPrimitive: true}}, + Sort: []filters.Sort{ + {Path: []string{"first1", "first2", "first3", "first4"}, Order: "asc"}, + {Path: []string{"second1"}, Order: "desc"}, + }, + } + + tt.resolver.On("GetClass", expectedParams). 
+ Return([]interface{}{}, nil).Once() + + tt.resolver.AssertResolve(t, query) + }) + }) + } +} + +func TestGroupBy(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + resolver *mockResolver + }{ + { + name: "with modules", + resolver: newMockResolver(), + }, + { + name: "with no modules", + resolver: newMockResolverWithNoModules(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Run("simple groupBy", func(t *testing.T) { + query := `{ Get { + SomeAction( + groupBy:{path: ["path"] groups: 2 objectsPerGroup:3} + ) { + _additional{group{count groupedBy {value path} maxDistance minDistance hits {_additional{distance}}} + } + } } }` + + expectedParams := dto.GetParams{ + ClassName: "SomeAction", + GroupBy: &searchparams.GroupBy{Property: "path", Groups: 2, ObjectsPerGroup: 3, Properties: search.SelectProperties{}}, + AdditionalProperties: additional.Properties{Group: true}, + } + + tt.resolver.On("GetClass", expectedParams). + Return([]interface{}{}, nil).Once() + + tt.resolver.AssertResolve(t, query) + }) + }) + } +} + +func ptFloat32(in float32) *float32 { + return &in +} + +func timeMust(t strfmt.DateTime, err error) strfmt.DateTime { + if err != nil { + panic(err) + } + + return t +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/group_argument.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/group_argument.go new file mode 100644 index 0000000000000000000000000000000000000000..1d8c29f3e499a7404117f7fde5348ec7b6efb9d3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/group_argument.go @@ -0,0 +1,52 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" +) + +func groupArgument(className string) *graphql.ArgumentConfig { + prefix := fmt.Sprintf("GetObjects%s", className) + return &graphql.ArgumentConfig{ + // Description: descriptions.GetGroup, + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sGroupInpObj", prefix), + Fields: groupFields(prefix), + Description: descriptions.GetWhereInpObj, + }, + ), + } +} + +func groupFields(prefix string) graphql.InputObjectConfigFieldMap { + return graphql.InputObjectConfigFieldMap{ + "type": &graphql.InputObjectFieldConfig{ + // Description: descriptions.Concepts, + Type: graphql.NewEnum(graphql.EnumConfig{ + Name: fmt.Sprintf("%sGroupInpObjTypeEnum", prefix), + Values: graphql.EnumValueConfigMap{ + "closest": &graphql.EnumValueConfig{}, + "merge": &graphql.EnumValueConfig{}, + }, + }), + }, + "force": &graphql.InputObjectFieldConfig{ + Description: descriptions.Force, + Type: graphql.NewNonNull(graphql.Float), + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/group_by_argument.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/group_by_argument.go new file mode 100644 index 0000000000000000000000000000000000000000..4e4e7eac29c3e45b93a8b9a477628c554640dfd8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/group_by_argument.go @@ -0,0 +1,49 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" +) + +func groupByArgument(className string) *graphql.ArgumentConfig { + prefix := fmt.Sprintf("GetObjects%s", className) + return &graphql.ArgumentConfig{ + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sGroupByInpObj", prefix), + Fields: groupByFields(prefix), + Description: descriptions.GroupByFilter, + }, + ), + } +} + +func groupByFields(prefix string) graphql.InputObjectConfigFieldMap { + return graphql.InputObjectConfigFieldMap{ + "path": &graphql.InputObjectFieldConfig{ + Description: descriptions.GroupByPath, + Type: graphql.NewNonNull(graphql.NewList(graphql.String)), + }, + "groups": &graphql.InputObjectFieldConfig{ + Description: descriptions.GroupByGroups, + Type: graphql.NewNonNull(graphql.Int), + }, + "objectsPerGroup": &graphql.InputObjectFieldConfig{ + Description: descriptions.GroupByObjectsPerGroup, + Type: graphql.NewNonNull(graphql.Int), + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/helper_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/helper_test.go new file mode 100644 index 0000000000000000000000000000000000000000..018900bb3234b940d98b17d2ddfb2b50a4c3879d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/helper_test.go @@ -0,0 +1,709 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "context" + "fmt" + "sort" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/tailor-inc/graphql" + "github.com/tailor-inc/graphql/language/ast" + + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" + test_helper "github.com/weaviate/weaviate/adapters/handlers/graphql/test/helper" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/moduletools" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/config" +) + +type mockRequestsLog struct{} + +func (m *mockRequestsLog) Register(first string, second string) { +} + +type mockResolver struct { + test_helper.MockResolver +} + +type fakeInterpretation struct { + returnArgs []search.Result +} + +func (f *fakeInterpretation) AdditionalPropertyFn(ctx context.Context, + in []search.Result, params interface{}, limit *int, + argumentModuleParams map[string]interface{}, +) ([]search.Result, error) { + return f.returnArgs, nil +} + +func (f *fakeInterpretation) ExtractAdditionalFn(param []*ast.Argument, class *models.Class) interface{} { + return true +} + +func (f *fakeInterpretation) AdditonalPropertyDefaultValue() interface{} { + return true +} + +type fakeExtender struct { + returnArgs []search.Result +} + +func (f *fakeExtender) AdditionalPropertyFn(ctx context.Context, + in []search.Result, params interface{}, limit *int, + argumentModuleParams map[string]interface{}, +) ([]search.Result, error) { + return f.returnArgs, nil +} + +func (f *fakeExtender) ExtractAdditionalFn(param []*ast.Argument, class *models.Class) interface{} { + return true +} + +func (f *fakeExtender) AdditonalPropertyDefaultValue() interface{} { + return true +} + +type fakeProjectorParams struct { + Enabled bool + 
Algorithm string + Dimensions int + Perplexity int + Iterations int + LearningRate int + IncludeNeighbors bool +} + +type fakeProjector struct { + returnArgs []search.Result +} + +func (f *fakeProjector) AdditionalPropertyFn(ctx context.Context, + in []search.Result, params interface{}, limit *int, + argumentModuleParams map[string]interface{}, +) ([]search.Result, error) { + return f.returnArgs, nil +} + +func (f *fakeProjector) ExtractAdditionalFn(param []*ast.Argument, class *models.Class) interface{} { + if len(param) > 0 { + return &fakeProjectorParams{ + Enabled: true, + Algorithm: "tsne", + Dimensions: 3, + Iterations: 100, + LearningRate: 15, + Perplexity: 10, + } + } + return &fakeProjectorParams{ + Enabled: true, + } +} + +func (f *fakeProjector) AdditonalPropertyDefaultValue() interface{} { + return &fakeProjectorParams{} +} + +type pathBuilderParams struct{} + +type fakePathBuilder struct { + returnArgs []search.Result +} + +func (f *fakePathBuilder) AdditionalPropertyFn(ctx context.Context, + in []search.Result, params interface{}, limit *int, +) ([]search.Result, error) { + return f.returnArgs, nil +} + +func (f *fakePathBuilder) ExtractAdditionalFn(param []*ast.Argument, class *models.Class) interface{} { + return &pathBuilderParams{} +} + +func (f *fakePathBuilder) AdditonalPropertyDefaultValue() interface{} { + return &pathBuilderParams{} +} + +type nearCustomTextParams struct { + Values []string + MoveTo nearExploreMove + MoveAwayFrom nearExploreMove + Certainty float64 + Distance float64 + WithDistance bool + TargetVectors []string +} + +// implements the modulecapabilities.NearParam interface +func (n *nearCustomTextParams) GetCertainty() float64 { + return n.Certainty +} + +func (n nearCustomTextParams) GetDistance() float64 { + return n.Distance +} + +func (n nearCustomTextParams) SimilarityMetricProvided() bool { + return n.Certainty != 0 || n.WithDistance +} + +func (n nearCustomTextParams) GetTargetVectors() []string { + return 
n.TargetVectors +} + +func (n nearCustomTextParams) GetTargetCombination() *dto.TargetCombination { + return nil +} + +type nearExploreMove struct { + Values []string + Force float32 + Objects []nearObjectMove +} + +type nearObjectMove struct { + ID string + Beacon string +} + +type nearCustomTextModule struct { + fakePathBuilder *fakePathBuilder + fakeProjector *fakeProjector + fakeExtender *fakeExtender + fakeInterpretation *fakeInterpretation +} + +func newNearCustomTextModule() *nearCustomTextModule { + return &nearCustomTextModule{ + fakePathBuilder: &fakePathBuilder{}, + fakeProjector: &fakeProjector{}, + fakeExtender: &fakeExtender{}, + fakeInterpretation: &fakeInterpretation{}, + } +} + +func (m *nearCustomTextModule) Name() string { + return "mock-custom-near-text-module" +} + +func (m *nearCustomTextModule) Init(params moduletools.ModuleInitParams) error { + return nil +} + +func (m *nearCustomTextModule) getNearCustomTextArgument(classname string) *graphql.ArgumentConfig { + prefix := classname + return &graphql.ArgumentConfig{ + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sNearCustomTextInpObj", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "concepts": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(graphql.NewList(graphql.String)), + }, + "moveTo": &graphql.InputObjectFieldConfig{ + Description: descriptions.VectorMovement, + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMoveTo", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "concepts": &graphql.InputObjectFieldConfig{ + Description: descriptions.Keywords, + Type: graphql.NewList(graphql.String), + }, + "objects": &graphql.InputObjectFieldConfig{ + Description: "objects", + Type: graphql.NewList(graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMovementObjectsToInpObj", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "id": &graphql.InputObjectFieldConfig{ + Type: 
graphql.String, + Description: "id of an object", + }, + "beacon": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: descriptions.Beacon, + }, + }, + Description: "Movement Object", + }, + )), + }, + "force": &graphql.InputObjectFieldConfig{ + Description: descriptions.Force, + Type: graphql.NewNonNull(graphql.Float), + }, + }, + }), + }, + "moveAwayFrom": &graphql.InputObjectFieldConfig{ + Description: descriptions.VectorMovement, + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMoveAway", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "concepts": &graphql.InputObjectFieldConfig{ + Description: descriptions.Keywords, + Type: graphql.NewList(graphql.String), + }, + "objects": &graphql.InputObjectFieldConfig{ + Description: "objects", + Type: graphql.NewList(graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMovementObjectsAwayInpObj", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "id": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: "id of an object", + }, + "beacon": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: descriptions.Beacon, + }, + }, + Description: "Movement Object", + }, + )), + }, + "force": &graphql.InputObjectFieldConfig{ + Description: descriptions.Force, + Type: graphql.NewNonNull(graphql.Float), + }, + }, + }), + }, + "certainty": &graphql.InputObjectFieldConfig{ + Description: descriptions.Certainty, + Type: graphql.Float, + }, + "distance": &graphql.InputObjectFieldConfig{ + Description: descriptions.Distance, + Type: graphql.Float, + }, + "targetVectors": &graphql.InputObjectFieldConfig{ + Description: "Target vectors", + Type: graphql.NewList(graphql.String), + }, + }, + Description: descriptions.GetWhereInpObj, + }, + ), + } +} + +func (m *nearCustomTextModule) extractNearCustomTextArgument(source map[string]interface{}) (*nearCustomTextParams, *dto.TargetCombination, error) { + var args 
nearCustomTextParams + + concepts := source["concepts"].([]interface{}) + args.Values = make([]string, len(concepts)) + for i, value := range concepts { + args.Values[i] = value.(string) + } + + certainty, ok := source["certainty"] + if ok { + args.Certainty = certainty.(float64) + } + + distance, ok := source["distance"] + if ok { + args.Distance = distance.(float64) + args.WithDistance = true + } + + // moveTo is an optional arg, so it could be nil + moveTo, ok := source["moveTo"] + if ok { + moveToMap := moveTo.(map[string]interface{}) + args.MoveTo = m.parseMoveParam(moveToMap) + } + + moveAwayFrom, ok := source["moveAwayFrom"] + if ok { + moveAwayFromMap := moveAwayFrom.(map[string]interface{}) + args.MoveAwayFrom = m.parseMoveParam(moveAwayFromMap) + } + + return &args, nil, nil +} + +func (m *nearCustomTextModule) parseMoveParam(source map[string]interface{}) nearExploreMove { + res := nearExploreMove{} + res.Force = float32(source["force"].(float64)) + + concepts, ok := source["concepts"].([]interface{}) + if ok { + res.Values = make([]string, len(concepts)) + for i, value := range concepts { + res.Values[i] = value.(string) + } + } + + objects, ok := source["objects"].([]interface{}) + if ok { + res.Objects = make([]nearObjectMove, len(objects)) + for i, value := range objects { + v, ok := value.(map[string]interface{}) + if ok { + if v["id"] != nil { + res.Objects[i].ID = v["id"].(string) + } + if v["beacon"] != nil { + res.Objects[i].Beacon = v["beacon"].(string) + } + } + } + } + + return res +} + +func (m *nearCustomTextModule) Arguments() map[string]modulecapabilities.GraphQLArgument { + arguments := map[string]modulecapabilities.GraphQLArgument{} + // define nearCustomText argument + arguments["nearCustomText"] = modulecapabilities.GraphQLArgument{ + GetArgumentsFunction: func(classname string) *graphql.ArgumentConfig { + return m.getNearCustomTextArgument(classname) + }, + ExtractFunction: func(source map[string]interface{}) (interface{}, 
*dto.TargetCombination, error) { + return m.extractNearCustomTextArgument(source) + }, + ValidateFunction: func(param interface{}) error { + // all is valid + return nil + }, + } + return arguments +} + +// additional properties +func (m *nearCustomTextModule) AdditionalProperties() map[string]modulecapabilities.AdditionalProperty { + additionalProperties := map[string]modulecapabilities.AdditionalProperty{} + additionalProperties["featureProjection"] = m.getFeatureProjection() + additionalProperties["nearestNeighbors"] = m.getNearestNeighbors() + additionalProperties["semanticPath"] = m.getSemanticPath() + additionalProperties["interpretation"] = m.getInterpretation() + return additionalProperties +} + +func (m *nearCustomTextModule) getFeatureProjection() modulecapabilities.AdditionalProperty { + return modulecapabilities.AdditionalProperty{ + DefaultValue: m.fakeProjector.AdditonalPropertyDefaultValue(), + GraphQLNames: []string{"featureProjection"}, + GraphQLFieldFunction: func(classname string) *graphql.Field { + return &graphql.Field{ + Args: graphql.FieldConfigArgument{ + "algorithm": &graphql.ArgumentConfig{ + Type: graphql.String, + DefaultValue: nil, + }, + "dimensions": &graphql.ArgumentConfig{ + Type: graphql.Int, + DefaultValue: nil, + }, + "learningRate": &graphql.ArgumentConfig{ + Type: graphql.Int, + DefaultValue: nil, + }, + "iterations": &graphql.ArgumentConfig{ + Type: graphql.Int, + DefaultValue: nil, + }, + "perplexity": &graphql.ArgumentConfig{ + Type: graphql.Int, + DefaultValue: nil, + }, + }, + Type: graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalFeatureProjection", classname), + Fields: graphql.Fields{ + "vector": &graphql.Field{Type: graphql.NewList(graphql.Float)}, + }, + }), + } + }, + GraphQLExtractFunction: m.fakeProjector.ExtractAdditionalFn, + } +} + +func (m *nearCustomTextModule) getNearestNeighbors() modulecapabilities.AdditionalProperty { + return modulecapabilities.AdditionalProperty{ + DefaultValue: 
m.fakeExtender.AdditonalPropertyDefaultValue(), + GraphQLNames: []string{"nearestNeighbors"}, + GraphQLFieldFunction: func(classname string) *graphql.Field { + return &graphql.Field{ + Type: graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalNearestNeighbors", classname), + Fields: graphql.Fields{ + "neighbors": &graphql.Field{Type: graphql.NewList(graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalNearestNeighborsNeighbors", classname), + Fields: graphql.Fields{ + "concept": &graphql.Field{Type: graphql.String}, + "distance": &graphql.Field{Type: graphql.Float}, + }, + }))}, + }, + }), + } + }, + GraphQLExtractFunction: m.fakeExtender.ExtractAdditionalFn, + } +} + +func (m *nearCustomTextModule) getSemanticPath() modulecapabilities.AdditionalProperty { + return modulecapabilities.AdditionalProperty{ + DefaultValue: m.fakePathBuilder.AdditonalPropertyDefaultValue(), + GraphQLNames: []string{"semanticPath"}, + GraphQLFieldFunction: func(classname string) *graphql.Field { + return &graphql.Field{ + Type: graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalSemanticPath", classname), + Fields: graphql.Fields{ + "path": &graphql.Field{Type: graphql.NewList(graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalSemanticPathElement", classname), + Fields: graphql.Fields{ + "concept": &graphql.Field{Type: graphql.String}, + "distanceToQuery": &graphql.Field{Type: graphql.Float}, + "distanceToResult": &graphql.Field{Type: graphql.Float}, + "distanceToNext": &graphql.Field{Type: graphql.Float}, + "distanceToPrevious": &graphql.Field{Type: graphql.Float}, + }, + }))}, + }, + }), + } + }, + GraphQLExtractFunction: m.fakePathBuilder.ExtractAdditionalFn, + } +} + +func (m *nearCustomTextModule) getInterpretation() modulecapabilities.AdditionalProperty { + return modulecapabilities.AdditionalProperty{ + DefaultValue: m.fakeInterpretation.AdditonalPropertyDefaultValue(), + GraphQLNames: 
[]string{"interpretation"}, + GraphQLFieldFunction: func(classname string) *graphql.Field { + return &graphql.Field{ + Type: graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalInterpretation", classname), + Fields: graphql.Fields{ + "source": &graphql.Field{Type: graphql.NewList(graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalInterpretationSource", classname), + Fields: graphql.Fields{ + "concept": &graphql.Field{Type: graphql.String}, + "weight": &graphql.Field{Type: graphql.Float}, + "occurrence": &graphql.Field{Type: graphql.Int}, + }, + }))}, + }, + }), + } + }, + GraphQLExtractFunction: m.fakeInterpretation.ExtractAdditionalFn, + } +} + +type fakeModulesProvider struct { + nearCustomTextModule *nearCustomTextModule +} + +func newFakeModulesProvider() *fakeModulesProvider { + return &fakeModulesProvider{newNearCustomTextModule()} +} + +func (fmp *fakeModulesProvider) GetAll() []modulecapabilities.Module { + panic("implement me") +} + +func (fmp *fakeModulesProvider) VectorFromInput(ctx context.Context, className, input, targetVector string) ([]float32, error) { + panic("not implemented") +} + +func (fmp *fakeModulesProvider) GetArguments(class *models.Class) map[string]*graphql.ArgumentConfig { + args := map[string]*graphql.ArgumentConfig{} + if class.Vectorizer == fmp.nearCustomTextModule.Name() { + for name, argument := range fmp.nearCustomTextModule.Arguments() { + args[name] = argument.GetArgumentsFunction(class.Class) + } + } + return args +} + +func (fmp *fakeModulesProvider) ExtractSearchParams(arguments map[string]interface{}, className string) (map[string]interface{}, map[string]*dto.TargetCombination) { + exractedParams := map[string]interface{}{} + if param, ok := arguments["nearCustomText"]; ok { + exractedParams["nearCustomText"] = extractNearTextParam(param.(map[string]interface{})) + } + return exractedParams, nil +} + +func (fmp *fakeModulesProvider) GetAdditionalFields(class *models.Class) 
map[string]*graphql.Field { + additionalProperties := map[string]*graphql.Field{} + for name, additionalProperty := range fmp.nearCustomTextModule.AdditionalProperties() { + if additionalProperty.GraphQLFieldFunction != nil { + additionalProperties[name] = additionalProperty.GraphQLFieldFunction(class.Class) + } + } + return additionalProperties +} + +func (fmp *fakeModulesProvider) ExtractAdditionalField(className, name string, params []*ast.Argument) interface{} { + if additionalProperties := fmp.nearCustomTextModule.AdditionalProperties(); len(additionalProperties) > 0 { + if additionalProperty, ok := additionalProperties[name]; ok { + if additionalProperty.GraphQLExtractFunction != nil { + return additionalProperty.GraphQLExtractFunction(params, nil) + } + } + } + return nil +} + +func (fmp *fakeModulesProvider) GraphQLAdditionalFieldNames() []string { + additionalPropertiesNames := []string{} + for _, additionalProperty := range fmp.nearCustomTextModule.AdditionalProperties() { + if additionalProperty.GraphQLNames != nil { + additionalPropertiesNames = append(additionalPropertiesNames, additionalProperty.GraphQLNames...) 
+ } + } + return additionalPropertiesNames +} + +func extractNearTextParam(param map[string]interface{}) interface{} { + nearCustomTextModule := newNearCustomTextModule() + argument := nearCustomTextModule.Arguments()["nearCustomText"] + params, _, _ := argument.ExtractFunction(param) + return params +} + +func createArg(name string, value string) *ast.Argument { + n := ast.Name{ + Value: name, + } + val := ast.StringValue{ + Kind: "Kind", + Value: value, + } + arg := ast.Argument{ + Name: ast.NewName(&n), + Kind: "Kind", + Value: ast.NewStringValue(&val), + } + a := ast.NewArgument(&arg) + return a +} + +func extractAdditionalParam(name string, args []*ast.Argument) interface{} { + nearCustomTextModule := newNearCustomTextModule() + additionalProperties := nearCustomTextModule.AdditionalProperties() + switch name { + case "semanticPath", "featureProjection": + if ap, ok := additionalProperties[name]; ok { + return ap.GraphQLExtractFunction(args, nil) + } + return nil + default: + return nil + } +} + +func getFakeModulesProvider() ModulesProvider { + return newFakeModulesProvider() +} + +type fakeAuthorizer struct{} + +func (f *fakeAuthorizer) Authorize(ctx context.Context, principal *models.Principal, action string, resource ...string) error { + return nil +} + +func (f *fakeAuthorizer) AuthorizeSilent(ctx context.Context, principal *models.Principal, action string, resource ...string) error { + return nil +} + +func (f *fakeAuthorizer) FilterAuthorizedResources(ctx context.Context, principal *models.Principal, action string, resources ...string) ([]string, error) { + return resources, nil +} + +func getFakeAuthorizer() authorization.Authorizer { + return &fakeAuthorizer{} +} + +func newMockResolver() *mockResolver { + return newMockResolverWithVectorizer(config.VectorizerModuleText2VecContextionary) +} + +func newMockResolverWithVectorizer(vectorizer string) *mockResolver { + logger, _ := test.NewNullLogger() + simpleSchema := 
test_helper.CreateSimpleSchema(vectorizer) + field, err := Build(&simpleSchema, logger, getFakeModulesProvider(), getFakeAuthorizer()) + if err != nil { + panic(fmt.Sprintf("could not build graphql test schema: %s", err)) + } + mocker := &mockResolver{} + mockLog := &mockRequestsLog{} + mocker.RootFieldName = "Get" + mocker.RootField = field + mocker.RootObject = map[string]interface{}{"Resolver": Resolver(mocker), "RequestsLog": RequestsLog(mockLog)} + return mocker +} + +func newMockResolverWithNoModules() *mockResolver { + logger, _ := test.NewNullLogger() + field, err := Build(&test_helper.SimpleSchema, logger, nil, getFakeAuthorizer()) + if err != nil { + panic(fmt.Sprintf("could not build graphql test schema: %s", err)) + } + mocker := &mockResolver{} + mockLog := &mockRequestsLog{} + mocker.RootFieldName = "Get" + mocker.RootField = field + mocker.RootObject = map[string]interface{}{"Resolver": Resolver(mocker), "RequestsLog": RequestsLog(mockLog)} + return mocker +} + +func (m *mockResolver) GetClass(ctx context.Context, principal *models.Principal, + params dto.GetParams, +) ([]interface{}, error) { + // order is random due to map access, sort to make tests deterministic + if params.NearVector != nil && params.NearVector.TargetVectors != nil && params.NearVector.Vectors != nil { + tv := targetsAndVectors{targets: params.NearVector.TargetVectors, vectors: params.NearVector.Vectors} + sort.Sort(tv) + params.NearVector.TargetVectors = tv.targets + params.NearVector.Vectors = tv.vectors + } + + args := m.Called(params) + return args.Get(0).([]interface{}), args.Error(1) +} + +type targetsAndVectors struct { + targets []string + vectors []models.Vector +} + +func (t targetsAndVectors) Len() int { + return len(t.targets) +} + +func (t targetsAndVectors) Swap(i, j int) { + t.targets[i], t.targets[j] = t.targets[j], t.targets[i] + t.vectors[i], t.vectors[j] = t.vectors[j], t.vectors[i] +} + +func (t targetsAndVectors) Less(i, j int) bool { + return t.targets[i] < 
t.targets[j] +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/hybrid_search.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/hybrid_search.go new file mode 100644 index 0000000000000000000000000000000000000000..95cfdb6bf4851c554c645cf4bf8176fd95a423f0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/hybrid_search.go @@ -0,0 +1,165 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "fmt" + "os" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + "github.com/weaviate/weaviate/entities/models" +) + +func hybridArgument(classObject *graphql.Object, + class *models.Class, modulesProvider ModulesProvider, fusionEnum *graphql.Enum, +) *graphql.ArgumentConfig { + prefix := fmt.Sprintf("GetObjects%s", class.Class) + return &graphql.ArgumentConfig{ + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sHybridInpObj", prefix), + Fields: hybridOperands(classObject, class, modulesProvider, fusionEnum), + Description: "Hybrid search", + }, + ), + } +} + +func hybridOperands(classObject *graphql.Object, + class *models.Class, modulesProvider ModulesProvider, fusionEnum *graphql.Enum, +) graphql.InputObjectConfigFieldMap { + ss := graphql.NewInputObject(graphql.InputObjectConfig{ + Name: class.Class + "SubSearch", + Fields: hybridSubSearch(classObject, class, modulesProvider), + }) + + prefixName := class.Class + "SubSearch" + + fieldMap := graphql.InputObjectConfigFieldMap{ + "query": &graphql.InputObjectFieldConfig{ + Description: "Query 
string", + Type: graphql.String, + }, + "alpha": &graphql.InputObjectFieldConfig{ + Description: "Search weight", + Type: graphql.Float, + }, + "maxVectorDistance": &graphql.InputObjectFieldConfig{ + Description: "Removes all results that have a vector distance larger than the given value", + Type: graphql.Float, + }, + "vector": &graphql.InputObjectFieldConfig{ + Description: "Vector search", + Type: graphql.NewList(graphql.Float), + }, + "properties": &graphql.InputObjectFieldConfig{ + Description: "Which properties should be included in the sparse search", + Type: graphql.NewList(graphql.String), + }, + "fusionType": &graphql.InputObjectFieldConfig{ + Description: "Algorithm used for fusing results from vector and keyword search", + Type: fusionEnum, + }, + "targetVectors": &graphql.InputObjectFieldConfig{ + Description: "Target vectors", + Type: graphql.NewList(graphql.String), + }, + "bm25SearchOperator": common_filters.GenerateBM25SearchOperatorFields(prefixName), + + "searches": &graphql.InputObjectFieldConfig{ + Description: "Subsearch list", + Type: graphql.NewList(graphql.NewInputObject( + graphql.InputObjectConfig{ + Description: "Subsearch list", + Name: fmt.Sprintf("%sSearchesInpObj", prefixName), + Fields: (func() graphql.InputObjectConfigFieldMap { + subSearchFields := make(graphql.InputObjectConfigFieldMap) + fieldMap := graphql.InputObjectConfigFieldMap{ + "nearText": &graphql.InputObjectFieldConfig{ + Description: "nearText element", + + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sNearTextInpObj", prefixName), + Fields: nearTextFields(prefixName), + Description: "Near text search", + }, + ), + }, + "nearVector": &graphql.InputObjectFieldConfig{ + Description: "nearVector element", + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sNearVectorInpObj", prefixName), + Description: "Near vector search", + Fields: common_filters.NearVectorFields(prefixName, true), + }, + ), + }, + } 
+ for key, fieldConfig := range fieldMap { + subSearchFields[key] = fieldConfig + } + return subSearchFields + })(), + }, + )), + }, + } + fieldMap = common_filters.AddTargetArgument(fieldMap, prefixName+"hybrid", true) + + if os.Getenv("ENABLE_EXPERIMENTAL_HYBRID_OPERANDS") != "" { + fieldMap["operands"] = &graphql.InputObjectFieldConfig{ + Description: "Subsearch list", + Type: graphql.NewList(ss), + } + } + + return fieldMap +} + +func hybridSubSearch(classObject *graphql.Object, + class *models.Class, modulesProvider ModulesProvider, +) graphql.InputObjectConfigFieldMap { + prefixName := class.Class + "SubSearch" + + return graphql.InputObjectConfigFieldMap{ + "weight": &graphql.InputObjectFieldConfig{ + Description: "weight, 0 to 1", + Type: graphql.Float, + }, + "sparseSearch": &graphql.InputObjectFieldConfig{ + Description: "Sparse Search", + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sHybridGetBM25InpObj", prefixName), + Fields: bm25Fields(prefixName), + Description: "BM25f search", + }, + ), + }, + + "nearText": &graphql.InputObjectFieldConfig{ + Description: "nearText element", + + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sNearTextInpObj", prefixName), + Fields: nearTextFields(prefixName), + Description: descriptions.GetWhereInpObj, + }, + ), + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/models_for_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/models_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..45639e5c507ac942ecb2c57aa9a6100ff1fc2293 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/models_for_test.go @@ -0,0 +1,48 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright 
© 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package get + +type FeatureProjection struct { + Vector []float32 `json:"vector"` +} + +type NearestNeighbors struct { + Neighbors []*NearestNeighbor `json:"neighbors"` +} + +type NearestNeighbor struct { + Concept string `json:"concept,omitempty"` + Distance float32 `json:"distance,omitempty"` + Vector []float32 `json:"vector"` +} + +type SemanticPath struct { + Path []*SemanticPathElement `json:"path"` +} + +type SemanticPathElement struct { + Concept string `json:"concept,omitempty"` + DistanceToNext *float32 `json:"distanceToNext,omitempty"` + DistanceToPrevious *float32 `json:"distanceToPrevious,omitempty"` + DistanceToQuery float32 `json:"distanceToQuery,omitempty"` + DistanceToResult float32 `json:"distanceToResult,omitempty"` +} + +type Interpretation struct { + Source []*InterpretationSource `json:"source"` +} + +type InterpretationSource struct { + Concept string `json:"concept,omitempty"` + Occurrence uint64 `json:"occurrence,omitempty"` + Weight float64 `json:"weight,omitempty"` +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/multi_tenancy.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/multi_tenancy.go new file mode 100644 index 0000000000000000000000000000000000000000..ecb9d476a8c9b8ee9e36174cd2282458ff66599e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/multi_tenancy.go @@ -0,0 +1,24 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" +) + +func tenantArgument() *graphql.ArgumentConfig { + return &graphql.ArgumentConfig{ + Description: descriptions.Tenant, + Type: graphql.String, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/replication.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/replication.go new file mode 100644 index 0000000000000000000000000000000000000000..118b1dc5486dae2f8fb173ea1611176360ea8bca --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/replication.go @@ -0,0 +1,39 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/entities/models" +) + +func replicationEnabled(class *models.Class) bool { + return class.ReplicationConfig != nil && class.ReplicationConfig.Factor > 1 +} + +func consistencyLevelArgument(class *models.Class) *graphql.ArgumentConfig { + return &graphql.ArgumentConfig{ + Description: descriptions.ConsistencyLevel, + Type: graphql.NewEnum(graphql.EnumConfig{ + Name: fmt.Sprintf("%sConsistencyLevelEnum", class.Class), + Values: graphql.EnumValueConfigMap{ + string(types.ConsistencyLevelOne): &graphql.EnumValueConfig{}, + string(types.ConsistencyLevelQuorum): &graphql.EnumValueConfig{}, + string(types.ConsistencyLevelAll): &graphql.EnumValueConfig{}, + }, + }), + } +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/resolver.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/resolver.go new file mode 100644 index 0000000000000000000000000000000000000000..fe829a59980ae2f622bc202e5ae4451deab1072b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/resolver.go @@ -0,0 +1,30 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "context" + + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" +) + +// Resolver is a local abstraction of the required UC resolvers +type Resolver interface { + GetClass(ctx context.Context, principal *models.Principal, info dto.GetParams) ([]interface{}, error) +} + +// RequestsLog is a local abstraction on the RequestsLog that needs to be +// provided to the graphQL API in order to log Local.Get queries. +type RequestsLog interface { + Register(requestType string, identifier string) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/sort_argument.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/sort_argument.go new file mode 100644 index 0000000000000000000000000000000000000000..3990f2bd236d3a7d9e3201eafda0b5acda05cc4b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/sort_argument.go @@ -0,0 +1,53 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" +) + +func sortArgument(className string) *graphql.ArgumentConfig { + prefix := fmt.Sprintf("GetObjects%s", className) + return &graphql.ArgumentConfig{ + Type: graphql.NewList( + graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sSortInpObj", prefix), + Fields: sortFields(prefix), + Description: descriptions.GetWhereInpObj, + }, + ), + ), + } +} + +func sortFields(prefix string) graphql.InputObjectConfigFieldMap { + return graphql.InputObjectConfigFieldMap{ + "path": &graphql.InputObjectFieldConfig{ + Description: descriptions.SortPath, + Type: graphql.NewList(graphql.String), + }, + "order": &graphql.InputObjectFieldConfig{ + Description: descriptions.SortOrder, + Type: graphql.NewEnum(graphql.EnumConfig{ + Name: fmt.Sprintf("%sSortInpObjTypeEnum", prefix), + Values: graphql.EnumValueConfigMap{ + "asc": &graphql.EnumValueConfig{}, + "desc": &graphql.EnumValueConfig{}, + }, + }), + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/sparse_search.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/sparse_search.go new file mode 100644 index 0000000000000000000000000000000000000000..68d7dcf46ce0f33444d23e41786e40e047eeba28 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/get/sparse_search.go @@ -0,0 +1,45 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package get + +import ( + "fmt" + + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" +) + +func bm25Argument(className string) *graphql.ArgumentConfig { + prefix := fmt.Sprintf("GetObjects%s", className) + return &graphql.ArgumentConfig{ + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sHybridGetBm25InpObj", prefix), + Fields: bm25Fields(prefix), + }, + ), + } +} + +func bm25Fields(prefix string) graphql.InputObjectConfigFieldMap { + return graphql.InputObjectConfigFieldMap{ + "query": &graphql.InputObjectFieldConfig{ + Description: "The query to search for", + Type: graphql.String, + }, + "properties": &graphql.InputObjectFieldConfig{ + Description: "The properties to search in", + Type: graphql.NewList(graphql.String), + }, + "searchOperator": common_filters.GenerateBM25SearchOperatorFields(prefix), + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/local.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/local.go new file mode 100644 index 0000000000000000000000000000000000000000..a1d9b54f400e8427fdb1ba7ae271ef8a38b69aff --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/local.go @@ -0,0 +1,58 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package local + +import ( + "github.com/sirupsen/logrus" + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/aggregate" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/explore" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/get" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/modules" +) + +// Build the local queries from the database schema. +func Build(dbSchema *schema.SchemaWithAliases, logger logrus.FieldLogger, + config config.Config, modulesProvider *modules.Provider, authorizer authorization.Authorizer, +) (graphql.Fields, error) { + getField, err := get.Build(dbSchema, logger, modulesProvider, authorizer) + if err != nil { + return nil, err + } + + aggregateField, err := aggregate.Build(dbSchema, config, modulesProvider, authorizer) + if err != nil { + return nil, err + } + + if modulesProvider.HasMultipleVectorizers() { + localFields := graphql.Fields{ + "Get": getField, + "Aggregate": aggregateField, + } + + return localFields, nil + } + + exploreField := explore.Build(dbSchema.Objects, modulesProvider, authorizer) + + localFields := graphql.Fields{ + "Get": getField, + "Aggregate": aggregateField, + "Explore": exploreField, + } + + return localFields, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/local_component_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/local_component_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e6e99ec52ce85eb8b5d53b08a67b90d1f7b5cdb2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/local_component_test.go @@ -0,0 +1,305 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// 
\ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package local + +import ( + "fmt" + "runtime/debug" + "testing" + + logrus "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/config" + usecaseModules "github.com/weaviate/weaviate/usecases/modules" +) + +// These tests are component tests for the local package including all its +// subpackages, such as get, getmeta, etc.. However, they only assert that the +// graphql tree can be built under certain circumstances. This helps us to +// catch errors on edge cases like empty schemas, classes with empty +// properties, empty peer lists, peers with empty schemas, etc. However, we +// don't get any guarantee of whether the individual queries resolve +// correctly. For those cases we have unit tests in die individual subpackages +// (i.e. get, getmeta, aggregate, etc.). Additionally we have (a few) e2e +// tests. + +func TestBuild_GraphQLNetwork(t *testing.T) { + tests := testCases{ + // This tests asserts that an action-only schema doesn't lead to errors. + testCase{ + name: "with only objects locally", + localSchema: schema.SchemaWithAliases{ + Schema: schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "BestLocalAction", + Properties: []*models.Property{ + { + DataType: schema.DataTypeText.PropString(), + Name: "myStringProp", + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + }, + }, + }, + }, + }, + + // This tests asserts that a things-only schema doesn't lead to errors. 
+ testCase{ + name: "with only objects locally", + localSchema: schema.SchemaWithAliases{ + Schema: schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "BestLocalThing", + Properties: []*models.Property{ + { + DataType: schema.DataTypeText.PropString(), + Name: "myStringProp", + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + }, + }, + }, + }, + }, + + // // This tests asserts that a class without any properties doesn't lead to + // // errors. + testCase{ + name: "with things without properties locally", + localSchema: schema.SchemaWithAliases{ + Schema: schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "BestLocalThing", + Properties: []*models.Property{}, + }, + }, + }, + }, + }, + }, + + testCase{ + name: "without any peers", + localSchema: validSchema(), + }, + } + + tests.AssertNoError(t) +} + +func TestBuild_RefProps(t *testing.T) { + t.Run("expected error logs", func(t *testing.T) { + tests := testCases{ + { + name: "build class with nonexistent ref prop", + localSchema: schema.SchemaWithAliases{ + Schema: schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "ThisClassExists", + Properties: []*models.Property{ + { + DataType: []string{"ThisClassDoesNotExist"}, + Name: "ofNonexistentClass", + }, + }, + }, + }, + }, + }, + }, + }, + } + + expectedLogMsg := "ignoring ref prop \"ofNonexistentClass\" on class \"ThisClassExists\", " + + "because it contains reference to nonexistent class [\"ThisClassDoesNotExist\"]" + + tests.AssertErrorLogs(t, expectedLogMsg) + }) + + t.Run("expected success", func(t *testing.T) { + tests := testCases{ + { + name: "build class with existing non-circular ref prop", + localSchema: schema.SchemaWithAliases{ + Schema: schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "ThisClassExists", + Properties: []*models.Property{ + { + DataType: []string{"ThisClassAlsoExists"}, + Name: 
"ofExistingClass", + }, + }, + }, + { + Class: "ThisClassAlsoExists", + Properties: []*models.Property{ + { + DataType: schema.DataTypeText.PropString(), + Name: "stringProp", + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "build class with existing circular ref prop", + localSchema: schema.SchemaWithAliases{ + Schema: schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "ThisClassExists", + Properties: []*models.Property{ + { + DataType: []string{"ThisClassAlsoExists"}, + Name: "ofExistingClass", + }, + }, + }, + { + Class: "ThisClassAlsoExists", + Properties: []*models.Property{ + { + DataType: []string{"ThisClassExists"}, + Name: "ofExistingClass", + }, + }, + }, + }, + }, + }, + }, + }, + } + + tests.AssertNoError(t) + }) +} + +type testCase struct { + name string + localSchema schema.SchemaWithAliases +} + +type testCases []testCase + +func (tests testCases) AssertNoError(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + logger, _ := logrus.NewNullLogger() + modules := usecaseModules.NewProvider(logger, config.Config{}) + localSchema, err := Build(&test.localSchema, nil, config.Config{}, modules, nil) + require.Nil(t, err, test.name) + + schemaObject := graphql.ObjectConfig{ + Name: "WeaviateObj", + Description: "Location of the root query", + Fields: localSchema, + } + + func() { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("%v at %s", r, debug.Stack()) + } + }() + + _, err = graphql.NewSchema(graphql.SchemaConfig{ + Query: graphql.NewObject(schemaObject), + }) + }() + + assert.Nil(t, err, test.name) + }) + } +} + +// AssertErrorLogs still expects the test to pass without errors, +// but does expect the Build logger to contain errors messages +// from the GQL schema rebuilding thunk +func (tests testCases) AssertErrorLogs(t *testing.T, expectedMsg string) { + for _, test := range tests { + t.Run(test.name, 
func(t *testing.T) { + logger, logsHook := logrus.NewNullLogger() + modules := usecaseModules.NewProvider(logger, config.Config{}) + localSchema, err := Build(&test.localSchema, logger, config.Config{}, modules, nil) + require.Nil(t, err, test.name) + + schemaObject := graphql.ObjectConfig{ + Name: "WeaviateObj", + Description: "Location of the root query", + Fields: localSchema, + } + + func() { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("%v at %s", r, debug.Stack()) + } + }() + + _, err = graphql.NewSchema(graphql.SchemaConfig{ + Query: graphql.NewObject(schemaObject), + }) + }() + + last := logsHook.LastEntry() + assert.Contains(t, last.Message, expectedMsg) + assert.Nil(t, err) + }) + } +} + +func validSchema() schema.SchemaWithAliases { + return schema.SchemaWithAliases{ + Schema: schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "BestLocalThing", + Properties: []*models.Property{ + { + DataType: schema.DataTypeText.PropString(), + Name: "myStringProp", + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + }, + }, + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/resolver.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/resolver.go new file mode 100644 index 0000000000000000000000000000000000000000..77d85db1077f0b389fe0280289db15583cf0d75b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/local/resolver.go @@ -0,0 +1,23 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package local + +import ( + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/aggregate" + get "github.com/weaviate/weaviate/adapters/handlers/graphql/local/get" +) + +// Resolver for local GraphQL queries +type Resolver interface { + get.Resolver + aggregate.Resolver +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/schema.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/schema.go new file mode 100644 index 0000000000000000000000000000000000000000..aa8eb7491e4a90a0d3356ff26be28c6589eedebc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/schema.go @@ -0,0 +1,122 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Package graphql provides the graphql endpoint for Weaviate +package graphql + +import ( + "context" + "fmt" + "runtime/debug" + + "github.com/sirupsen/logrus" + "github.com/tailor-inc/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/get" + "github.com/weaviate/weaviate/entities/schema" + entsentry "github.com/weaviate/weaviate/entities/sentry" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/modules" +) + +type Traverser interface { + local.Resolver +} + +type RequestsLogger interface { + get.RequestsLog +} + +// The communication interface between the REST API and the GraphQL API. +type GraphQL interface { + // Resolve the GraphQL query in 'query'. 
+ Resolve(context context.Context, query string, operationName string, variables map[string]interface{}) *graphql.Result +} + +type graphQL struct { + schema graphql.Schema + traverser Traverser + config config.Config +} + +// Construct a GraphQL API from the database schema, and resolver interface. +func Build(schema *schema.SchemaWithAliases, traverser Traverser, + logger logrus.FieldLogger, config config.Config, modulesProvider *modules.Provider, authorizer authorization.Authorizer, +) (GraphQL, error) { + logger.WithField("action", "graphql_rebuild"). + WithField("schema", schema). + Debug("rebuilding the graphql schema") + + graphqlSchema, err := buildGraphqlSchema(schema, logger, config, modulesProvider, authorizer) + if err != nil { + return nil, err + } + + return &graphQL{ + schema: graphqlSchema, + traverser: traverser, + config: config, + }, nil +} + +// Resolve at query time +func (g *graphQL) Resolve(context context.Context, query string, operationName string, variables map[string]interface{}) *graphql.Result { + return graphql.Do(graphql.Params{ + Schema: g.schema, + RootObject: map[string]interface{}{ + "Resolver": g.traverser, + "Config": g.config, + }, + RequestString: query, + OperationName: operationName, + VariableValues: variables, + Context: context, + }) +} + +func buildGraphqlSchema(dbSchema *schema.SchemaWithAliases, logger logrus.FieldLogger, + config config.Config, modulesProvider *modules.Provider, authorizer authorization.Authorizer, +) (graphql.Schema, error) { + localSchema, err := local.Build(dbSchema, logger, config, modulesProvider, authorizer) + if err != nil { + return graphql.Schema{}, err + } + + schemaObject := graphql.ObjectConfig{ + Name: "WeaviateObj", + Description: "Location of the root query", + Fields: localSchema, + } + + // Run graphql.NewSchema in a sub-closure, so that we can recover from panics. 
+ // We need to use panics to return errors deep inside the dynamic generation of the GraphQL schema, + // inside the FieldThunks. There is _no_ way to bubble up an error besides panicking. + var result graphql.Schema + func() { + defer func() { + if r := recover(); r != nil { + entsentry.Recover(r) + err = fmt.Errorf("%v at %s", r, debug.Stack()) + } + }() + + result, err = graphql.NewSchema(graphql.SchemaConfig{ + Query: graphql.NewObject(schemaObject), + }) + }() + + if err != nil { + return graphql.Schema{}, fmt.Errorf("could not build GraphQL schema, because: %w", err) + } + + return result, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/test/helper/mock_resolver.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/test/helper/mock_resolver.go new file mode 100644 index 0000000000000000000000000000000000000000..bfeeba38af5f8e0f4bfe09eff7e7f292cdde3c8e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/test/helper/mock_resolver.go @@ -0,0 +1,133 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package helper + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/tailor-inc/graphql" + "github.com/tailor-inc/graphql/gqlerrors" + "github.com/weaviate/weaviate/entities/schema" +) + +type MockResolver struct { + mock.Mock + Schema *schema.Schema + RootField *graphql.Field + RootFieldName string + RootObject map[string]interface{} +} + +var schemaBuildLock sync.Mutex + +func (mr *MockResolver) Resolve(query string) *graphql.Result { + fields := graphql.Fields{} + fields[mr.RootFieldName] = mr.RootField + schemaObject := graphql.ObjectConfig{ + Name: "RootObj", + Description: "Location of the root query", + Fields: fields, + } + + schemaBuildLock.Lock() // serialize creation of GraphQL schema. + schema, err := graphql.NewSchema(graphql.SchemaConfig{ + Query: graphql.NewObject(schemaObject), + }) + schemaBuildLock.Unlock() + + if err != nil { + panic(err) + } + + result := graphql.Do(graphql.Params{ + Schema: schema, + RequestString: query, + RootObject: mr.RootObject, + Context: context.Background(), + }) + + return result +} + +func (mr *MockResolver) AssertResolve(t *testing.T, query string) *GraphQLResult { + result := mr.Resolve(query) + if len(result.Errors) > 0 { + t.Fatalf("Failed to resolve; %s", spew.Sdump(result.Errors)) + } + + mr.AssertExpectations(t) + return &GraphQLResult{Result: result.Data} +} + +func (mr *MockResolver) AssertFailToResolve(t *testing.T, query string, errors ...string) { + result := mr.Resolve(query) + if len(result.Errors) == 0 { + t.Fatalf("Expected to not resolve; %#v", result.Errors) + } else { + t.Log("Resolve failed, as expected, with error", result.Errors) + } + if len(errors) > 0 { + require.Equal(t, errors[0], result.Errors[0].Error()) + } +} + +func (mr *MockResolver) AssertErrors(t *testing.T, 
query string, errors []gqlerrors.FormattedError) { + result := mr.Resolve(query) + for i, actual := range result.Errors { + assert.Equal(t, errors[i].Error(), actual.Error(), "should have failed in a specific way, but didnt") + } +} + +func (mr *MockResolver) AssertJSONResponse(t *testing.T, query string, expectedResponseString string) { + var expectedResponse map[string]interface{} + err := json.Unmarshal([]byte(expectedResponseString), &expectedResponse) + if err != nil { + t.Fatalf("Could not parse '%s' as json: %v", expectedResponseString, err) + } + + response := mr.AssertResolve(t, query) + + assert.Equal(t, expectedResponse, response) +} + +type GraphQLResult struct { + Result interface{} +} + +// Drill down in the result +func (g GraphQLResult) Get(paths ...string) *GraphQLResult { + current := g.Result + for _, path := range paths { + var ok bool + currentAsMap, ok := (current.(map[string]interface{})) + if !ok { + panic(fmt.Sprintf("Cannot get element %s in %#v; result: %#v", path, paths, g.Result)) + } + + current, ok = currentAsMap[path] + if !ok { + panic(fmt.Sprintf("Cannot get element %s in %#v; result: %#v", path, paths, g.Result)) + } + } + + return &GraphQLResult{ + Result: current, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/test/helper/schema_fixtures.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/test/helper/schema_fixtures.go new file mode 100644 index 0000000000000000000000000000000000000000..0b9df5371b619baa75bdad88f2f6b4000699208f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/test/helper/schema_fixtures.go @@ -0,0 +1,140 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package helper + +import ( + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/config" +) + +var SimpleSchema = CreateSimpleSchema(config.VectorizerModuleText2VecContextionary) + +func CreateSimpleSchema(vectorizer string) schema.SchemaWithAliases { + return schema.SchemaWithAliases{ + Aliases: map[string]string{ + "SomeThingAlias": "SomeThing", + "CustomVectorClassAlias": "CustomVectorClass", + "SomeActionAlias": "SomeAction", + }, + Schema: schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "SomeThing", + Vectorizer: vectorizer, + Properties: []*models.Property{ + { + Name: "intField", + DataType: []string{"int"}, + }, + }, + }, + { + Class: "CustomVectorClass", + Vectorizer: config.VectorizerModuleNone, + Properties: []*models.Property{ + { + Name: "intField", + DataType: []string{"int"}, + }, + }, + }, + { + Vectorizer: vectorizer, + Class: "SomeAction", + Properties: []*models.Property{ + { + Name: "intField", + DataType: []string{"int"}, + }, + { + Name: "uuidField", + DataType: []string{"uuid"}, + }, + { + Name: "uuidArrayField", + DataType: []string{"uuid[]"}, + }, + { + Name: "location", + DataType: []string{"geoCoordinates"}, + }, + { + Name: "phone", + DataType: []string{"phoneNumber"}, + }, + { + Name: "hasAction", + DataType: []string{"SomeAction"}, + }, + { + Name: "hasActions", + DataType: []string{"SomeAction"}, + }, + }, + }, + }, + }, + }, + } +} + +// CarSchema contains a car which has every primitive field and a ref field there is +var CarSchema = schema.SchemaWithAliases{ + Schema: schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "Manufacturer", + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + { + Class: "Car", + Properties: 
[]*models.Property{ + { + Name: "horsepower", + DataType: []string{"int"}, + }, + { + Name: "weight", + DataType: []string{"number"}, + }, + { + Name: "modelName", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "madeBy", + DataType: []string{"Manufacturer"}, + }, + { + Name: "startOfProduction", + DataType: []string{"date"}, + }, + { + Name: "stillInProduction", + DataType: []string{"boolean"}, + }, + }, + }, + }, + }, + }, +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/test/helper/thunks.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/test/helper/thunks.go new file mode 100644 index 0000000000000000000000000000000000000000..230c0d4ee44b5da3b10baff8f0e6c9162876b9d0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/test/helper/thunks.go @@ -0,0 +1,41 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package helper + +func NilThunk() func() interface{} { + return func() interface{} { + return nil + } +} + +func IdentityThunk(x interface{}) func() interface{} { + return func() interface{} { + return x + } +} + +func EmptyListThunk() func() interface{} { + return func() interface{} { + list := []interface{}{} + return interface{}(list) + } +} + +func EmptyList() interface{} { + return []interface{}{} +} + +func SingletonThunk(x interface{}) func() interface{} { + return func() interface{} { + return interface{}(x) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/utils/helper_objects.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/utils/helper_objects.go new file mode 100644 index 0000000000000000000000000000000000000000..b9e964cd0f648fab2d7801a978e9e5bf60b3c722 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/graphql/utils/helper_objects.go @@ -0,0 +1,42 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Package utils provides utility methods and classes to support the graphql endpoint for Weaviate +package utils + +import ( + "errors" + + "github.com/tailor-inc/graphql" +) + +// GraphQLNetworkFieldContents contains all objects regarding GraphQL fields +type GraphQLNetworkFieldContents struct { + NetworkGetObject *graphql.Object // Object containing all fields for GraphQL Network Get schema generation + NetworkMetaObject *graphql.Object // Object containing all fields for GraphQL Network Meta schema generation + NetworkFetchObject *graphql.Object // Object containing all fields for GraphQL Network Fetch schema generation + NetworkIntrospectObject *graphql.Object // Object containing all fields for GraphQL Network Introspect schema generation + NetworkAggregateObject *graphql.Object // Object containing all fields for GraphQL Network Aggregate schema generation +} + +// FilterContainer contains all objects regarding GraphQL filters. Some filter elements are declared as global variables in the prototype, this struct achieves the same goal. 
+type FilterContainer struct { + WhereOperatorEnum *graphql.Enum // Object containing all fields for the Where filter + Operands *graphql.InputObject // Object containing all Operands + LocalFilterOptions map[string]*graphql.InputObject // Object containing all fields for Local filters + NetworkFilterOptions map[string]*graphql.InputObject // Object containing all fields for Network filters + FetchThingsActionsWhereFilterArgConf *graphql.ArgumentConfig // Object containing the Where filter fields for Fetch Objects + IntrospectThingsActionsWhereFilterArgConf *graphql.ArgumentConfig // Object containing the Where filter fields for Introspect Objects + WeaviateNetworkWhereKeywordsInpObj *graphql.InputObject // Object containing a global filter element + WeaviateNetworkIntrospectPropertiesObjField *graphql.Field // Object containing a global filter element +} + +var ErrEmptySchema = errors.New("there are no classes defined yet") diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/server.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/server.go new file mode 100644 index 0000000000000000000000000000000000000000..11224e3804b52842fbb6f8a8e02353d6cae5f59b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/server.go @@ -0,0 +1,302 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package grpc + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "net" + "strings" + "time" + + "google.golang.org/grpc/peer" + + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpc_sentry "github.com/johnbellone/grpc-middleware-sentry" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/handlers/rest/state" + pbv0 "github.com/weaviate/weaviate/grpc/generated/protocol/v0" + pbv1 "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + "github.com/weaviate/weaviate/usecases/auth/authentication/composer" + authErrs "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + "github.com/weaviate/weaviate/usecases/monitoring" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + _ "google.golang.org/grpc/encoding/gzip" // Install the gzip compressor + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + + v0 "github.com/weaviate/weaviate/adapters/handlers/grpc/v0" + v1 "github.com/weaviate/weaviate/adapters/handlers/grpc/v1" + "github.com/weaviate/weaviate/adapters/handlers/grpc/v1/batch" +) + +// CreateGRPCServer creates *grpc.Server with optional grpc.Serveroption passed. +func CreateGRPCServer(state *state.State, shutdown *batch.Shutdown, options ...grpc.ServerOption) *grpc.Server { + o := []grpc.ServerOption{ + grpc.MaxRecvMsgSize(state.ServerConfig.Config.GRPC.MaxMsgSize), + grpc.MaxSendMsgSize(state.ServerConfig.Config.GRPC.MaxMsgSize), + } + + o = append(o, options...) + + // Add TLS creds for the GRPC connection, if defined. 
+ if len(state.ServerConfig.Config.GRPC.CertFile) > 0 || len(state.ServerConfig.Config.GRPC.KeyFile) > 0 { + c, err := credentials.NewServerTLSFromFile(state.ServerConfig.Config.GRPC.CertFile, + state.ServerConfig.Config.GRPC.KeyFile) + if err != nil { + state.Logger.WithField("action", "grpc_startup"). + Fatalf("grpc server TLS credential error: %s", err) + } + o = append(o, grpc.Creds(c)) + } + + var interceptors []grpc.UnaryServerInterceptor + + interceptors = append(interceptors, makeAuthInterceptor()) + + basicAuth := state.ServerConfig.Config.Cluster.AuthConfig.BasicAuth + if basicAuth.Enabled() { + interceptors = append(interceptors, + basicAuthUnaryInterceptor("/weaviate.v1.FileReplicationService", basicAuth.Username, basicAuth.Password)) + + o = append(o, grpc.StreamInterceptor( + basicAuthStreamInterceptor("/weaviate.v1.FileReplicationService", basicAuth.Username, basicAuth.Password), + )) + } + + // If sentry is enabled add automatic spans on gRPC requests + if state.ServerConfig.Config.Sentry.Enabled { + interceptors = append(interceptors, grpc_middleware.ChainUnaryServer( + grpc_sentry.UnaryServerInterceptor(), + )) + } + + if state.Metrics != nil { + interceptors = append(interceptors, makeMetricsInterceptor(state.Logger, state.Metrics)) + } + + interceptors = append(interceptors, makeIPInterceptor()) + + if len(interceptors) > 0 { + o = append(o, grpc.ChainUnaryInterceptor(interceptors...)) + } + + s := grpc.NewServer(o...) 
+ weaviateV0 := v0.NewService() + weaviateV1 := v1.NewService( + state.Traverser, + composer.New( + state.ServerConfig.Config.Authentication, + state.APIKey, state.OIDC), + state.ServerConfig.Config.Authentication.AnonymousAccess.Enabled, + state.SchemaManager, + state.BatchManager, + &state.ServerConfig.Config, + state.Authorizer, + state.Logger, + shutdown, + ) + pbv0.RegisterWeaviateServer(s, weaviateV0) + pbv1.RegisterWeaviateServer(s, weaviateV1) + + weaviateV1FileReplicationService := v1.NewFileReplicationService(state.DB, state.ClusterService.SchemaReader()) + pbv1.RegisterFileReplicationServiceServer(s, weaviateV1FileReplicationService) + + grpc_health_v1.RegisterHealthServer(s, weaviateV1) + + return s +} + +func makeMetricsInterceptor(logger logrus.FieldLogger, metrics *monitoring.PrometheusMetrics) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + if info.FullMethod != "/weaviate.v1.Weaviate/BatchObjects" { + return handler(ctx, req) + } + + // For now only Batch has specific metrics (in line with http API) + startTime := time.Now() + reqSizeBytes := float64(proto.Size(req.(proto.Message))) + reqSizeMB := float64(reqSizeBytes) / (1024 * 1024) + // Invoke the handler to process the request + resp, err := handler(ctx, req) + + // Measure duration + duration := time.Since(startTime) + + logger.WithFields(logrus.Fields{ + "action": "grpc_batch_objects", + "method": info.FullMethod, + "request_size_bytes": reqSizeBytes, + "duration": duration, + }).Debugf("grpc BatchObjects request (%fMB) took %s", reqSizeMB, duration) + + // Metric uses non-standard base unit ms, use ms for backwards compatibility + metrics.BatchTime.WithLabelValues("total_api_level_grpc", "n/a", "n/a"). 
+ Observe(float64(duration.Milliseconds())) + metrics.BatchSizeBytes.WithLabelValues("grpc").Observe(reqSizeBytes) + + return resp, err + } +} + +func makeAuthInterceptor() grpc.UnaryServerInterceptor { + return func( + ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, + ) (any, error) { + resp, err := handler(ctx, req) + + if errors.As(err, &authErrs.Unauthenticated{}) { + return nil, status.Error(codes.Unauthenticated, err.Error()) + } + + if errors.As(err, &authErrs.Forbidden{}) { + return nil, status.Error(codes.PermissionDenied, err.Error()) + } + + return resp, err + } +} + +func makeIPInterceptor() grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + clientIP := getRealClientIP(ctx) + + // Add IP to context + ctx = context.WithValue(ctx, "sourceIp", clientIP) + return handler(ctx, req) + } +} + +func basicAuthUnaryInterceptor(servicePrefix, expectedUsername, expectedPassword string) grpc.UnaryServerInterceptor { + return func( + ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, + ) (any, error) { + if !strings.HasPrefix(info.FullMethod, servicePrefix) { + return handler(ctx, req) + } + + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Error(codes.Unauthenticated, "missing metadata") + } + + authHeader := md["authorization"] + if len(authHeader) == 0 || !strings.HasPrefix(authHeader[0], "Basic ") { + return nil, status.Error(codes.Unauthenticated, "missing or invalid auth header") + } + + // Decode and validate Basic Auth credentials + payload, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(authHeader[0], "Basic ")) + if err != nil { + return nil, status.Error(codes.Unauthenticated, "invalid base64 encoding") + } + + parts := strings.SplitN(string(payload), ":", 2) + if len(parts) != 2 || parts[0] != expectedUsername || parts[1] != expectedPassword { + 
return nil, status.Error(codes.Unauthenticated, "invalid username or password") + } + + return handler(ctx, req) + } +} + +func getRealClientIP(ctx context.Context) string { + // First, check for forwarded headers in metadata + md, ok := metadata.FromIncomingContext(ctx) + if ok { + if xRealIP := md.Get("x-real-ip"); len(xRealIP) > 0 { + return xRealIP[0] + } + + if xForwardedFor := md.Get("x-forwarded-for"); len(xForwardedFor) > 0 { + // X-Forwarded-For can contain multiple IPs, take the first one + ips := strings.Split(xForwardedFor[0], ",") + if len(ips) > 0 { + return strings.TrimSpace(ips[0]) + } + } + } + + // Fall back to peer address + if p, ok := peer.FromContext(ctx); ok { + host, _, err := net.SplitHostPort(p.Addr.String()) + if err != nil { + return convertIP6ToIP4Loopback(p.Addr.String()) + } + return convertIP6ToIP4Loopback(host) + } + + return "unknown" +} + +func convertIP6ToIP4Loopback(ip string) string { + if ip == "::1" { + return "127.0.0.1" // Convert IPv6 loopback to IPv4 + } + return ip +} + +func basicAuthStreamInterceptor(servicePrefix, expectedUsername, expectedPassword string) grpc.StreamServerInterceptor { + return func( + srv interface{}, + ss grpc.ServerStream, + info *grpc.StreamServerInfo, + handler grpc.StreamHandler, + ) error { + if !strings.HasPrefix(info.FullMethod, servicePrefix) { + return handler(srv, ss) // no auth needed + } + + md, ok := metadata.FromIncomingContext(ss.Context()) + if !ok { + return status.Error(codes.Unauthenticated, "missing metadata") + } + + authHeader := md["authorization"] + if len(authHeader) == 0 || !strings.HasPrefix(authHeader[0], "Basic ") { + return status.Error(codes.Unauthenticated, "missing or invalid auth header") + } + + decoded, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(authHeader[0], "Basic ")) + if err != nil { + return status.Error(codes.Unauthenticated, "invalid base64 encoding") + } + + parts := strings.SplitN(string(decoded), ":", 2) + if len(parts) != 2 || parts[0] 
!= expectedUsername || parts[1] != expectedPassword { + return status.Error(codes.Unauthenticated, "invalid username or password") + } + + return handler(srv, ss) + } +} + +func StartAndListen(s *grpc.Server, state *state.State) error { + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", + state.ServerConfig.Config.GRPC.Port)) + if err != nil { + return err + } + state.Logger.WithField("action", "grpc_startup"). + Infof("grpc server listening at %v", lis.Addr()) + if err := s.Serve(lis); err != nil { + return fmt.Errorf("failed to serve: %w", err) + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v0/service.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v0/service.go new file mode 100644 index 0000000000000000000000000000000000000000..1ec705706fcd196515ce7b02279e2aa999534be6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v0/service.go @@ -0,0 +1,40 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package weaviategrpc + +import ( + "context" + + "github.com/pkg/errors" + + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v0" +) + +type Service struct { + pb.UnimplementedWeaviateServer +} + +func NewService() *Service { + return &Service{} +} + +func (s *Service) BatchObjects(ctx context.Context, req *pb.BatchObjectsRequest) (*pb.BatchObjectsReply, error) { + return nil, errors.New( + "The V0 gRPC API is deprecated and will be removed in the next major release. Please use the V1 API instead by upgrading your client.", + ) +} + +func (s *Service) Search(ctx context.Context, req *pb.SearchRequest) (*pb.SearchReply, error) { + return nil, errors.New( + "The V0 gRPC API is deprecated and will be removed in the next major release. 
Please use the V1 API instead by upgrading your client.", + ) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/auth.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/auth.go new file mode 100644 index 0000000000000000000000000000000000000000..d9510d82a988c214a9cefb92519946214c6ff6b8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/auth.go @@ -0,0 +1,71 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "context" + "strings" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authentication/composer" + "google.golang.org/grpc/metadata" +) + +type authHandler struct { + allowAnonymousAccess bool + authComposer composer.TokenFunc +} + +func NewAuthHandler(allowAnonymousAccess bool, authComposer composer.TokenFunc) *authHandler { + return &authHandler{ + allowAnonymousAccess: allowAnonymousAccess, + authComposer: authComposer, + } +} + +// This should probably be run as part of a middleware. In the initial gRPC +// implementation there is only a single endpoint, so it's fine to run this +// straight from the endpoint. But the moment we add a second endpoint, this +// should be called from a central place. This way we can make sure it's +// impossible to forget to add it to a new endpoint. 
+func (a *authHandler) PrincipalFromContext(ctx context.Context) (*models.Principal, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return a.tryAnonymous() + } + + // the grpc library will lowercase all md keys, so we need to make sure to + // check a lowercase key + authValue, ok := md["authorization"] + if !ok { + return a.tryAnonymous() + } + + if len(authValue) == 0 { + return a.tryAnonymous() + } + + if !strings.HasPrefix(authValue[0], "Bearer ") { + return a.tryAnonymous() + } + + token := strings.TrimPrefix(authValue[0], "Bearer ") + return a.authComposer(token, nil) +} + +func (a *authHandler) tryAnonymous() (*models.Principal, error) { + if a.allowAnonymousAccess { + return nil, nil + } + + return a.authComposer("", nil) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/auth_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/auth_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1f2dc4b63d0dcf32c339be4522adf4954234f723 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/auth_test.go @@ -0,0 +1,128 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "google.golang.org/grpc/metadata" +) + +func TestAuth(t *testing.T) { + tests := []struct { + name string + buildCtx func() context.Context + shouldErr bool + expectedOut *models.Principal + allowAnon bool + }{ + { + name: "nothing provided, anon allowed", + buildCtx: func() context.Context { + return context.Background() + }, + allowAnon: true, + shouldErr: false, + }, + { + name: "nothing provided, anon forbidden", + buildCtx: func() context.Context { + return context.Background() + }, + allowAnon: false, + shouldErr: true, + }, + { + name: "with md, but nothing usable, anon allowed", + buildCtx: func() context.Context { + md := metadata.Pairs("unrelated", "unrelated") + return metadata.NewIncomingContext(context.Background(), md) + }, + allowAnon: true, + shouldErr: false, + }, + { + name: "with md, but nothing usable, anon forbidden", + buildCtx: func() context.Context { + md := metadata.Pairs("unrelated", "unrelated") + return metadata.NewIncomingContext(context.Background(), md) + }, + allowAnon: false, + shouldErr: true, + }, + { + name: "with md, but nothing usable, anon allowed", + buildCtx: func() context.Context { + md := metadata.Pairs("authorization", "wrong-format") + return metadata.NewIncomingContext(context.Background(), md) + }, + allowAnon: true, + shouldErr: false, + }, + { + name: "with md, but nothing usable, anon forbidden", + buildCtx: func() context.Context { + md := metadata.Pairs("authorization", "wrong-format") + return metadata.NewIncomingContext(context.Background(), md) + }, + allowAnon: false, + shouldErr: true, + }, + { + name: "with md, and a token", + buildCtx: func() context.Context { + md := metadata.Pairs("authorization", "Bearer Foo") + return metadata.NewIncomingContext(context.Background(), md) + }, 
+ shouldErr: false, + expectedOut: &models.Principal{Username: "Foo"}, + }, + { + name: "with a token that makes extraction error", + buildCtx: func() context.Context { + md := metadata.Pairs("authorization", "Bearer err") + return metadata.NewIncomingContext(context.Background(), md) + }, + shouldErr: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + a := &authHandler{ + allowAnonymousAccess: test.allowAnon, + authComposer: func(token string, scopes []string) (*models.Principal, error) { + if token == "" { + return nil, fmt.Errorf("not allowed") + } + if token == "err" { + return nil, fmt.Errorf("other error") + } + return &models.Principal{Username: token}, nil + }, + } + + p, err := a.PrincipalFromContext(test.buildCtx()) + if test.shouldErr { + require.NotNil(t, err) + } else { + require.Nil(t, err) + assert.Equal(t, test.expectedOut, p) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/handler.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/handler.go new file mode 100644 index 0000000000000000000000000000000000000000..40398062042aec89205e6ee02dc8aea5ba357f28 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/handler.go @@ -0,0 +1,177 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package batch + +import ( + "context" + "fmt" + "time" + + "github.com/sirupsen/logrus" + restCtx "github.com/weaviate/weaviate/adapters/handlers/rest/context" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/classcache" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/versioned" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/objects" + schemaManager "github.com/weaviate/weaviate/usecases/schema" +) + +type Handler struct { + authorizer authorization.Authorizer + authenticator authenticator + batchManager *objects.BatchManager + logger logrus.FieldLogger + schemaManager *schemaManager.Manager +} + +type authenticator interface { + PrincipalFromContext(ctx context.Context) (*models.Principal, error) +} + +func NewHandler(authorizer authorization.Authorizer, batchManager *objects.BatchManager, logger logrus.FieldLogger, authenticator authenticator, schemaManager *schemaManager.Manager) *Handler { + return &Handler{ + authorizer: authorizer, + authenticator: authenticator, + batchManager: batchManager, + logger: logger, + schemaManager: schemaManager, + } +} + +func (h *Handler) BatchObjects(ctx context.Context, req *pb.BatchObjectsRequest) (*pb.BatchObjectsReply, error) { + before := time.Now() + principal, err := h.authenticator.PrincipalFromContext(ctx) + if err != nil { + return nil, fmt.Errorf("extract auth: %w", err) + } + ctx = restCtx.AddPrincipalToContext(ctx, principal) + ctx = classcache.ContextWithClassCache(ctx) + + // we need to save the class two times: + // - to check if we already authorized the class+shard combination and if yes skip the auth, this is indexed by + // a combination of class+shard + // - to pass down the stack to reuse, index by classname so it can be found easily + knownClasses := 
map[string]versioned.Class{} + knownClassesAuthCheck := map[string]*models.Class{} + classGetter := func(classname, shard string) (*models.Class, error) { + // classname might be an alias + if cls := h.schemaManager.ResolveAlias(classname); cls != "" { + classname = cls + } + // use a letter that cannot be in class/shard name to not allow different combinations leading to the same combined name + classTenantName := classname + "#" + shard + class, ok := knownClassesAuthCheck[classTenantName] + if ok { + return class, nil + } + + // batch is upsert + if err := h.authorizer.Authorize(ctx, principal, authorization.UPDATE, authorization.ShardsData(classname, shard)...); err != nil { + return nil, err + } + + if err := h.authorizer.Authorize(ctx, principal, authorization.CREATE, authorization.ShardsData(classname, shard)...); err != nil { + return nil, err + } + + // we don't leak any info that someone who inserts data does not have anyway + vClass, err := h.schemaManager.GetCachedClassNoAuth(ctx, classname) + if err != nil { + return nil, err + } + knownClasses[classname] = vClass[classname] + knownClassesAuthCheck[classTenantName] = vClass[classname].Class + return vClass[classname].Class, nil + } + objs, objOriginalIndex, objectParsingErrors := BatchObjectsFromProto(req, classGetter) + + var objErrors []*pb.BatchObjectsReply_BatchError + for i, err := range objectParsingErrors { + objErrors = append(objErrors, &pb.BatchObjectsReply_BatchError{Index: int32(i), Error: err.Error()}) + } + + // If every object failed to parse, return early with the errors + if len(objs) == 0 { + result := &pb.BatchObjectsReply{ + Took: float32(time.Since(before).Seconds()), + Errors: objErrors, + } + return result, nil + } + + replicationProperties := extractReplicationProperties(req.ConsistencyLevel) + + response, err := h.batchManager.AddObjectsGRPCAfterAuth(ctx, principal, objs, replicationProperties, knownClasses) + if err != nil { + return nil, err + } + + for i, obj := range 
response { + if obj.Err != nil { + objErrors = append(objErrors, &pb.BatchObjectsReply_BatchError{Index: int32(objOriginalIndex[i]), Error: obj.Err.Error()}) + } + } + + result := &pb.BatchObjectsReply{ + Took: float32(time.Since(before).Seconds()), + Errors: objErrors, + } + return result, nil +} + +func (h *Handler) BatchReferences(ctx context.Context, req *pb.BatchReferencesRequest) (*pb.BatchReferencesReply, error) { + before := time.Now() + principal, err := h.authenticator.PrincipalFromContext(ctx) + if err != nil { + return nil, fmt.Errorf("extract auth: %w", err) + } + ctx = restCtx.AddPrincipalToContext(ctx, principal) + replProps := extractReplicationProperties(req.ConsistencyLevel) + + response, err := h.batchManager.AddReferences(ctx, principal, BatchReferencesFromProto(req), replProps) + if err != nil { + return nil, err + } + + var refErrors []*pb.BatchReferencesReply_BatchError + for i, ref := range response { + if ref.Err != nil { + refErrors = append(refErrors, &pb.BatchReferencesReply_BatchError{Index: int32(i), Error: ref.Err.Error()}) + } + } + + result := &pb.BatchReferencesReply{ + Took: float32(time.Since(before).Seconds()), + Errors: refErrors, + } + return result, nil +} + +func extractReplicationProperties(level *pb.ConsistencyLevel) *additional.ReplicationProperties { + if level == nil { + return nil + } + + switch *level { + case pb.ConsistencyLevel_CONSISTENCY_LEVEL_ONE: + return &additional.ReplicationProperties{ConsistencyLevel: "ONE"} + case pb.ConsistencyLevel_CONSISTENCY_LEVEL_QUORUM: + return &additional.ReplicationProperties{ConsistencyLevel: "QUORUM"} + case pb.ConsistencyLevel_CONSISTENCY_LEVEL_ALL: + return &additional.ReplicationProperties{ConsistencyLevel: "ALL"} + default: + return nil + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/mocks/mock_batcher.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/mocks/mock_batcher.go new file mode 100644 index 
0000000000000000000000000000000000000000..2f1ca48f6dc0a953484a25de5544b1eec4417e22 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/mocks/mock_batcher.go @@ -0,0 +1,166 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + protocol "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +// MockBatcher is an autogenerated mock type for the Batcher type +type MockBatcher struct { + mock.Mock +} + +type MockBatcher_Expecter struct { + mock *mock.Mock +} + +func (_m *MockBatcher) EXPECT() *MockBatcher_Expecter { + return &MockBatcher_Expecter{mock: &_m.Mock} +} + +// BatchObjects provides a mock function with given fields: ctx, req +func (_m *MockBatcher) BatchObjects(ctx context.Context, req *protocol.BatchObjectsRequest) (*protocol.BatchObjectsReply, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for BatchObjects") + } + + var r0 *protocol.BatchObjectsReply + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *protocol.BatchObjectsRequest) (*protocol.BatchObjectsReply, error)); ok { + return rf(ctx, req) + } + if rf, ok := ret.Get(0).(func(context.Context, *protocol.BatchObjectsRequest) *protocol.BatchObjectsReply); ok { + r0 = rf(ctx, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.BatchObjectsReply) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *protocol.BatchObjectsRequest) error); ok { + r1 = rf(ctx, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBatcher_BatchObjects_Call is a *mock.Call that shadows Run/Return methods 
with type explicit version for method 'BatchObjects' +type MockBatcher_BatchObjects_Call struct { + *mock.Call +} + +// BatchObjects is a helper method to define mock.On call +// - ctx context.Context +// - req *protocol.BatchObjectsRequest +func (_e *MockBatcher_Expecter) BatchObjects(ctx interface{}, req interface{}) *MockBatcher_BatchObjects_Call { + return &MockBatcher_BatchObjects_Call{Call: _e.mock.On("BatchObjects", ctx, req)} +} + +func (_c *MockBatcher_BatchObjects_Call) Run(run func(ctx context.Context, req *protocol.BatchObjectsRequest)) *MockBatcher_BatchObjects_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*protocol.BatchObjectsRequest)) + }) + return _c +} + +func (_c *MockBatcher_BatchObjects_Call) Return(_a0 *protocol.BatchObjectsReply, _a1 error) *MockBatcher_BatchObjects_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBatcher_BatchObjects_Call) RunAndReturn(run func(context.Context, *protocol.BatchObjectsRequest) (*protocol.BatchObjectsReply, error)) *MockBatcher_BatchObjects_Call { + _c.Call.Return(run) + return _c +} + +// BatchReferences provides a mock function with given fields: ctx, req +func (_m *MockBatcher) BatchReferences(ctx context.Context, req *protocol.BatchReferencesRequest) (*protocol.BatchReferencesReply, error) { + ret := _m.Called(ctx, req) + + if len(ret) == 0 { + panic("no return value specified for BatchReferences") + } + + var r0 *protocol.BatchReferencesReply + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *protocol.BatchReferencesRequest) (*protocol.BatchReferencesReply, error)); ok { + return rf(ctx, req) + } + if rf, ok := ret.Get(0).(func(context.Context, *protocol.BatchReferencesRequest) *protocol.BatchReferencesReply); ok { + r0 = rf(ctx, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.BatchReferencesReply) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *protocol.BatchReferencesRequest) error); ok { + r1 = 
rf(ctx, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBatcher_BatchReferences_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BatchReferences' +type MockBatcher_BatchReferences_Call struct { + *mock.Call +} + +// BatchReferences is a helper method to define mock.On call +// - ctx context.Context +// - req *protocol.BatchReferencesRequest +func (_e *MockBatcher_Expecter) BatchReferences(ctx interface{}, req interface{}) *MockBatcher_BatchReferences_Call { + return &MockBatcher_BatchReferences_Call{Call: _e.mock.On("BatchReferences", ctx, req)} +} + +func (_c *MockBatcher_BatchReferences_Call) Run(run func(ctx context.Context, req *protocol.BatchReferencesRequest)) *MockBatcher_BatchReferences_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*protocol.BatchReferencesRequest)) + }) + return _c +} + +func (_c *MockBatcher_BatchReferences_Call) Return(_a0 *protocol.BatchReferencesReply, _a1 error) *MockBatcher_BatchReferences_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBatcher_BatchReferences_Call) RunAndReturn(run func(context.Context, *protocol.BatchReferencesRequest) (*protocol.BatchReferencesReply, error)) *MockBatcher_BatchReferences_Call { + _c.Call.Return(run) + return _c +} + +// NewMockBatcher creates a new instance of MockBatcher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockBatcher(t interface { + mock.TestingT + Cleanup(func()) +}) *MockBatcher { + mock := &MockBatcher{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/mocks/mock_weaviate_batch_stream_server.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/mocks/mock_weaviate_batch_stream_server.go new file mode 100644 index 0000000000000000000000000000000000000000..9b23c1053781181acc34136841ed44b857f24519 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/mocks/mock_weaviate_batch_stream_server.go @@ -0,0 +1,361 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + metadata "google.golang.org/grpc/metadata" + + mock "github.com/stretchr/testify/mock" + + protocol "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +// MockWeaviate_BatchStreamServer is an autogenerated mock type for the Weaviate_BatchStreamServer type +type MockWeaviate_BatchStreamServer[Res interface{}] struct { + mock.Mock +} + +type MockWeaviate_BatchStreamServer_Expecter[Res interface{}] struct { + mock *mock.Mock +} + +func (_m *MockWeaviate_BatchStreamServer[Res]) EXPECT() *MockWeaviate_BatchStreamServer_Expecter[Res] { + return &MockWeaviate_BatchStreamServer_Expecter[Res]{mock: &_m.Mock} +} + +// Context provides a mock function with no fields +func (_m *MockWeaviate_BatchStreamServer[Res]) Context() context.Context { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Context") + } + + var r0 context.Context + if rf, ok := ret.Get(0).(func() context.Context); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(context.Context) + } + } + + return r0 +} + +// MockWeaviate_BatchStreamServer_Context_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Context' +type MockWeaviate_BatchStreamServer_Context_Call[Res interface{}] struct { + *mock.Call +} + +// Context is a helper method to define mock.On call +func (_e *MockWeaviate_BatchStreamServer_Expecter[Res]) Context() *MockWeaviate_BatchStreamServer_Context_Call[Res] { + return &MockWeaviate_BatchStreamServer_Context_Call[Res]{Call: _e.mock.On("Context")} +} + +func (_c *MockWeaviate_BatchStreamServer_Context_Call[Res]) Run(run func()) *MockWeaviate_BatchStreamServer_Context_Call[Res] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockWeaviate_BatchStreamServer_Context_Call[Res]) Return(_a0 context.Context) *MockWeaviate_BatchStreamServer_Context_Call[Res] { + _c.Call.Return(_a0) + return _c +} + +func (_c 
*MockWeaviate_BatchStreamServer_Context_Call[Res]) RunAndReturn(run func() context.Context) *MockWeaviate_BatchStreamServer_Context_Call[Res] { + _c.Call.Return(run) + return _c +} + +// RecvMsg provides a mock function with given fields: m +func (_m *MockWeaviate_BatchStreamServer[Res]) RecvMsg(m interface{}) error { + ret := _m.Called(m) + + if len(ret) == 0 { + panic("no return value specified for RecvMsg") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(m) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockWeaviate_BatchStreamServer_RecvMsg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecvMsg' +type MockWeaviate_BatchStreamServer_RecvMsg_Call[Res interface{}] struct { + *mock.Call +} + +// RecvMsg is a helper method to define mock.On call +// - m interface{} +func (_e *MockWeaviate_BatchStreamServer_Expecter[Res]) RecvMsg(m interface{}) *MockWeaviate_BatchStreamServer_RecvMsg_Call[Res] { + return &MockWeaviate_BatchStreamServer_RecvMsg_Call[Res]{Call: _e.mock.On("RecvMsg", m)} +} + +func (_c *MockWeaviate_BatchStreamServer_RecvMsg_Call[Res]) Run(run func(m interface{})) *MockWeaviate_BatchStreamServer_RecvMsg_Call[Res] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(interface{})) + }) + return _c +} + +func (_c *MockWeaviate_BatchStreamServer_RecvMsg_Call[Res]) Return(_a0 error) *MockWeaviate_BatchStreamServer_RecvMsg_Call[Res] { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockWeaviate_BatchStreamServer_RecvMsg_Call[Res]) RunAndReturn(run func(interface{}) error) *MockWeaviate_BatchStreamServer_RecvMsg_Call[Res] { + _c.Call.Return(run) + return _c +} + +// Send provides a mock function with given fields: _a0 +func (_m *MockWeaviate_BatchStreamServer[Res]) Send(_a0 *protocol.BatchStreamMessage) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Send") + } + + var r0 error + if rf, ok := 
ret.Get(0).(func(*protocol.BatchStreamMessage) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockWeaviate_BatchStreamServer_Send_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Send' +type MockWeaviate_BatchStreamServer_Send_Call[Res interface{}] struct { + *mock.Call +} + +// Send is a helper method to define mock.On call +// - _a0 *protocol.BatchStreamMessage +func (_e *MockWeaviate_BatchStreamServer_Expecter[Res]) Send(_a0 interface{}) *MockWeaviate_BatchStreamServer_Send_Call[Res] { + return &MockWeaviate_BatchStreamServer_Send_Call[Res]{Call: _e.mock.On("Send", _a0)} +} + +func (_c *MockWeaviate_BatchStreamServer_Send_Call[Res]) Run(run func(_a0 *protocol.BatchStreamMessage)) *MockWeaviate_BatchStreamServer_Send_Call[Res] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*protocol.BatchStreamMessage)) + }) + return _c +} + +func (_c *MockWeaviate_BatchStreamServer_Send_Call[Res]) Return(_a0 error) *MockWeaviate_BatchStreamServer_Send_Call[Res] { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockWeaviate_BatchStreamServer_Send_Call[Res]) RunAndReturn(run func(*protocol.BatchStreamMessage) error) *MockWeaviate_BatchStreamServer_Send_Call[Res] { + _c.Call.Return(run) + return _c +} + +// SendHeader provides a mock function with given fields: _a0 +func (_m *MockWeaviate_BatchStreamServer[Res]) SendHeader(_a0 metadata.MD) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SendHeader") + } + + var r0 error + if rf, ok := ret.Get(0).(func(metadata.MD) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockWeaviate_BatchStreamServer_SendHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendHeader' +type MockWeaviate_BatchStreamServer_SendHeader_Call[Res interface{}] struct { + *mock.Call +} + +// SendHeader is a helper method to define mock.On call 
+// - _a0 metadata.MD +func (_e *MockWeaviate_BatchStreamServer_Expecter[Res]) SendHeader(_a0 interface{}) *MockWeaviate_BatchStreamServer_SendHeader_Call[Res] { + return &MockWeaviate_BatchStreamServer_SendHeader_Call[Res]{Call: _e.mock.On("SendHeader", _a0)} +} + +func (_c *MockWeaviate_BatchStreamServer_SendHeader_Call[Res]) Run(run func(_a0 metadata.MD)) *MockWeaviate_BatchStreamServer_SendHeader_Call[Res] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(metadata.MD)) + }) + return _c +} + +func (_c *MockWeaviate_BatchStreamServer_SendHeader_Call[Res]) Return(_a0 error) *MockWeaviate_BatchStreamServer_SendHeader_Call[Res] { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockWeaviate_BatchStreamServer_SendHeader_Call[Res]) RunAndReturn(run func(metadata.MD) error) *MockWeaviate_BatchStreamServer_SendHeader_Call[Res] { + _c.Call.Return(run) + return _c +} + +// SendMsg provides a mock function with given fields: m +func (_m *MockWeaviate_BatchStreamServer[Res]) SendMsg(m interface{}) error { + ret := _m.Called(m) + + if len(ret) == 0 { + panic("no return value specified for SendMsg") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(m) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockWeaviate_BatchStreamServer_SendMsg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendMsg' +type MockWeaviate_BatchStreamServer_SendMsg_Call[Res interface{}] struct { + *mock.Call +} + +// SendMsg is a helper method to define mock.On call +// - m interface{} +func (_e *MockWeaviate_BatchStreamServer_Expecter[Res]) SendMsg(m interface{}) *MockWeaviate_BatchStreamServer_SendMsg_Call[Res] { + return &MockWeaviate_BatchStreamServer_SendMsg_Call[Res]{Call: _e.mock.On("SendMsg", m)} +} + +func (_c *MockWeaviate_BatchStreamServer_SendMsg_Call[Res]) Run(run func(m interface{})) *MockWeaviate_BatchStreamServer_SendMsg_Call[Res] { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(interface{})) + }) + return _c +} + +func (_c *MockWeaviate_BatchStreamServer_SendMsg_Call[Res]) Return(_a0 error) *MockWeaviate_BatchStreamServer_SendMsg_Call[Res] { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockWeaviate_BatchStreamServer_SendMsg_Call[Res]) RunAndReturn(run func(interface{}) error) *MockWeaviate_BatchStreamServer_SendMsg_Call[Res] { + _c.Call.Return(run) + return _c +} + +// SetHeader provides a mock function with given fields: _a0 +func (_m *MockWeaviate_BatchStreamServer[Res]) SetHeader(_a0 metadata.MD) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SetHeader") + } + + var r0 error + if rf, ok := ret.Get(0).(func(metadata.MD) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockWeaviate_BatchStreamServer_SetHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetHeader' +type MockWeaviate_BatchStreamServer_SetHeader_Call[Res interface{}] struct { + *mock.Call +} + +// SetHeader is a helper method to define mock.On call +// - _a0 metadata.MD +func (_e *MockWeaviate_BatchStreamServer_Expecter[Res]) SetHeader(_a0 interface{}) *MockWeaviate_BatchStreamServer_SetHeader_Call[Res] { + return &MockWeaviate_BatchStreamServer_SetHeader_Call[Res]{Call: _e.mock.On("SetHeader", _a0)} +} + +func (_c *MockWeaviate_BatchStreamServer_SetHeader_Call[Res]) Run(run func(_a0 metadata.MD)) *MockWeaviate_BatchStreamServer_SetHeader_Call[Res] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(metadata.MD)) + }) + return _c +} + +func (_c *MockWeaviate_BatchStreamServer_SetHeader_Call[Res]) Return(_a0 error) *MockWeaviate_BatchStreamServer_SetHeader_Call[Res] { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockWeaviate_BatchStreamServer_SetHeader_Call[Res]) RunAndReturn(run func(metadata.MD) error) *MockWeaviate_BatchStreamServer_SetHeader_Call[Res] { + _c.Call.Return(run) + return _c +} + +// SetTrailer 
provides a mock function with given fields: _a0 +func (_m *MockWeaviate_BatchStreamServer[Res]) SetTrailer(_a0 metadata.MD) { + _m.Called(_a0) +} + +// MockWeaviate_BatchStreamServer_SetTrailer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetTrailer' +type MockWeaviate_BatchStreamServer_SetTrailer_Call[Res interface{}] struct { + *mock.Call +} + +// SetTrailer is a helper method to define mock.On call +// - _a0 metadata.MD +func (_e *MockWeaviate_BatchStreamServer_Expecter[Res]) SetTrailer(_a0 interface{}) *MockWeaviate_BatchStreamServer_SetTrailer_Call[Res] { + return &MockWeaviate_BatchStreamServer_SetTrailer_Call[Res]{Call: _e.mock.On("SetTrailer", _a0)} +} + +func (_c *MockWeaviate_BatchStreamServer_SetTrailer_Call[Res]) Run(run func(_a0 metadata.MD)) *MockWeaviate_BatchStreamServer_SetTrailer_Call[Res] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(metadata.MD)) + }) + return _c +} + +func (_c *MockWeaviate_BatchStreamServer_SetTrailer_Call[Res]) Return() *MockWeaviate_BatchStreamServer_SetTrailer_Call[Res] { + _c.Call.Return() + return _c +} + +func (_c *MockWeaviate_BatchStreamServer_SetTrailer_Call[Res]) RunAndReturn(run func(metadata.MD)) *MockWeaviate_BatchStreamServer_SetTrailer_Call[Res] { + _c.Run(run) + return _c +} + +// NewMockWeaviate_BatchStreamServer creates a new instance of MockWeaviate_BatchStreamServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockWeaviate_BatchStreamServer[Res interface{}](t interface { + mock.TestingT + Cleanup(func()) +}) *MockWeaviate_BatchStreamServer[Res] { + mock := &MockWeaviate_BatchStreamServer[Res]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/parse.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/parse.go new file mode 100644 index 0000000000000000000000000000000000000000..c1e21a3433af152d358632d805b724b8a8917725 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/parse.go @@ -0,0 +1,248 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package batch + +import ( + "fmt" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/weaviate/weaviate/usecases/byteops" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +const BEACON_START = "weaviate://localhost/" + +func sliceToInterface[T any](values []T) []interface{} { + tmpArray := make([]interface{}, len(values)) + for k := range values { + tmpArray[k] = values[k] + } + return tmpArray +} + +func BatchObjectsFromProto(req *pb.BatchObjectsRequest, authorizedGetClass func(string, string) (*models.Class, error)) ([]*models.Object, map[int]int, map[int]error) { + objectsBatch := req.Objects + objs := make([]*models.Object, 0, len(objectsBatch)) + objOriginalIndex := make(map[int]int) + objectErrors := make(map[int]error, len(objectsBatch)) + + insertCounter := 0 + for i, obj := range objectsBatch { + var props map[string]interface{} + + collection := 
schema.UppercaseClassName(obj.Collection) + class, err := authorizedGetClass(collection, obj.Tenant) + if err != nil { + objectErrors[i] = err + continue + } + obj.Collection = collection + if class != nil { + // class is nil when we are relying on auto schema to create a collection + // aliases cannot be created for non-existent classes + obj.Collection = class.Class + } + + if obj.Properties != nil { + props = extractPrimitiveProperties(&pb.ObjectPropertiesValue{ + NonRefProperties: obj.Properties.NonRefProperties, + BooleanArrayProperties: obj.Properties.BooleanArrayProperties, + NumberArrayProperties: obj.Properties.NumberArrayProperties, + TextArrayProperties: obj.Properties.TextArrayProperties, + IntArrayProperties: obj.Properties.IntArrayProperties, + ObjectProperties: obj.Properties.ObjectProperties, + ObjectArrayProperties: obj.Properties.ObjectArrayProperties, + EmptyListProps: obj.Properties.EmptyListProps, + }) + // If class is not in schema, continue as there is no ref to extract + if class != nil { + if err := extractSingleRefTarget(class, obj.Properties.SingleTargetRefProps, props); err != nil { + objectErrors[i] = err + continue + } + if err := extractMultiRefTarget(class, obj.Properties.MultiTargetRefProps, props); err != nil { + objectErrors[i] = err + continue + } + } + } + + if _, err := uuid.Parse(obj.Uuid); err != nil { + objectErrors[i] = err + continue + } + + var vector []float32 = nil + // bytes vector has precedent for being more efficient + if len(obj.VectorBytes) > 0 { + vector = byteops.Fp32SliceFromBytes(obj.VectorBytes) + } else if len(obj.Vector) > 0 { + vector = obj.Vector + } + + var vectors models.Vectors = nil + if len(obj.Vectors) > 0 { + parsedVectors := make(map[string][]float32) + parsedMultiVectors := make(map[string][][]float32) + for _, vec := range obj.Vectors { + switch vec.Type { + case *pb.Vectors_VECTOR_TYPE_UNSPECIFIED.Enum(), *pb.Vectors_VECTOR_TYPE_SINGLE_FP32.Enum(): + parsedVectors[vec.Name] = 
byteops.Fp32SliceFromBytes(vec.VectorBytes) + case *pb.Vectors_VECTOR_TYPE_MULTI_FP32.Enum(): + out, err := byteops.Fp32SliceOfSlicesFromBytes(vec.VectorBytes) + if err != nil { + objectErrors[i] = err + continue + } + parsedMultiVectors[vec.Name] = out + default: + // do nothing + } + } + vectors = make(models.Vectors, len(parsedVectors)+len(parsedMultiVectors)) + for targetVector, vector := range parsedVectors { + vectors[targetVector] = vector + } + for targetVector, multiVector := range parsedMultiVectors { + vectors[targetVector] = multiVector + } + } + + objOriginalIndex[insertCounter] = i + objs = append(objs, &models.Object{ + Class: obj.Collection, + Tenant: obj.Tenant, + Vector: vector, + Properties: props, + ID: strfmt.UUID(obj.Uuid), + Vectors: vectors, + }) + insertCounter += 1 + } + return objs[:insertCounter], objOriginalIndex, objectErrors +} + +func extractSingleRefTarget(class *models.Class, properties []*pb.BatchObject_SingleTargetRefProps, props map[string]interface{}) error { + for _, refSingle := range properties { + propName := refSingle.GetPropName() + prop, err := schema.GetPropertyByName(class, propName) + if err != nil { + return err + } + if len(prop.DataType) > 1 { + return fmt.Errorf("target is a multi-target reference, need single target %v", prop.DataType) + } + toClass := prop.DataType[0] + beacons := make([]interface{}, len(refSingle.Uuids)) + for j, uuid := range refSingle.Uuids { + beacons[j] = map[string]interface{}{"beacon": BEACON_START + toClass + "/" + uuid} + } + props[propName] = beacons + } + return nil +} + +func extractMultiRefTarget(class *models.Class, properties []*pb.BatchObject_MultiTargetRefProps, props map[string]interface{}) error { + for _, refMulti := range properties { + propName := refMulti.GetPropName() + prop, err := schema.GetPropertyByName(class, propName) + if err != nil { + return err + } + if len(prop.DataType) < 2 { + return fmt.Errorf("target is a single-target reference, need multi-target %v", 
prop.DataType) + } + beacons := make([]interface{}, len(refMulti.Uuids)) + refMulti.TargetCollection = schema.UppercaseClassName(refMulti.TargetCollection) + for j, uid := range refMulti.Uuids { + beacons[j] = map[string]interface{}{"beacon": BEACON_START + refMulti.TargetCollection + "/" + uid} + } + props[propName] = beacons + } + return nil +} + +func extractPrimitiveProperties(properties *pb.ObjectPropertiesValue) map[string]interface{} { + var props map[string]interface{} + if properties.NonRefProperties != nil { + props = properties.NonRefProperties.AsMap() + } else { + props = make(map[string]interface{}) + } + + // arrays cannot be part of a GRPC map, so we need to handle each type separately + for j := range properties.BooleanArrayProperties { + props[properties.BooleanArrayProperties[j].PropName] = sliceToInterface(properties.BooleanArrayProperties[j].Values) + } + + for j := range properties.NumberArrayProperties { + inputValuesBytes := properties.NumberArrayProperties[j].ValuesBytes + var values []float64 + + if len(inputValuesBytes) > 0 { + values = byteops.Fp64SliceFromBytes(inputValuesBytes) + } else { + values = properties.NumberArrayProperties[j].Values + } + + props[properties.NumberArrayProperties[j].PropName] = sliceToInterface(values) + } + + for j := range properties.TextArrayProperties { + props[properties.TextArrayProperties[j].PropName] = sliceToInterface(properties.TextArrayProperties[j].Values) + } + + for j := range properties.IntArrayProperties { + props[properties.IntArrayProperties[j].PropName] = sliceToInterface(properties.IntArrayProperties[j].Values) + } + + for j := range properties.ObjectProperties { + props[properties.ObjectProperties[j].PropName] = extractPrimitiveProperties(properties.ObjectProperties[j].Value) + } + + for _, prop := range properties.ObjectArrayProperties { + nested := make([]interface{}, len(prop.Values)) + for k := range prop.Values { + nested[k] = extractPrimitiveProperties(prop.Values[k]) + } + 
props[prop.PropName] = nested + } + + for _, propName := range properties.EmptyListProps { + props[propName] = []interface{}{} + } + + return props +} + +func BatchReferencesFromProto(req *pb.BatchReferencesRequest) []*models.BatchReference { + refs := make([]*models.BatchReference, 0, len(req.GetReferences())) + for _, ref := range req.GetReferences() { + var to string + if ref.ToCollection == nil { + to = fmt.Sprintf("%s%s", BEACON_START, ref.ToUuid) + } else { + to = fmt.Sprintf("%s%s/%s", BEACON_START, *ref.ToCollection, ref.ToUuid) + } + from := fmt.Sprintf("%s%s/%s/%s", BEACON_START, ref.FromCollection, ref.FromUuid, ref.Name) + refs = append(refs, &models.BatchReference{ + From: strfmt.URI(from), + To: strfmt.URI(to), + Tenant: ref.Tenant, + }) + } + return refs +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/parse_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/parse_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7ccd4582f1e382bb572126297b86b306ea6c6eb5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/parse_test.go @@ -0,0 +1,487 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package batch_test + +import ( + "encoding/binary" + "encoding/json" + "math" + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/handlers/grpc/v1/batch" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/structpb" +) + +const ( + UUID3 = "a4de3ca0-6975-464f-b23b-adddd83630d7" + UUID4 = "7e10ec81-a26d-4ac7-8264-3e3e05397ddc" +) + +func newStruct(t *testing.T, values map[string]interface{}) *structpb.Struct { + b, err := json.Marshal(values) + require.Nil(t, err) + s := &structpb.Struct{} + err = protojson.Unmarshal(b, s) + require.Nil(t, err) + return s +} + +func byteVector(vec []float32) []byte { + vector := make([]byte, len(vec)*4) + + for i := 0; i < len(vec); i++ { + binary.LittleEndian.PutUint32(vector[i*4:i*4+4], math.Float32bits(vec[i])) + } + + return vector +} + +func byteVectorMulti(mat [][]float32) []byte { + matrix := make([]byte, 2) + binary.LittleEndian.PutUint16(matrix, uint16(len(mat[0]))) + for _, vec := range mat { + matrix = append(matrix, byteVector(vec)...) 
+ } + return matrix +} + +func TestGRPCBatchRequest(t *testing.T) { + collection := "TestClass" + refClass1 := "OtherClass" + refClass2 := "AnotherClass" + multiVecClass := "MultiVec" + scheme := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: collection, + Properties: []*models.Property{ + {Name: "name", DataType: schema.DataTypeText.PropString()}, + {Name: "number", DataType: []string{"int"}}, + {Name: "ref", DataType: []string{refClass1}}, + {Name: "multiRef", DataType: []string{refClass1, refClass2}}, + }, + }, + { + Class: refClass1, + Properties: []*models.Property{ + {Name: "something", DataType: schema.DataTypeText.PropString()}, + {Name: "ref2", DataType: []string{refClass2}}, + }, + }, + { + Class: refClass2, + Properties: []*models.Property{ + {Name: "else", DataType: schema.DataTypeText.PropString()}, + {Name: "ref3", DataType: []string{refClass2}}, + }, + }, + { + Class: multiVecClass, + Properties: []*models.Property{ + {Name: "first", DataType: schema.DataTypeText.PropString()}, + }, + VectorConfig: map[string]models.VectorConfig{ + "custom": { + VectorIndexType: "hnsw", + Vectorizer: map[string]interface{}{"none": map[string]interface{}{}}, + }, + "first": { + VectorIndexType: "flat", + Vectorizer: map[string]interface{}{"text2vec-contextionary": map[string]interface{}{}}, + }, + }, + }, + }, + }, + } + + var nilMap map[string]interface{} + tests := []struct { + name string + req []*pb.BatchObject + out []*models.Object + outError []int + origIndex map[int]int + }{ + { + name: "empty object", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4}}, + out: []*models.Object{{Class: collection, Properties: nilMap, ID: UUID4}}, + }, + { + name: "no UUID", + req: []*pb.BatchObject{{Collection: collection}}, + out: []*models.Object{}, + outError: []int{0}, + }, + { + name: "only normal props", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4, Properties: &pb.BatchObject_Properties{ + 
NonRefProperties: newStruct(t, map[string]interface{}{ + "name": "something", + "age": 45, + }), + }}}, + out: []*models.Object{{Class: collection, ID: UUID4, Properties: map[string]interface{}{ + "name": "something", + "age": float64(45), + }}}, + }, + { + name: "only single refs", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4, Properties: &pb.BatchObject_Properties{ + SingleTargetRefProps: []*pb.BatchObject_SingleTargetRefProps{ + {PropName: "ref", Uuids: []string{UUID3, UUID4}}, + }, + }}}, + out: []*models.Object{{Class: collection, ID: UUID4, Properties: map[string]interface{}{ + "ref": []interface{}{ + map[string]interface{}{"beacon": batch.BEACON_START + refClass1 + "/" + UUID3}, + map[string]interface{}{"beacon": batch.BEACON_START + refClass1 + "/" + UUID4}, + }, + }}}, + }, + { + name: "named vectors", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4, Vectors: []*pb.Vectors{ + { + Name: "custom", + VectorBytes: byteVector([]float32{0.1, 0.2, 0.3}), + }, + }}}, + out: []*models.Object{{ + Class: collection, ID: UUID4, Properties: nilMap, + Vectors: map[string]models.Vector{ + "custom": []float32{0.1, 0.2, 0.3}, + }, + }}, + }, + { + name: "only mult ref", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4, Properties: &pb.BatchObject_Properties{ + MultiTargetRefProps: []*pb.BatchObject_MultiTargetRefProps{ + {PropName: "multiRef", Uuids: []string{UUID3, UUID4}, TargetCollection: refClass2}, + }, + }}}, + out: []*models.Object{{Class: collection, ID: UUID4, Properties: map[string]interface{}{ + "multiRef": []interface{}{ + map[string]interface{}{"beacon": batch.BEACON_START + refClass2 + "/" + UUID3}, + map[string]interface{}{"beacon": batch.BEACON_START + refClass2 + "/" + UUID4}, + }, + }}}, + }, + { + name: "all property types", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4, Properties: &pb.BatchObject_Properties{ + MultiTargetRefProps: []*pb.BatchObject_MultiTargetRefProps{ + {PropName: "multiRef", 
Uuids: []string{UUID4, UUID3}, TargetCollection: refClass2}, + }, + SingleTargetRefProps: []*pb.BatchObject_SingleTargetRefProps{ + {PropName: "ref", Uuids: []string{UUID4, UUID3}}, + }, + NonRefProperties: newStruct(t, map[string]interface{}{ + "name": "else", + "age": 46, + }), + }}}, + out: []*models.Object{{Class: collection, ID: UUID4, Properties: map[string]interface{}{ + "multiRef": []interface{}{ + map[string]interface{}{"beacon": batch.BEACON_START + refClass2 + "/" + UUID4}, + map[string]interface{}{"beacon": batch.BEACON_START + refClass2 + "/" + UUID3}, + }, + "ref": []interface{}{ + map[string]interface{}{"beacon": batch.BEACON_START + refClass1 + "/" + UUID4}, + map[string]interface{}{"beacon": batch.BEACON_START + refClass1 + "/" + UUID3}, + }, + "name": "else", + "age": float64(46), + }}}, + }, + { + name: "mult ref to single target", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4, Properties: &pb.BatchObject_Properties{ + MultiTargetRefProps: []*pb.BatchObject_MultiTargetRefProps{ + {PropName: "ref", Uuids: []string{UUID3, UUID4}, TargetCollection: refClass2}, + }, + }}}, + out: []*models.Object{}, + outError: []int{0}, + }, + { + name: "single ref to multi target", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4, Properties: &pb.BatchObject_Properties{ + SingleTargetRefProps: []*pb.BatchObject_SingleTargetRefProps{ + {PropName: "multiRef", Uuids: []string{UUID3, UUID4}}, + }, + }}}, + out: []*models.Object{}, + outError: []int{0}, + }, + { + name: "slice props", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4, Properties: &pb.BatchObject_Properties{ + NonRefProperties: newStruct(t, map[string]interface{}{"name": "something"}), + BooleanArrayProperties: []*pb.BooleanArrayProperties{ + {PropName: "boolArray1", Values: []bool{true, true}}, + {PropName: "boolArray2", Values: []bool{false, true}}, + }, + IntArrayProperties: []*pb.IntArrayProperties{ + {PropName: "int1", Values: []int64{2, 3, 4}}, {PropName: 
"int2", Values: []int64{7, 8}}, + }, + NumberArrayProperties: []*pb.NumberArrayProperties{ + {PropName: "float1", Values: []float64{1, 2, 3}}, {PropName: "float2", Values: []float64{4, 5}}, + }, + TextArrayProperties: []*pb.TextArrayProperties{ + {PropName: "text1", Values: []string{"first", "second"}}, {PropName: "text2", Values: []string{"third"}}, + }, + EmptyListProps: []string{"text3"}, + }}}, + out: []*models.Object{{Class: collection, ID: UUID4, Properties: map[string]interface{}{ + "name": "something", + "boolArray1": []interface{}{true, true}, + "boolArray2": []interface{}{false, true}, + "int1": []interface{}{int64(2), int64(3), int64(4)}, + "int2": []interface{}{int64(7), int64(8)}, + "float1": []interface{}{1., 2., 3.}, + "float2": []interface{}{4., 5.}, + "text1": []interface{}{"first", "second"}, + "text2": []interface{}{"third"}, + "text3": []interface{}{}, + }}}, + }, + { + name: "object props", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4, Properties: &pb.BatchObject_Properties{ + ObjectProperties: []*pb.ObjectProperties{ + { + PropName: "simpleObj", Value: &pb.ObjectPropertiesValue{ + NonRefProperties: newStruct(t, map[string]interface{}{"name": "something"}), + }, + }, + { + PropName: "nestedObj", Value: &pb.ObjectPropertiesValue{ + ObjectProperties: []*pb.ObjectProperties{{ + PropName: "obj", Value: &pb.ObjectPropertiesValue{ + NonRefProperties: newStruct(t, map[string]interface{}{"name": "something"}), + EmptyListProps: []string{"empty"}, + }, + }}, + }, + }, + }, + }}}, + out: []*models.Object{{Class: collection, ID: UUID4, Properties: map[string]interface{}{ + "simpleObj": map[string]interface{}{"name": "something"}, + "nestedObj": map[string]interface{}{ + "obj": map[string]interface{}{"name": "something", "empty": []interface{}{}}, + }, + }}}, + }, + { + name: "object array props", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4, Properties: &pb.BatchObject_Properties{ + ObjectArrayProperties: 
[]*pb.ObjectArrayProperties{ + { + PropName: "simpleObjs", Values: []*pb.ObjectPropertiesValue{ + { + NonRefProperties: newStruct(t, map[string]interface{}{"name": "something"}), + }, + { + NonRefProperties: newStruct(t, map[string]interface{}{"name": "something else"}), + }, + }, + }, + { + PropName: "nestedObjs", Values: []*pb.ObjectPropertiesValue{ + { + ObjectProperties: []*pb.ObjectProperties{{ + PropName: "obj", Value: &pb.ObjectPropertiesValue{ + NonRefProperties: newStruct(t, map[string]interface{}{"name": "something"}), + }, + }}, + }, + { + ObjectProperties: []*pb.ObjectProperties{{ + PropName: "obj", Value: &pb.ObjectPropertiesValue{ + NonRefProperties: newStruct(t, map[string]interface{}{"name": "something else"}), + }, + }}, + }, + }, + }, + }, + }}}, + out: []*models.Object{{Class: collection, ID: UUID4, Properties: map[string]interface{}{ + "simpleObjs": []interface{}{map[string]interface{}{"name": "something"}, map[string]interface{}{"name": "something else"}}, + "nestedObjs": []interface{}{ + map[string]interface{}{"obj": map[string]interface{}{"name": "something"}}, + map[string]interface{}{"obj": map[string]interface{}{"name": "something else"}}, + }, + }}}, + }, + { + name: "mix of errors and no errors", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4}, {Collection: collection}, {Collection: collection}, {Collection: collection, Uuid: UUID3}}, + out: []*models.Object{{Class: collection, Properties: nilMap, ID: UUID4}, {Class: collection, Properties: nilMap, ID: UUID3}}, + outError: []int{1, 2}, + origIndex: map[int]int{0: 0, 1: 3}, + }, + { + name: "named multi vectors", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4, Vectors: []*pb.Vectors{ + { + Name: "custom", + VectorBytes: byteVector([]float32{0.1, 0.2, 0.3}), + }, + { + Name: "colbert", + VectorBytes: byteVectorMulti([][]float32{ + {0.1, 0.2, 0.3}, + {0.4, 0.5, 0.6}, + }), + Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32, + }, + }}}, + out: []*models.Object{{ + Class: 
collection, ID: UUID4, Properties: nilMap, + Vectors: map[string]models.Vector{ + "custom": []float32{0.1, 0.2, 0.3}, + "colbert": [][]float32{{0.1, 0.2, 0.3}, {0.4, 0.5, 0.6}}, + }, + }}, + }, + { + name: "named multi vectors with 1 token level embedding", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4, Vectors: []*pb.Vectors{ + { + Name: "custom", + VectorBytes: byteVector([]float32{0.1, 0.2, 0.3}), + }, + { + Name: "colbert", + VectorBytes: byteVectorMulti([][]float32{{0.1, 0.2, 0.3}, {0.3, 0.2, 0.1}}), + Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32, + }, + { + Name: "colbert1", + VectorBytes: byteVectorMulti([][]float32{{0.1}}), + Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32, + }, + }}}, + out: []*models.Object{{ + Class: collection, ID: UUID4, Properties: nilMap, + Vectors: map[string]models.Vector{ + "custom": []float32{0.1, 0.2, 0.3}, + "colbert": [][]float32{{0.1, 0.2, 0.3}, {0.3, 0.2, 0.1}}, + "colbert1": [][]float32{{0.1}}, + }, + }}, + }, + { + name: "named regular vectors", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4, Vectors: []*pb.Vectors{ + { + Name: "regular_without_specified_type", + VectorBytes: byteVector([]float32{0.1, 0.2, 0.3}), + }, + { + Name: "regular_with_type", + Index: 0, + VectorBytes: byteVector([]float32{0.11, 0.22, 0.33}), + Type: pb.Vectors_VECTOR_TYPE_SINGLE_FP32, + }, + { + Name: "regular_with_explicit_unspecified_type", + Index: 0, + VectorBytes: byteVector([]float32{0.111, 0.222, 0.333}), + Type: pb.Vectors_VECTOR_TYPE_UNSPECIFIED, + }, + }}}, + out: []*models.Object{{ + Class: collection, ID: UUID4, Properties: nilMap, + Vectors: map[string]models.Vector{ + "regular_without_specified_type": []float32{0.1, 0.2, 0.3}, + "regular_with_type": []float32{0.11, 0.22, 0.33}, + "regular_with_explicit_unspecified_type": []float32{0.111, 0.222, 0.333}, + }, + }}, + }, + { + name: "named mix of regular and colbert vectors with all possible types", + req: []*pb.BatchObject{{Collection: collection, Uuid: UUID4, 
Vectors: []*pb.Vectors{ + { + Name: "regular_without_specified_type", + VectorBytes: byteVector([]float32{0.1, 0.2, 0.3}), + }, + { + Name: "regular_with_type", + VectorBytes: byteVector([]float32{0.11, 0.22, 0.33}), + Type: pb.Vectors_VECTOR_TYPE_SINGLE_FP32, + }, + { + Name: "regular_with_explicit_unspecified_type", + VectorBytes: byteVector([]float32{0.111, 0.222, 0.333}), + Type: pb.Vectors_VECTOR_TYPE_UNSPECIFIED, + }, + { + Name: "colbert_fp32_1_token_level_embedding", + VectorBytes: byteVectorMulti([][]float32{{0.111}}), + Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32, + }, + { + Name: "colbert_fp32_normal_case", + VectorBytes: byteVectorMulti([][]float32{{0.1, 0.1, 0.1}, {0.2, 0.2, 0.2}}), + Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32, + }, + { + Name: "regular_with_only_one_value", + VectorBytes: byteVector([]float32{0.1}), + }, + }}}, + out: []*models.Object{{ + Class: collection, ID: UUID4, Properties: nilMap, + Vectors: map[string]models.Vector{ + "regular_without_specified_type": []float32{0.1, 0.2, 0.3}, + "regular_with_type": []float32{0.11, 0.22, 0.33}, + "regular_with_explicit_unspecified_type": []float32{0.111, 0.222, 0.333}, + "colbert_fp32_1_token_level_embedding": [][]float32{{0.111}}, + "colbert_fp32_normal_case": [][]float32{{0.1, 0.1, 0.1}, {0.2, 0.2, 0.2}}, + "regular_with_only_one_value": []float32{0.1}, + }, + }}, + }, + } + getClass := func(class, shard string) (*models.Class, error) { + return scheme.GetClass(class), nil + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + out, origIndex, batchErrors := batch.BatchObjectsFromProto(&pb.BatchObjectsRequest{Objects: tt.req}, getClass) + if len(tt.outError) > 0 { + require.NotNil(t, batchErrors) + if len(tt.out) > 0 { + require.Equal(t, tt.out, out) + require.Equal(t, tt.origIndex, origIndex) + } + } else { + require.Len(t, batchErrors, 0) + require.Equal(t, tt.out, out) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/queues.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/queues.go new file mode 100644 index 0000000000000000000000000000000000000000..516dafea25523a6d37bd17cd2123df8eba85d435 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/queues.go @@ -0,0 +1,457 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package batch + +import ( + "context" + "fmt" + "math" + "sync" + "time" + + "github.com/sirupsen/logrus" + enterrors "github.com/weaviate/weaviate/entities/errors" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +type QueuesHandler struct { + shuttingDownCtx context.Context + logger logrus.FieldLogger + writeQueues *WriteQueues + readQueues *ReadQueues + sendWg *sync.WaitGroup + streamWg *sync.WaitGroup + shutdownFinished chan struct{} +} + +const POLLING_INTERVAL = 100 * time.Millisecond + +func NewQueuesHandler(shuttingDownCtx context.Context, sendWg, streamWg *sync.WaitGroup, shutdownFinished chan struct{}, writeQueues *WriteQueues, readQueues *ReadQueues, logger logrus.FieldLogger) *QueuesHandler { + // Poll until the batch logic starts shutting down + // Then wait for all BatchSend requests to finish and close all the write queues + // Scheduler will then drain the write queues expecting the channels to be closed + + enterrors.GoWrapper(func() { + ticker := time.NewTicker(POLLING_INTERVAL) + defer ticker.Stop() + for { + select { + case <-shuttingDownCtx.Done(): + logger.Info("shutting down batch queues handler, waiting for in-flight requests to finish") + sendWg.Wait() + logger.Info("all in-flight requests finished, closing write queues") + writeQueues.Close() + logger.Info("write queues closed, exiting handlers shutdown listener") + return + case 
<-ticker.C: + } + } + }, logger) + return &QueuesHandler{ + shuttingDownCtx: shuttingDownCtx, + logger: logger, + writeQueues: writeQueues, + readQueues: readQueues, + sendWg: sendWg, + streamWg: streamWg, + shutdownFinished: shutdownFinished, + } +} + +func (h *QueuesHandler) wait(ctx context.Context) error { + ticker := time.NewTicker(POLLING_INTERVAL) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil + case <-ticker.C: + } + } +} + +func (h *QueuesHandler) Stream(ctx context.Context, streamId string, stream pb.Weaviate_BatchStreamServer) error { + h.streamWg.Add(1) + defer h.streamWg.Done() + if err := stream.Send(newBatchStartMessage(streamId)); err != nil { + return err + } + // shuttingDown acts as a soft cancel here so we can send the shutting down message to the client. + // Once the workers are drained then h.shutdownFinished will be closed and we will shutdown completely + shuttingDown := h.shuttingDownCtx.Done() + for { + if readQueue, ok := h.readQueues.Get(streamId); ok { + select { + case <-ctx.Done(): + if innerErr := stream.Send(newBatchStopMessage(streamId)); innerErr != nil { + return innerErr + } + return ctx.Err() + case <-shuttingDown: + if innerErr := stream.Send(newBatchShuttingDownMessage(streamId)); innerErr != nil { + return innerErr + } + shuttingDown = nil + case <-h.shutdownFinished: + if innerErr := stream.Send(newBatchShutdownMessage(streamId)); innerErr != nil { + return innerErr + } + return h.wait(ctx) + case readObj, ok := <-readQueue: + if !ok { + if innerErr := stream.Send(newBatchStopMessage(streamId)); innerErr != nil { + return innerErr + } + return h.wait(ctx) + } + for _, err := range readObj.Errors { + if innerErr := stream.Send(newBatchErrorMessage(streamId, err)); innerErr != nil { + return innerErr + } + } + } + } else { + // This should never happen, but if it does, we log it + h.logger.WithField("streamId", streamId).Error("read queue not found") + return fmt.Errorf("read queue for stream %s 
not found", streamId) + } + } +} + +// Send adds a batch send request to the write queue and returns the number of objects in the request. +func (h *QueuesHandler) Send(ctx context.Context, request *pb.BatchSendRequest) (*pb.BatchSendReply, error) { + h.sendWg.Add(1) + defer h.sendWg.Done() + if h.shuttingDownCtx.Err() != nil { + return nil, fmt.Errorf("grpc shutdown in progress, no more requests are permitted on this node") + } + streamId := request.GetStreamId() + queue, ok := h.writeQueues.GetQueue(streamId) + if !ok { + h.logger.WithField("streamId", streamId).Error("write queue not found") + return nil, fmt.Errorf("write queue for stream %s not found", streamId) + } + if request.GetObjects() != nil { + for _, obj := range request.GetObjects().GetValues() { + queue <- &writeObject{Object: obj} + } + } else if request.GetReferences() != nil { + for _, ref := range request.GetReferences().GetValues() { + queue <- &writeObject{Reference: ref} + } + } else if request.GetStop() != nil { + queue <- &writeObject{Stop: true} + } else { + return nil, fmt.Errorf("invalid batch send request: neither objects, references nor stop signal provided") + } + batchSize, backoff := h.writeQueues.NextBatch(streamId, len(request.GetObjects().GetValues())+len(request.GetReferences().GetValues())) + return &pb.BatchSendReply{ + NextBatchSize: batchSize, + BackoffSeconds: backoff, + }, nil +} + +// Setup initializes a read queue for the given stream ID and adds it to the read queues map. +func (h *QueuesHandler) Setup(streamId string, req *pb.BatchStreamRequest) { + h.readQueues.Make(streamId) + h.writeQueues.Make(streamId, req.ConsistencyLevel, req.GetObjectIndex(), req.GetReferenceIndex()) +} + +// Teardown closes the read queue for the given stream ID and removes it from the read queues map. 
+func (h *QueuesHandler) Teardown(streamId string) { + if _, ok := h.readQueues.Get(streamId); ok { + h.readQueues.Delete(streamId) + } else { + h.logger.WithField("streamId", streamId).Warn("teardown called for non-existing stream") + } + if _, ok := h.writeQueues.Get(streamId); ok { + h.writeQueues.Delete(streamId) + } else { + h.logger.WithField("streamId", streamId).Warn("teardown called for non-existing write queue") + } + h.logger.WithField("streamId", streamId).Info("teardown completed") +} + +func newBatchStartMessage(streamId string) *pb.BatchStreamMessage { + return &pb.BatchStreamMessage{ + StreamId: streamId, + Message: &pb.BatchStreamMessage_Start_{ + Start: &pb.BatchStreamMessage_Start{}, + }, + } +} + +func newBatchErrorMessage(streamId string, err *pb.BatchStreamMessage_Error) *pb.BatchStreamMessage { + return &pb.BatchStreamMessage{ + StreamId: streamId, + Message: &pb.BatchStreamMessage_Error_{ + Error: err, + }, + } +} + +func newBatchStopMessage(streamId string) *pb.BatchStreamMessage { + return &pb.BatchStreamMessage{ + StreamId: streamId, + Message: &pb.BatchStreamMessage_Stop_{ + Stop: &pb.BatchStreamMessage_Stop{}, + }, + } +} + +func newBatchShutdownMessage(streamId string) *pb.BatchStreamMessage { + return &pb.BatchStreamMessage{ + StreamId: streamId, + Message: &pb.BatchStreamMessage_Shutdown_{ + Shutdown: &pb.BatchStreamMessage_Shutdown{}, + }, + } +} + +func newBatchShuttingDownMessage(streamId string) *pb.BatchStreamMessage { + return &pb.BatchStreamMessage{ + StreamId: streamId, + Message: &pb.BatchStreamMessage_ShuttingDown_{ + ShuttingDown: &pb.BatchStreamMessage_ShuttingDown{}, + }, + } +} + +type readObject struct { + Errors []*pb.BatchStreamMessage_Error +} + +type writeObject struct { + Object *pb.BatchObject + Reference *pb.BatchReference + Stop bool +} + +type ( + internalQueue chan *ProcessRequest + writeQueue chan *writeObject + readQueue chan *readObject +) + +// NewBatchWriteQueue creates a buffered channel to store 
objects for batch writing. +// +// The buffer size can be adjusted based on expected load and performance requirements +// to optimize throughput and resource usage. But is required so that there is a small buffer +// that can be quickly flushed in the event of a shutdown. +func NewBatchInternalQueue() internalQueue { + return make(internalQueue, 10) +} + +func NewBatchWriteQueue(buffer int) writeQueue { + return make(writeQueue, buffer) // Adjust buffer size as needed +} + +func NewBatchWriteQueues() *WriteQueues { + return &WriteQueues{ + queues: make(map[string]*WriteQueue), + } +} + +func NewBatchReadQueues() *ReadQueues { + return &ReadQueues{ + queues: make(map[string]readQueue), + } +} + +func NewBatchReadQueue() readQueue { + return make(readQueue) +} + +func NewStopWriteObject() *writeObject { + return &writeObject{ + Object: nil, + Stop: true, + } +} + +func NewErrorsObject(errs []*pb.BatchStreamMessage_Error) *readObject { + return &readObject{ + Errors: errs, + } +} + +func NewWriteObject(obj *pb.BatchObject) *writeObject { + return &writeObject{ + Object: obj, + Stop: false, + } +} + +type ReadQueues struct { + lock sync.RWMutex + queues map[string]readQueue +} + +// Get retrieves the read queue for the given stream ID. +func (r *ReadQueues) Get(streamId string) (readQueue, bool) { + r.lock.RLock() + defer r.lock.RUnlock() + queue, ok := r.queues[streamId] + return queue, ok +} + +// Delete removes the read queue for the given stream ID. +func (r *ReadQueues) Delete(streamId string) { + r.lock.Lock() + defer r.lock.Unlock() + delete(r.queues, streamId) +} + +func (r *ReadQueues) Close(streamId string) { + r.lock.Lock() + defer r.lock.Unlock() + if queue, ok := r.queues[streamId]; ok { + close(queue) + } +} + +// Make initializes a read queue for the given stream ID if it does not already exist. 
+func (r *ReadQueues) Make(streamId string) { + r.lock.Lock() + defer r.lock.Unlock() + if _, ok := r.queues[streamId]; !ok { + r.queues[streamId] = make(readQueue) + } +} + +type WriteQueue struct { + queue writeQueue + consistencyLevel *pb.ConsistencyLevel + + // Indexes for the next object and reference to be sent + objIndex int32 + refIndex int32 + + lock sync.RWMutex // Used when concurrently re-calculating NextBatchSize in Send method + emaQueueLen float32 // Exponential moving average of the queue length + buffer int // Buffer size for the write queue + alpha float32 // Smoothing factor for EMA, typically between 0 and 1 +} + +// Cubic backoff function: backoff(r) = b * max(0, (r - 0.6) / 0.4) ^ 3, with b = 10s +// E.g. +// - usageRatio = 0.6 -> 0s +// - usageRatio = 0.8 -> 1.3s +// - usageRatio = 0.9 -> 4.22s +// - usageRatio = 1.0 -> 10s +func (w *WriteQueue) thresholdCubicBackoff(usageRatio float32) float32 { + maximumBackoffSeconds := float32(10.0) // Adjust this value as needed, defines maximum backoff in seconds + return maximumBackoffSeconds * float32(math.Pow(float64(max(0, (usageRatio-0.6)/0.4)), 3)) +} + +func (w *WriteQueue) NextBatch(batchSize int) (int32, float32) { + w.lock.Lock() + defer w.lock.Unlock() + + maxSize := w.buffer * 2 / 5 + nowLen := len(w.queue) + + if w.emaQueueLen == 0 { + w.emaQueueLen = float32(nowLen) + } else { + w.emaQueueLen = w.alpha*float32(nowLen) + (1-w.alpha)*w.emaQueueLen + } + usageRatio := w.emaQueueLen / float32(w.buffer) + + // threshold linear batch size scaling + if usageRatio < 0.6 { + // If usage is lower than 60% threshold, increase by an order of magnitude and cap at 40% of the buffer size + return int32(min(maxSize, batchSize*10)), w.thresholdCubicBackoff(usageRatio) + } + scaledSize := int32(float64(maxSize) * (1 - float64(usageRatio))) + if scaledSize < 1 { + scaledSize = 1 // Ensure at least one object is always sent in worst-case scenario + } + + return scaledSize, w.thresholdCubicBackoff(usageRatio) 
+} + +type WriteQueues struct { + lock sync.RWMutex + queues map[string]*WriteQueue + uuids []string +} + +func (w *WriteQueues) Uuids() []string { + w.lock.RLock() + defer w.lock.RUnlock() + return w.uuids +} + +func (w *WriteQueues) NextBatch(streamId string, batchSize int) (int32, float32) { + wq, ok := w.Get(streamId) + if !ok { + return 0, 0 + } + return wq.NextBatch(batchSize) +} + +func (w *WriteQueues) Get(streamId string) (*WriteQueue, bool) { + w.lock.RLock() + defer w.lock.RUnlock() + queue, ok := w.queues[streamId] + if !ok { + return nil, false + } + return queue, true +} + +func (w *WriteQueues) GetQueue(streamId string) (writeQueue, bool) { + w.lock.RLock() + defer w.lock.RUnlock() + queue, ok := w.queues[streamId] + if !ok { + return nil, false + } + return queue.queue, true +} + +func (w *WriteQueues) Delete(streamId string) { + w.lock.Lock() + defer w.lock.Unlock() + delete(w.queues, streamId) + // Remove from uuids slice + for i, uuid := range w.uuids { + if uuid == streamId { + w.uuids = append(w.uuids[:i], w.uuids[i+1:]...) 
+ break + } + } +} + +func (w *WriteQueues) Close() { + w.lock.Lock() + defer w.lock.Unlock() + for _, queue := range w.queues { + close(queue.queue) + } +} + +func (w *WriteQueues) Make(streamId string, consistencyLevel *pb.ConsistencyLevel, objIndex, refIndex int32) { + w.lock.Lock() + defer w.lock.Unlock() + buffer := 10000 // Default buffer size + if _, ok := w.queues[streamId]; !ok { + w.queues[streamId] = &WriteQueue{ + queue: NewBatchWriteQueue(buffer), + consistencyLevel: consistencyLevel, + buffer: buffer, + alpha: 0.2, // Smoothing factor for EMA of queue length + } + w.uuids = append(w.uuids, streamId) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/queues_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/queues_test.go new file mode 100644 index 0000000000000000000000000000000000000000..91058f7eca26f2d8bfb371e2286e86006f17b9d5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/queues_test.go @@ -0,0 +1,320 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package batch_test + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/handlers/grpc/v1/batch" + "github.com/weaviate/weaviate/adapters/handlers/grpc/v1/batch/mocks" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +func TestHandler(t *testing.T) { + ctx := context.Background() + logger := logrus.New() + + t.Run("Send", func(t *testing.T) { + t.Run("send objects using the scheduler", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + // Arrange + req := &pb.BatchSendRequest{ + StreamId: "test-stream", + Message: &pb.BatchSendRequest_Objects_{ + Objects: &pb.BatchSendRequest_Objects{ + Values: []*pb.BatchObject{{Collection: "TestClass"}}, + }, + }, + } + + shutdownCtx, shutdownCancel := context.WithCancel(context.Background()) + defer shutdownCancel() + + writeQueues := batch.NewBatchWriteQueues() + readQueues := batch.NewBatchReadQueues() + internalQueue := batch.NewBatchInternalQueue() + var sendWg sync.WaitGroup + var streamWg sync.WaitGroup + handler := batch.NewQueuesHandler(shutdownCtx, &sendWg, &streamWg, nil, writeQueues, readQueues, logger) + var sWg sync.WaitGroup + batch.StartScheduler(shutdownCtx, &sWg, writeQueues, internalQueue, logger) + + writeQueues.Make(req.StreamId, nil, 0, 0) + res, err := handler.Send(ctx, req) + require.NoError(t, err, "Expected no error when sending objects") + require.Equal(t, int32(10), res.NextBatchSize, "Expected to be told to scale up by an order of magnitude") + + // Verify that the internal queue has the object + obj := <-internalQueue + require.NotNil(t, obj, "Expected object to be sent to internal queue") + + // Shutdown + shutdownCancel() + + _, err = handler.Send(ctx, req) + require.Equal(t, "grpc shutdown in progress, no more requests are permitted on this node", err.Error(), "Expected error 
when sending after shutdown") + }) + + t.Run("dynamic batch size calulation", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + writeQueues := batch.NewBatchWriteQueues() + readQueues := batch.NewBatchReadQueues() + var sendWg sync.WaitGroup + var streamWg sync.WaitGroup + handler := batch.NewQueuesHandler(ctx, &sendWg, &streamWg, nil, writeQueues, readQueues, logger) + + writeQueues.Make(StreamId, nil, 0, 0) + // Send 8000 objects + req := &pb.BatchSendRequest{ + StreamId: StreamId, + Message: &pb.BatchSendRequest_Objects_{ + Objects: &pb.BatchSendRequest_Objects{}, + }, + } + for i := 0; i < 8000; i++ { + req.GetObjects().Values = append(req.GetObjects().Values, &pb.BatchObject{Collection: "TestClass"}) + } + res, err := handler.Send(ctx, req) + require.NoError(t, err, "Expected no error when sending 8000 objects") + require.Equal(t, int32(799), res.NextBatchSize, "Expected to be told to send 799 objects next") + require.Equal(t, float32(1.2499998), res.BackoffSeconds, "Expected to be told to backoff by 1.2499998 seconds") + + // Saturate the buffer + req = &pb.BatchSendRequest{ + StreamId: StreamId, + Message: &pb.BatchSendRequest_Objects_{ + Objects: &pb.BatchSendRequest_Objects{}, + }, + } + for i := 0; i < 2000; i++ { + req.GetObjects().Values = append(req.GetObjects().Values, &pb.BatchObject{Collection: "TestClass"}) + } + res, err = handler.Send(ctx, req) + require.NoError(t, err, "Expected no error when sending 2000 objects") + require.Equal(t, int32(640), res.NextBatchSize, "Expected to be told to send 640 objects once buffer is saturated") + require.Equal(t, float32(2.1599982), res.BackoffSeconds, "Expected to be told to backoff by 2.1599982 seconds") + }) + }) + + t.Run("Stream", func(t *testing.T) { + t.Run("start and stop due to cancellation", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + + stream := 
mocks.NewMockWeaviate_BatchStreamServer[pb.BatchStreamMessage](t) + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_Start_{ + Start: &pb.BatchStreamMessage_Start{}, + }, + }).Return(nil).Once() + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_Stop_{ + Stop: &pb.BatchStreamMessage_Stop{}, + }, + }).Return(nil).Once() + + writeQueues := batch.NewBatchWriteQueues() + readQueues := batch.NewBatchReadQueues() + var sendWg sync.WaitGroup + var streamWg sync.WaitGroup + handler := batch.NewQueuesHandler(context.Background(), &sendWg, &streamWg, nil, writeQueues, readQueues, logger) + + writeQueues.Make(StreamId, nil, 0, 0) + readQueues.Make(StreamId) + err := handler.Stream(ctx, StreamId, stream) + require.Equal(t, ctx.Err(), err, "Expected context cancelled error") + }) + + t.Run("start and stop due to sentinel", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + + stream := mocks.NewMockWeaviate_BatchStreamServer[pb.BatchStreamMessage](t) + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_Start_{ + Start: &pb.BatchStreamMessage_Start{}, + }, + }).Return(nil).Once() + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_Stop_{ + Stop: &pb.BatchStreamMessage_Stop{}, + }, + }).Return(nil).Once() + + writeQueues := batch.NewBatchWriteQueues() + readQueues := batch.NewBatchReadQueues() + var sendWg sync.WaitGroup + var streamWg sync.WaitGroup + handler := batch.NewQueuesHandler(context.Background(), &sendWg, &streamWg, nil, writeQueues, readQueues, logger) + + writeQueues.Make(StreamId, nil, 0, 0) + readQueues.Make(StreamId) + ch, ok := readQueues.Get(StreamId) + require.True(t, ok, "Expected read queue to exist") + go func() { + close(ch) + }() + + err := handler.Stream(ctx, StreamId, stream) + require.NoError(t, err, 
"Expected no error when streaming") + }) + + t.Run("start and stop due to shutdown", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + + shutdownHandlersCtx, shutdownHandlersCancel := context.WithCancel(context.Background()) + shutdownFinished := make(chan struct{}) + stream := mocks.NewMockWeaviate_BatchStreamServer[pb.BatchStreamMessage](t) + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_Start_{ + Start: &pb.BatchStreamMessage_Start{}, + }, + }).Return(nil).Once() + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_ShuttingDown_{ + ShuttingDown: &pb.BatchStreamMessage_ShuttingDown{}, + }, + }).RunAndReturn(func(*pb.BatchStreamMessage) error { + // Ensure handler cancel call comes after this message has been emitted to avoid races + close(shutdownFinished) // Trigger shutdown, which emits the shutdown message + return nil + }).Once() + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_Shutdown_{ + Shutdown: &pb.BatchStreamMessage_Shutdown{}, + }, + }).Return(nil).Once() + + writeQueues := batch.NewBatchWriteQueues() + readQueues := batch.NewBatchReadQueues() + var sendWg sync.WaitGroup + var streamWg sync.WaitGroup + handler := batch.NewQueuesHandler(shutdownHandlersCtx, &sendWg, &streamWg, shutdownFinished, writeQueues, readQueues, logger) + + writeQueues.Make(StreamId, nil, 0, 0) + readQueues.Make(StreamId) + + shutdownHandlersCancel() // Trigger shutdown of handlers, which emits the shutting down message + + err := handler.Stream(ctx, StreamId, stream) + require.NoError(t, err, "Expected no error when streaming") + }) + + t.Run("start process error and stop due to cancellation", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + + stream := mocks.NewMockWeaviate_BatchStreamServer[pb.BatchStreamMessage](t) + 
stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_Start_{ + Start: &pb.BatchStreamMessage_Start{}, + }, + }).Return(nil).Once() + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_Error_{ + Error: &pb.BatchStreamMessage_Error{ + Error: "processing error", + }, + }, + }).Return(nil).Once() + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_Stop_{ + Stop: &pb.BatchStreamMessage_Stop{}, + }, + }).Return(nil).Once() + + writeQueues := batch.NewBatchWriteQueues() + readQueues := batch.NewBatchReadQueues() + var sendWg sync.WaitGroup + var streamWg sync.WaitGroup + handler := batch.NewQueuesHandler(context.Background(), &sendWg, &streamWg, nil, writeQueues, readQueues, logger) + + writeQueues.Make(StreamId, nil, 0, 0) + readQueues.Make(StreamId) + ch, ok := readQueues.Get(StreamId) + require.True(t, ok, "Expected read queue to exist") + go func() { + ch <- batch.NewErrorsObject([]*pb.BatchStreamMessage_Error{{Error: "processing error"}}) + }() + + readQueues.Make(StreamId) + err := handler.Stream(ctx, StreamId, stream) + require.Equal(t, ctx.Err(), err, "Expected context cancelled error") + }) + + t.Run("start process error and stop due to sentinel", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + + stream := mocks.NewMockWeaviate_BatchStreamServer[pb.BatchStreamMessage](t) + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_Start_{ + Start: &pb.BatchStreamMessage_Start{}, + }, + }).Return(nil).Once() + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_Error_{ + Error: &pb.BatchStreamMessage_Error{ + Error: "processing error", + }, + }, + }).Return(nil).Once() + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_Stop_{ + 
Stop: &pb.BatchStreamMessage_Stop{}, + }, + }).Return(nil).Once() + + writeQueues := batch.NewBatchWriteQueues() + readQueues := batch.NewBatchReadQueues() + var sendWg sync.WaitGroup + var streamWg sync.WaitGroup + handler := batch.NewQueuesHandler(ctx, &sendWg, &streamWg, nil, writeQueues, readQueues, logger) + + writeQueues.Make(StreamId, nil, 0, 0) + readQueues.Make(StreamId) + ch, ok := readQueues.Get(StreamId) + require.True(t, ok, "Expected read queue to exist") + go func() { + ch <- batch.NewErrorsObject([]*pb.BatchStreamMessage_Error{{Error: "processing error"}}) + close(ch) + }() + + readQueues.Make(StreamId) + err := handler.Stream(ctx, StreamId, stream) + require.NoError(t, err, "Expected error when processing") + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/scheduler.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/scheduler.go new file mode 100644 index 0000000000000000000000000000000000000000..c4e8b30a62500531d57986131cae101cd8058d10 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/scheduler.go @@ -0,0 +1,154 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package batch + +import ( + "context" + "sync" + "time" + + "github.com/sirupsen/logrus" + enterrors "github.com/weaviate/weaviate/entities/errors" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +type Scheduler struct { + logger logrus.FieldLogger + writeQueues *WriteQueues + internalQueue internalQueue +} + +func NewScheduler(writeQueues *WriteQueues, internalQueue internalQueue, logger logrus.FieldLogger) *Scheduler { + return &Scheduler{ + logger: logger, + writeQueues: writeQueues, + internalQueue: internalQueue, + } +} + +func (s *Scheduler) Loop(ctx context.Context) { + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + s.logger.Info("shutting down scheduler loop") + s.loop(s.drain) + // Close the internal queue so that the workers can exit once they've drained the queue + close(s.internalQueue) + return + case <-ticker.C: + s.loop(s.schedule) + } + } +} + +func (s *Scheduler) loop(op func(streamId string, wq *WriteQueue)) { + for _, uuid := range s.writeQueues.Uuids() { + wq, ok := s.writeQueues.Get(uuid) + if !ok { + continue + } + op(uuid, wq) + } +} + +func (s *Scheduler) drain(streamId string, wq *WriteQueue) { + objs := make([]*pb.BatchObject, 0, 1000) + refs := make([]*pb.BatchReference, 0, 1000) + for obj := range wq.queue { + if obj.Object != nil { + objs = append(objs, obj.Object) + } + if obj.Reference != nil { + refs = append(refs, obj.Reference) + } + if len(objs) >= 1000 || len(refs) >= 1000 || obj.Stop { + req := newProcessRequest(objs, refs, streamId, obj.Stop, wq) + s.internalQueue <- req + // Reset the queues + objs = make([]*pb.BatchObject, 0, 1000) + refs = make([]*pb.BatchReference, 0, 1000) + } + } + if len(objs) > 0 || len(refs) > 0 { + req := newProcessRequest(objs, refs, streamId, false, wq) + s.internalQueue <- req + } +} + +func (s *Scheduler) schedule(streamId string, wq *WriteQueue) { + objs, refs, stop := 
s.pull(wq.queue, 1000) + req := newProcessRequest(objs, refs, streamId, stop, wq) + if (req.Objects != nil && len(req.Objects.Values) > 0) || (req.References != nil && len(req.References.Values) > 0) || req.Stop { + s.internalQueue <- req + } +} + +func (s *Scheduler) pull(queue writeQueue, max int) ([]*pb.BatchObject, []*pb.BatchReference, bool) { + objs := make([]*pb.BatchObject, 0, max) + refs := make([]*pb.BatchReference, 0, max) + for i := 0; i < max && len(queue) > 0; i++ { + select { + case obj, ok := <-queue: + if !ok { + // channel is closed + return objs, refs, false + } + if obj.Object != nil { + objs = append(objs, obj.Object) + } + if obj.Reference != nil { + refs = append(refs, obj.Reference) + } + if obj.Stop { + return objs, refs, true + } + default: + return objs, refs, false + } + } + return objs, refs, false +} + +func newProcessRequest(objs []*pb.BatchObject, refs []*pb.BatchReference, streamId string, stop bool, wq *WriteQueue) *ProcessRequest { + req := &ProcessRequest{ + StreamId: streamId, + Stop: stop, + } + if len(objs) > 0 { + req.Objects = &SendObjects{ + Values: objs, + ConsistencyLevel: wq.consistencyLevel, + Index: wq.objIndex, + } + wq.objIndex += int32(len(objs)) + } + if len(refs) > 0 { + req.References = &SendReferences{ + Values: refs, + ConsistencyLevel: wq.consistencyLevel, + Index: wq.refIndex, + } + wq.refIndex += int32(len(refs)) + } + return req +} + +func StartScheduler(ctx context.Context, wg *sync.WaitGroup, writeQueues *WriteQueues, internalQueue internalQueue, logger logrus.FieldLogger) { + scheduler := NewScheduler(writeQueues, internalQueue, logger) + wg.Add(1) + enterrors.GoWrapper(func() { + defer wg.Done() + scheduler.Loop(ctx) + }, logger) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/scheduler_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/scheduler_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..ce8ba6d1e7b30e5faccc1fa1a1fd169e20c5c86c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/scheduler_test.go @@ -0,0 +1,67 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package batch_test + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/handlers/grpc/v1/batch" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +func TestScheduler(t *testing.T) { + ctx := context.Background() + + logger := logrus.New() + + t.Run("dynamic", func(t *testing.T) { + shutdownCtx, shutdownCancel := context.WithCancel(ctx) + defer shutdownCancel() + + writeQueues := batch.NewBatchWriteQueues() + internalQueue := batch.NewBatchInternalQueue() + + writeQueues.Make("test-stream", nil, 0, 0) + var wg sync.WaitGroup + batch.StartScheduler(shutdownCtx, &wg, writeQueues, internalQueue, logger) + + queue, ok := writeQueues.GetQueue("test-stream") + require.True(t, ok, "Expected write queue to exist") + + obj := &pb.BatchObject{} + queue <- batch.NewWriteObject(obj) + + require.Eventually(t, func() bool { + select { + case receivedObj := <-internalQueue: + return receivedObj.Objects.Values[0] == obj + default: + return false + } + }, 1*time.Second, 10*time.Millisecond, "Expected object to be sent to internal queue") + + shutdownCancel() // Trigger shutdown + close(queue) // Close the write queue as part of shutdown + wg.Wait() + + require.Empty(t, internalQueue, "Expected internal queue to be empty after shutdown") + ch, ok := writeQueues.GetQueue("test-stream") + require.True(t, ok, "Expected write queue to still exist after shutdown") + 
require.Empty(t, ch, "Expected write queue to be empty after shutdown") + require.Equal(t, context.Canceled, shutdownCtx.Err(), "Expected context to be canceled") + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/shutdown.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/shutdown.go new file mode 100644 index 0000000000000000000000000000000000000000..504b5c9cb219e18a6edc995960982ee931dbc7ba --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/shutdown.go @@ -0,0 +1,106 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package batch + +import ( + "context" + "sync" + + "github.com/sirupsen/logrus" +) + +type Shutdown struct { + HandlersCtx context.Context + HandlersCancel context.CancelFunc + SendWg *sync.WaitGroup + StreamWg *sync.WaitGroup + SchedulerCtx context.Context + SchedulerCancel context.CancelFunc + SchedulerWg *sync.WaitGroup + WorkersCtx context.Context + WorkersCancel context.CancelFunc + WorkersWg *sync.WaitGroup + ShutdownFinished chan struct{} +} + +func NewShutdown(ctx context.Context) *Shutdown { + var sendWg sync.WaitGroup + var streamWg sync.WaitGroup + var schedulerWg sync.WaitGroup + var workersWg sync.WaitGroup + + hCtx, hCancel := context.WithCancel(ctx) + sCtx, sCancel := context.WithCancel(ctx) + wCtx, wCancel := context.WithCancel(ctx) + + shutdownFinished := make(chan struct{}) + return &Shutdown{ + HandlersCtx: hCtx, + HandlersCancel: hCancel, + SendWg: &sendWg, + StreamWg: &streamWg, + SchedulerCtx: sCtx, + SchedulerCancel: sCancel, + SchedulerWg: &schedulerWg, + WorkersCtx: wCtx, + WorkersCancel: wCancel, + WorkersWg: &workersWg, + ShutdownFinished: shutdownFinished, + } +} + +// Drain handles the 
graceful shutdown of all batch processing components. +// +// The order of operations needs to be as follows to ensure that there are no missed objects/references in any of the +// write queues nor any missed errors in the read queues: +// +// 1. Stop accepting new requests in the handlers +// - This prevents new requests from being added to the system while we are shutting down +// +// 2. Wait for all in-flight Send requests to finish +// - This ensures that the write queues are no longer being written to +// +// 3. Stop the scheduler loop and drain the write queues +// - This ensures that all currently waiting write objects/references are added to the internal queues +// +// 4. Stop the worker loops and drain the internal queue +// - This ensures that all currently waiting batch requests in the internal queue are processed +// +// 5. Signal shutdown complete and wait for all streams to communicate this to clients +// - This ensures that all clients have acknowledged shutdown so that they can successfully reconnect to another node +// +// The gRPC shutdown is then considered complete as every queue has been drained successfully so the server +// can move onto switching off the HTTP handlers and shutting itself down completely. 
+func (s *Shutdown) Drain(logger logrus.FieldLogger) { + // stop handlers first + s.HandlersCancel() + logger.Info("shutting down grpc batch handlers") + // wait for all send requests to finish + logger.Info("draining in-flight BatchSend methods") + s.SendWg.Wait() + // stop the scheduler + s.SchedulerCancel() + logger.Info("shutting down grpc batch scheduler") + // wait for all objs in write queues to be added to internal queue + s.SchedulerWg.Wait() + // stop the workers now + s.WorkersCancel() + logger.Info("shutting down grpc batch workers") + // wait for all the objects to be processed from the internal queue + s.WorkersWg.Wait() + logger.Info("finished draining the internal queues") + // signal that shutdown is complete + close(s.ShutdownFinished) + logger.Info("waiting for all streams to exit") + // wait for all streams to exit, i.e. be hungup by their clients + s.StreamWg.Wait() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/shutdown_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/shutdown_test.go new file mode 100644 index 0000000000000000000000000000000000000000..74ccdcf32e95f424d98fa9fcd010fe38eb870d11 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/shutdown_test.go @@ -0,0 +1,88 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package batch_test + +import ( + "context" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/handlers/grpc/v1/batch" + "github.com/weaviate/weaviate/adapters/handlers/grpc/v1/batch/mocks" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +func TestShutdownLogic(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + logger := logrus.New() + + mockBatcher := mocks.NewMockBatcher(t) + + readQueues := batch.NewBatchReadQueues() + readQueues.Make(StreamId) + writeQueues := batch.NewBatchWriteQueues() + writeQueues.Make(StreamId, nil, 0, 0) + wq, ok := writeQueues.GetQueue(StreamId) + require.Equal(t, true, ok, "write queue should exist") + internalQueue := batch.NewBatchInternalQueue() + + howManyObjs := 5000 + // 5000 objs will be sent five times in batches of 1000 + mockBatcher.EXPECT().BatchObjects(mock.Anything, mock.Anything).RunAndReturn(func(context.Context, *pb.BatchObjectsRequest) (*pb.BatchObjectsReply, error) { + time.Sleep(1 * time.Second) + return &pb.BatchObjectsReply{ + Took: float32(1), + Errors: nil, + }, nil + }).Times(5) + + for i := 0; i < howManyObjs; i++ { + wq <- batch.NewWriteObject(&pb.BatchObject{}) + } + + stream := mocks.NewMockWeaviate_BatchStreamServer[pb.BatchStreamMessage](t) + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_Start_{ + Start: &pb.BatchStreamMessage_Start{}, + }, + }).Return(nil).Once() + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_ShuttingDown_{ + ShuttingDown: &pb.BatchStreamMessage_ShuttingDown{}, + }, + }).Return(nil).Once() + stream.EXPECT().Send(&pb.BatchStreamMessage{ + StreamId: StreamId, + Message: &pb.BatchStreamMessage_Shutdown_{ + Shutdown: 
&pb.BatchStreamMessage_Shutdown{}, + }, + }).Return(nil).Once() + + shutdown := batch.NewShutdown(ctx) + handler := batch.NewQueuesHandler(shutdown.HandlersCtx, shutdown.SendWg, shutdown.StreamWg, shutdown.ShutdownFinished, writeQueues, readQueues, logger) + batch.StartScheduler(shutdown.SchedulerCtx, shutdown.SchedulerWg, writeQueues, internalQueue, logger) + batch.StartBatchWorkers(shutdown.WorkersCtx, shutdown.WorkersWg, 1, internalQueue, readQueues, mockBatcher, logger) + + go func() { + err := handler.Stream(ctx, StreamId, stream) + require.NoError(t, err, "Expected no error when streaming") + }() + shutdown.Drain(logger) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/worker.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/worker.go new file mode 100644 index 0000000000000000000000000000000000000000..fac6065c467e8c0adeba8cadada356ba0ba8d759 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/worker.go @@ -0,0 +1,194 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package batch + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/sirupsen/logrus" + enterrors "github.com/weaviate/weaviate/entities/errors" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + "github.com/weaviate/weaviate/usecases/replica" +) + +type Batcher interface { + BatchObjects(ctx context.Context, req *pb.BatchObjectsRequest) (*pb.BatchObjectsReply, error) + BatchReferences(ctx context.Context, req *pb.BatchReferencesRequest) (*pb.BatchReferencesReply, error) +} + +type Worker struct { + batcher Batcher + logger logrus.FieldLogger + readQueues *ReadQueues + internalQueue internalQueue + wgs *sync.Map // map[string]*sync.WaitGroup; streamID -> wg +} + +type SendObjects struct { + Values []*pb.BatchObject + ConsistencyLevel *pb.ConsistencyLevel + Index int32 +} + +type SendReferences struct { + Values []*pb.BatchReference + ConsistencyLevel *pb.ConsistencyLevel + Index int32 +} + +type ProcessRequest struct { + StreamId string + Objects *SendObjects + References *SendReferences + Stop bool +} + +func (w *Worker) wgForStream(streamId string) *sync.WaitGroup { + actual, _ := w.wgs.LoadOrStore(streamId, &sync.WaitGroup{}) + return actual.(*sync.WaitGroup) +} + +func (w *Worker) isReplicationError(err string) bool { + return strings.Contains(err, replica.ErrReplicas.Error()) || // broadcast error to shutdown node + (strings.Contains(err, "connect: Post") && strings.Contains(err, ":commit")) || // failed to connect to shutdown node when committing + (strings.Contains(err, "status code: 404, error: request not found")) // failed to find request on shutdown node +} + +func (w *Worker) sendObjects(ctx context.Context, wg *sync.WaitGroup, streamId string, req *SendObjects) error { + if req == nil { + return fmt.Errorf("received nil sendObjects request") + } + wg.Add(1) + defer wg.Done() + reply, err := w.batcher.BatchObjects(ctx, &pb.BatchObjectsRequest{ + Objects: req.Values, + 
ConsistencyLevel: req.ConsistencyLevel, + }) + if err != nil { + return err + } + if len(reply.GetErrors()) > 0 { + errs := make([]*pb.BatchStreamMessage_Error, 0, len(reply.GetErrors())) + for _, err := range reply.GetErrors() { + if err == nil { + continue + } + errs = append(errs, &pb.BatchStreamMessage_Error{ + Error: err.Error, + IsObject: true, + Index: req.Index + int32(err.Index), + IsRetriable: w.isReplicationError(err.Error), + }) + } + if ch, ok := w.readQueues.Get(streamId); ok { + ch <- &readObject{Errors: errs} + } + } + return nil +} + +func (w *Worker) sendReferences(ctx context.Context, wg *sync.WaitGroup, streamId string, req *SendReferences) error { + if req == nil { + return fmt.Errorf("received nil sendReferences request") + } + wg.Add(1) + defer wg.Done() + reply, err := w.batcher.BatchReferences(ctx, &pb.BatchReferencesRequest{ + References: req.Values, + ConsistencyLevel: req.ConsistencyLevel, + }) + if err != nil { + return err + } + if len(reply.GetErrors()) > 0 { + errs := make([]*pb.BatchStreamMessage_Error, 0, len(reply.GetErrors())) + for _, err := range reply.GetErrors() { + if err == nil { + continue + } + errs = append(errs, &pb.BatchStreamMessage_Error{ + Error: err.Error, + IsReference: true, + Index: req.Index + int32(err.Index), + IsRetriable: w.isReplicationError(err.Error), + }) + } + if ch, ok := w.readQueues.Get(streamId); ok { + ch <- &readObject{Errors: errs} + } + } + return nil +} + +// Loop processes objects from the write queue, sending them to the batcher and handling shutdown signals. 
+func (w *Worker) Loop(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + // Drain the write queue and process any remaining requests + for req := range w.internalQueue { + if err := w.process(ctx, req); err != nil { + return fmt.Errorf("failed to process batch request: %w", err) + } + } + return nil + case req, ok := <-w.internalQueue: + if req != nil { + if err := w.process(ctx, req); err != nil { + return fmt.Errorf("failed to process batch request: %w", err) + } + } + if !ok { + return nil // channel closed, exit loop + } + } + } +} + +func (w *Worker) process(ctx context.Context, req *ProcessRequest) error { + wg := w.wgForStream(req.StreamId) + if req.Objects != nil { + if err := w.sendObjects(ctx, wg, req.StreamId, req.Objects); err != nil { + return err + } + } + if req.References != nil { + if err := w.sendReferences(ctx, wg, req.StreamId, req.References); err != nil { + return err + } + } + // This should only ever be received once so it’s okay to wait on the shared wait group here + // If the scheduler does send more than one stop per stream then deadlocks may occur + if req.Stop { + wg.Wait() // Wait for all processing requests to complete + // Signal to the reply handler that we are done + w.readQueues.Close(req.StreamId) + w.wgs.Delete(req.StreamId) // Clean up the wait group map + } + return nil +} + +func StartBatchWorkers(ctx context.Context, wg *sync.WaitGroup, concurrency int, internalQueue internalQueue, readQueues *ReadQueues, batcher Batcher, logger logrus.FieldLogger) { + eg := enterrors.NewErrorGroupWrapper(logger) + wgs := sync.Map{} + for range concurrency { + wg.Add(1) + eg.Go(func() error { + defer wg.Done() + w := &Worker{batcher: batcher, logger: logger, readQueues: readQueues, internalQueue: internalQueue, wgs: &wgs} + return w.Loop(ctx) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/worker_test.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/worker_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2b4a878dde42c273480844d10fa9df4340ef7460 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch/worker_test.go @@ -0,0 +1,235 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package batch_test + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/handlers/grpc/v1/batch" + "github.com/weaviate/weaviate/adapters/handlers/grpc/v1/batch/mocks" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + "github.com/weaviate/weaviate/usecases/replica" +) + +var StreamId string = "329c306b-c912-4ec7-9b1d-55e5e0ca8dea" + +func TestWorkerLoop(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + logger := logrus.New() + + t.Run("should process from the queue and send data without error", func(t *testing.T) { + mockBatcher := mocks.NewMockBatcher(t) + + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + + readQueues := batch.NewBatchReadQueues() + readQueues.Make(StreamId) + internalQueue := batch.NewBatchInternalQueue() + + mockBatcher.EXPECT().BatchObjects(ctx, mock.Anything).Return(&pb.BatchObjectsReply{ + Took: float32(1), + Errors: nil, + }, nil).Times(1) + mockBatcher.EXPECT().BatchReferences(ctx, mock.Anything).Return(&pb.BatchReferencesReply{ + Took: float32(1), + Errors: nil, + }, nil).Times(1) + var wg sync.WaitGroup + batch.StartBatchWorkers(ctx, &wg, 1, internalQueue, readQueues, mockBatcher, 
logger) + + // Send data + internalQueue <- &batch.ProcessRequest{ + StreamId: StreamId, + Objects: &batch.SendObjects{ + Values: []*pb.BatchObject{}, + }, + } + internalQueue <- &batch.ProcessRequest{ + StreamId: StreamId, + References: &batch.SendReferences{ + Values: []*pb.BatchReference{}, + }, + } + + // Send sentinel + internalQueue <- &batch.ProcessRequest{ + StreamId: StreamId, + Stop: true, + } + + // Accept the stop message + ch, ok := readQueues.Get(StreamId) + require.True(t, ok, "Expected read queue to exist and to contain message") + _, ok = <-ch + require.False(t, ok, "Expected read queue to be closed") + + cancel() // Cancel the context to stop the worker loop + close(internalQueue) // Allow the draining logic to exit naturally + wg.Wait() + require.Empty(t, internalQueue, "Expected internal queue to be empty after processing") + require.Empty(t, ch, "Expected read queue to be empty after processing") + require.Equal(t, ctx.Err(), context.Canceled, "Expected context to be canceled") + }) + + t.Run("should process from the queue during shutdown", func(t *testing.T) { + mockBatcher := mocks.NewMockBatcher(t) + + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + + readQueues := batch.NewBatchReadQueues() + readQueues.Make(StreamId) + internalQueue := batch.NewBatchInternalQueue() + + mockBatcher.EXPECT().BatchObjects(ctx, mock.Anything).Return(&pb.BatchObjectsReply{ + Took: float32(1), + Errors: nil, + }, nil).Times(1) + mockBatcher.EXPECT().BatchReferences(ctx, mock.Anything).Return(&pb.BatchReferencesReply{ + Took: float32(1), + Errors: nil, + }, nil).Times(1) + var wg sync.WaitGroup + batch.StartBatchWorkers(ctx, &wg, 1, internalQueue, readQueues, mockBatcher, logger) + + cancel() // Cancel the context to simulate shutdown + // Send data after context cancellation to ensure that the worker processes it + // in its shutdown select-case + internalQueue <- &batch.ProcessRequest{ + StreamId: StreamId, + Objects: 
&batch.SendObjects{ + Values: []*pb.BatchObject{}, + }, + } + internalQueue <- &batch.ProcessRequest{ + StreamId: StreamId, + References: &batch.SendReferences{ + Values: []*pb.BatchReference{}, + }, + } + // Send sentinel + internalQueue <- &batch.ProcessRequest{ + StreamId: StreamId, + Stop: true, + } + close(internalQueue) // Close the internal queue to stop processing as part of the shutdown + + // Accept the stop message + ch, ok := readQueues.Get(StreamId) + require.True(t, ok, "Expected read queue to exist and to contain message") + _, ok = <-ch + require.False(t, ok, "Expected read queue to be closed") + + wg.Wait() // Wait for the worker to finish processing + require.Empty(t, internalQueue, "Expected internal queue to be empty after processing") + require.Empty(t, ch, "Expected read queue to be empty after processing") + require.Equal(t, ctx.Err(), context.Canceled, "Expected context to be canceled") + }) + + t.Run("should process from the queue and send data returning partial error", func(t *testing.T) { + mockBatcher := mocks.NewMockBatcher(t) + + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + + readQueues := batch.NewBatchReadQueues() + readQueues.Make(StreamId) + internalQueue := batch.NewBatchInternalQueue() + + errorsObj := []*pb.BatchObjectsReply_BatchError{ + { + Error: replica.ErrReplicas.Error(), + Index: 0, + }, + } + errorsRefs := []*pb.BatchReferencesReply_BatchError{ + { + Error: "refs error", + Index: 0, + }, + } + mockBatcher.EXPECT().BatchObjects(ctx, mock.Anything).Return(&pb.BatchObjectsReply{ + Took: float32(1), + Errors: errorsObj, + }, nil) + mockBatcher.EXPECT().BatchReferences(ctx, mock.Anything).Return(&pb.BatchReferencesReply{ + Took: float32(1), + Errors: errorsRefs, + }, nil) + var wg sync.WaitGroup + batch.StartBatchWorkers(ctx, &wg, 1, internalQueue, readQueues, mockBatcher, logger) + + // Send data + obj := &pb.BatchObject{} + internalQueue <- &batch.ProcessRequest{ + StreamId: StreamId, + Objects: 
&batch.SendObjects{ + Values: []*pb.BatchObject{obj, obj}, + }, + } + ref := &pb.BatchReference{} + internalQueue <- &batch.ProcessRequest{ + StreamId: StreamId, + References: &batch.SendReferences{ + Values: []*pb.BatchReference{ref, ref}, + }, + } + + // Send sentinel + internalQueue <- &batch.ProcessRequest{ + StreamId: StreamId, + Stop: true, + } + + ch, ok := readQueues.Get(StreamId) + require.True(t, ok, "Expected read queue to exist and to contain message") + + // Read first error + errs := <-ch + require.NotNil(t, errs.Errors, "Expected errors to be returned") + require.Len(t, errs.Errors, 1, "Expected one error to be returned") + require.Equal(t, replica.ErrReplicas.Error(), errs.Errors[0].Error, "Expected error message to match") + require.True(t, errs.Errors[0].IsObject, "Expected IsObject to be true for object errors") + require.False(t, errs.Errors[0].IsReference, "Expected IsReference to be false for object errors") + require.True(t, errs.Errors[0].IsRetriable, "Expected IsRetriable to be true for this error") + + // Read second error + errs = <-ch + require.NotNil(t, errs.Errors, "Expected errors to be returned") + require.Len(t, errs.Errors, 1, "Expected one error to be returned") + require.Equal(t, "refs error", errs.Errors[0].Error, "Expected error message to match") + require.False(t, errs.Errors[0].IsObject, "Expected IsObject to be false for reference errors") + require.True(t, errs.Errors[0].IsReference, "Expected IsReference to be true for reference errors") + require.False(t, errs.Errors[0].IsRetriable, "Expected IsRetriable to be false for this error") + + // Read sentinel + _, ok = <-ch + require.False(t, ok, "Expected read queue to be closed") + + cancel() // Cancel the context to stop the worker loop + close(internalQueue) // Allow the draining logic to exit naturally + wg.Wait() + require.Empty(t, internalQueue, "Expected internal queue to be empty after processing") + require.Empty(t, ch, "Expected read queue to be empty after 
processing") + require.Equal(t, ctx.Err(), context.Canceled, "Expected context to be canceled") + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch_delete.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..36849a4ff605180623b4e042f014c6721132151d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch_delete.go @@ -0,0 +1,107 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "fmt" + "math/big" + "strings" + + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/schema" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + "github.com/weaviate/weaviate/usecases/objects" +) + +func batchDeleteParamsFromProto(req *pb.BatchDeleteRequest, authorizedGetClass classGetterWithAuthzFunc) (objects.BatchDeleteParams, error) { + params := objects.BatchDeleteParams{} + + tenant := "" + if req.Tenant != nil { + tenant = *req.Tenant + } + // make sure collection exists + class, err := authorizedGetClass(req.Collection) + if err != nil { + return params, err + } + if class == nil { + return objects.BatchDeleteParams{}, fmt.Errorf("could not find class %s in schema", req.Collection) + } + + params.ClassName = schema.ClassName(class.Class) + + if req.Verbose { + params.Output = "verbose" + } else { + params.Output = "minimal" + } + + params.DryRun = req.DryRun + + if req.Filters == nil { + return objects.BatchDeleteParams{}, fmt.Errorf("no filters in batch delete request") + } + + clause, err := ExtractFilters(req.Filters, authorizedGetClass, req.Collection, tenant) + if err != nil { + return 
objects.BatchDeleteParams{}, err + } + filter := &filters.LocalFilter{Root: &clause} + if err := filters.ValidateFilters(authorizedGetClass, filter); err != nil { + return objects.BatchDeleteParams{}, err + } + params.Filters = filter + + return params, nil +} + +func batchDeleteReplyFromObjects(response objects.BatchDeleteResult, verbose bool) (*pb.BatchDeleteReply, error) { + var successful, failed int64 + + var objs []*pb.BatchDeleteObject + if verbose { + objs = make([]*pb.BatchDeleteObject, 0, len(response.Objects)) + } + for _, obj := range response.Objects { + if obj.Err == nil { + successful += 1 + } else { + failed += 1 + } + if verbose { + hexInteger, success := new(big.Int).SetString(strings.ReplaceAll(obj.UUID.String(), "-", ""), 16) + if !success { + return nil, fmt.Errorf("failed to parse hex string to integer") + } + errorString := "" + if obj.Err != nil { + errorString = obj.Err.Error() + } + + resultObj := &pb.BatchDeleteObject{ + Uuid: hexInteger.Bytes(), + Successful: obj.Err == nil, + Error: &errorString, + } + objs = append(objs, resultObj) + } + } + reply := &pb.BatchDeleteReply{ + Successful: successful, + Failed: failed, + Matches: response.Matches, + Objects: objs, + } + + return reply, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch_delete_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch_delete_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ce94bfaa4686d2cb4c79f9d4fa5b7edd127ac8df --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/batch_delete_test.go @@ -0,0 +1,175 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + "github.com/weaviate/weaviate/usecases/objects" +) + +func TestBatchDeleteRequest(t *testing.T) { + collection := "TestClass" + scheme := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: collection, + Properties: []*models.Property{ + {Name: "name", DataType: schema.DataTypeText.PropString()}, + }, + }, + }, + }, + } + + getClass := func(name string) (*models.Class, error) { + return scheme.GetClass(name), nil + } + + simpleFilterOutput := &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{Class: schema.ClassName(collection), Property: "name"}, + Operator: filters.OperatorEqual, + Value: &filters.Value{Value: "test", Type: schema.DataTypeText}, + }, + } + simpleFilterInput := &pb.Filters{Operator: pb.Filters_OPERATOR_EQUAL, TestValue: &pb.Filters_ValueText{ValueText: "test"}, Target: &pb.FilterTarget{Target: &pb.FilterTarget_Property{Property: "name"}}} + + tests := []struct { + name string + req *pb.BatchDeleteRequest + out objects.BatchDeleteParams + error error + }{ + { + name: "simple filter", + req: &pb.BatchDeleteRequest{ + Collection: collection, + Filters: simpleFilterInput, + }, + out: objects.BatchDeleteParams{ + ClassName: schema.ClassName(collection), + DryRun: false, + Output: "minimal", + Filters: simpleFilterOutput, + }, + error: nil, + }, + { + name: "collection does not exist", + req: &pb.BatchDeleteRequest{Collection: "does not exist"}, + error: errors.New("could not find class does not exist in schema"), + }, + { + name: "no filter", + req: &pb.BatchDeleteRequest{Collection: collection}, + error: fmt.Errorf("no filters in batch delete request"), + }, + 
{ + name: "dry run", + req: &pb.BatchDeleteRequest{ + Collection: collection, + Filters: simpleFilterInput, + DryRun: true, + }, + out: objects.BatchDeleteParams{ + ClassName: schema.ClassName(collection), + DryRun: true, + Output: "minimal", + Filters: simpleFilterOutput, + }, + error: nil, + }, + { + name: "verbose", + req: &pb.BatchDeleteRequest{ + Collection: collection, + Filters: simpleFilterInput, + DryRun: false, + Verbose: true, + }, + out: objects.BatchDeleteParams{ + ClassName: schema.ClassName(collection), + DryRun: false, + Output: "verbose", + Filters: simpleFilterOutput, + }, + error: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + out, err := batchDeleteParamsFromProto(tt.req, getClass) + require.Equal(t, tt.error, err) + + if tt.error == nil { + require.Equal(t, tt.out, out) + } + }) + } +} + +var ( + errorString = "error" + noErrorString = "" +) + +func TestBatchDeleteReply(t *testing.T) { + tests := []struct { + name string + response objects.BatchDeleteResult + verbose bool + out *pb.BatchDeleteReply + }{ + { + name: "single object", + response: objects.BatchDeleteResult{Matches: 1, Objects: objects.BatchSimpleObjects{{UUID: UUID1, Err: nil}}}, + out: &pb.BatchDeleteReply{Matches: 1, Successful: 1, Failed: 0}, + }, + { + name: "single object with err", + response: objects.BatchDeleteResult{Matches: 1, Objects: objects.BatchSimpleObjects{{UUID: UUID1, Err: errors.New("error")}}}, + out: &pb.BatchDeleteReply{Matches: 1, Successful: 0, Failed: 1}, + }, + { + name: "one error, one successful", + response: objects.BatchDeleteResult{Matches: 2, Objects: objects.BatchSimpleObjects{{UUID: UUID1, Err: errors.New("error")}, {UUID: UUID2, Err: nil}}}, + out: &pb.BatchDeleteReply{Matches: 2, Successful: 1, Failed: 1}, + }, + { + name: "one error, one successful - with verbosity", + response: objects.BatchDeleteResult{Matches: 2, Objects: objects.BatchSimpleObjects{{UUID: UUID1, Err: errors.New("error")}, {UUID: UUID2, 
Err: nil}}}, + verbose: true, + out: &pb.BatchDeleteReply{Matches: 2, Successful: 1, Failed: 1, Objects: []*pb.BatchDeleteObject{ + {Uuid: idByte(string(UUID1)), Successful: false, Error: &errorString}, + {Uuid: idByte(string(UUID2)), Successful: true, Error: &noErrorString}, + }}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + out, err := batchDeleteReplyFromObjects(tt.response, tt.verbose) + require.Nil(t, err) + require.Equal(t, tt.out, out) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/file_replication_service.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/file_replication_service.go new file mode 100644 index 0000000000000000000000000000000000000000..eb44e8de046060bc5a46ab6f7b33249b94e82e66 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/file_replication_service.go @@ -0,0 +1,211 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "context" + "fmt" + "io" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/schema" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + "github.com/weaviate/weaviate/usecases/sharding" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// fileChunkSize defines the size of each file chunk sent over gRPC. +// Currently set to 64 KB, which is a reasonable size for network transmission. +// It can be made configurable in the future if needed. 
+const fileChunkSize = 64 * 1024 // 64 KB + +type FileReplicationService struct { + pb.UnimplementedFileReplicationServiceServer + + repo sharding.RemoteIncomingRepo + schema sharding.RemoteIncomingSchema +} + +func NewFileReplicationService(repo sharding.RemoteIncomingRepo, schema sharding.RemoteIncomingSchema) *FileReplicationService { + return &FileReplicationService{ + repo: repo, + schema: schema, + } +} + +func (fps *FileReplicationService) PauseFileActivity(ctx context.Context, req *pb.PauseFileActivityRequest) (*pb.PauseFileActivityResponse, error) { + indexName := req.GetIndexName() + shardName := req.GetShardName() + schemaVersion := req.GetSchemaVersion() + + index, err := fps.indexForIncomingWrite(ctx, indexName, schemaVersion) + if err != nil { + return nil, status.Errorf(codes.Internal, "local index %q not found: %v", indexName, err) + } + + err = index.IncomingPauseFileActivity(ctx, shardName) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to pause file activity for index %q, shard %q: %v", indexName, shardName, err) + } + + return &pb.PauseFileActivityResponse{ + IndexName: indexName, + ShardName: shardName, + }, nil +} + +func (fps *FileReplicationService) ResumeFileActivity(ctx context.Context, req *pb.ResumeFileActivityRequest) (*pb.ResumeFileActivityResponse, error) { + indexName := req.GetIndexName() + shardName := req.GetShardName() + + index := fps.repo.GetIndexForIncomingSharding(schema.ClassName(indexName)) + if index == nil { + return nil, status.Errorf(codes.Internal, "local index %q not found", indexName) + } + + err := index.IncomingResumeFileActivity(ctx, shardName) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to resume file activity for index %q, shard %q: %v", indexName, shardName, err) + } + + return &pb.ResumeFileActivityResponse{ + IndexName: indexName, + ShardName: shardName, + }, nil +} + +func (fps *FileReplicationService) ListFiles(ctx context.Context, req *pb.ListFilesRequest) 
(*pb.ListFilesResponse, error) { + indexName := req.GetIndexName() + shardName := req.GetShardName() + + index := fps.repo.GetIndexForIncomingSharding(schema.ClassName(indexName)) + if index == nil { + return nil, status.Errorf(codes.Internal, "local index %q not found", indexName) + } + + files, err := index.IncomingListFiles(ctx, shardName) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to list files for index %q, shard %q: %v", indexName, shardName, err) + } + + return &pb.ListFilesResponse{ + IndexName: indexName, + ShardName: shardName, + FileNames: files, + }, nil +} + +func (fps *FileReplicationService) GetFileMetadata(stream pb.FileReplicationService_GetFileMetadataServer) error { + for { + req, err := stream.Recv() + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + return status.Errorf(codes.Internal, "failed to receive request: %v", err) + } + + indexName := req.GetIndexName() + shardName := req.GetShardName() + fileName := req.GetFileName() + + index := fps.repo.GetIndexForIncomingSharding(schema.ClassName(indexName)) + if index == nil { + return status.Errorf(codes.Internal, "local index %q not found", indexName) + } + + md, err := index.IncomingGetFileMetadata(stream.Context(), shardName, fileName) + if err != nil { + return status.Errorf(codes.Internal, "failed to get file metadata for %q in shard %q: %v", fileName, shardName, err) + } + + if err := stream.Send(&pb.FileMetadata{ + IndexName: indexName, + ShardName: shardName, + FileName: fileName, + Size: md.Size, + Crc32: md.CRC32, + }); err != nil { + return status.Errorf(codes.Internal, "failed to send file metadata response: %v", err) + } + } +} + +func (fps *FileReplicationService) GetFile(stream pb.FileReplicationService_GetFileServer) error { + for { + req, err := stream.Recv() + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + return status.Errorf(codes.Internal, "failed to receive request: %v", err) + } + + if req.GetCompression() != 
pb.CompressionType_COMPRESSION_TYPE_UNSPECIFIED { + return status.Errorf(codes.Unimplemented, "compression type %q is not supported", req.GetCompression()) + } + + indexName := req.GetIndexName() + shardName := req.GetShardName() + fileName := req.GetFileName() + + index := fps.repo.GetIndexForIncomingSharding(schema.ClassName(indexName)) + if index == nil { + return status.Errorf(codes.Internal, "local index %q not found", indexName) + } + + fileReader, err := index.IncomingGetFile(stream.Context(), shardName, fileName) + if err != nil { + return status.Errorf(codes.Internal, "failed to get file %q in shard %q: %v", fileName, shardName, err) + } + defer fileReader.Close() + + buf := make([]byte, fileChunkSize) + + offset := 0 + + for { + n, err := fileReader.Read(buf) + eof := err != nil && errors.Is(err, io.EOF) + + if err := stream.Send(&pb.FileChunk{ + Offset: int64(offset), + Data: buf[:n], + Eof: eof, + }); err != nil { + return status.Errorf(codes.Internal, "failed to send file chunk: %v", err) + } + + if eof { + break + } + + offset += n + } + } +} + +func (fps *FileReplicationService) indexForIncomingWrite(ctx context.Context, indexName string, + schemaVersion uint64, +) (sharding.RemoteIndexIncomingRepo, error) { + // wait for schema and store to reach version >= schemaVersion + if _, err := fps.schema.ReadOnlyClassWithVersion(ctx, indexName, schemaVersion); err != nil { + return nil, fmt.Errorf("local index %q not found: %w", indexName, err) + } + index := fps.repo.GetIndexForIncomingSharding(schema.ClassName(indexName)) + if index == nil { + return nil, fmt.Errorf("local index %q not found", indexName) + } + + return index, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/filters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/filters.go new file mode 100644 index 0000000000000000000000000000000000000000..835fe586dc99e4398bb7b7d3d2889c7b431e4c57 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/filters.go @@ -0,0 +1,311 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "fmt" + + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +func ExtractFilters(filterIn *pb.Filters, authorizedGetClass classGetterWithAuthzFunc, className, tenant string) (filters.Clause, error) { + returnFilter := filters.Clause{} + + switch filterIn.Operator { + case pb.Filters_OPERATOR_AND, pb.Filters_OPERATOR_OR, pb.Filters_OPERATOR_NOT: + switch filterIn.Operator { + case pb.Filters_OPERATOR_AND: + returnFilter.Operator = filters.OperatorAnd + case pb.Filters_OPERATOR_OR: + returnFilter.Operator = filters.OperatorOr + case pb.Filters_OPERATOR_NOT: + returnFilter.Operator = filters.OperatorNot + default: + } + + clauses := make([]filters.Clause, len(filterIn.Filters)) + for i, clause := range filterIn.Filters { + retClause, err := ExtractFilters(clause, authorizedGetClass, className, tenant) + if err != nil { + return filters.Clause{}, err + } + clauses[i] = retClause + } + + returnFilter.Operands = clauses + + default: + if filterIn.Target == nil && len(filterIn.On)%2 != 1 { + return filters.Clause{}, fmt.Errorf( + "paths needs to have a uneven number of components: property, class, property, ...., got %v", filterIn.On, + ) + } + + switch filterIn.Operator { + case pb.Filters_OPERATOR_EQUAL: + returnFilter.Operator = filters.OperatorEqual + case pb.Filters_OPERATOR_NOT_EQUAL: + returnFilter.Operator = filters.OperatorNotEqual + case pb.Filters_OPERATOR_GREATER_THAN: + returnFilter.Operator = 
filters.OperatorGreaterThan + case pb.Filters_OPERATOR_GREATER_THAN_EQUAL: + returnFilter.Operator = filters.OperatorGreaterThanEqual + case pb.Filters_OPERATOR_LESS_THAN: + returnFilter.Operator = filters.OperatorLessThan + case pb.Filters_OPERATOR_LESS_THAN_EQUAL: + returnFilter.Operator = filters.OperatorLessThanEqual + case pb.Filters_OPERATOR_WITHIN_GEO_RANGE: + returnFilter.Operator = filters.OperatorWithinGeoRange + case pb.Filters_OPERATOR_LIKE: + returnFilter.Operator = filters.OperatorLike + case pb.Filters_OPERATOR_IS_NULL: + returnFilter.Operator = filters.OperatorIsNull + case pb.Filters_OPERATOR_CONTAINS_ANY: + returnFilter.Operator = filters.ContainsAny + case pb.Filters_OPERATOR_CONTAINS_ALL: + returnFilter.Operator = filters.ContainsAll + case pb.Filters_OPERATOR_CONTAINS_NONE: + returnFilter.Operator = filters.ContainsNone + default: + return filters.Clause{}, fmt.Errorf("unknown filter operator %v", filterIn.Operator) + } + + var dataType schema.DataType + if filterIn.Target == nil { + path, err := extractPath(className, filterIn.On) + if err != nil { + return filters.Clause{}, err + } + returnFilter.On = path + + dataType, err = extractDataType(authorizedGetClass, returnFilter.Operator, className, tenant, filterIn.On) + if err != nil { + return filters.Clause{}, err + } + } else { + path, dataType2, err := extractPathNew(authorizedGetClass, className, tenant, filterIn.Target, returnFilter.Operator) + if err != nil { + return filters.Clause{}, err + } + dataType = dataType2 + returnFilter.On = path + } + + // datatype UUID is just a string + if dataType == schema.DataTypeUUID { + dataType = schema.DataTypeText + } + + var val interface{} + switch filterIn.TestValue.(type) { + case *pb.Filters_ValueText: + val = filterIn.GetValueText() + case *pb.Filters_ValueInt: + val = int(filterIn.GetValueInt()) + case *pb.Filters_ValueBoolean: + val = filterIn.GetValueBoolean() + case *pb.Filters_ValueNumber: + val = filterIn.GetValueNumber() + case 
*pb.Filters_ValueIntArray: + // convert from int32 GRPC to go-int + valInt32 := filterIn.GetValueIntArray().Values + valInt := make([]int, len(valInt32)) + for i := 0; i < len(valInt32); i++ { + valInt[i] = int(valInt32[i]) + } + val = valInt + case *pb.Filters_ValueTextArray: + val = filterIn.GetValueTextArray().Values + case *pb.Filters_ValueNumberArray: + val = filterIn.GetValueNumberArray().Values + case *pb.Filters_ValueBooleanArray: + val = filterIn.GetValueBooleanArray().Values + case *pb.Filters_ValueGeo: + valueFilter := filterIn.GetValueGeo() + val = filters.GeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + Latitude: &valueFilter.Latitude, + Longitude: &valueFilter.Longitude, + }, + Distance: valueFilter.Distance, + } + default: + return filters.Clause{}, fmt.Errorf("unknown value type %v", filterIn.TestValue) + } + + // correct the type of value when filtering on a float/int property but sending an int/float. This is easy to + // get wrong + if number, ok := val.(int); ok && dataType == schema.DataTypeNumber { + val = float64(number) + } + if number, ok := val.(float64); ok && dataType == schema.DataTypeInt { + val = int(number) + if float64(int(number)) != number { + return filters.Clause{}, fmt.Errorf("filtering for integer, but received a floating point number %v", number) + } + } + + // correct type for containsXXX in case users send int/float for a float/int array + if returnFilter.Operator.IsContains() && dataType == schema.DataTypeNumber { + valSlice, ok := val.([]int) + if ok { + val64 := make([]float64, len(valSlice)) + for i := 0; i < len(valSlice); i++ { + val64[i] = float64(valSlice[i]) + } + val = val64 + } + } + + if returnFilter.Operator.IsContains() && dataType == schema.DataTypeInt { + valSlice, ok := val.([]float64) + if ok { + valInt := make([]int, len(valSlice)) + for i := 0; i < len(valSlice); i++ { + if float64(int(valSlice[i])) != valSlice[i] { + return filters.Clause{}, fmt.Errorf("filtering for integer, but received a 
floating point number %v", valSlice[i]) + } + valInt[i] = int(valSlice[i]) + } + val = valInt + } + } + + value := filters.Value{Value: val, Type: dataType} + returnFilter.Value = &value + + } + return returnFilter, nil +} + +func extractDataTypeProperty(authorizedGetClass classGetterWithAuthzFunc, operator filters.Operator, className, tenant string, on []string) (schema.DataType, error) { + var dataType schema.DataType + if operator == filters.OperatorIsNull { + dataType = schema.DataTypeBoolean + } else if len(on) > 1 { + propToCheck := on[len(on)-1] + _, isPropLengthFilter := schema.IsPropertyLength(propToCheck, 0) + if isPropLengthFilter { + return schema.DataTypeInt, nil + } + + classOfProp := on[len(on)-2] + class, err := authorizedGetClass(classOfProp) + if err != nil { + return dataType, err + } + prop, err := schema.GetPropertyByName(class, propToCheck) + if err != nil { + return dataType, err + } + dataType = schema.DataType(prop.DataType[0]) + } else { + propToCheck := on[0] + _, isPropLengthFilter := schema.IsPropertyLength(propToCheck, 0) + if isPropLengthFilter { + return schema.DataTypeInt, nil + } + + class, err := authorizedGetClass(className) + if err != nil { + return dataType, err + } + if class == nil { + return dataType, fmt.Errorf("could not find class %s in schema", className) + } + prop, err := schema.GetPropertyByName(class, propToCheck) + if err != nil { + return dataType, err + } + if schema.IsRefDataType(prop.DataType) { + // This is a filter on a reference property without a path so is counting + // the number of references. 
Needs schema.DataTypeInt: entities/filters/filters_validator.go#L116-L127 + return schema.DataTypeInt, nil + } + dataType = schema.DataType(prop.DataType[0]) + } + + // searches on array datatypes always need the base-type as value-type + if baseType, isArray := schema.IsArrayType(dataType); isArray { + return baseType, nil + } + return dataType, nil +} + +func extractDataType(authorizedGetClass classGetterWithAuthzFunc, operator filters.Operator, classname, tenant string, on []string) (schema.DataType, error) { + propToFilterOn := on[len(on)-1] + if propToFilterOn == filters.InternalPropID { + return schema.DataTypeText, nil + } else if propToFilterOn == filters.InternalPropCreationTimeUnix || propToFilterOn == filters.InternalPropLastUpdateTimeUnix { + return schema.DataTypeDate, nil + } else { + return extractDataTypeProperty(authorizedGetClass, operator, classname, tenant, on) + } +} + +func extractPath(className string, on []string) (*filters.Path, error) { + if len(on) > 1 { + var err error + child, err := extractPath(on[1], on[2:]) + if err != nil { + return nil, err + } + return &filters.Path{Class: schema.ClassName(className), Property: schema.PropertyName(on[0]), Child: child}, nil + + } + return &filters.Path{Class: schema.ClassName(className), Property: schema.PropertyName(on[0]), Child: nil}, nil +} + +func extractPathNew(authorizedGetClass classGetterWithAuthzFunc, className, tenant string, target *pb.FilterTarget, operator filters.Operator) (*filters.Path, schema.DataType, error) { + class, err := authorizedGetClass(className) + if err != nil { + return nil, "", err + } + switch target.Target.(type) { + case *pb.FilterTarget_Property: + dt, err := extractDataType(authorizedGetClass, operator, className, tenant, []string{target.GetProperty()}) + if err != nil { + return nil, "", err + } + return &filters.Path{Class: schema.ClassName(className), Property: schema.PropertyName(target.GetProperty()), Child: nil}, dt, nil + case 
*pb.FilterTarget_SingleTarget: + singleTarget := target.GetSingleTarget() + normalizedRefPropName := schema.LowercaseFirstLetter(singleTarget.On) + refProp, err := schema.GetPropertyByName(class, normalizedRefPropName) + if err != nil { + return nil, "", err + } + if len(refProp.DataType) != 1 { + return nil, "", fmt.Errorf("expected reference property with a single target, got %v for %v ", refProp.DataType, refProp.Name) + } + child, property, err := extractPathNew(authorizedGetClass, refProp.DataType[0], tenant, singleTarget.Target, operator) + if err != nil { + return nil, "", err + } + return &filters.Path{Class: schema.ClassName(className), Property: schema.PropertyName(normalizedRefPropName), Child: child}, property, nil + case *pb.FilterTarget_MultiTarget: + multiTarget := target.GetMultiTarget() + child, property, err := extractPathNew(authorizedGetClass, multiTarget.TargetCollection, tenant, multiTarget.Target, operator) + if err != nil { + return nil, "", err + } + return &filters.Path{Class: schema.ClassName(className), Property: schema.PropertyName(schema.LowercaseFirstLetter(multiTarget.On)), Child: child}, property, nil + case *pb.FilterTarget_Count: + count := target.GetCount() + return &filters.Path{Class: schema.ClassName(className), Property: schema.PropertyName(schema.LowercaseFirstLetter(count.On)), Child: nil}, schema.DataTypeInt, nil + default: + return nil, "", fmt.Errorf("unknown target type %v", target) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/generative/parser.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/generative/parser.go new file mode 100644 index 0000000000000000000000000000000000000000..23e0694e9f8c8230ce79c09216650efbf61a9ad5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/generative/parser.go @@ -0,0 +1,444 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| 
| || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package generative + +import ( + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + anthropicParams "github.com/weaviate/weaviate/modules/generative-anthropic/parameters" + anyscaleParams "github.com/weaviate/weaviate/modules/generative-anyscale/parameters" + awsParams "github.com/weaviate/weaviate/modules/generative-aws/parameters" + cohereParams "github.com/weaviate/weaviate/modules/generative-cohere/parameters" + databricksParams "github.com/weaviate/weaviate/modules/generative-databricks/parameters" + friendliaiParams "github.com/weaviate/weaviate/modules/generative-friendliai/parameters" + googleParams "github.com/weaviate/weaviate/modules/generative-google/parameters" + mistralParams "github.com/weaviate/weaviate/modules/generative-mistral/parameters" + nvidiaParams "github.com/weaviate/weaviate/modules/generative-nvidia/parameters" + ollamaParams "github.com/weaviate/weaviate/modules/generative-ollama/parameters" + openaiParams "github.com/weaviate/weaviate/modules/generative-openai/parameters" + xaiParams "github.com/weaviate/weaviate/modules/generative-xai/parameters" + "github.com/weaviate/weaviate/usecases/modulecomponents/additional/generate" +) + +type Parser struct { + uses127Api bool + providerName string + returnMetadata returnMetadata + returnDebug returnDebug +} + +type returnMetadata struct { + single bool + grouped bool +} + +type returnDebug struct { + single bool + grouped bool +} + +func NewParser(uses127Api bool) *Parser { + return &Parser{ + uses127Api: uses127Api, + returnMetadata: returnMetadata{}, + } +} + +func (p *Parser) Extract(req *pb.GenerativeSearch, class *models.Class) *generate.Params { + if req == nil { + return nil + } + if p.uses127Api { + return 
p.extract(req, class) + } else { + return p.extractDeprecated(req, class) + } +} + +func (p *Parser) ProviderName() string { + return p.providerName +} + +func (p *Parser) ReturnMetadataForSingle() bool { + return p.returnMetadata.single +} + +func (p *Parser) ReturnMetadataForGrouped() bool { + return p.returnMetadata.grouped +} + +func (p *Parser) ReturnDebugForSingle() bool { + return p.returnDebug.single +} + +func (p *Parser) ReturnDebugForGrouped() bool { + return p.returnDebug.grouped +} + +func (p *Parser) extractDeprecated(req *pb.GenerativeSearch, class *models.Class) *generate.Params { + generative := generate.Params{} + if req.SingleResponsePrompt != "" { + generative.Prompt = &req.SingleResponsePrompt + singleResultPrompts := generate.ExtractPropsFromPrompt(generative.Prompt) + generative.PropertiesToExtract = append(generative.PropertiesToExtract, singleResultPrompts...) + } + if req.GroupedResponseTask != "" { + generative.Task = &req.GroupedResponseTask + if len(req.GroupedProperties) > 0 { + generative.Properties = req.GroupedProperties + generative.PropertiesToExtract = append(generative.PropertiesToExtract, generative.Properties...) + } else { + // if users do not supply a properties, all properties need to be extracted + generative.PropertiesToExtract = append(generative.PropertiesToExtract, schema.GetPropertyNamesFromClass(class, false)...) + } + } + return &generative +} + +func (p *Parser) extractFromQuery(generative *generate.Params, queries []*pb.GenerativeProvider) bool { + if len(queries) == 0 { + return false + } + query := queries[0] + switch query.Kind.(type) { + case *pb.GenerativeProvider_Anthropic: + opts := query.GetAnthropic() + if opts.GetImageProperties() != nil { + generative.Properties = append(generative.Properties, opts.GetImageProperties().Values...) 
+ } + generative.Options = p.anthropic(opts) + p.providerName = anthropicParams.Name + case *pb.GenerativeProvider_Anyscale: + generative.Options = p.anyscale(query.GetAnyscale()) + p.providerName = anyscaleParams.Name + case *pb.GenerativeProvider_Aws: + opts := query.GetAws() + if opts.GetImageProperties() != nil { + generative.Properties = append(generative.Properties, opts.GetImageProperties().Values...) + } + generative.Options = p.aws(opts) + p.providerName = awsParams.Name + case *pb.GenerativeProvider_Cohere: + generative.Options = p.cohere(query.GetCohere()) + p.providerName = cohereParams.Name + case *pb.GenerativeProvider_Mistral: + generative.Options = p.mistral(query.GetMistral()) + p.providerName = mistralParams.Name + case *pb.GenerativeProvider_Nvidia: + generative.Options = p.nvidia(query.GetNvidia()) + p.providerName = nvidiaParams.Name + case *pb.GenerativeProvider_Ollama: + generative.Options = p.ollama(query.GetOllama()) + p.providerName = ollamaParams.Name + case *pb.GenerativeProvider_Openai: + opts := query.GetOpenai() + if opts.GetImageProperties() != nil { + generative.Properties = append(generative.Properties, opts.GetImageProperties().Values...) + } + generative.Options = p.openai(opts) + p.providerName = openaiParams.Name + case *pb.GenerativeProvider_Google: + opts := query.GetGoogle() + if opts.GetImageProperties() != nil { + generative.Properties = append(generative.Properties, opts.GetImageProperties().Values...) 
+ } + generative.Options = p.google(opts) + p.providerName = googleParams.Name + case *pb.GenerativeProvider_Databricks: + generative.Options = p.databricks(query.GetDatabricks()) + p.providerName = databricksParams.Name + case *pb.GenerativeProvider_Friendliai: + generative.Options = p.friendliai(query.GetFriendliai()) + p.providerName = friendliaiParams.Name + case *pb.GenerativeProvider_Xai: + generative.Options = p.xai(query.GetXai()) + p.providerName = xaiParams.Name + default: + // do nothing + } + return query.ReturnMetadata +} + +func (p *Parser) extract(req *pb.GenerativeSearch, class *models.Class) *generate.Params { + generative := generate.Params{} + if req.Single != nil { + generative.Prompt = &req.Single.Prompt + p.returnMetadata.single = p.extractFromQuery(&generative, req.Single.Queries) + + p.returnDebug.single = req.Single.Debug + generative.Debug = req.Single.Debug + + singleResultPrompts := generate.ExtractPropsFromPrompt(generative.Prompt) + generative.PropertiesToExtract = append(generative.PropertiesToExtract, singleResultPrompts...) + } + if req.Grouped != nil { + generative.Task = &req.Grouped.Task + p.returnMetadata.grouped = p.extractFromQuery(&generative, req.Grouped.Queries) // populates generative.Properties with any values in provider.ImageProperties (if supported) + + p.returnDebug.grouped = req.Grouped.Debug + generative.Debug = req.Grouped.Debug + + if len(generative.Properties) == 0 && len(req.Grouped.GetProperties().GetValues()) == 0 { + // if users do not supply any properties, all properties need to be extracted + generative.PropertiesToExtract = append(generative.PropertiesToExtract, schema.GetPropertyNamesFromClass(class, false)...) + } else { + generative.Properties = append(generative.Properties, req.Grouped.Properties.GetValues()...) + generative.PropertiesToExtract = append(generative.PropertiesToExtract, generative.Properties...) 
+ } + } + return &generative +} + +func (p *Parser) anthropic(in *pb.GenerativeAnthropic) map[string]any { + if in == nil { + return nil + } + return map[string]any{ + anthropicParams.Name: anthropicParams.Params{ + BaseURL: in.GetBaseUrl(), + Model: in.GetModel(), + Temperature: in.Temperature, + MaxTokens: p.int64ToInt(in.MaxTokens), + StopSequences: in.StopSequences.GetValues(), + TopP: in.TopP, + TopK: p.int64ToInt(in.TopK), + Images: p.getStringPtrs(in.Images), + ImageProperties: p.getStrings(in.ImageProperties), + }, + } +} + +func (p *Parser) anyscale(in *pb.GenerativeAnyscale) map[string]any { + if in == nil { + return nil + } + return map[string]any{ + anyscaleParams.Name: anyscaleParams.Params{ + BaseURL: in.GetBaseUrl(), + Model: in.GetModel(), + Temperature: in.Temperature, + }, + } +} + +func (p *Parser) aws(in *pb.GenerativeAWS) map[string]any { + if in == nil { + return nil + } + return map[string]any{ + awsParams.Name: awsParams.Params{ + Service: in.GetService(), + Region: in.GetRegion(), + Endpoint: in.GetEndpoint(), + TargetModel: in.GetTargetModel(), + TargetVariant: in.GetTargetVariant(), + Model: in.GetModel(), + Temperature: in.Temperature, + Images: p.getStringPtrs(in.Images), + ImageProperties: p.getStrings(in.ImageProperties), + }, + } +} + +func (p *Parser) cohere(in *pb.GenerativeCohere) map[string]any { + if in == nil { + return nil + } + return map[string]any{ + cohereParams.Name: cohereParams.Params{ + BaseURL: in.GetBaseUrl(), + Model: in.GetModel(), + Temperature: in.Temperature, + MaxTokens: p.int64ToInt(in.MaxTokens), + K: p.int64ToInt(in.K), + P: in.P, + StopSequences: in.StopSequences.GetValues(), + FrequencyPenalty: in.FrequencyPenalty, + PresencePenalty: in.PresencePenalty, + }, + } +} + +func (p *Parser) mistral(in *pb.GenerativeMistral) map[string]any { + if in == nil { + return nil + } + return map[string]any{ + mistralParams.Name: mistralParams.Params{ + BaseURL: in.GetBaseUrl(), + MaxTokens: p.int64ToInt(in.MaxTokens), + 
Model: in.GetModel(), + Temperature: in.Temperature, + TopP: in.TopP, + }, + } +} + +func (p *Parser) ollama(in *pb.GenerativeOllama) map[string]any { + if in == nil { + return nil + } + return map[string]any{ + ollamaParams.Name: ollamaParams.Params{ + ApiEndpoint: in.GetApiEndpoint(), + Model: in.GetModel(), + Temperature: in.Temperature, + Images: p.getStringPtrs(in.Images), + ImageProperties: p.getStrings(in.ImageProperties), + }, + } +} + +func (p *Parser) openai(in *pb.GenerativeOpenAI) map[string]any { + if in == nil { + return nil + } + return map[string]any{ + openaiParams.Name: openaiParams.Params{ + BaseURL: in.GetBaseUrl(), + ApiVersion: in.GetApiVersion(), + ResourceName: in.GetResourceName(), + DeploymentID: in.GetDeploymentId(), + IsAzure: in.GetIsAzure(), + Model: in.GetModel(), + FrequencyPenalty: in.FrequencyPenalty, + MaxTokens: p.int64ToInt(in.MaxTokens), + N: p.int64ToInt(in.N), + PresencePenalty: in.PresencePenalty, + Stop: in.Stop.GetValues(), + Temperature: in.Temperature, + TopP: in.TopP, + Images: p.getStringPtrs(in.Images), + ImageProperties: p.getStrings(in.ImageProperties), + }, + } +} + +func (p *Parser) google(in *pb.GenerativeGoogle) map[string]any { + if in == nil { + return nil + } + return map[string]any{ + googleParams.Name: googleParams.Params{ + ApiEndpoint: in.GetApiEndpoint(), + ProjectID: in.GetProjectId(), + EndpointID: in.GetEndpointId(), + Region: in.GetRegion(), + Model: in.GetModel(), + Temperature: in.Temperature, + MaxTokens: p.int64ToInt(in.MaxTokens), + TopP: in.TopP, + TopK: p.int64ToInt(in.TopK), + StopSequences: in.StopSequences.GetValues(), + PresencePenalty: in.PresencePenalty, + FrequencyPenalty: in.FrequencyPenalty, + Images: p.getStringPtrs(in.Images), + ImageProperties: p.getStrings(in.ImageProperties), + }, + } +} + +func (p *Parser) databricks(in *pb.GenerativeDatabricks) map[string]any { + if in == nil { + return nil + } + return map[string]any{ + databricksParams.Name: databricksParams.Params{ + 
Endpoint: in.GetEndpoint(), + Model: in.GetModel(), + FrequencyPenalty: in.FrequencyPenalty, + Logprobs: in.LogProbs, + TopLogprobs: p.int64ToInt(in.TopLogProbs), + MaxTokens: p.int64ToInt(in.MaxTokens), + N: p.int64ToInt(in.N), + PresencePenalty: in.PresencePenalty, + Stop: in.Stop.GetValues(), + Temperature: in.Temperature, + TopP: in.TopP, + }, + } +} + +func (p *Parser) friendliai(in *pb.GenerativeFriendliAI) map[string]any { + if in == nil { + return nil + } + return map[string]any{ + friendliaiParams.Name: friendliaiParams.Params{ + BaseURL: in.GetBaseUrl(), + Model: in.GetModel(), + MaxTokens: p.int64ToInt(in.MaxTokens), + Temperature: in.Temperature, + N: p.int64ToInt(in.N), + TopP: in.TopP, + }, + } +} + +func (p *Parser) nvidia(in *pb.GenerativeNvidia) map[string]any { + if in == nil { + return nil + } + return map[string]any{ + nvidiaParams.Name: nvidiaParams.Params{ + BaseURL: in.GetBaseUrl(), + Model: in.GetModel(), + Temperature: in.Temperature, + TopP: in.TopP, + MaxTokens: p.int64ToInt(in.MaxTokens), + }, + } +} + +func (p *Parser) xai(in *pb.GenerativeXAI) map[string]any { + if in == nil { + return nil + } + return map[string]any{ + xaiParams.Name: xaiParams.Params{ + BaseURL: in.GetBaseUrl(), + Model: in.GetModel(), + Temperature: in.Temperature, + TopP: in.TopP, + MaxTokens: p.int64ToInt(in.MaxTokens), + Images: p.getStringPtrs(in.Images), + ImageProperties: p.getStrings(in.ImageProperties), + }, + } +} + +func (p *Parser) getStringPtrs(in *pb.TextArray) []*string { + if in != nil && len(in.Values) > 0 { + vals := make([]*string, len(in.Values)) + for i, v := range in.Values { + vals[i] = &v + } + return vals + } + return nil +} + +func (p *Parser) getStrings(in *pb.TextArray) []string { + if in != nil && len(in.Values) > 0 { + return in.Values + } + return nil +} + +func (p *Parser) int64ToInt(in *int64) *int { + if in != nil && *in > 0 { + out := int(*in) + return &out + } + return nil +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/generative/parser_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/generative/parser_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c8008c3f7d638c2396858d65dd831994a2c23c14 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/generative/parser_test.go @@ -0,0 +1,1122 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package generative + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + anthropic "github.com/weaviate/weaviate/modules/generative-anthropic/parameters" + anyscale "github.com/weaviate/weaviate/modules/generative-anyscale/parameters" + aws "github.com/weaviate/weaviate/modules/generative-aws/parameters" + cohere "github.com/weaviate/weaviate/modules/generative-cohere/parameters" + databricks "github.com/weaviate/weaviate/modules/generative-databricks/parameters" + friendliai "github.com/weaviate/weaviate/modules/generative-friendliai/parameters" + google "github.com/weaviate/weaviate/modules/generative-google/parameters" + mistral "github.com/weaviate/weaviate/modules/generative-mistral/parameters" + nvidia "github.com/weaviate/weaviate/modules/generative-nvidia/parameters" + ollama "github.com/weaviate/weaviate/modules/generative-ollama/parameters" + openai "github.com/weaviate/weaviate/modules/generative-openai/parameters" + xai "github.com/weaviate/weaviate/modules/generative-xai/parameters" + "github.com/weaviate/weaviate/usecases/modulecomponents/additional/generate" +) + +func makeStrPtr(s string) *string { + return &s +} + +func 
makeInt64Ptr(i int) *int64 { + v := int64(i) + return &v +} + +func makeIntPtr(i int) *int { + return &i +} + +func makeFloat64Ptr(f float64) *float64 { + return &f +} + +func makeBoolPtr(b bool) *bool { + return &b +} + +func getPropNames(props []*models.Property) []string { + names := make([]string, len(props)) + for i, prop := range props { + names[i] = prop.Name + } + return names +} + +func Test_RequestParser(t *testing.T) { + class := &models.Class{ + Class: "Test", + Properties: []*models.Property{ + { + Name: "prop", + DataType: []string{"text"}, + }, + }, + } + tests := []struct { + name string + uses127Api bool + in *pb.GenerativeSearch + expected *generate.Params + }{ + { + name: "empty request; old", + uses127Api: false, + }, + { + name: "empty request; new", + uses127Api: true, + }, + { + name: "generative search without props; old", + uses127Api: false, + in: &pb.GenerativeSearch{ + SingleResponsePrompt: "prompt", + GroupedResponseTask: "task", + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Task: makeStrPtr("task"), + PropertiesToExtract: getPropNames(class.Properties), + }, + }, + { + name: "generative search with props; old", + uses127Api: false, + in: &pb.GenerativeSearch{ + SingleResponsePrompt: "prompt", + GroupedResponseTask: "task", + GroupedProperties: getPropNames(class.Properties), + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Task: makeStrPtr("task"), + Properties: getPropNames(class.Properties), + PropertiesToExtract: getPropNames(class.Properties), + }, + }, + { + name: "generative search without props; new non-dynamic", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + }, + Grouped: &pb.GenerativeSearch_Grouped{ + Task: "task", + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Task: makeStrPtr("task"), + PropertiesToExtract: getPropNames(class.Properties), + }, + }, + { + name: "generative search with props; new 
non-dynamic", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + }, + Grouped: &pb.GenerativeSearch_Grouped{ + Task: "task", + Properties: &pb.TextArray{ + Values: getPropNames(class.Properties), + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Task: makeStrPtr("task"), + Properties: getPropNames(class.Properties), + PropertiesToExtract: getPropNames(class.Properties), + }, + }, + { + name: "generative search; single response; nil modelValue dynamic", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: nil, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: nil, + }, + }, + { + name: "generative search; single response; nil dynamic anthropic", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Anthropic{}, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: nil, + }, + }, + { + name: "generative search; single response; empty dynamic anthropic", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Anthropic{ + Anthropic: &pb.GenerativeAnthropic{}, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "anthropic": anthropic.Params{}, + }, + }, + }, + { + name: "generative search; single response; full dynamic anthropic", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Anthropic{ + Anthropic: &pb.GenerativeAnthropic{ + BaseUrl: makeStrPtr("url"), + MaxTokens: 
makeInt64Ptr(10), + Model: makeStrPtr("model"), + Temperature: makeFloat64Ptr(0.5), + TopK: makeInt64Ptr(5), + TopP: makeFloat64Ptr(0.5), + StopSequences: &pb.TextArray{ + Values: []string{"stop"}, + }, + }, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "anthropic": anthropic.Params{ + BaseURL: "url", + MaxTokens: makeIntPtr(10), + Model: "model", + Temperature: makeFloat64Ptr(0.5), + TopK: makeIntPtr(5), + TopP: makeFloat64Ptr(0.5), + StopSequences: []string{"stop"}, + }, + }, + }, + }, + { + name: "generative search; single response; nil dynamic anyscale", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Anyscale{}, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: nil, + }, + }, + { + name: "generative search; single response; empty dynamic anyscale", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Anyscale{ + Anyscale: &pb.GenerativeAnyscale{}, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "anyscale": anyscale.Params{}, + }, + }, + }, + { + name: "generative search; single response; full dynamic anyscale", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Anyscale{ + Anyscale: &pb.GenerativeAnyscale{ + BaseUrl: makeStrPtr("url"), + Model: makeStrPtr("model"), + Temperature: makeFloat64Ptr(0.5), + }, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "anyscale": anyscale.Params{ + BaseURL: "url", + Model: "model", + Temperature: 
makeFloat64Ptr(0.5), + }, + }, + }, + }, + { + name: "generative search; single response; nil dynamic aws", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Aws{}, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: nil, + }, + }, + { + name: "generative search; single response; empty dynamic aws", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Aws{ + Aws: &pb.GenerativeAWS{}, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "aws": aws.Params{}, + }, + }, + }, + { + name: "generative search; single response; full dynamic aws", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Aws{ + Aws: &pb.GenerativeAWS{ + Service: makeStrPtr("service"), + Region: makeStrPtr("region"), + Endpoint: makeStrPtr("endpoint"), + TargetModel: makeStrPtr("targetModel"), + TargetVariant: makeStrPtr("targetVariant"), + Model: makeStrPtr("model"), + Temperature: makeFloat64Ptr(0.5), + }, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "aws": aws.Params{ + Service: "service", + Region: "region", + Endpoint: "endpoint", + TargetModel: "targetModel", + TargetVariant: "targetVariant", + Model: "model", + Temperature: makeFloat64Ptr(0.5), + }, + }, + }, + }, + { + name: "generative search; single response; nil dynamic cohere", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Cohere{}, + }, + }, + }, + }, + 
expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: nil, + }, + }, + { + name: "generative search; single response; empty dynamic cohere", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Cohere{ + Cohere: &pb.GenerativeCohere{}, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "cohere": cohere.Params{}, + }, + }, + }, + { + name: "generative search; single response; full dynamic cohere", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Cohere{ + Cohere: &pb.GenerativeCohere{ + BaseUrl: makeStrPtr("url"), + MaxTokens: makeInt64Ptr(10), + Model: makeStrPtr("model"), + Temperature: makeFloat64Ptr(0.5), + K: makeInt64Ptr(5), + P: makeFloat64Ptr(0.5), + FrequencyPenalty: makeFloat64Ptr(0.5), + PresencePenalty: makeFloat64Ptr(0.5), + StopSequences: &pb.TextArray{ + Values: []string{"stop"}, + }, + }, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "cohere": cohere.Params{ + BaseURL: "url", + MaxTokens: makeIntPtr(10), + Model: "model", + Temperature: makeFloat64Ptr(0.5), + K: makeIntPtr(5), + P: makeFloat64Ptr(0.5), + FrequencyPenalty: makeFloat64Ptr(0.5), + PresencePenalty: makeFloat64Ptr(0.5), + StopSequences: []string{"stop"}, + }, + }, + }, + }, + { + name: "generative search; single response; nil dynamic mistral", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Mistral{}, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: nil, + }, + }, + { + name: "generative search; single response; 
empty dynamic mistral", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Mistral{ + Mistral: &pb.GenerativeMistral{}, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "mistral": mistral.Params{}, + }, + }, + }, + { + name: "generative search; single response; full dynamic mistral", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Mistral{ + Mistral: &pb.GenerativeMistral{ + BaseUrl: makeStrPtr("url"), + MaxTokens: makeInt64Ptr(10), + Model: makeStrPtr("model"), + Temperature: makeFloat64Ptr(0.5), + TopP: makeFloat64Ptr(0.5), + }, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "mistral": mistral.Params{ + BaseURL: "url", + MaxTokens: makeIntPtr(10), + Model: "model", + Temperature: makeFloat64Ptr(0.5), + TopP: makeFloat64Ptr(0.5), + }, + }, + }, + }, + { + name: "generative search; single response; nil dynamic ollama", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Ollama{}, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: nil, + }, + }, + { + name: "generative search; single response; empty dynamic ollama", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Ollama{ + Ollama: &pb.GenerativeOllama{}, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "ollama": ollama.Params{}, + }, + }, + }, + { + name: "generative 
search; single response; full dynamic ollama", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Ollama{ + Ollama: &pb.GenerativeOllama{ + ApiEndpoint: makeStrPtr("url"), + Model: makeStrPtr("model"), + Temperature: makeFloat64Ptr(0.5), + }, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "ollama": ollama.Params{ + ApiEndpoint: "url", + Model: "model", + Temperature: makeFloat64Ptr(0.5), + }, + }, + }, + }, + { + name: "generative search; single response; nil dynamic openai", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Openai{}, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: nil, + }, + }, + { + name: "generative search; single response; empty dynamic openai", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Openai{ + Openai: &pb.GenerativeOpenAI{}, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "openai": openai.Params{}, + }, + }, + }, + { + name: "generative search; single response; full dynamic openai", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Openai{ + Openai: &pb.GenerativeOpenAI{ + BaseUrl: makeStrPtr("baseURL"), + ApiVersion: makeStrPtr("apiVersion"), + ResourceName: makeStrPtr("resourceName"), + DeploymentId: makeStrPtr("deploymentId"), + IsAzure: makeBoolPtr(true), + MaxTokens: makeInt64Ptr(10), + Model: makeStrPtr("model"), + Temperature: 
makeFloat64Ptr(0.5), + N: makeInt64Ptr(5), + TopP: makeFloat64Ptr(0.5), + FrequencyPenalty: makeFloat64Ptr(0.5), + PresencePenalty: makeFloat64Ptr(0.5), + Stop: &pb.TextArray{ + Values: []string{"stop"}, + }, + }, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "openai": openai.Params{ + BaseURL: "baseURL", + ApiVersion: "apiVersion", + ResourceName: "resourceName", + DeploymentID: "deploymentId", + IsAzure: true, + MaxTokens: makeIntPtr(10), + Model: "model", + Temperature: makeFloat64Ptr(0.5), + N: makeIntPtr(5), + TopP: makeFloat64Ptr(0.5), + FrequencyPenalty: makeFloat64Ptr(0.5), + PresencePenalty: makeFloat64Ptr(0.5), + Stop: []string{"stop"}, + }, + }, + }, + }, + { + name: "generative search; single response; nil dynamic google", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Google{}, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: nil, + }, + }, + { + name: "generative search; single response; empty dynamic google", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Google{ + Google: &pb.GenerativeGoogle{}, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "google": google.Params{}, + }, + }, + }, + { + name: "generative search; single response; full dynamic google", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Google{ + Google: &pb.GenerativeGoogle{ + MaxTokens: makeInt64Ptr(10), + Model: makeStrPtr("model"), + Temperature: makeFloat64Ptr(0.5), + TopK: makeInt64Ptr(5), + TopP: 
makeFloat64Ptr(0.5), + FrequencyPenalty: makeFloat64Ptr(0.5), + PresencePenalty: makeFloat64Ptr(0.5), + StopSequences: &pb.TextArray{ + Values: []string{"stop"}, + }, + }, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "google": google.Params{ + MaxTokens: makeIntPtr(10), + Model: "model", + Temperature: makeFloat64Ptr(0.5), + TopK: makeIntPtr(5), + TopP: makeFloat64Ptr(0.5), + FrequencyPenalty: makeFloat64Ptr(0.5), + PresencePenalty: makeFloat64Ptr(0.5), + StopSequences: []string{"stop"}, + }, + }, + }, + }, + { + name: "generative search; single response; nil dynamic databricks", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Databricks{}, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: nil, + }, + }, + { + name: "generative search; single response; empty dynamic databricks", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Databricks{ + Databricks: &pb.GenerativeDatabricks{}, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "databricks": databricks.Params{}, + }, + }, + }, + { + name: "generative search; single response; full dynamic databricks", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Databricks{ + Databricks: &pb.GenerativeDatabricks{ + Endpoint: makeStrPtr("endpoint"), + Model: makeStrPtr("model"), + FrequencyPenalty: makeFloat64Ptr(0.5), + LogProbs: makeBoolPtr(true), + TopLogProbs: makeInt64Ptr(1), + MaxTokens: makeInt64Ptr(10), + N: makeInt64Ptr(5), + PresencePenalty: 
makeFloat64Ptr(0.5), + Stop: &pb.TextArray{ + Values: []string{"stop"}, + }, + Temperature: makeFloat64Ptr(0.5), + TopP: makeFloat64Ptr(0.5), + }, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "databricks": databricks.Params{ + Endpoint: "endpoint", + Model: "model", + FrequencyPenalty: makeFloat64Ptr(0.5), + Logprobs: makeBoolPtr(true), + TopLogprobs: makeIntPtr(1), + MaxTokens: makeIntPtr(10), + N: makeIntPtr(5), + PresencePenalty: makeFloat64Ptr(0.5), + Stop: []string{"stop"}, + Temperature: makeFloat64Ptr(0.5), + TopP: makeFloat64Ptr(0.5), + }, + }, + }, + }, + { + name: "generative search; single response; nil dynamic friendli", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Friendliai{}, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: nil, + }, + }, + { + name: "generative search; single response; empty dynamic friendli", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Friendliai{ + Friendliai: &pb.GenerativeFriendliAI{}, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "friendliai": friendliai.Params{}, + }, + }, + }, + { + name: "generative search; single response; full dynamic friendli", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Friendliai{ + Friendliai: &pb.GenerativeFriendliAI{ + BaseUrl: makeStrPtr("baseURL"), + Model: makeStrPtr("model"), + MaxTokens: makeInt64Ptr(10), + Temperature: makeFloat64Ptr(0.5), + N: makeInt64Ptr(5), + TopP: makeFloat64Ptr(0.5), + }, + }, + }, + }, + }, + 
}, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "friendliai": friendliai.Params{ + BaseURL: "baseURL", + Model: "model", + MaxTokens: makeIntPtr(10), + N: makeIntPtr(5), + Temperature: makeFloat64Ptr(0.5), + TopP: makeFloat64Ptr(0.5), + }, + }, + }, + }, + { + name: "generative search; single response; nil dynamic nvidia", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Nvidia{}, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: nil, + }, + }, + { + name: "generative search; single response; empty dynamic nvidia", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Nvidia{ + Nvidia: &pb.GenerativeNvidia{}, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "nvidia": nvidia.Params{}, + }, + }, + }, + { + name: "generative search; single response; full dynamic nvidia", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Nvidia{ + Nvidia: &pb.GenerativeNvidia{ + BaseUrl: makeStrPtr("baseURL"), + Model: makeStrPtr("model"), + Temperature: makeFloat64Ptr(0.5), + TopP: makeFloat64Ptr(0.5), + MaxTokens: makeInt64Ptr(10), + }, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "nvidia": nvidia.Params{ + BaseURL: "baseURL", + Model: "model", + Temperature: makeFloat64Ptr(0.5), + TopP: makeFloat64Ptr(0.5), + MaxTokens: makeIntPtr(10), + }, + }, + }, + }, + { + name: "generative search; single response; nil dynamic xai", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: 
&pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Xai{}, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: nil, + }, + }, + { + name: "generative search; single response; empty dynamic xai", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Xai{ + Xai: &pb.GenerativeXAI{}, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "xai": xai.Params{}, + }, + }, + }, + { + name: "generative search; single response; full dynamic xai", + uses127Api: true, + in: &pb.GenerativeSearch{ + Single: &pb.GenerativeSearch_Single{ + Prompt: "prompt", + Queries: []*pb.GenerativeProvider{ + { + Kind: &pb.GenerativeProvider_Xai{ + Xai: &pb.GenerativeXAI{ + BaseUrl: makeStrPtr("baseURL"), + Model: makeStrPtr("model"), + Temperature: makeFloat64Ptr(0.5), + TopP: makeFloat64Ptr(0.5), + MaxTokens: makeInt64Ptr(10), + }, + }, + }, + }, + }, + }, + expected: &generate.Params{ + Prompt: makeStrPtr("prompt"), + Options: map[string]any{ + "xai": xai.Params{ + BaseURL: "baseURL", + Model: "model", + Temperature: makeFloat64Ptr(0.5), + TopP: makeFloat64Ptr(0.5), + MaxTokens: makeIntPtr(10), + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + parser := NewParser(test.uses127Api) + extracted := parser.Extract(test.in, class) + require.Equal(t, test.expected, extracted) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/generative/replier.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/generative/replier.go new file mode 100644 index 0000000000000000000000000000000000000000..85d1d84c8e80ee49f0b563eca7e808c238018d67 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/generative/replier.go @@ -0,0 +1,399 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package generative + +import ( + "errors" + "fmt" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/modulecapabilities" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + anthropicClients "github.com/weaviate/weaviate/modules/generative-anthropic/clients" + anthropicParams "github.com/weaviate/weaviate/modules/generative-anthropic/parameters" + anyscaleParams "github.com/weaviate/weaviate/modules/generative-anyscale/parameters" + awsParams "github.com/weaviate/weaviate/modules/generative-aws/parameters" + cohereClients "github.com/weaviate/weaviate/modules/generative-cohere/clients" + cohereParams "github.com/weaviate/weaviate/modules/generative-cohere/parameters" + databricksClients "github.com/weaviate/weaviate/modules/generative-databricks/clients" + databricksParams "github.com/weaviate/weaviate/modules/generative-databricks/parameters" + friendliClients "github.com/weaviate/weaviate/modules/generative-friendliai/clients" + friendliParams "github.com/weaviate/weaviate/modules/generative-friendliai/parameters" + googleClients "github.com/weaviate/weaviate/modules/generative-google/clients" + googleParams "github.com/weaviate/weaviate/modules/generative-google/parameters" + mistralClients "github.com/weaviate/weaviate/modules/generative-mistral/clients" + mistralParams "github.com/weaviate/weaviate/modules/generative-mistral/parameters" + nvidiaClients "github.com/weaviate/weaviate/modules/generative-nvidia/clients" + nvidiaParams "github.com/weaviate/weaviate/modules/generative-nvidia/parameters" + ollamaParams 
"github.com/weaviate/weaviate/modules/generative-ollama/parameters" + openaiClients "github.com/weaviate/weaviate/modules/generative-openai/clients" + openaiParams "github.com/weaviate/weaviate/modules/generative-openai/parameters" + xaiClients "github.com/weaviate/weaviate/modules/generative-xai/clients" + xaiParams "github.com/weaviate/weaviate/modules/generative-xai/parameters" + "github.com/weaviate/weaviate/usecases/modulecomponents/additional/generate" + additionalModels "github.com/weaviate/weaviate/usecases/modulecomponents/additional/models" +) + +type Replier struct { + logger logrus.FieldLogger + queryParams queryParams + uses127Api bool +} + +type queryParams interface { + ProviderName() string + ReturnMetadataForSingle() bool + ReturnMetadataForGrouped() bool + ReturnDebugForSingle() bool + ReturnDebugForGrouped() bool +} + +func NewReplier(logger logrus.FieldLogger, queryParams queryParams, uses127Api bool) *Replier { + return &Replier{ + logger: logger, + queryParams: queryParams, + uses127Api: uses127Api, + } +} + +func (r *Replier) Extract(_additional map[string]any, params any, metadata *pb.MetadataResult) (*pb.GenerativeResult, *pb.GenerativeResult, string, error) { + if r.uses127Api { + single, grouped, err := r.extractGenerativeResult(_additional, params) + return single, grouped, "", err + } else { + grouped, err := r.extractDeprecated(_additional, params, metadata) + if err != nil { + return nil, nil, "", err + } + return nil, nil, grouped, nil + } +} + +func (r *Replier) extractGenerativeResult(_additional map[string]any, params any) (*pb.GenerativeResult, *pb.GenerativeResult, error) { + single, grouped, err := r.extractGenerativeReply(_additional, params) + if err != nil { + return nil, nil, err + } + return &pb.GenerativeResult{Values: []*pb.GenerativeReply{single}}, &pb.GenerativeResult{Values: []*pb.GenerativeReply{grouped}}, nil +} + +func (r *Replier) extractDeprecated(_additional map[string]any, params any, metadata 
*pb.MetadataResult) (string, error) { + var generativeGroupResults string + generateFmt, err := r.extractGenerateResultDeprecated(_additional, params) + if err != nil { + return "", err + } + + if generateFmt.SingleResult != nil && *generateFmt.SingleResult != "" { + metadata.Generative = *generateFmt.SingleResult + metadata.GenerativePresent = true + } + + // grouped results are only added to the first object for GQL reasons + // however, reranking can result in a different order, so we need to check every object + // recording the result if it's present assuming that it is at least somewhere and will be caught + if generateFmt.GroupedResult != nil && *generateFmt.GroupedResult != "" { + generativeGroupResults = *generateFmt.GroupedResult + } + return generativeGroupResults, nil +} + +func (r *Replier) extractGenerateResultDeprecated(_additional map[string]any, params any) (*additionalModels.GenerateResult, error) { + generateFmt := &additionalModels.GenerateResult{} + if generate, ok := _additional["generate"]; ok { + generateParams, ok := generate.(map[string]any) + if !ok { + return nil, errors.New("could not cast generative result additional prop") + } + if generateParams["singleResult"] != nil { + if singleResult, ok := generateParams["singleResult"].(*string); ok { + generateFmt.SingleResult = singleResult + } + } + if generateParams["groupedResult"] != nil { + if groupedResult, ok := generateParams["groupedResult"].(*string); ok { + generateFmt.GroupedResult = groupedResult + } + } + if generateParams["error"] != nil { + if err, ok := generateParams["error"].(error); ok { + generateFmt.Error = err + } + } + } + if generateFmt.Error != nil { + return nil, generateFmt.Error + } + generativeSearch, ok := params.(*generate.Params) + if !ok { + return nil, errors.New("could not cast generative search params") + } + if generativeSearch.Prompt != nil && generateFmt.SingleResult == nil { + return nil, errors.New("no results for generative search despite a search 
request. Is a generative module enabled?") + } + return generateFmt, nil +} + +func (r *Replier) extractGenerativeMetadata(results map[string]any) (*pb.GenerativeMetadata, error) { + metadata := &pb.GenerativeMetadata{} + providerName := r.queryParams.ProviderName() + switch providerName { + case anthropicParams.Name: + params := anthropicClients.GetResponseParams(results) + if params == nil { + r.logger.WithField("results", results).WithField("provider", providerName).Error("could not get metadata") + return metadata, nil + } + anthropic := &pb.GenerativeAnthropicMetadata{} + if params.Usage != nil { + anthropic.Usage = &pb.GenerativeAnthropicMetadata_Usage{ + InputTokens: int64(params.Usage.InputTokens), + OutputTokens: int64(params.Usage.OutputTokens), + } + } + metadata.Kind = &pb.GenerativeMetadata_Anthropic{Anthropic: anthropic} + case anyscaleParams.Name: + // Do nothing, no metadata for Anyscale + case awsParams.Name: + // Do nothing, no metadata for AWS + case cohereParams.Name: + params := cohereClients.GetResponseParams(results) + if params == nil { + return nil, fmt.Errorf("could not get request metadata for provider: %s", providerName) + } + cohere := &pb.GenerativeCohereMetadata{} + if params.Meta != nil { + if params.Meta.ApiVersion != nil { + cohere.ApiVersion = &pb.GenerativeCohereMetadata_ApiVersion{ + Version: params.Meta.ApiVersion.Version, + IsDeprecated: params.Meta.ApiVersion.IsDeprecated, + IsExperimental: params.Meta.ApiVersion.IsExperimental, + } + } + if params.Meta.BilledUnits != nil { + cohere.BilledUnits = &pb.GenerativeCohereMetadata_BilledUnits{ + InputTokens: params.Meta.BilledUnits.InputTokens, + OutputTokens: params.Meta.BilledUnits.OutputTokens, + SearchUnits: params.Meta.BilledUnits.SearchUnits, + Classifications: params.Meta.BilledUnits.Classifications, + } + } + if params.Meta.Tokens != nil { + cohere.Tokens = &pb.GenerativeCohereMetadata_Tokens{ + InputTokens: params.Meta.Tokens.InputTokens, + OutputTokens: 
params.Meta.Tokens.OutputTokens, + } + } + } + metadata.Kind = &pb.GenerativeMetadata_Cohere{Cohere: cohere} + case mistralParams.Name: + params := mistralClients.GetResponseParams(results) + if params == nil { + return nil, fmt.Errorf("could not get request metadata for provider: %s from results: %+v", providerName, results) + } + mistral := &pb.GenerativeMistralMetadata{} + if params.Usage != nil { + mistral.Usage = &pb.GenerativeMistralMetadata_Usage{ + PromptTokens: convertIntPtrToInt64Ptr(params.Usage.PromptTokens), + CompletionTokens: convertIntPtrToInt64Ptr(params.Usage.CompletionTokens), + TotalTokens: convertIntPtrToInt64Ptr(params.Usage.TotalTokens), + } + } + metadata.Kind = &pb.GenerativeMetadata_Mistral{Mistral: mistral} + case ollamaParams.Name: + // Do nothing, no metadata for Ollama + case openaiParams.Name: + params := openaiClients.GetResponseParams(results) + if params == nil { + return nil, fmt.Errorf("could not get request metadata for provider: %s", providerName) + } + openai := &pb.GenerativeOpenAIMetadata{} + if params.Usage != nil { + openai.Usage = &pb.GenerativeOpenAIMetadata_Usage{ + PromptTokens: convertIntPtrToInt64Ptr(params.Usage.PromptTokens), + CompletionTokens: convertIntPtrToInt64Ptr(params.Usage.CompletionTokens), + TotalTokens: convertIntPtrToInt64Ptr(params.Usage.TotalTokens), + } + } + metadata.Kind = &pb.GenerativeMetadata_Openai{Openai: openai} + case googleParams.Name: + params := googleClients.GetResponseParams(results) + if params == nil { + return nil, fmt.Errorf("could not get request metadata for provider: %s", providerName) + } + google := &pb.GenerativeGoogleMetadata{} + if params.Metadata != nil { + metadata := &pb.GenerativeGoogleMetadata_Metadata{} + if params.Metadata.TokenMetadata != nil { + tokenMetadata := &pb.GenerativeGoogleMetadata_TokenMetadata{} + if params.Metadata.TokenMetadata.InputTokenCount != nil { + tokenMetadata.InputTokenCount = &pb.GenerativeGoogleMetadata_TokenCount{ + TotalBillableCharacters: 
¶ms.Metadata.TokenMetadata.InputTokenCount.TotalBillableCharacters, + TotalTokens: ¶ms.Metadata.TokenMetadata.InputTokenCount.TotalTokens, + } + } + if params.Metadata.TokenMetadata.OutputTokenCount != nil { + tokenMetadata.OutputTokenCount = &pb.GenerativeGoogleMetadata_TokenCount{ + TotalBillableCharacters: ¶ms.Metadata.TokenMetadata.OutputTokenCount.TotalBillableCharacters, + TotalTokens: ¶ms.Metadata.TokenMetadata.OutputTokenCount.TotalTokens, + } + } + metadata.TokenMetadata = tokenMetadata + } + google.Metadata = metadata + } + if params.UsageMetadata != nil { + google.UsageMetadata = &pb.GenerativeGoogleMetadata_UsageMetadata{ + PromptTokenCount: convertIntToInt64Ptr(params.UsageMetadata.PromptTokenCount), + CandidatesTokenCount: convertIntToInt64Ptr(params.UsageMetadata.CandidatesTokenCount), + TotalTokenCount: convertIntToInt64Ptr(params.UsageMetadata.TotalTokenCount), + } + } + metadata.Kind = &pb.GenerativeMetadata_Google{Google: google} + case databricksParams.Name: + params := databricksClients.GetResponseParams(results) + if params == nil { + return nil, fmt.Errorf("could not get request metadata for provider: %s", providerName) + } + databricks := &pb.GenerativeDatabricksMetadata{} + if params.Usage != nil { + databricks.Usage = &pb.GenerativeDatabricksMetadata_Usage{ + PromptTokens: convertIntPtrToInt64Ptr(params.Usage.PromptTokens), + CompletionTokens: convertIntPtrToInt64Ptr(params.Usage.CompletionTokens), + TotalTokens: convertIntPtrToInt64Ptr(params.Usage.TotalTokens), + } + } + metadata.Kind = &pb.GenerativeMetadata_Databricks{Databricks: databricks} + case friendliParams.Name: + params := friendliClients.GetResponseParams(results) + if params == nil { + return nil, fmt.Errorf("could not get request metadata for provider: %s", providerName) + } + friendliai := &pb.GenerativeFriendliAIMetadata{} + if params.Usage != nil { + friendliai.Usage = &pb.GenerativeFriendliAIMetadata_Usage{ + PromptTokens: 
convertIntPtrToInt64Ptr(params.Usage.PromptTokens), + CompletionTokens: convertIntPtrToInt64Ptr(params.Usage.CompletionTokens), + TotalTokens: convertIntPtrToInt64Ptr(params.Usage.TotalTokens), + } + } + metadata.Kind = &pb.GenerativeMetadata_Friendliai{Friendliai: friendliai} + case nvidiaParams.Name: + params := nvidiaClients.GetResponseParams(results) + if params == nil { + return nil, fmt.Errorf("could not get request metadata for provider: %s", providerName) + } + nvidia := &pb.GenerativeNvidiaMetadata{} + if params.Usage != nil { + nvidia.Usage = &pb.GenerativeNvidiaMetadata_Usage{ + PromptTokens: convertIntPtrToInt64Ptr(params.Usage.PromptTokens), + CompletionTokens: convertIntPtrToInt64Ptr(params.Usage.CompletionTokens), + TotalTokens: convertIntPtrToInt64Ptr(params.Usage.TotalTokens), + } + } + metadata.Kind = &pb.GenerativeMetadata_Nvidia{Nvidia: nvidia} + case xaiParams.Name: + params := xaiClients.GetResponseParams(results) + if params == nil { + return nil, fmt.Errorf("could not get request metadata for provider: %s", providerName) + } + xai := &pb.GenerativeXAIMetadata{} + if params.Usage != nil { + xai.Usage = &pb.GenerativeXAIMetadata_Usage{ + PromptTokens: convertIntPtrToInt64Ptr(params.Usage.PromptTokens), + CompletionTokens: convertIntPtrToInt64Ptr(params.Usage.CompletionTokens), + TotalTokens: convertIntPtrToInt64Ptr(params.Usage.TotalTokens), + } + } + metadata.Kind = &pb.GenerativeMetadata_Xai{Xai: xai} + default: + return nil, fmt.Errorf("provider: %s, not supported", providerName) + } + return metadata, nil +} + +func (r *Replier) extractGenerativeReply(_additional map[string]any, params any) (*pb.GenerativeReply, *pb.GenerativeReply, error) { + reply := &pb.GenerativeReply{} + grouped := &pb.GenerativeReply{} + + generateParams, ok := params.(*generate.Params) + if !ok { + return nil, nil, errors.New("could not cast generative search params") + } + + if generate, ok := _additional["generate"]; ok { + generateResults, ok := 
generate.(map[string]any) + if !ok { + return nil, nil, errors.New("could not cast generative result additional prop") + } + if generateResults["singleResult"] != nil { + if singleResult, ok := generateResults["singleResult"].(*string); ok && singleResult != nil { + reply.Result = *singleResult + } + } else { + if generateParams.Prompt != nil { + return nil, nil, errors.New("no results for generative search despite a search request. Is a generative module enabled?") + } + } + // grouped results are only added to the first object for GQL reasons + // however, reranking can result in a different order, so we need to check every object + // recording the result if it's present assuming that it is at least somewhere and will be caught + if generateResults["groupedResult"] != nil { + if groupedResult, ok := generateResults["groupedResult"].(*string); ok && groupedResult != nil { + grouped.Result = *groupedResult + } + } + if generateResults["error"] != nil { + if err, ok := generateResults["error"].(error); ok { + return nil, nil, err + } + } + if generateResults["debug"] != nil && (r.queryParams.ReturnDebugForSingle() || r.queryParams.ReturnDebugForGrouped()) { + if debug, ok := generateResults["debug"].(*modulecapabilities.GenerateDebugInformation); ok && debug != nil { + prompt := debug.Prompt + if r.queryParams.ReturnDebugForSingle() { + reply.Debug = &pb.GenerativeDebug{FullPrompt: &prompt} + } + if r.queryParams.ReturnDebugForGrouped() { + grouped.Debug = &pb.GenerativeDebug{FullPrompt: &prompt} + } + } + } + if r.queryParams.ReturnMetadataForSingle() || r.queryParams.ReturnMetadataForGrouped() { + metadata, err := r.extractGenerativeMetadata(generateResults) + if err != nil { + return nil, nil, err + } + if r.queryParams.ReturnMetadataForSingle() { + reply.Metadata = metadata + } + g := r.queryParams.ReturnMetadataForGrouped() + if generateResults["groupedResult"] != nil && g { + grouped.Metadata = metadata + } + } + } + return reply, grouped, nil +} + +func 
convertIntPtrToInt64Ptr(i *int) *int64 { + if i == nil { + return nil + } + converted := int64(*i) + return &converted +} + +func convertIntToInt64Ptr(i int) *int64 { + converted := int64(i) + return &converted +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/health.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/health.go new file mode 100644 index 0000000000000000000000000000000000000000..6c9cfadb8484001b712332624b781fdca9bb2819 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/health.go @@ -0,0 +1,38 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "context" + + "google.golang.org/grpc/health/grpc_health_v1" +) + +func (s *Service) healthCheck() *grpc_health_v1.HealthCheckResponse { + return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING} +} + +func (s *Service) Check(ctx context.Context, request *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { + return s.healthCheck(), nil +} + +func (s *Service) Watch(request *grpc_health_v1.HealthCheckRequest, server grpc_health_v1.Health_WatchServer) error { + return server.Send(s.healthCheck()) +} + +func (s *Service) List(ctx context.Context, request *grpc_health_v1.HealthListRequest) (*grpc_health_v1.HealthListResponse, error) { + return &grpc_health_v1.HealthListResponse{ + Statuses: map[string]*grpc_health_v1.HealthCheckResponse{ + "weaviate": s.healthCheck(), + }, + }, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/mapping.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/mapping.go new file mode 100644 index 
0000000000000000000000000000000000000000..27b4f9ba7a7d84c51219e73c8c546c2a07a6f426 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/mapping.go @@ -0,0 +1,387 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + "github.com/weaviate/weaviate/usecases/byteops" + "google.golang.org/protobuf/runtime/protoimpl" +) + +type Mapper struct{} + +func NewMapping() *Mapper { + return &Mapper{} +} + +func (m *Mapper) NewPrimitiveValue(v interface{}, dt schema.DataType) (*pb.Value, error) { + if v == nil { + return m.NewNilValue(), nil + } + innerDt, ok := schema.IsArrayType(dt) + if ok { + return m.parsePrimitiveArray(v, dt, innerDt) + } else { + switch dt { + case schema.DataTypeBoolean: + val, ok := v.(bool) + if !ok { + return nil, protoimpl.X.NewError("invalid type: %T expected bool when serializing bool", v) + } + return NewBoolValue(val), nil + case schema.DataTypeDate: + val, ok := v.(string) + if !ok { + return nil, protoimpl.X.NewError("invalid type: %T expected string when serializing date", v) + } + return NewDateValue(val), nil + case schema.DataTypeNumber: + val, ok := v.(float64) + if !ok { + return nil, protoimpl.X.NewError("invalid type: %T expected float64 when serializing number", v) + } + return NewNumberValue(val), nil + case schema.DataTypeInt: + val, ok := v.(float64) + if !ok { // integers are returned as float64 from search + return nil, protoimpl.X.NewError("invalid type: %T expected float64 when serializing int property", v) + } 
+ return NewIntValue(int64(val)), nil + case schema.DataTypeString: + val, ok := v.(string) + if !ok { + return nil, protoimpl.X.NewError("invalid type: %T expected string when serializing string property", v) + } + return NewTextValue(val), nil + case schema.DataTypeText: + val, ok := v.(string) + if !ok { + return nil, protoimpl.X.NewError("invalid type: %T expected string when serializing text property", v) + } + return NewTextValue(val), nil + case schema.DataTypeUUID: + val, ok := v.(string) + if !ok { + return nil, protoimpl.X.NewError("invalid type: %T expected string when serializing uuid property", v) + } + return NewUuidValue(val), nil + case schema.DataTypeGeoCoordinates: + val, ok := v.(*models.GeoCoordinates) + if !ok { + return nil, protoimpl.X.NewError("invalid type: %T expected *models.GeoCoordinates when serializing geocoordinate property", v) + } + return NewGeoValue(val), nil + case schema.DataTypeBlob: + val, ok := v.(string) + if !ok { + return nil, protoimpl.X.NewError("invalid type: %T expected string when serializing blob property", v) + } + return newBlobValue(val), nil + case schema.DataTypePhoneNumber: + val, ok := v.(*models.PhoneNumber) + if !ok { + return nil, protoimpl.X.NewError("invalid type: %T expected *models.PhoneNumber when serializing phone number property", v) + } + return newPhoneNumberValue(val), nil + default: + return nil, protoimpl.X.NewError("invalid type: %T", v) + } + } +} + +func (m *Mapper) NewNestedValue(v interface{}, dt schema.DataType, parent schema.PropertyInterface, prop search.SelectProperty) (*pb.Value, error) { + if v == nil { + return m.NewNilValue(), nil + } + switch dt { + case schema.DataTypeObject: + if _, ok := v.(map[string]interface{}); !ok { + return nil, protoimpl.X.NewError("invalid type: %T expected map[string]interface{}", v) + } + obj, err := m.newObject(v.(map[string]interface{}), parent, prop) + if err != nil { + return nil, errors.Wrap(err, "creating nested object") + } + return 
NewObjectValue(obj), nil + case schema.DataTypeObjectArray: + if _, ok := v.([]interface{}); !ok { + return nil, protoimpl.X.NewError("invalid type: %T expected []map[string]interface{}", v) + } + list, err := m.newObjectList125(v.([]interface{}), parent, prop) + if err != nil { + return nil, errors.Wrap(err, "creating nested object array") + } + return newListValue(list), nil + default: + return nil, protoimpl.X.NewError("invalid type: %T", v) + } +} + +// NewObject constructs a Object from a general-purpose Go map. +// The map keys must be valid UTF-8. +// The map values are converted using NewValue. +func (m *Mapper) newObject(v map[string]interface{}, parent schema.PropertyInterface, selectProp search.SelectProperty) (*pb.Properties, error) { + if !selectProp.IsObject { + return nil, errors.New("select property is not an object") + } + x := &pb.Properties{Fields: make(map[string]*pb.Value, len(v))} + for _, selectProp := range selectProp.Props { + val, ok := v[selectProp.Name] + if !ok { + continue + } + + dt, err := schema.GetNestedPropertyDataType(parent, selectProp.Name) + if err != nil { + return nil, errors.Wrapf(err, "getting data type of nested property %s", selectProp.Name) + } + if *dt == schema.DataTypeObject || *dt == schema.DataTypeObjectArray { + nested, err := schema.GetNestedPropertyByName(parent, selectProp.Name) + if err != nil { + return nil, errors.Wrapf(err, "getting nested property %s", selectProp.Name) + } + x.Fields[selectProp.Name], err = m.NewNestedValue(val, *dt, &NestedProperty{NestedProperty: nested}, selectProp) + if err != nil { + return nil, errors.Wrapf(err, "creating nested object value %s", selectProp.Name) + } + } else { + x.Fields[selectProp.Name], err = m.NewPrimitiveValue(val, *dt) + if err != nil { + return nil, errors.Wrapf(err, "creating nested primitive value %s", selectProp.Name) + } + } + } + return x, nil +} + +func parseArray[T float64 | bool | string](v interface{}, dt schema.DataType) ([]T, error) { + val, ok := 
v.([]T) + if !ok { + return nil, protoimpl.X.NewError("invalid type: %T when serializing %v", v, dt.String()) + } + return val, nil +} + +func (m *Mapper) newListValueBool(v interface{}) (*pb.Value, error) { + var listValue *pb.ListValue + makeListValue := func(v []bool) *pb.ListValue { + return &pb.ListValue{Kind: &pb.ListValue_BoolValues{BoolValues: &pb.BoolValues{Values: v}}} + } + if _, ok := v.([]interface{}); ok { + listValue = makeListValue([]bool{}) + } else { + values, err := parseArray[bool](v, schema.DataTypeBooleanArray) + if err != nil { + return nil, err + } + listValue = makeListValue(values) + + } + return &pb.Value{Kind: &pb.Value_ListValue{ListValue: listValue}}, nil +} + +func (m *Mapper) newListValueDate(v interface{}) (*pb.Value, error) { + var listValue *pb.ListValue + makeListValue := func(v []string) *pb.ListValue { + return &pb.ListValue{Kind: &pb.ListValue_DateValues{DateValues: &pb.DateValues{Values: v}}} + } + if _, ok := v.([]interface{}); ok { + listValue = makeListValue([]string{}) + } else { + values, err := parseArray[string](v, schema.DataTypeDateArray) + if err != nil { + return nil, err + } + listValue = makeListValue(values) + } + return &pb.Value{Kind: &pb.Value_ListValue{ListValue: listValue}}, nil +} + +func (m *Mapper) newListValueNumber(v interface{}) (*pb.Value, error) { + var listValue *pb.ListValue + makeListValue := func(v []float64) *pb.ListValue { + return &pb.ListValue{Kind: &pb.ListValue_NumberValues{NumberValues: &pb.NumberValues{Values: byteops.Fp64SliceToBytes(v)}}} + } + if _, ok := v.([]interface{}); ok { + listValue = makeListValue([]float64{}) + } else { + values, err := parseArray[float64](v, schema.DataTypeNumberArray) + if err != nil { + return nil, err + } + listValue = makeListValue(values) + } + return &pb.Value{Kind: &pb.Value_ListValue{ListValue: listValue}}, nil +} + +func (m *Mapper) newListValueInt(v interface{}) (*pb.Value, error) { + var listValue *pb.ListValue + makeListValue := func(v 
[]float64) *pb.ListValue { + return &pb.ListValue{Kind: &pb.ListValue_IntValues{IntValues: &pb.IntValues{Values: byteops.IntsToByteVector((v))}}} + } + if _, ok := v.([]interface{}); ok { + listValue = makeListValue([]float64{}) + } else { + values, err := parseArray[float64](v, schema.DataTypeIntArray) + if err != nil { + return nil, err + } + listValue = makeListValue(values) + } + return &pb.Value{Kind: &pb.Value_ListValue{ListValue: listValue}}, nil +} + +func (m *Mapper) newListValueText(v interface{}) (*pb.Value, error) { + var listValue *pb.ListValue + makeListValue := func(v []string) *pb.ListValue { + return &pb.ListValue{Kind: &pb.ListValue_TextValues{TextValues: &pb.TextValues{Values: v}}} + } + if _, ok := v.([]interface{}); ok { + listValue = makeListValue([]string{}) + } else { + values, err := parseArray[string](v, schema.DataTypeTextArray) + if err != nil { + return nil, err + } + listValue = makeListValue(values) + } + return &pb.Value{Kind: &pb.Value_ListValue{ListValue: listValue}}, nil +} + +func (m *Mapper) newListValueUuid(v interface{}) (*pb.Value, error) { + var listValue *pb.ListValue + makeListValue := func(v []string) *pb.ListValue { + return &pb.ListValue{Kind: &pb.ListValue_UuidValues{UuidValues: &pb.UuidValues{Values: v}}} + } + if _, ok := v.([]interface{}); ok { + listValue = makeListValue([]string{}) + } else { + values, err := parseArray[string](v, schema.DataTypeUUIDArray) + if err != nil { + return nil, err + } + listValue = makeListValue(values) + } + return &pb.Value{Kind: &pb.Value_ListValue{ListValue: listValue}}, nil +} + +func (m *Mapper) parsePrimitiveArray(v interface{}, dt, innerDt schema.DataType) (*pb.Value, error) { + switch dt { + case schema.DataTypeBooleanArray: + return m.newListValueBool(v) + case schema.DataTypeDateArray: + return m.newListValueDate(v) + case schema.DataTypeNumberArray: + return m.newListValueNumber(v) + case schema.DataTypeIntArray: + return m.newListValueInt(v) + case 
schema.DataTypeStringArray: + return m.newListValueText(v) + case schema.DataTypeTextArray: + return m.newListValueText(v) + case schema.DataTypeUUIDArray: + return m.newListValueUuid(v) + default: + return nil, protoimpl.X.NewError("invalid type: %T", v) + } +} + +func (m *Mapper) newObjectList125(v []interface{}, parent schema.PropertyInterface, selectProp search.SelectProperty) (*pb.ListValue, error) { + if !selectProp.IsObject { + return nil, errors.New("select property is not an object") + } + x := make([]*pb.Properties, len(v)) + for i, v := range v { + if _, ok := v.(map[string]interface{}); !ok { + return nil, protoimpl.X.NewError("invalid type: %T expected map[string]interface{}", v) + } + value, err := m.newObject(v.(map[string]interface{}), parent, selectProp) + if err != nil { + return nil, err + } + x[i] = value + } + return &pb.ListValue{Kind: &pb.ListValue_ObjectValues{ObjectValues: &pb.ObjectValues{Values: x}}}, nil +} + +// NewBoolValue constructs a new boolean Value. +func NewBoolValue(v bool) *pb.Value { + return &pb.Value{Kind: &pb.Value_BoolValue{BoolValue: v}} +} + +// NewNumberValue constructs a new number Value. +func NewNumberValue(v float64) *pb.Value { + return &pb.Value{Kind: &pb.Value_NumberValue{NumberValue: v}} +} + +// NewIntValue constructs a new number Value. +func NewIntValue(v int64) *pb.Value { + return &pb.Value{Kind: &pb.Value_IntValue{IntValue: v}} +} + +func NewTextValue(v string) *pb.Value { + return &pb.Value{Kind: &pb.Value_TextValue{TextValue: v}} +} + +// NewDateValue constructs a new string Value. +func NewDateValue(v string) *pb.Value { + return &pb.Value{Kind: &pb.Value_DateValue{DateValue: v}} +} + +// NewUuidValue constructs a new string Value. +func NewUuidValue(v string) *pb.Value { + return &pb.Value{Kind: &pb.Value_UuidValue{UuidValue: v}} +} + +// NewGeoValue constructs a new geo Value. 
+func NewGeoValue(v *models.GeoCoordinates) *pb.Value { + return &pb.Value{Kind: &pb.Value_GeoValue{GeoValue: &pb.GeoCoordinate{Latitude: *v.Latitude, Longitude: *v.Longitude}}} +} + +// NewObjectValue constructs a new struct Value. +func NewObjectValue(v *pb.Properties) *pb.Value { + return &pb.Value{Kind: &pb.Value_ObjectValue{ObjectValue: v}} +} + +// NewListValue constructs a new list Value. +func newListValue(v *pb.ListValue) *pb.Value { + return &pb.Value{Kind: &pb.Value_ListValue{ListValue: v}} +} + +// NewBlobValue constructs a new blob Value. +func newBlobValue(v string) *pb.Value { + return &pb.Value{Kind: &pb.Value_BlobValue{BlobValue: v}} +} + +// NewPhoneNumberValue constructs a new phone number Value. +func newPhoneNumberValue(v *models.PhoneNumber) *pb.Value { + return &pb.Value{Kind: &pb.Value_PhoneValue{ + PhoneValue: &pb.PhoneNumber{ + CountryCode: v.CountryCode, + DefaultCountry: v.DefaultCountry, + Input: v.Input, + InternationalFormatted: v.InternationalFormatted, + National: v.National, + NationalFormatted: v.NationalFormatted, + Valid: v.Valid, + }, + }} +} + +// NewNilValue constructs a new nil Value. +func (m *Mapper) NewNilValue() *pb.Value { + return &pb.Value{Kind: &pb.Value_NullValue{}} +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/mapping_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/mapping_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ae792bbdabbaa4f8c9abb982ba5735bc9bced4ad --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/mapping_test.go @@ -0,0 +1,184 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "testing" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/byteops" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/schema" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +type innerTest struct { + datatype schema.DataType + out *pb.Value + shouldError bool +} + +func makeTestList(succeedingInnerTests map[schema.DataType]*pb.Value) []innerTest { + dtypes := append(schema.PrimitiveDataTypes, schema.DeprecatedPrimitiveDataTypes...) + list := make([]innerTest, len(dtypes)) + for idx, dtype := range dtypes { + out, ok := succeedingInnerTests[dtype] + if ok { + list[idx] = innerTest{ + datatype: dtype, + out: out, + shouldError: false, + } + } else { + list[idx] = innerTest{ + datatype: dtype, + out: nil, + shouldError: true, + } + } + } + return list +} + +func TestNewPrimitiveValue(t *testing.T) { + float_val := float32(1.1) + + tests := []struct { + name string + in any + tests []innerTest + }{ + { + name: "bools", + in: []bool{true, false}, + tests: makeTestList(map[schema.DataType]*pb.Value{ + schema.DataTypeBooleanArray: {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_BoolValues{BoolValues: &pb.BoolValues{Values: []bool{true, false}}}, + }}}, + }), + }, + { + name: "strings", + in: []string{"a string", "another string"}, + tests: makeTestList(map[schema.DataType]*pb.Value{ + schema.DataTypeDateArray: {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_DateValues{DateValues: &pb.DateValues{Values: []string{"a string", "another string"}}}, + }}}, + schema.DataTypeStringArray: {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_TextValues{TextValues: &pb.TextValues{Values: []string{"a string", "another string"}}}, + }}}, + schema.DataTypeTextArray: {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: 
&pb.ListValue_TextValues{TextValues: &pb.TextValues{Values: []string{"a string", "another string"}}}, + }}}, + schema.DataTypeUUIDArray: {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_UuidValues{UuidValues: &pb.UuidValues{Values: []string{"a string", "another string"}}}, + }}}, + }), + }, + { + name: "float64s", + in: []float64{1.1, 2.2, 3.3}, + tests: makeTestList(map[schema.DataType]*pb.Value{ + schema.DataTypeNumberArray: {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_NumberValues{NumberValues: &pb.NumberValues{Values: byteops.Fp64SliceToBytes([]float64{1.1, 2.2, 3.3})}}, + }}}, + schema.DataTypeIntArray: {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_IntValues{IntValues: &pb.IntValues{Values: byteops.IntsToByteVector([]float64{1, 2, 3})}}, + }}}, + }), + }, + { + name: "empty array", + in: []interface{}{}, + tests: makeTestList(map[schema.DataType]*pb.Value{ + schema.DataTypeBooleanArray: {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_BoolValues{BoolValues: &pb.BoolValues{Values: []bool{}}}, + }}}, + schema.DataTypeDateArray: {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_DateValues{DateValues: &pb.DateValues{Values: []string{}}}, + }}}, + schema.DataTypeNumberArray: {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_NumberValues{NumberValues: &pb.NumberValues{Values: byteops.Fp64SliceToBytes([]float64{})}}, + }}}, + schema.DataTypeIntArray: {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_IntValues{IntValues: &pb.IntValues{Values: byteops.IntsToByteVector([]float64{})}}, + }}}, + schema.DataTypeStringArray: {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_TextValues{TextValues: &pb.TextValues{Values: []string{}}}, + }}}, + schema.DataTypeTextArray: {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_TextValues{TextValues: 
&pb.TextValues{Values: []string{}}}, + }}}, + schema.DataTypeUUIDArray: {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_UuidValues{UuidValues: &pb.UuidValues{Values: []string{}}}, + }}}, + }), + }, + { + name: "bool", + in: true, + tests: makeTestList(map[schema.DataType]*pb.Value{ + schema.DataTypeBoolean: {Kind: &pb.Value_BoolValue{BoolValue: true}}, + }), + }, + { + name: "string", + in: "a string", + tests: makeTestList(map[schema.DataType]*pb.Value{ + schema.DataTypeDate: {Kind: &pb.Value_DateValue{DateValue: "a string"}}, + schema.DataTypeString: {Kind: &pb.Value_TextValue{TextValue: "a string"}}, + schema.DataTypeText: {Kind: &pb.Value_TextValue{TextValue: "a string"}}, + schema.DataTypeUUID: {Kind: &pb.Value_UuidValue{UuidValue: "a string"}}, + schema.DataTypeBlob: {Kind: &pb.Value_BlobValue{BlobValue: "a string"}}, + }), + }, + { + name: "float64", + in: 1.1, + tests: makeTestList(map[schema.DataType]*pb.Value{ + schema.DataTypeNumber: {Kind: &pb.Value_NumberValue{NumberValue: 1.1}}, + schema.DataTypeInt: {Kind: &pb.Value_IntValue{IntValue: 1}}, + }), + }, + { + name: "geo", + in: &models.GeoCoordinates{Longitude: &float_val, Latitude: &float_val}, + tests: makeTestList(map[schema.DataType]*pb.Value{ + schema.DataTypeGeoCoordinates: {Kind: &pb.Value_GeoValue{GeoValue: &pb.GeoCoordinate{Latitude: float_val, Longitude: float_val}}}, + }), + }, + { + name: "phone number", + in: &models.PhoneNumber{Input: "1234567890"}, + tests: makeTestList(map[schema.DataType]*pb.Value{ + schema.DataTypePhoneNumber: {Kind: &pb.Value_PhoneValue{PhoneValue: &pb.PhoneNumber{Input: "1234567890"}}}, + }), + }, + } + + for _, tt := range tests { + for _, test := range tt.tests { + m := NewMapping() + out, err := m.NewPrimitiveValue(tt.in, test.datatype) + if test.shouldError { + if err == nil { + t.Logf("expected an error for %v and %s", tt.in, test.datatype) + } + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, test.out, 
out) + } + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/models.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/models.go new file mode 100644 index 0000000000000000000000000000000000000000..ebcdaf586de1cb55bac99a4e3b9727752aab591a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/models.go @@ -0,0 +1,38 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package v1 + +import "github.com/weaviate/weaviate/entities/models" + +type Property struct { + *models.Property +} + +type NestedProperty struct { + *models.NestedProperty +} + +func (p *Property) GetName() string { + return p.Property.Name +} + +func (p *Property) GetNestedProperties() []*models.NestedProperty { + return p.Property.NestedProperties +} + +func (p *NestedProperty) GetName() string { + return p.NestedProperty.Name +} + +func (p *NestedProperty) GetNestedProperties() []*models.NestedProperty { + return p.NestedProperty.NestedProperties +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/parse_aggregate_request.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/parse_aggregate_request.go new file mode 100644 index 0000000000000000000000000000000000000000..ca0bc09af7810b2b8a87f05bbcdb039c97c6619c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/parse_aggregate_request.go @@ -0,0 +1,539 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "fmt" + + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modelsext" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/searchparams" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + "github.com/weaviate/weaviate/usecases/byteops" +) + +type AggregateParser struct { + authorizedGetClass classGetterWithAuthzFunc +} + +func NewAggregateParser(authorizedGetClass classGetterWithAuthzFunc) *AggregateParser { + return &AggregateParser{ + authorizedGetClass: authorizedGetClass, + } +} + +func (p *AggregateParser) Aggregate(req *pb.AggregateRequest) (*aggregation.Params, error) { + params := &aggregation.Params{} + class, err := p.authorizedGetClass(req.Collection) + if err != nil { + return nil, err + } + + params.ClassName = schema.ClassName(class.Class) + params.Tenant = req.Tenant + + if req.ObjectLimit != nil { + objectLimit := int(*req.ObjectLimit) + params.ObjectLimit = &objectLimit + } + + if req.GroupBy != nil { + params.GroupBy = &filters.Path{ + Class: schema.ClassName(req.GroupBy.Collection), + Property: schema.PropertyName(req.GroupBy.Property), + } + } + if req.Limit != nil { + limit := int(*req.Limit) + params.Limit = &limit + } + + params.IncludeMetaCount = req.ObjectsCount + + if len(req.Aggregations) > 0 { + properties := make([]aggregation.ParamProperty, len(req.Aggregations)) + for i := range req.Aggregations { + properties[i] = aggregation.ParamProperty{ + Name: schema.PropertyName(req.Aggregations[i].Property), + Aggregators: parseAggregations(req.Aggregations[i]), + } + } + params.Properties = properties + } + + if req.Filters != nil { + clause, err := 
ExtractFilters(req.Filters, p.authorizedGetClass, req.Collection, req.Tenant) + if err != nil { + return nil, fmt.Errorf("extract filters: %w", err) + } + filter := &filters.LocalFilter{Root: &clause} + if err := filters.ValidateFilters(p.authorizedGetClass, filter); err != nil { + return nil, fmt.Errorf("validate filters: %w", err) + } + params.Filters = filter + } + + // targetCombination is not supported with Aggregate queries + targetVectors, _, _, err := extractTargetVectorsForAggregate(req, class) + if err != nil { + return nil, fmt.Errorf("extract target vectors: %w", err) + } + + switch search := req.GetSearch().(type) { + case *pb.AggregateRequest_NearVector: + if nv := search.NearVector; nv != nil { + params.NearVector, _, err = parseNearVec(nv, targetVectors, class, nil) + if err != nil { + return nil, fmt.Errorf("parse near vector: %w", err) + } + + // The following business logic should not sit in the API. However, it is + // also part of the GraphQL API, so we need to duplicate it in order to get + // the same behavior + if nv.Distance != nil && nv.Certainty != nil { + return nil, fmt.Errorf("near_vector: cannot provide distance and certainty") + } + + if nv.Certainty != nil { + params.NearVector.Certainty = *nv.Certainty + } + + if nv.Distance != nil { + params.NearVector.Distance = *nv.Distance + params.NearVector.WithDistance = true + } + } + case *pb.AggregateRequest_NearObject: + if no := search.NearObject; no != nil { + if no.Id == "" { + return nil, fmt.Errorf("near_object: id is required") + } + params.NearObject = &searchparams.NearObject{ + ID: no.Id, + TargetVectors: targetVectors, + } + + // The following business logic should not sit in the API. 
However, it is + // also part of the GraphQL API, so we need to duplicate it in order to get + // the same behavior + if no.Distance != nil && no.Certainty != nil { + return nil, fmt.Errorf("near_object: cannot provide distance and certainty") + } + + if no.Certainty != nil { + params.NearObject.Certainty = *no.Certainty + } + + if no.Distance != nil { + params.NearObject.Distance = *no.Distance + params.NearObject.WithDistance = true + } + } + case *pb.AggregateRequest_NearText: + if nt := search.NearText; nt != nil { + limit := 0 + if params.ObjectLimit != nil { + limit = *params.ObjectLimit + } + nearText, err := extractNearText(params.ClassName.String(), limit, nt, targetVectors) + if err != nil { + return nil, err + } + if params.ModuleParams == nil { + params.ModuleParams = make(map[string]interface{}) + } + params.ModuleParams["nearText"] = nearText + } + case *pb.AggregateRequest_NearImage: + if ni := search.NearImage; ni != nil { + nearImageOut, err := parseNearImage(ni, targetVectors) + if err != nil { + return nil, err + } + + if params.ModuleParams == nil { + params.ModuleParams = make(map[string]interface{}) + } + params.ModuleParams["nearImage"] = nearImageOut + } + case *pb.AggregateRequest_NearAudio: + if na := search.NearAudio; na != nil { + nearAudioOut, err := parseNearAudio(na, targetVectors) + if err != nil { + return nil, err + } + + if params.ModuleParams == nil { + params.ModuleParams = make(map[string]interface{}) + } + params.ModuleParams["nearAudio"] = nearAudioOut + } + case *pb.AggregateRequest_NearVideo: + if nv := search.NearVideo; nv != nil { + nearVideoOut, err := parseNearVideo(nv, targetVectors) + if err != nil { + return nil, err + } + + if params.ModuleParams == nil { + params.ModuleParams = make(map[string]interface{}) + } + params.ModuleParams["nearVideo"] = nearVideoOut + } + case *pb.AggregateRequest_NearDepth: + if nd := search.NearDepth; nd != nil { + nearDepthOut, err := parseNearDepth(nd, targetVectors) + if err != nil { 
+ return nil, err + } + + if params.ModuleParams == nil { + params.ModuleParams = make(map[string]interface{}) + } + params.ModuleParams["nearDepth"] = nearDepthOut + } + case *pb.AggregateRequest_NearThermal: + if nt := search.NearThermal; nt != nil { + nearThermalOut, err := parseNearThermal(nt, targetVectors) + if err != nil { + return nil, err + } + + if params.ModuleParams == nil { + params.ModuleParams = make(map[string]interface{}) + } + params.ModuleParams["nearThermal"] = nearThermalOut + } + case *pb.AggregateRequest_NearImu: + if ni := search.NearImu; ni != nil { + nearIMUOut, err := parseNearIMU(ni, targetVectors) + if err != nil { + return nil, err + } + if params.ModuleParams == nil { + params.ModuleParams = make(map[string]interface{}) + } + params.ModuleParams["nearIMU"] = nearIMUOut + } + case *pb.AggregateRequest_Hybrid: + if hs := search.Hybrid; hs != nil { + fusionType := common_filters.HybridFusionDefault + if hs.FusionType == pb.Hybrid_FUSION_TYPE_RANKED { + fusionType = common_filters.HybridRankedFusion + } else if hs.FusionType == pb.Hybrid_FUSION_TYPE_RELATIVE_SCORE { + fusionType = common_filters.HybridRelativeScoreFusion + } + + var vector models.Vector + // vectors has precedent for being more efficient + if len(hs.Vectors) > 0 { + switch len(hs.Vectors) { + case 1: + vector, err = extractVector(hs.Vectors[0]) + if err != nil { + return nil, fmt.Errorf("hybrid: %w", err) + } + default: + return nil, fmt.Errorf("hybrid: only 1 vector supported, found %d vectors", len(hs.Vectors)) + } + } else if len(hs.VectorBytes) > 0 { + vector = byteops.Fp32SliceFromBytes(hs.VectorBytes) + } else if len(hs.Vector) > 0 { + vector = hs.Vector + } + + var distance float32 + withDistance := false + if hs.Threshold != nil { + withDistance = true + switch hs.Threshold.(type) { + case *pb.Hybrid_VectorDistance: + distance = hs.Threshold.(*pb.Hybrid_VectorDistance).VectorDistance + default: + return nil, fmt.Errorf("unknown value type %v", hs.Threshold) + } + 
} + + limit := 0 + if params.ObjectLimit != nil { + limit = *params.ObjectLimit + } + nearTxt, err := extractNearText(params.ClassName.String(), limit, search.Hybrid.NearText, targetVectors) + if err != nil { + return nil, err + } + nearVec := search.Hybrid.NearVector + + params.Hybrid = &searchparams.HybridSearch{ + Query: hs.Query, + Properties: schema.LowercaseFirstLetterOfStrings(hs.Properties), + Vector: vector, + Alpha: float64(hs.Alpha), + FusionAlgorithm: fusionType, + TargetVectors: targetVectors, + Distance: distance, + WithDistance: withDistance, + } + + if hs.Bm25SearchOperator != nil { + if hs.Bm25SearchOperator.MinimumOrTokensMatch != nil { + params.Hybrid.MinimumOrTokensMatch = int(*hs.Bm25SearchOperator.MinimumOrTokensMatch) + } + params.Hybrid.SearchOperator = hs.Bm25SearchOperator.Operator.String() + } + + if nearVec != nil { + params.Hybrid.NearVectorParams, _, err = parseNearVec(nearVec, targetVectors, class, nil) + if err != nil { + return nil, err + } + + params.Hybrid.TargetVectors = params.Hybrid.NearVectorParams.TargetVectors + if nearVec.Distance != nil { + params.Hybrid.NearVectorParams.Distance = *nearVec.Distance + params.Hybrid.NearVectorParams.WithDistance = true + } + if nearVec.Certainty != nil { + params.Hybrid.NearVectorParams.Certainty = *nearVec.Certainty + } + } + + if nearTxt != nil { + params.Hybrid.NearTextParams = &searchparams.NearTextParams{ + Values: nearTxt.Values, + Limit: nearTxt.Limit, + Certainty: nearTxt.Certainty, + Distance: nearTxt.Distance, + WithDistance: nearTxt.WithDistance, + MoveAwayFrom: searchparams.ExploreMove{Force: nearTxt.MoveAwayFrom.Force, Values: nearTxt.MoveAwayFrom.Values}, + MoveTo: searchparams.ExploreMove{Force: nearTxt.MoveTo.Force, Values: nearTxt.MoveTo.Values}, + TargetVectors: targetVectors, + } + } + } + case nil: + // do nothing, search is not set + default: + return nil, fmt.Errorf("unrecognized search: %T", req.GetSearch()) + } + + return params, nil +} + +func parseAggregations(in 
*pb.AggregateRequest_Aggregation) []aggregation.Aggregator { + switch a := in.GetAggregation().(type) { + case *pb.AggregateRequest_Aggregation_Int: + var aggregators []aggregation.Aggregator + if a.Int.Count { + aggregators = append(aggregators, aggregation.CountAggregator) + } + if a.Int.Type { + aggregators = append(aggregators, aggregation.TypeAggregator) + } + if a.Int.Mean { + aggregators = append(aggregators, aggregation.MeanAggregator) + } + if a.Int.Median { + aggregators = append(aggregators, aggregation.MedianAggregator) + } + if a.Int.Mode { + aggregators = append(aggregators, aggregation.ModeAggregator) + } + if a.Int.Maximum { + aggregators = append(aggregators, aggregation.MaximumAggregator) + } + if a.Int.Minimum { + aggregators = append(aggregators, aggregation.MinimumAggregator) + } + if a.Int.Sum { + aggregators = append(aggregators, aggregation.SumAggregator) + } + return aggregators + case *pb.AggregateRequest_Aggregation_Number_: + var aggregators []aggregation.Aggregator + if a.Number.Count { + aggregators = append(aggregators, aggregation.CountAggregator) + } + if a.Number.Type { + aggregators = append(aggregators, aggregation.TypeAggregator) + } + if a.Number.Mean { + aggregators = append(aggregators, aggregation.MeanAggregator) + } + if a.Number.Median { + aggregators = append(aggregators, aggregation.MedianAggregator) + } + if a.Number.Mode { + aggregators = append(aggregators, aggregation.ModeAggregator) + } + if a.Number.Maximum { + aggregators = append(aggregators, aggregation.MaximumAggregator) + } + if a.Number.Minimum { + aggregators = append(aggregators, aggregation.MinimumAggregator) + } + if a.Number.Sum { + aggregators = append(aggregators, aggregation.SumAggregator) + } + return aggregators + case *pb.AggregateRequest_Aggregation_Text_: + var aggregators []aggregation.Aggregator + if a.Text.Count { + aggregators = append(aggregators, aggregation.CountAggregator) + } + if a.Text.Type { + aggregators = append(aggregators, 
aggregation.TypeAggregator) + } + if a.Text.TopOccurences { + if a.Text.TopOccurencesLimit != nil { + limit := int(*a.Text.TopOccurencesLimit) + aggregators = append(aggregators, aggregation.NewTopOccurrencesAggregator(&limit)) + } else { + aggregators = append(aggregators, aggregation.TotalTrueAggregator) + } + } + return aggregators + case *pb.AggregateRequest_Aggregation_Boolean_: + var aggregators []aggregation.Aggregator + if a.Boolean.Count { + aggregators = append(aggregators, aggregation.CountAggregator) + } + if a.Boolean.Type { + aggregators = append(aggregators, aggregation.TypeAggregator) + } + if a.Boolean.TotalTrue { + aggregators = append(aggregators, aggregation.TotalTrueAggregator) + } + if a.Boolean.TotalFalse { + aggregators = append(aggregators, aggregation.TotalFalseAggregator) + } + if a.Boolean.PercentageTrue { + aggregators = append(aggregators, aggregation.PercentageTrueAggregator) + } + if a.Boolean.PercentageFalse { + aggregators = append(aggregators, aggregation.PercentageFalseAggregator) + } + return aggregators + case *pb.AggregateRequest_Aggregation_Date_: + var aggregators []aggregation.Aggregator + if a.Date.Count { + aggregators = append(aggregators, aggregation.CountAggregator) + } + if a.Date.Type { + aggregators = append(aggregators, aggregation.TypeAggregator) + } + if a.Date.Median { + aggregators = append(aggregators, aggregation.MedianAggregator) + } + if a.Date.Mode { + aggregators = append(aggregators, aggregation.ModeAggregator) + } + if a.Date.Maximum { + aggregators = append(aggregators, aggregation.MaximumAggregator) + } + if a.Date.Minimum { + aggregators = append(aggregators, aggregation.MinimumAggregator) + } + return aggregators + case *pb.AggregateRequest_Aggregation_Reference_: + var aggregators []aggregation.Aggregator + if a.Reference.Type { + aggregators = append(aggregators, aggregation.TypeAggregator) + } + if a.Reference.PointingTo { + aggregators = append(aggregators, aggregation.PointingToAggregator) + } 
+ return aggregators + default: + return nil + } +} + +func extractTargetVectorsForAggregate(req *pb.AggregateRequest, class *models.Class) ([]string, *dto.TargetCombination, bool, error) { + var targetVectors []string + var targets *pb.Targets + vectorSearch := false + + extract := func(targets *pb.Targets, targetVectors *[]string) ([]string, *pb.Targets, bool) { + if targets != nil { + return targets.TargetVectors, targets, true + } else { + return *targetVectors, nil, true + } + } + if hs := req.GetHybrid(); hs != nil { + targetVectors, targets, vectorSearch = extract(hs.Targets, &hs.TargetVectors) + } + if na := req.GetNearAudio(); na != nil { + targetVectors, targets, vectorSearch = extract(na.Targets, &na.TargetVectors) + } + if nd := req.GetNearDepth(); nd != nil { + targetVectors, targets, vectorSearch = extract(nd.Targets, &nd.TargetVectors) + } + if ni := req.GetNearImage(); ni != nil { + targetVectors, targets, vectorSearch = extract(ni.Targets, &ni.TargetVectors) + } + if ni := req.GetNearImu(); ni != nil { + targetVectors, targets, vectorSearch = extract(ni.Targets, &ni.TargetVectors) + } + if no := req.GetNearObject(); no != nil { + targetVectors, targets, vectorSearch = extract(no.Targets, &no.TargetVectors) + } + if nt := req.GetNearText(); nt != nil { + targetVectors, targets, vectorSearch = extract(nt.Targets, &nt.TargetVectors) + } + if nt := req.GetNearThermal(); nt != nil { + targetVectors, targets, vectorSearch = extract(nt.Targets, &nt.TargetVectors) + } + if nv := req.GetNearVector(); nv != nil { + targetVectors, targets, vectorSearch = extract(nv.Targets, &nv.TargetVectors) + } + if nv := req.GetNearVideo(); nv != nil { + targetVectors, targets, vectorSearch = extract(nv.Targets, &nv.TargetVectors) + } + + var combination *dto.TargetCombination + if targets != nil { + var err error + if combination, err = extractTargets(targets); err != nil { + return nil, nil, false, err + } + } else if len(targetVectors) > 1 { + // here weights need to be 
added if the default combination requires it + combination = &dto.TargetCombination{Type: dto.DefaultTargetCombinationType} + } + + if vectorSearch && len(targetVectors) == 0 && !modelsext.ClassHasLegacyVectorIndex(class) { + if len(class.VectorConfig) > 1 { + return nil, nil, false, fmt.Errorf("class %s has multiple vectors, but no target vectors were provided", class.Class) + } else if len(class.VectorConfig) == 1 { + for targetVector := range class.VectorConfig { + targetVectors = append(targetVectors, targetVector) + } + } + } + + if vectorSearch { + for _, target := range targetVectors { + if _, ok := class.VectorConfig[target]; !ok { + return nil, nil, false, fmt.Errorf("class %s does not have named vector %v configured. Available named vectors %v", class.Class, target, class.VectorConfig) + } + } + } + + return targetVectors, combination, vectorSearch, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/parse_aggregate_request_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/parse_aggregate_request_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a13d099faf436faf1a9eab3800a1dfdadb1614f0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/parse_aggregate_request_test.go @@ -0,0 +1,147 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/searchparams" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +func TestGRPCAggregateRequest(t *testing.T) { + tests := []struct { + name string + req *pb.AggregateRequest + out *aggregation.Params + error bool + }{ + { + name: "mixed vector input near vector targeting legacy vector", + req: &pb.AggregateRequest{ + Collection: mixedVectorsClass, + ObjectsCount: true, + Aggregations: []*pb.AggregateRequest_Aggregation{ + { + Property: "first", + Aggregation: &pb.AggregateRequest_Aggregation_Text_{ + Text: &pb.AggregateRequest_Aggregation_Text{ + Count: true, + }, + }, + }, + }, + Search: &pb.AggregateRequest_Hybrid{ + Hybrid: &pb.Hybrid{ + Alpha: 0.5, + NearText: &pb.NearTextSearch{ + Query: []string{"hello"}, + Certainty: ptr(0.6), + }, + }, + }, + }, + out: &aggregation.Params{ + ClassName: schema.ClassName(mixedVectorsClass), + Properties: []aggregation.ParamProperty{ + { + Name: "first", + Aggregators: []aggregation.Aggregator{ + { + Type: "count", + }, + }, + }, + }, + IncludeMetaCount: true, + Hybrid: &searchparams.HybridSearch{ + Alpha: 0.5, + NearTextParams: &searchparams.NearTextParams{ + Values: []string{"hello"}, + Certainty: 0.6, + }, + FusionAlgorithm: common_filters.HybridFusionDefault, + }, + }, + error: false, + }, + { + name: "mixed vector input near vector targeting named vector", + req: &pb.AggregateRequest{ + Collection: mixedVectorsClass, + ObjectsCount: true, + Aggregations: []*pb.AggregateRequest_Aggregation{ + { + Property: "first", + Aggregation: &pb.AggregateRequest_Aggregation_Text_{ + Text: &pb.AggregateRequest_Aggregation_Text{ + Count: true, + }, + }, + }, + }, + Search: 
&pb.AggregateRequest_Hybrid{ + Hybrid: &pb.Hybrid{ + Alpha: 0.5, + NearText: &pb.NearTextSearch{ + Query: []string{"hello"}, + Certainty: ptr(0.6), + }, + Targets: &pb.Targets{TargetVectors: []string{"first_vec"}}, + }, + }, + }, + out: &aggregation.Params{ + ClassName: schema.ClassName(mixedVectorsClass), + Properties: []aggregation.ParamProperty{ + { + Name: "first", + Aggregators: []aggregation.Aggregator{ + { + Type: "count", + }, + }, + }, + }, + IncludeMetaCount: true, + Hybrid: &searchparams.HybridSearch{ + Alpha: 0.5, + NearTextParams: &searchparams.NearTextParams{ + TargetVectors: []string{"first_vec"}, + Values: []string{"hello"}, + Certainty: 0.6, + }, + FusionAlgorithm: common_filters.HybridFusionDefault, + TargetVectors: []string{"first_vec"}, + }, + }, + error: false, + }, + } + + parser := NewAggregateParser(getClass) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + out, err := parser.Aggregate(tt.req) + if tt.error { + require.Error(t, err) + } else { + require.NoError(t, err) + require.EqualValues(t, tt.out, out) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/parse_search_request.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/parse_search_request.go new file mode 100644 index 0000000000000000000000000000000000000000..732cd526cfcd35529901ab0026234c7e9fcda4db --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/parse_search_request.go @@ -0,0 +1,1350 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "fmt" + "slices" + + "github.com/weaviate/weaviate/entities/modelsext" + "github.com/weaviate/weaviate/entities/schema/configvalidation" + "github.com/weaviate/weaviate/usecases/config" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + "github.com/weaviate/weaviate/adapters/handlers/grpc/v1/generative" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + "github.com/weaviate/weaviate/usecases/byteops" + additional2 "github.com/weaviate/weaviate/usecases/modulecomponents/additional" + "github.com/weaviate/weaviate/usecases/modulecomponents/additional/generate" + "github.com/weaviate/weaviate/usecases/modulecomponents/additional/rank" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearAudio" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearDepth" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearImage" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearImu" + nearText2 "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearText" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearThermal" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearVideo" +) + +type generativeParser interface { + Extract(req *pb.GenerativeSearch, class *models.Class) *generate.Params + 
ProviderName() string + ReturnMetadataForSingle() bool + ReturnMetadataForGrouped() bool + ReturnDebugForSingle() bool + ReturnDebugForGrouped() bool +} + +type Parser struct { + generative generativeParser + authorizedGetClass classGetterWithAuthzFunc + aliasGetter aliasGetter +} + +func NewParser(uses127Api bool, + authorizedGetClass classGetterWithAuthzFunc, + aliasGetter aliasGetter, +) *Parser { + return &Parser{ + generative: generative.NewParser(uses127Api), + authorizedGetClass: authorizedGetClass, + aliasGetter: aliasGetter, + } +} + +func (p *Parser) Search(req *pb.SearchRequest, config *config.Config) (dto.GetParams, error) { + out := dto.GetParams{} + class, err := p.authorizedGetClass(req.Collection) + if err != nil { + return out, err + } + + out.Alias = p.aliasGetter(req.Collection) + out.ClassName = class.Class + out.ReplicationProperties = extractReplicationProperties(req.ConsistencyLevel) + + out.Tenant = req.Tenant + + targetVectors, targetCombination, vectorSearch, err := extractTargetVectors(req, class) + if err != nil { + return dto.GetParams{}, errors.Wrap(err, "extract target vectors") + } + out.TargetVectorCombination = targetCombination + + if req.Metadata != nil { + addProps, err := extractAdditionalPropsFromMetadata(class, req.Metadata, targetVectors, vectorSearch) + if err != nil { + return dto.GetParams{}, errors.Wrap(err, "extract additional props") + } + out.AdditionalProperties = addProps + } + + out.Properties, err = extractPropertiesRequest(req.Properties, p.authorizedGetClass, req.Collection, targetVectors, vectorSearch) + if err != nil { + return dto.GetParams{}, errors.Wrap(err, "extract properties request") + } + if len(out.Properties) == 0 { + out.AdditionalProperties.NoProps = true + } + + if bm25 := req.Bm25Search; bm25 != nil { + out.KeywordRanking = &searchparams.KeywordRanking{Query: bm25.Query, Properties: schema.LowercaseFirstLetterOfStrings(bm25.Properties), Type: "bm25", AdditionalExplanations: 
out.AdditionalProperties.ExplainScore} + + if bm25.SearchOperator != nil { + if bm25.SearchOperator.MinimumOrTokensMatch != nil { + out.KeywordRanking.MinimumOrTokensMatch = int(*bm25.SearchOperator.MinimumOrTokensMatch) + } + out.KeywordRanking.SearchOperator = bm25.SearchOperator.Operator.String() + } + } + + if nv := req.NearVector; nv != nil { + out.NearVector, out.TargetVectorCombination, err = parseNearVec(nv, targetVectors, class, out.TargetVectorCombination) + if err != nil { + return dto.GetParams{}, err + } + + // The following business logic should not sit in the API. However, it is + // also part of the GraphQL API, so we need to duplicate it in order to get + // the same behavior + if nv.Distance != nil && nv.Certainty != nil { + return out, fmt.Errorf("near_vector: cannot provide distance and certainty") + } + + if nv.Certainty != nil { + out.NearVector.Certainty = *nv.Certainty + } + + if nv.Distance != nil { + out.NearVector.Distance = *nv.Distance + out.NearVector.WithDistance = true + } + } + + if no := req.NearObject; no != nil { + if no.Id == "" { + return dto.GetParams{}, fmt.Errorf("near_object: id is required") + } + out.NearObject = &searchparams.NearObject{ + ID: no.Id, + TargetVectors: targetVectors, + } + + // The following business logic should not sit in the API. 
However, it is + // also part of the GraphQL API, so we need to duplicate it in order to get + // the same behavior + if no.Distance != nil && no.Certainty != nil { + return out, fmt.Errorf("near_object: cannot provide distance and certainty") + } + + if no.Certainty != nil { + out.NearObject.Certainty = *no.Certainty + } + + if no.Distance != nil { + out.NearObject.Distance = *no.Distance + out.NearObject.WithDistance = true + } + } + + if ni := req.NearImage; ni != nil { + nearImageOut, err := parseNearImage(ni, targetVectors) + if err != nil { + return dto.GetParams{}, err + } + + if out.ModuleParams == nil { + out.ModuleParams = make(map[string]interface{}) + } + out.ModuleParams["nearImage"] = nearImageOut + } + + if na := req.NearAudio; na != nil { + nearAudioOut, err := parseNearAudio(na, targetVectors) + if err != nil { + return dto.GetParams{}, err + } + + if out.ModuleParams == nil { + out.ModuleParams = make(map[string]interface{}) + } + out.ModuleParams["nearAudio"] = nearAudioOut + } + + if nv := req.NearVideo; nv != nil { + nearVideoOut, err := parseNearVideo(nv, targetVectors) + if err != nil { + return dto.GetParams{}, err + } + + if out.ModuleParams == nil { + out.ModuleParams = make(map[string]interface{}) + } + out.ModuleParams["nearVideo"] = nearVideoOut + } + + if nd := req.NearDepth; nd != nil { + nearDepthOut, err := parseNearDepth(nd, targetVectors) + if err != nil { + return dto.GetParams{}, err + } + + if out.ModuleParams == nil { + out.ModuleParams = make(map[string]interface{}) + } + out.ModuleParams["nearDepth"] = nearDepthOut + } + + if nt := req.NearThermal; nt != nil { + nearThermalOut, err := parseNearThermal(nt, targetVectors) + if err != nil { + return dto.GetParams{}, err + } + + if out.ModuleParams == nil { + out.ModuleParams = make(map[string]interface{}) + } + out.ModuleParams["nearThermal"] = nearThermalOut + } + + if ni := req.NearImu; ni != nil { + nearIMUOut, err := parseNearIMU(ni, targetVectors) + if err != nil { + 
return dto.GetParams{}, err + } + if out.ModuleParams == nil { + out.ModuleParams = make(map[string]interface{}) + } + out.ModuleParams["nearIMU"] = nearIMUOut + } + + out.Pagination = &filters.Pagination{Offset: int(req.Offset), Autocut: int(req.Autocut)} + if req.Limit > 0 { + out.Pagination.Limit = int(req.Limit) + } else { + out.Pagination.Limit = int(config.QueryDefaults.Limit) + } + + // Hybrid search now has the ability to run subsearches using the real nearvector and neartext searches. So we need to extract those settings the same way we prepare for the real searches. + if hs := req.HybridSearch; hs != nil { + fusionType := common_filters.HybridFusionDefault + if hs.FusionType == pb.Hybrid_FUSION_TYPE_RANKED { + fusionType = common_filters.HybridRankedFusion + } else if hs.FusionType == pb.Hybrid_FUSION_TYPE_RELATIVE_SCORE { + fusionType = common_filters.HybridRelativeScoreFusion + } + + var vector models.Vector + // vectors has precedent for being more efficient + if len(hs.Vectors) > 0 { + switch len(hs.Vectors) { + case 1: + vector, err = extractVector(hs.Vectors[0]) + if err != nil { + return dto.GetParams{}, fmt.Errorf("hybrid: %w", err) + } + default: + return dto.GetParams{}, fmt.Errorf("hybrid: only 1 vector supported, found %d vectors", len(hs.Vectors)) + } + } else if len(hs.VectorBytes) > 0 { + vector = byteops.Fp32SliceFromBytes(hs.VectorBytes) + } else if len(hs.Vector) > 0 { + vector = hs.Vector + } + + var distance float32 + withDistance := false + if hs.Threshold != nil { + withDistance = true + switch hs.Threshold.(type) { + case *pb.Hybrid_VectorDistance: + distance = hs.Threshold.(*pb.Hybrid_VectorDistance).VectorDistance + default: + return dto.GetParams{}, fmt.Errorf("unknown value type %v", hs.Threshold) + } + } + + nearTxt, err := extractNearText(out.ClassName, out.Pagination.Limit, req.HybridSearch.NearText, targetVectors) + if err != nil { + return dto.GetParams{}, err + } + nearVec := req.HybridSearch.NearVector + + 
out.HybridSearch = &searchparams.HybridSearch{ + Query: hs.Query, + Properties: schema.LowercaseFirstLetterOfStrings(hs.Properties), + Vector: vector, + Alpha: float64(hs.Alpha), + FusionAlgorithm: fusionType, + TargetVectors: targetVectors, + Distance: distance, + WithDistance: withDistance, + } + + if hs.Bm25SearchOperator != nil { + if hs.Bm25SearchOperator.MinimumOrTokensMatch != nil { + out.HybridSearch.MinimumOrTokensMatch = int(*hs.Bm25SearchOperator.MinimumOrTokensMatch) + } + out.HybridSearch.SearchOperator = hs.Bm25SearchOperator.Operator.String() + } + + if nearVec != nil { + out.HybridSearch.NearVectorParams, out.TargetVectorCombination, err = parseNearVec(nearVec, targetVectors, class, out.TargetVectorCombination) + if err != nil { + return dto.GetParams{}, err + } + + out.HybridSearch.TargetVectors = out.HybridSearch.NearVectorParams.TargetVectors + if nearVec.Distance != nil { + out.HybridSearch.NearVectorParams.Distance = *nearVec.Distance + out.HybridSearch.NearVectorParams.WithDistance = true + } + if nearVec.Certainty != nil { + out.HybridSearch.NearVectorParams.Certainty = *nearVec.Certainty + } + } + + if nearTxt != nil { + out.HybridSearch.NearTextParams = &searchparams.NearTextParams{ + Values: nearTxt.Values, + Limit: nearTxt.Limit, + MoveAwayFrom: searchparams.ExploreMove{Force: nearTxt.MoveAwayFrom.Force, Values: nearTxt.MoveAwayFrom.Values}, + MoveTo: searchparams.ExploreMove{Force: nearTxt.MoveTo.Force, Values: nearTxt.MoveTo.Values}, + TargetVectors: targetVectors, + } + } + } + + var nearText *nearText2.NearTextParams + if req.NearText != nil { + nearText, err = extractNearText(out.ClassName, out.Pagination.Limit, req.NearText, targetVectors) + if err != nil { + return dto.GetParams{}, err + } + if out.ModuleParams == nil { + out.ModuleParams = make(map[string]interface{}) + } + out.ModuleParams["nearText"] = nearText + } + + if req.Generative != nil { + if out.AdditionalProperties.ModuleParams == nil { + 
out.AdditionalProperties.ModuleParams = make(map[string]interface{}) + } + out.AdditionalProperties.ModuleParams["generate"] = p.generative.Extract(req.Generative, class) + } + + if req.Rerank != nil { + if out.AdditionalProperties.ModuleParams == nil { + out.AdditionalProperties.ModuleParams = make(map[string]interface{}) + } + out.AdditionalProperties.ModuleParams["rerank"] = extractRerank(req) + } + + if len(req.After) > 0 { + out.Cursor = &filters.Cursor{After: req.After, Limit: out.Pagination.Limit} + } + + if req.Filters != nil { + clause, err := ExtractFilters(req.Filters, p.authorizedGetClass, req.Collection, req.Tenant) + if err != nil { + return dto.GetParams{}, err + } + filter := &filters.LocalFilter{Root: &clause} + if err := filters.ValidateFilters(p.authorizedGetClass, filter); err != nil { + return dto.GetParams{}, err + } + out.Filters = filter + } + + if len(req.SortBy) > 0 { + if req.NearText != nil || req.NearVideo != nil || req.NearAudio != nil || req.NearImage != nil || req.NearObject != nil || req.NearVector != nil || req.HybridSearch != nil || req.Bm25Search != nil || req.Generative != nil { + return dto.GetParams{}, errors.New("sorting cannot be combined with search") + } + out.Sort = extractSorting(req.SortBy) + } + + if req.GroupBy != nil { + groupBy, err := extractGroupBy(req.GroupBy, &out, class) + if err != nil { + return dto.GetParams{}, err + } + out.AdditionalProperties.Group = true + + out.GroupBy = groupBy + } + + if out.HybridSearch != nil && out.HybridSearch.NearTextParams != nil && out.HybridSearch.NearVectorParams != nil { + return dto.GetParams{}, errors.New("cannot combine nearText and nearVector in hybrid search") + } + if out.HybridSearch != nil && out.HybridSearch.NearTextParams != nil && out.HybridSearch.Vector != nil { + return dto.GetParams{}, errors.New("cannot combine nearText and query in hybrid search") + } + if out.HybridSearch != nil && out.HybridSearch.NearVectorParams != nil && out.HybridSearch.Vector != nil { 
+ return dto.GetParams{}, errors.New("cannot combine nearVector and vector in hybrid search") + } + if err := p.extractPropertiesForModules(&out); err != nil { + return dto.GetParams{}, err + } + return out, nil +} + +func extractGroupBy(groupIn *pb.GroupBy, out *dto.GetParams, class *models.Class) (*searchparams.GroupBy, error) { + if len(groupIn.Path) != 1 { + return nil, fmt.Errorf("groupby path can only have one entry, received %v", groupIn.Path) + } + + groupByProp := groupIn.Path[0] + + // add the property in case it was not requested as return prop - otherwise it is not resolved + if out.Properties.FindProperty(groupByProp) == nil { + dataType, err := schema.GetPropertyDataType(class, groupByProp) + if err != nil || dataType == nil { + return nil, err + } + isPrimitive := true + if *dataType == schema.DataTypeCRef { + isPrimitive = false + } + out.Properties = append(out.Properties, search.SelectProperty{Name: groupByProp, IsPrimitive: isPrimitive}) + } + + var additionalGroupProperties []search.SelectProperty + for _, prop := range out.Properties { + additionalGroupHitProp := search.SelectProperty{Name: prop.Name} + additionalGroupHitProp.Refs = append(additionalGroupHitProp.Refs, prop.Refs...) 
+ additionalGroupHitProp.IsPrimitive = prop.IsPrimitive + additionalGroupHitProp.IsObject = prop.IsObject + additionalGroupProperties = append(additionalGroupProperties, additionalGroupHitProp) + } + + groupOut := &searchparams.GroupBy{ + Property: groupByProp, + ObjectsPerGroup: int(groupIn.ObjectsPerGroup), + Groups: int(groupIn.NumberOfGroups), + Properties: additionalGroupProperties, + } + + out.AdditionalProperties.NoProps = false + + return groupOut, nil +} + +func extractTargetVectors(req *pb.SearchRequest, class *models.Class) ([]string, *dto.TargetCombination, bool, error) { + var targetVectors []string + var targets *pb.Targets + vectorSearch := false + + extract := func(targets *pb.Targets, targetVectors *[]string) ([]string, *pb.Targets, bool) { + if targets != nil { + return targets.TargetVectors, targets, true + } else { + return *targetVectors, nil, true + } + } + if hs := req.HybridSearch; hs != nil { + targetVectors, targets, vectorSearch = extract(hs.Targets, &hs.TargetVectors) + } + if na := req.NearAudio; na != nil { + targetVectors, targets, vectorSearch = extract(na.Targets, &na.TargetVectors) + } + if nd := req.NearDepth; nd != nil { + targetVectors, targets, vectorSearch = extract(nd.Targets, &nd.TargetVectors) + } + if ni := req.NearImage; ni != nil { + targetVectors, targets, vectorSearch = extract(ni.Targets, &ni.TargetVectors) + } + if ni := req.NearImu; ni != nil { + targetVectors, targets, vectorSearch = extract(ni.Targets, &ni.TargetVectors) + } + if no := req.NearObject; no != nil { + targetVectors, targets, vectorSearch = extract(no.Targets, &no.TargetVectors) + } + if nt := req.NearText; nt != nil { + targetVectors, targets, vectorSearch = extract(nt.Targets, &nt.TargetVectors) + } + if nt := req.NearThermal; nt != nil { + targetVectors, targets, vectorSearch = extract(nt.Targets, &nt.TargetVectors) + } + if nv := req.NearVector; nv != nil { + targetVectors, targets, vectorSearch = extract(nv.Targets, &nv.TargetVectors) + } + if nv 
:= req.NearVideo; nv != nil { + targetVectors, targets, vectorSearch = extract(nv.Targets, &nv.TargetVectors) + } + + var combination *dto.TargetCombination + if targets != nil { + var err error + if combination, err = extractTargets(targets); err != nil { + return nil, nil, false, err + } + } else if len(targetVectors) > 1 { + // here weights need to be added if the default combination requires it + combination = &dto.TargetCombination{Type: dto.DefaultTargetCombinationType} + } + + if vectorSearch && len(targetVectors) == 0 && !modelsext.ClassHasLegacyVectorIndex(class) { + if len(class.VectorConfig) > 1 { + return nil, nil, false, fmt.Errorf("class %s has multiple vectors, but no target vectors were provided", class.Class) + } + + if len(class.VectorConfig) == 1 { + for targetVector := range class.VectorConfig { + targetVectors = append(targetVectors, targetVector) + } + } + } + + if vectorSearch { + for _, target := range targetVectors { + if _, ok := class.VectorConfig[target]; !ok { + configuredNamedVectors := make([]string, 0, len(class.VectorConfig)) + for key := range class.VectorConfig { + configuredNamedVectors = append(configuredNamedVectors, key) + } + return nil, nil, false, fmt.Errorf("class %s does not have named vector %v configured. 
Available named vectors %v", class.Class, target, configuredNamedVectors) + } + } + } + + return targetVectors, combination, vectorSearch, nil +} + +func extractTargets(in *pb.Targets) (*dto.TargetCombination, error) { + if in == nil { + return nil, nil + } + + var combinationType dto.TargetCombinationType + weights := make([]float32, len(in.TargetVectors)) + switch in.Combination { + case pb.CombinationMethod_COMBINATION_METHOD_TYPE_AVERAGE: + combinationType = dto.Average + weights = extractTargetCombinationAverageWeights(in.TargetVectors) + case pb.CombinationMethod_COMBINATION_METHOD_TYPE_SUM: + combinationType = dto.Sum + weights = extractTargetCombinationSumWeights(in.TargetVectors) + case pb.CombinationMethod_COMBINATION_METHOD_TYPE_MIN: + combinationType = dto.Minimum + case pb.CombinationMethod_COMBINATION_METHOD_TYPE_MANUAL: + combinationType = dto.ManualWeights + if err := extractWeights(in, weights); err != nil { + return nil, err + } + case pb.CombinationMethod_COMBINATION_METHOD_TYPE_RELATIVE_SCORE: + combinationType = dto.RelativeScore + if err := extractWeights(in, weights); err != nil { + return nil, err + } + case pb.CombinationMethod_COMBINATION_METHOD_UNSPECIFIED: + combinationType = dto.DefaultTargetCombinationType + default: + return nil, fmt.Errorf("unknown combination method %v", in.Combination) + } + return &dto.TargetCombination{Weights: weights, Type: combinationType}, nil +} + +func extractTargetCombinationAverageWeights(targetVectors []string) []float32 { + weights := make([]float32, len(targetVectors)) + for i := range targetVectors { + weights[i] = 1.0 / float32(len(targetVectors)) + } + return weights +} + +func extractTargetCombinationSumWeights(targetVectors []string) []float32 { + weights := make([]float32, len(targetVectors)) + for i := range targetVectors { + weights[i] = 1.0 + } + return weights +} + +func extractWeights(in *pb.Targets, weights []float32) error { + if len(in.WeightsForTargets) != len(in.TargetVectors) { + 
return fmt.Errorf("number of weights (%d) does not match number of targets (%d)", len(in.WeightsForTargets), len(in.TargetVectors)) + } + + for i, v := range in.WeightsForTargets { + if v.Target != in.TargetVectors[i] { + return fmt.Errorf("target vector %s not found in target vectors", v.Target) + } + weights[i] = v.Weight + } + return nil +} + +func extractSorting(sortIn []*pb.SortBy) []filters.Sort { + sortOut := make([]filters.Sort, len(sortIn)) + for i := range sortIn { + order := "asc" + if !sortIn[i].Ascending { + order = "desc" + } + sortOut[i] = filters.Sort{Order: order, Path: sortIn[i].Path} + } + return sortOut +} + +func extractRerank(req *pb.SearchRequest) *rank.Params { + rerank := rank.Params{ + Property: &req.Rerank.Property, + } + if req.Rerank.Query != nil { + rerank.Query = req.Rerank.Query + } + return &rerank +} + +func extractNearText(classname string, limit int, nearTextIn *pb.NearTextSearch, targetVectors []string) (*nearText2.NearTextParams, error) { + if nearTextIn == nil { + return nil, nil + } + + moveAwayOut, err := extractNearTextMove(classname, nearTextIn.MoveAway) + if err != nil { + return &nearText2.NearTextParams{}, err + } + moveToOut, err := extractNearTextMove(classname, nearTextIn.MoveTo) + if err != nil { + return &nearText2.NearTextParams{}, err + } + + nearText := &nearText2.NearTextParams{ + Values: nearTextIn.Query, + Limit: limit, + MoveAwayFrom: moveAwayOut, + MoveTo: moveToOut, + TargetVectors: targetVectors, + } + + if nearTextIn.Certainty != nil { + nearText.Certainty = *nearTextIn.Certainty + } + if nearTextIn.Distance != nil { + nearText.Distance = *nearTextIn.Distance + nearText.WithDistance = true + } + return nearText, nil +} + +func extractNearTextMove(classname string, Move *pb.NearTextSearch_Move) (nearText2.ExploreMove, error) { + var moveAwayOut nearText2.ExploreMove + + if moveAwayReq := Move; moveAwayReq != nil { + moveAwayOut.Force = moveAwayReq.Force + if len(moveAwayReq.Uuids) > 0 { + 
moveAwayOut.Objects = make([]nearText2.ObjectMove, len(moveAwayReq.Uuids)) + for i, objUUid := range moveAwayReq.Uuids { + uuidFormat, err := uuid.Parse(objUUid) + if err != nil { + return moveAwayOut, err + } + moveAwayOut.Objects[i] = nearText2.ObjectMove{ + ID: objUUid, + Beacon: crossref.NewLocalhost(classname, strfmt.UUID(uuidFormat.String())).String(), + } + } + } + + moveAwayOut.Values = moveAwayReq.Concepts + } + return moveAwayOut, nil +} + +func extractPropertiesRequest(reqProps *pb.PropertiesRequest, authorizedGetClass classGetterWithAuthzFunc, className string, targetVectors []string, vectorSearch bool) ([]search.SelectProperty, error) { + props := make([]search.SelectProperty, 0) + + if reqProps == nil { + // No properties selected at all, return all non-ref properties. + // Ignore blobs to not overload the response + nonRefProps, err := getAllNonRefNonBlobProperties(authorizedGetClass, className) + if err != nil { + return nil, errors.Wrap(err, "get all non ref non blob properties") + } + return nonRefProps, nil + } + + if reqProps.ReturnAllNonrefProperties { + // No non-ref return properties selected, return all non-ref properties. + // Ignore blobs to not overload the response + returnProps, err := getAllNonRefNonBlobProperties(authorizedGetClass, className) + if err != nil { + return nil, errors.Wrap(err, "get all non ref non blob properties") + } + props = append(props, returnProps...) + } else if len(reqProps.NonRefProperties) > 0 { + // Non-ref properties are selected, return only those specified + // This catches the case where users send an empty list of non ref properties as their request, + // i.e. 
they want no non-ref properties + for _, prop := range reqProps.NonRefProperties { + props = append(props, search.SelectProperty{ + Name: schema.LowercaseFirstLetter(prop), + IsPrimitive: true, + IsObject: false, + }) + } + } + + if len(reqProps.RefProperties) > 0 { + class, err := authorizedGetClass(className) + if err != nil { + return nil, err + } + + for _, prop := range reqProps.RefProperties { + normalizedRefPropName := schema.LowercaseFirstLetter(prop.ReferenceProperty) + schemaProp, err := schema.GetPropertyByName(class, normalizedRefPropName) + if err != nil { + return nil, err + } + + var linkedClassName string + if len(schemaProp.DataType) == 1 { + // use datatype of the reference property to get the name of the linked class + linkedClassName = schemaProp.DataType[0] + } else { + linkedClassName = prop.TargetCollection + if linkedClassName == "" { + return nil, fmt.Errorf( + "multi target references from collection %v and property %v with need an explicit"+ + "linked collection. 
Available linked collections are %v", + className, prop.ReferenceProperty, schemaProp.DataType) + } + } + linkedClass, err := authorizedGetClass(linkedClassName) + if err != nil { + return nil, err + } + var refProperties []search.SelectProperty + var addProps additional.Properties + if prop.Properties != nil { + refProperties, err = extractPropertiesRequest(prop.Properties, authorizedGetClass, linkedClassName, targetVectors, vectorSearch) + if err != nil { + return nil, errors.Wrap(err, "extract properties request") + } + } + if prop.Metadata != nil { + addProps, err = extractAdditionalPropsFromMetadata(linkedClass, prop.Metadata, targetVectors, vectorSearch) + if err != nil { + return nil, errors.Wrap(err, "extract additional props for refs") + } + } + + if prop.Properties == nil { + refProperties, err = getAllNonRefNonBlobProperties(authorizedGetClass, linkedClassName) + if err != nil { + return nil, errors.Wrap(err, "get all non ref non blob properties") + } + } + if len(refProperties) == 0 && isIdOnlyRequest(prop.Metadata) { + // This is a pure-ID query without any properties or additional metadata. + // Indicate this to the DB, so it can optimize accordingly + addProps.NoProps = true + } + + props = append(props, search.SelectProperty{ + Name: normalizedRefPropName, + IsPrimitive: false, + IsObject: false, + Refs: []search.SelectClass{{ + ClassName: linkedClassName, + RefProperties: refProperties, + AdditionalProperties: addProps, + }}, + }) + } + } + + if len(reqProps.ObjectProperties) > 0 { + props = append(props, extractNestedProperties(reqProps.ObjectProperties)...) 
+ } + + return props, nil +} + +func extractNestedProperties(props []*pb.ObjectPropertiesRequest) []search.SelectProperty { + selectProps := make([]search.SelectProperty, 0) + for _, prop := range props { + nestedProps := make([]search.SelectProperty, 0) + if len(prop.PrimitiveProperties) > 0 { + for _, primitive := range prop.PrimitiveProperties { + nestedProps = append(nestedProps, search.SelectProperty{ + Name: schema.LowercaseFirstLetter(primitive), + IsPrimitive: true, + IsObject: false, + }) + } + } + if len(prop.ObjectProperties) > 0 { + nestedProps = append(nestedProps, extractNestedProperties(prop.ObjectProperties)...) + } + selectProps = append(selectProps, search.SelectProperty{ + Name: schema.LowercaseFirstLetter(prop.PropName), + IsPrimitive: false, + IsObject: true, + Props: nestedProps, + }) + } + return selectProps +} + +func extractAdditionalPropsFromMetadata(class *models.Class, prop *pb.MetadataRequest, targetVectors []string, vectorSearch bool) (additional.Properties, error) { + props := additional.Properties{ + Vector: prop.Vector, + ID: prop.Uuid, + CreationTimeUnix: prop.CreationTimeUnix, + LastUpdateTimeUnix: prop.LastUpdateTimeUnix, + Distance: prop.Distance, + Score: prop.Score, + ExplainScore: prop.ExplainScore, + IsConsistent: prop.IsConsistent, + Vectors: prop.Vectors, + } + + if vectorSearch && configvalidation.CheckCertaintyCompatibility(class, targetVectors) != nil { + props.Certainty = false + } else { + props.Certainty = prop.Certainty + } + + // return all named vectors if vector is true + if prop.Vector && len(class.VectorConfig) > 0 { + props.Vectors = make([]string, 0, len(class.VectorConfig)) + for vectorName := range class.VectorConfig { + props.Vectors = append(props.Vectors, vectorName) + } + + } + + return props, nil +} + +func isIdOnlyRequest(metadata *pb.MetadataRequest) bool { + // could also use reflect here but this is more explicit + return (metadata != nil && + metadata.Uuid && + !metadata.Vector && + 
!metadata.CreationTimeUnix && + !metadata.LastUpdateTimeUnix && + !metadata.Distance && + !metadata.Certainty && + !metadata.Score && + !metadata.ExplainScore && + !metadata.IsConsistent) +} + +func getAllNonRefNonBlobProperties(authorizedGetClass classGetterWithAuthzFunc, className string) ([]search.SelectProperty, error) { + var props []search.SelectProperty + class, err := authorizedGetClass(className) + if err != nil { + return nil, err + } + for _, prop := range class.Properties { + dt, err := schema.GetPropertyDataType(class, prop.Name) + if err != nil { + return []search.SelectProperty{}, errors.Wrap(err, "get property data type") + } + if *dt == schema.DataTypeCRef || *dt == schema.DataTypeBlob { + continue + } + if *dt == schema.DataTypeObject || *dt == schema.DataTypeObjectArray { + nested, err := schema.GetPropertyByName(class, prop.Name) + if err != nil { + return []search.SelectProperty{}, errors.Wrap(err, "get nested property by name") + } + nestedProps, err := getAllNonRefNonBlobNestedProperties(&Property{Property: nested}) + if err != nil { + return []search.SelectProperty{}, errors.Wrap(err, "get all non ref non blob nested properties") + } + props = append(props, search.SelectProperty{ + Name: prop.Name, + IsPrimitive: false, + IsObject: true, + Props: nestedProps, + }) + } else { + props = append(props, search.SelectProperty{ + Name: prop.Name, + IsPrimitive: true, + }) + } + } + return props, nil +} + +func getAllNonRefNonBlobNestedProperties[P schema.PropertyInterface](property P) ([]search.SelectProperty, error) { + var props []search.SelectProperty + for _, prop := range property.GetNestedProperties() { + dt, err := schema.GetNestedPropertyDataType(property, prop.Name) + if err != nil { + return []search.SelectProperty{}, errors.Wrap(err, "get nested property data type") + } + if *dt == schema.DataTypeCRef || *dt == schema.DataTypeBlob { + continue + } + if *dt == schema.DataTypeObject || *dt == schema.DataTypeObjectArray { + nested, err := 
schema.GetNestedPropertyByName(property, prop.Name) + if err != nil { + return []search.SelectProperty{}, errors.Wrap(err, "get nested property by name") + } + nestedProps, err := getAllNonRefNonBlobNestedProperties(&NestedProperty{NestedProperty: nested}) + if err != nil { + return []search.SelectProperty{}, errors.Wrap(err, "get all non ref non blob nested properties") + } + props = append(props, search.SelectProperty{ + Name: prop.Name, + IsPrimitive: false, + IsObject: true, + Props: nestedProps, + }) + } else { + props = append(props, search.SelectProperty{ + Name: prop.Name, + IsPrimitive: true, + }) + } + } + return props, nil +} + +func parseNearImage(n *pb.NearImageSearch, targetVectors []string) (*nearImage.NearImageParams, error) { + out := &nearImage.NearImageParams{ + Image: n.Image, + TargetVectors: targetVectors, + } + + // The following business logic should not sit in the API. However, it is + // also part of the GraphQL API, so we need to duplicate it in order to get + // the same behavior + if n.Distance != nil && n.Certainty != nil { + return nil, fmt.Errorf("near_image: cannot provide distance and certainty") + } + + if n.Certainty != nil { + out.Certainty = *n.Certainty + } + + if n.Distance != nil { + out.Distance = *n.Distance + out.WithDistance = true + } + + return out, nil +} + +func parseNearAudio(n *pb.NearAudioSearch, targetVectors []string) (*nearAudio.NearAudioParams, error) { + out := &nearAudio.NearAudioParams{ + Audio: n.Audio, + TargetVectors: targetVectors, + } + + // The following business logic should not sit in the API. 
However, it is + // also part of the GraphQL API, so we need to duplicate it in order to get + // the same behavior + if n.Distance != nil && n.Certainty != nil { + return nil, fmt.Errorf("near_audio: cannot provide distance and certainty") + } + + if n.Certainty != nil { + out.Certainty = *n.Certainty + } + + if n.Distance != nil { + out.Distance = *n.Distance + out.WithDistance = true + } + + return out, nil +} + +func parseNearVideo(n *pb.NearVideoSearch, targetVectors []string) (*nearVideo.NearVideoParams, error) { + out := &nearVideo.NearVideoParams{ + Video: n.Video, + TargetVectors: targetVectors, + } + + // The following business logic should not sit in the API. However, it is + // also part of the GraphQL API, so we need to duplicate it in order to get + // the same behavior + if n.Distance != nil && n.Certainty != nil { + return nil, fmt.Errorf("near_video: cannot provide distance and certainty") + } + + if n.Certainty != nil { + out.Certainty = *n.Certainty + } + + if n.Distance != nil { + out.Distance = *n.Distance + out.WithDistance = true + } + + return out, nil +} + +func parseNearDepth(n *pb.NearDepthSearch, targetVectors []string) (*nearDepth.NearDepthParams, error) { + out := &nearDepth.NearDepthParams{ + Depth: n.Depth, + TargetVectors: targetVectors, + } + + // The following business logic should not sit in the API. 
However, it is + // also part of the GraphQL API, so we need to duplicate it in order to get + // the same behavior + if n.Distance != nil && n.Certainty != nil { + return nil, fmt.Errorf("near_depth: cannot provide distance and certainty") + } + + if n.Certainty != nil { + out.Certainty = *n.Certainty + } + + if n.Distance != nil { + out.Distance = *n.Distance + out.WithDistance = true + } + + return out, nil +} + +func parseNearThermal(n *pb.NearThermalSearch, targetVectors []string) (*nearThermal.NearThermalParams, error) { + out := &nearThermal.NearThermalParams{ + Thermal: n.Thermal, + TargetVectors: targetVectors, + } + + // The following business logic should not sit in the API. However, it is + // also part of the GraphQL API, so we need to duplicate it in order to get + // the same behavior + if n.Distance != nil && n.Certainty != nil { + return nil, fmt.Errorf("near_thermal: cannot provide distance and certainty") + } + + if n.Certainty != nil { + out.Certainty = *n.Certainty + } + + if n.Distance != nil { + out.Distance = *n.Distance + out.WithDistance = true + } + + return out, nil +} + +func parseNearIMU(n *pb.NearIMUSearch, targetVectors []string) (*nearImu.NearIMUParams, error) { + out := &nearImu.NearIMUParams{ + IMU: n.Imu, + TargetVectors: targetVectors, + } + + // The following business logic should not sit in the API. 
However, it is + // also part of the GraphQL API, so we need to duplicate it in order to get + // the same behavior + if n.Distance != nil && n.Certainty != nil { + return nil, fmt.Errorf("near_imu: cannot provide distance and certainty") + } + + if n.Certainty != nil { + out.Certainty = *n.Certainty + } + + if n.Distance != nil { + out.Distance = *n.Distance + out.WithDistance = true + } + + return out, nil +} + +func parseNearVec(nv *pb.NearVector, targetVectors []string, + class *models.Class, targetCombination *dto.TargetCombination, +) (*searchparams.NearVector, *dto.TargetCombination, error) { + var vector models.Vector + var err error + // vectors has precedent for being more efficient + if len(nv.Vectors) > 0 { + switch len(nv.Vectors) { + case 1: + vector, err = extractVector(nv.Vectors[0]) + if err != nil { + return nil, nil, fmt.Errorf("near_vector: %w", err) + } + default: + return nil, nil, fmt.Errorf("near_vector: only 1 vector supported, found %d vectors", len(nv.Vectors)) + } + } else if len(nv.VectorBytes) > 0 { + vector = byteops.Fp32SliceFromBytes(nv.VectorBytes) + } else if len(nv.Vector) > 0 { + vector = nv.Vector + } + + if vector != nil && nv.VectorPerTarget != nil { + return nil, nil, fmt.Errorf("near_vector: either vector or VectorPerTarget must be provided, not both") + } + + targetVectorsTmp := targetVectors + if len(targetVectors) == 0 { + targetVectorsTmp = []string{""} + } + + detectCombinationWeights := false + vectors := make([]models.Vector, len(targetVectorsTmp)) + if vector != nil { + for i := range targetVectorsTmp { + _, isMultiVec := vector.([][]float32) + supportsMultiVector := isTargetVectorMultiVector(class, targetVectorsTmp[i]) + if (isMultiVec && supportsMultiVector) || (!isMultiVec && !supportsMultiVector) { + vectors[i] = vector + } else { + if isMultiVec { + return nil, nil, fmt.Errorf("near_vector: provided vector is a multi vector but vector index supports regular vectors") + } else { + return nil, nil, 
fmt.Errorf("near_vector: provided vector is a regular vector but vector index supports multi vectors") + } + } + } + } else if nv.VectorForTargets != nil { + if len(nv.VectorForTargets) > 0 && len(nv.VectorForTargets[0].Vectors) > 0 { + var deduplicatedTargetVectorsTmp []string + for _, targetVector := range targetVectorsTmp { + if !slices.Contains(deduplicatedTargetVectorsTmp, targetVector) { + deduplicatedTargetVectorsTmp = append(deduplicatedTargetVectorsTmp, targetVector) + } + } + targetVectorsTmp = deduplicatedTargetVectorsTmp + vectors = make([]models.Vector, len(targetVectorsTmp)) + + switch targetCombination.Type { + case dto.ManualWeights, dto.RelativeScore: + // do nothing, Manual and Relative Scores don't need adjustment + default: + detectCombinationWeights = true + } + } + + if len(nv.VectorForTargets) != len(targetVectorsTmp) { + return nil, nil, fmt.Errorf("near_vector: vector for target must have the same lengths as target vectors") + } + + for i := range nv.VectorForTargets { + if nv.VectorForTargets[i].Name != targetVectorsTmp[i] { + var allNames []string + for k := range nv.VectorForTargets { + allNames = append(allNames, nv.VectorForTargets[k].Name) + } + return nil, nil, fmt.Errorf("near_vector: vector for target %s is required. 
All target vectors: %v all vectors for targets %v", targetVectorsTmp[i], targetVectorsTmp, allNames) + } + if len(nv.VectorForTargets[i].Vectors) > 0 { + vectors[i], err = extractVectors(nv.VectorForTargets[i].Vectors) + if err != nil { + return nil, nil, fmt.Errorf("near_vector: vector for targets: extract vectors[%v]: %w", i, err) + } + } else { + vectors[i] = byteops.Fp32SliceFromBytes(nv.VectorForTargets[i].VectorBytes) + } + } + } else if nv.VectorPerTarget != nil { + if len(nv.VectorPerTarget) != len(targetVectorsTmp) { + return nil, nil, fmt.Errorf("near_vector: vector per target must be provided for all targets") + } + for i, target := range targetVectorsTmp { + if vec, ok := nv.VectorPerTarget[target]; ok { + vectors[i] = byteops.Fp32SliceFromBytes(vec) + } else { + var allNames []string + for k := range nv.VectorPerTarget { + allNames = append(allNames, k) + } + return nil, nil, fmt.Errorf("near_vector: vector for target %s is required. All target vectors: %v all vectors for targets %v", targetVectorsTmp[i], targetVectorsTmp, allNames) + } + } + } else { + return nil, nil, fmt.Errorf("near_vector: vector is required") + } + + if len(targetVectors) > 0 { + var detectedVectors []models.Vector + var detectedTargetVectorNames []string + adjustedTargetCombination := targetCombination + for i, targetVector := range targetVectorsTmp { + switch vectorsArray := vectors[i].(type) { + case [][][]float32: + for _, multiVec := range vectorsArray { + if isTargetVectorMultiVector(class, targetVector) { + detectedVectors = append(detectedVectors, multiVec) + detectedTargetVectorNames = append(detectedTargetVectorNames, targetVector) + } else { + for _, vec := range multiVec { + detectedVectors = append(detectedVectors, vec) + detectedTargetVectorNames = append(detectedTargetVectorNames, targetVector) + } + } + } + case [][]float32: + if isTargetVectorMultiVector(class, targetVector) { + detectedVectors = append(detectedVectors, vectorsArray) + detectedTargetVectorNames = 
append(detectedTargetVectorNames, targetVector) + } else { + for _, vec := range vectorsArray { + detectedVectors = append(detectedVectors, vec) + detectedTargetVectorNames = append(detectedTargetVectorNames, targetVector) + } + } + default: + detectedVectors = append(detectedVectors, vectorsArray) + detectedTargetVectorNames = append(detectedTargetVectorNames, targetVector) + } + } + + if detectCombinationWeights { + switch targetCombination.Type { + case dto.Average: + fixedWeights := extractTargetCombinationAverageWeights(detectedTargetVectorNames) + adjustedTargetCombination = &dto.TargetCombination{ + Type: dto.Average, Weights: fixedWeights, + } + case dto.Sum: + fixedWeights := extractTargetCombinationSumWeights(detectedTargetVectorNames) + adjustedTargetCombination = &dto.TargetCombination{ + Type: dto.Sum, Weights: fixedWeights, + } + default: + adjustedTargetCombination = &dto.TargetCombination{ + Type: targetCombination.Type, Weights: make([]float32, len(detectedTargetVectorNames)), + } + } + } + + return &searchparams.NearVector{ + Vectors: detectedVectors, + TargetVectors: detectedTargetVectorNames, + }, adjustedTargetCombination, nil + } + + return &searchparams.NearVector{ + Vectors: vectors, + TargetVectors: targetVectors, + }, targetCombination, nil +} + +// extractPropertiesForModules extracts properties that are needed by modules but are not requested by the user +func (p *Parser) extractPropertiesForModules(params *dto.GetParams) error { + var additionalProps []string + for _, value := range params.AdditionalProperties.ModuleParams { + extractor, ok := value.(additional2.PropertyExtractor) + if ok { + additionalProps = append(additionalProps, extractor.GetPropertiesToExtract()...) 
+ } + } + class, err := p.authorizedGetClass(params.ClassName) + if err != nil { + return err + } + schemaProps := class.Properties + propDataTypes := make(map[string]schema.DataType) + for _, prop := range schemaProps { + propDataTypes[prop.Name] = schema.DataType(prop.DataType[0]) + } + propsToAdd := make([]search.SelectProperty, 0) +OUTER: + for _, additionalProp := range additionalProps { + for _, prop := range params.Properties { + if prop.Name == additionalProp { + continue OUTER + } + } + if propDataTypes[additionalProp] == schema.DataTypeBlob { + // make sure that blobs aren't added to the response payload by accident + propsToAdd = append(propsToAdd, search.SelectProperty{Name: additionalProp, IsPrimitive: false}) + } else { + propsToAdd = append(propsToAdd, search.SelectProperty{Name: additionalProp, IsPrimitive: true}) + } + } + params.Properties = append(params.Properties, propsToAdd...) + if len(params.Properties) > 0 { + params.AdditionalProperties.NoProps = false + } + return nil +} + +func extractVectors(vectors []*pb.Vectors) (interface{}, error) { + var vecs [][]float32 + var multiVecs [][][]float32 + for i := range vectors { + vec, err := extractVector(vectors[i]) + if err != nil { + return nil, fmt.Errorf("vectors[%d]: %w", i, err) + } + switch v := vec.(type) { + case []float32: + vecs = append(vecs, v) + case [][]float32: + multiVecs = append(multiVecs, v) + default: + return nil, fmt.Errorf("vectors[%d]: unrecognized vector type: %T", i, vec) + } + } + if len(multiVecs) > 0 { + return multiVecs, nil + } + return vecs, nil +} + +func extractVector(vector *pb.Vectors) (models.Vector, error) { + if vector != nil { + switch vector.Type { + case *pb.Vectors_VECTOR_TYPE_UNSPECIFIED.Enum(), *pb.Vectors_VECTOR_TYPE_SINGLE_FP32.Enum(): + return byteops.Fp32SliceFromBytes(vector.VectorBytes), nil + case *pb.Vectors_VECTOR_TYPE_MULTI_FP32.Enum(): + out, err := byteops.Fp32SliceOfSlicesFromBytes(vector.VectorBytes) + if err != nil { + return nil, 
fmt.Errorf("extract vector: %w", err) + } + return out, nil + default: + return nil, fmt.Errorf("cannot extract vector: unknown vector type: %T", vector.Type) + } + } + return nil, fmt.Errorf("cannot extract vector: empty vectors") +} + +func isTargetVectorMultiVector(class *models.Class, targetVector string) bool { + switch targetVector { + case "": + if vc, ok := class.VectorIndexConfig.(schemaConfig.VectorIndexConfig); ok { + return vc.IsMultiVector() + } + return false + default: + if vectorConfig, ok := class.VectorConfig[targetVector]; ok { + if vc, ok := vectorConfig.VectorIndexConfig.(schemaConfig.VectorIndexConfig); ok { + return vc.IsMultiVector() + } + } + return false + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/parse_search_request_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/parse_search_request_test.go new file mode 100644 index 0000000000000000000000000000000000000000..540a61908b06c64998948d24aea686cc8f9a149d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/parse_search_request_test.go @@ -0,0 +1,2500 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "fmt" + "sort" + "testing" + + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + "github.com/weaviate/weaviate/usecases/byteops" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/modulecomponents/additional/generate" + "github.com/weaviate/weaviate/usecases/modulecomponents/additional/rank" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearAudio" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearDepth" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearImage" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearImu" + nearText2 "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearText" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearThermal" + "github.com/weaviate/weaviate/usecases/modulecomponents/arguments/nearVideo" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + vectorIndex "github.com/weaviate/weaviate/entities/vectorindex/common" + "github.com/weaviate/weaviate/entities/vectorindex/flat" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +const ( + UUID3 = "a4de3ca0-6975-464f-b23b-adddd83630d7" + UUID4 = "7e10ec81-a26d-4ac7-8264-3e3e05397ddc" +) + +// TODO amourao: add operator and minimum should match to the tests +var ( + classname = "TestClass" + refClass1 = "OtherClass" + refClass2 = "AnotherClass" + dotClass = "DotClass" + 
objClass = "ObjClass" + multiVecClass = "MultiVecClass" + multiVecClassWithColBERT = "MultiVecClassWithColBERT" + singleNamedVecClass = "SingleNamedVecClass" + regularWithColBERTClass = "RegularWithColBERTClass" + mixedVectorsClass = "MixedVectorsClass" + + scheme = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: classname, + Properties: []*models.Property{ + {Name: "name", DataType: schema.DataTypeText.PropString()}, + {Name: "number", DataType: schema.DataTypeInt.PropString()}, + {Name: "floats", DataType: schema.DataTypeNumberArray.PropString()}, + {Name: "uuid", DataType: schema.DataTypeUUID.PropString()}, + {Name: "ref", DataType: []string{refClass1}}, + {Name: "multiRef", DataType: []string{refClass1, refClass2}}, + }, + VectorIndexConfig: hnsw.UserConfig{Distance: vectorIndex.DefaultDistanceMetric}, + }, + { + Class: refClass1, + Properties: []*models.Property{ + {Name: "something", DataType: schema.DataTypeText.PropString()}, + {Name: "somethings", DataType: schema.DataTypeTextArray.PropString()}, + {Name: "ref2", DataType: []string{refClass2}}, + }, + VectorIndexConfig: hnsw.UserConfig{Distance: vectorIndex.DefaultDistanceMetric}, + }, + { + Class: refClass2, + Properties: []*models.Property{ + {Name: "else", DataType: schema.DataTypeText.PropString()}, + {Name: "ref3", DataType: []string{refClass2}}, + }, + }, + { + Class: dotClass, + Properties: []*models.Property{ + {Name: "something", DataType: schema.DataTypeText.PropString()}, + }, + VectorIndexConfig: hnsw.UserConfig{Distance: vectorIndex.DistanceDot}, + }, + { + Class: objClass, + Properties: []*models.Property{ + { + Name: "something", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "else", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "name", + DataType: 
schema.DataTypeText.PropString(), + }, + }, + }, + { + Name: "elses", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + }, + }, + }, + }, + }, + }, + VectorIndexConfig: hnsw.UserConfig{Distance: vectorIndex.DefaultDistanceMetric}, + }, + { + Class: multiVecClass, + Properties: []*models.Property{ + {Name: "first", DataType: schema.DataTypeText.PropString()}, + }, + VectorConfig: map[string]models.VectorConfig{ + "custom": { + VectorIndexType: "hnsw", + VectorIndexConfig: hnsw.UserConfig{Distance: vectorIndex.DistanceCosine}, + Vectorizer: map[string]interface{}{"none": map[string]interface{}{}}, + }, + "first": { + VectorIndexType: "flat", + VectorIndexConfig: flat.UserConfig{Distance: vectorIndex.DistanceCosine}, + Vectorizer: map[string]interface{}{"text2vec-contextionary": map[string]interface{}{}}, + }, + "second": { + VectorIndexType: "flat", + VectorIndexConfig: flat.UserConfig{Distance: vectorIndex.DistanceCosine}, + Vectorizer: map[string]interface{}{"text2vec-contextionary": map[string]interface{}{}}, + }, + }, + }, + { + Class: multiVecClassWithColBERT, + Properties: []*models.Property{ + {Name: "first", DataType: schema.DataTypeText.PropString()}, + }, + VectorConfig: map[string]models.VectorConfig{ + "custom": { + VectorIndexType: "hnsw", + VectorIndexConfig: hnsw.UserConfig{Distance: vectorIndex.DistanceCosine}, + Vectorizer: map[string]interface{}{"none": map[string]interface{}{}}, + }, + "custom_colbert": { + VectorIndexType: "hnsw", + VectorIndexConfig: hnsw.UserConfig{Distance: vectorIndex.DistanceCosine, Multivector: hnsw.MultivectorConfig{Enabled: true}}, + Vectorizer: map[string]interface{}{"none": map[string]interface{}{}}, + }, + "first": { + VectorIndexType: "flat", + VectorIndexConfig: flat.UserConfig{Distance: vectorIndex.DistanceCosine}, + Vectorizer: map[string]interface{}{"text2vec-contextionary": 
map[string]interface{}{}}, + }, + "second": { + VectorIndexType: "flat", + VectorIndexConfig: flat.UserConfig{Distance: vectorIndex.DistanceCosine}, + Vectorizer: map[string]interface{}{"text2vec-contextionary": map[string]interface{}{}}, + }, + }, + }, + { + Class: singleNamedVecClass, + Properties: []*models.Property{ + {Name: "first", DataType: schema.DataTypeText.PropString()}, + }, + VectorConfig: map[string]models.VectorConfig{ + "default": { + VectorIndexType: "hnsw", + VectorIndexConfig: hnsw.UserConfig{Distance: vectorIndex.DistanceCosine}, + Vectorizer: map[string]interface{}{"none": map[string]interface{}{}}, + }, + }, + }, + { + Class: regularWithColBERTClass, + Properties: []*models.Property{ + {Name: "first", DataType: schema.DataTypeText.PropString()}, + }, + VectorConfig: map[string]models.VectorConfig{ + "regular_no_type": { + VectorIndexType: "hnsw", + VectorIndexConfig: hnsw.UserConfig{Distance: vectorIndex.DistanceCosine}, + Vectorizer: map[string]interface{}{"none": map[string]interface{}{}}, + }, + "regular_unspecified": { + VectorIndexType: "flat", + VectorIndexConfig: flat.UserConfig{Distance: vectorIndex.DistanceCosine}, + Vectorizer: map[string]interface{}{"text2vec-contextionary": map[string]interface{}{}}, + }, + "regular_fp32": { + VectorIndexType: "flat", + VectorIndexConfig: flat.UserConfig{Distance: vectorIndex.DistanceCosine}, + Vectorizer: map[string]interface{}{"text2vec-contextionary": map[string]interface{}{}}, + }, + "regular_fp32_and_name": { + VectorIndexType: "flat", + VectorIndexConfig: flat.UserConfig{Distance: vectorIndex.DistanceCosine}, + Vectorizer: map[string]interface{}{"text2vec-contextionary": map[string]interface{}{}}, + }, + "colbert_fp32": { + VectorIndexType: "hnsw", + VectorIndexConfig: hnsw.UserConfig{Distance: vectorIndex.DistanceCosine, Multivector: hnsw.MultivectorConfig{Enabled: true}}, + Vectorizer: map[string]interface{}{"none": map[string]interface{}{}}, + }, + "colbert_fp32_2": { + VectorIndexType: 
"hnsw", + VectorIndexConfig: hnsw.UserConfig{Distance: vectorIndex.DistanceCosine, Multivector: hnsw.MultivectorConfig{Enabled: true}}, + Vectorizer: map[string]interface{}{"none": map[string]interface{}{}}, + }, + }, + }, + { + Class: mixedVectorsClass, + Properties: []*models.Property{ + {Name: "first", DataType: schema.DataTypeText.PropString()}, + }, + Vectorizer: "text2vec-contextionary", + VectorIndexType: "hnsw", + VectorIndexConfig: hnsw.UserConfig{Distance: vectorIndex.DistanceCosine}, + VectorConfig: map[string]models.VectorConfig{ + "first_vec": { + VectorIndexType: "hnsw", + VectorIndexConfig: hnsw.UserConfig{Distance: vectorIndex.DistanceCosine}, + Vectorizer: map[string]interface{}{"text2vec-contextionary": map[string]interface{}{}}, + }, + "second_vec": { + VectorIndexType: "hnsw", + VectorIndexConfig: hnsw.UserConfig{Distance: vectorIndex.DistanceCosine}, + Vectorizer: map[string]interface{}{"text2vec-contextionary": map[string]interface{}{}}, + }, + }, + }, + }, + }, + } +) + +func TestGRPCSearchRequest(t *testing.T) { + one := float64(1.0) + + defaultTestClassProps := search.SelectProperties{{Name: "name", IsPrimitive: true}, {Name: "number", IsPrimitive: true}, {Name: "floats", IsPrimitive: true}, {Name: "uuid", IsPrimitive: true}} + defaultNamedVecProps := search.SelectProperties{{Name: "first", IsPrimitive: true}} + + defaultPagination := &filters.Pagination{Limit: 10} + quorum := pb.ConsistencyLevel_CONSISTENCY_LEVEL_QUORUM + someString1 := "a word" + someString2 := "other" + + tests := []struct { + name string + req *pb.SearchRequest + out dto.GetParams + error bool + }{ + { + name: "hybrid neartext", + req: &pb.SearchRequest{ + Collection: classname, + Metadata: &pb.MetadataRequest{Vector: true, Certainty: false}, + HybridSearch: &pb.Hybrid{ + Query: "query", + NearText: &pb.NearTextSearch{ + Query: []string{"first and", "second", "query"}, + MoveTo: &pb.NearTextSearch_Move{Force: 0.5, Concepts: []string{"first", "and second"}, Uuids: 
[]string{UUID3, UUID4}}, + MoveAway: &pb.NearTextSearch_Move{Force: 0.3, Concepts: []string{"second to last", "really last"}, Uuids: []string{UUID4}}, + }, + }, + }, + out: dto.GetParams{ + ClassName: classname, + Pagination: defaultPagination, + HybridSearch: &searchparams.HybridSearch{ + Query: "query", + FusionAlgorithm: common_filters.HybridRelativeScoreFusion, + NearTextParams: &searchparams.NearTextParams{ + Limit: 10, // default + Values: []string{"first and", "second", "query"}, + MoveTo: searchparams.ExploreMove{Force: 0.5, Values: []string{"first", "and second"}}, + MoveAwayFrom: searchparams.ExploreMove{Force: 0.3, Values: []string{"second to last", "really last"}}, + }, + }, + + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + }, + error: false, + }, + { + name: "hybrid nearvector returns all named vectors", + req: &pb.SearchRequest{ + Collection: multiVecClass, + Metadata: &pb.MetadataRequest{Vector: true}, + Properties: &pb.PropertiesRequest{}, + + HybridSearch: &pb.Hybrid{ + Alpha: 1.0, + Query: "nearvecquery", + NearVector: &pb.NearVector{ + VectorBytes: byteops.Fp32SliceToBytes([]float32{1, 2, 3}), + Certainty: &one, + Distance: &one, + }, + TargetVectors: []string{"custom"}, + }, + }, + out: dto.GetParams{ + ClassName: multiVecClass, + Pagination: defaultPagination, + Properties: search.SelectProperties{}, + AdditionalProperties: additional.Properties{Vectors: []string{"custom", "first", "second"}, Vector: true, NoProps: true}, + HybridSearch: &searchparams.HybridSearch{ + Alpha: 1.0, + Query: "nearvecquery", + FusionAlgorithm: 1, + NearVectorParams: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1, 2, 3}}, + Certainty: 1.0, + Distance: 1.0, + WithDistance: true, + TargetVectors: []string{"custom"}, + }, + TargetVectors: []string{"custom"}, + }, + }, + error: false, + }, + { + name: "hybrid nearvector returns all named vectors with fp32 vectors", + req: 
&pb.SearchRequest{ + Collection: multiVecClass, + Metadata: &pb.MetadataRequest{Vector: true}, + Properties: &pb.PropertiesRequest{}, + + HybridSearch: &pb.Hybrid{ + Alpha: 1.0, + Query: "nearvecquery", + NearVector: &pb.NearVector{ + Certainty: &one, + Distance: &one, + Vectors: []*pb.Vectors{ + { + VectorBytes: byteops.Fp32SliceToBytes([]float32{1, 2, 3}), + Type: pb.Vectors_VECTOR_TYPE_SINGLE_FP32, + }, + }, + }, + TargetVectors: []string{"custom"}, + }, + }, + out: dto.GetParams{ + ClassName: multiVecClass, + Pagination: defaultPagination, + Properties: search.SelectProperties{}, + AdditionalProperties: additional.Properties{Vectors: []string{"custom", "first", "second"}, Vector: true, NoProps: true}, + HybridSearch: &searchparams.HybridSearch{ + Alpha: 1.0, + Query: "nearvecquery", + FusionAlgorithm: 1, + NearVectorParams: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1, 2, 3}}, + Certainty: 1.0, + Distance: 1.0, + WithDistance: true, + TargetVectors: []string{"custom"}, + }, + TargetVectors: []string{"custom"}, + }, + }, + error: false, + }, + { + name: "hybrid nearvector returns all named vectors with colbert fp32 vectors", + req: &pb.SearchRequest{ + Collection: multiVecClassWithColBERT, + Metadata: &pb.MetadataRequest{Vector: true}, + Properties: &pb.PropertiesRequest{}, + + HybridSearch: &pb.Hybrid{ + Alpha: 1.0, + Query: "nearvecquery", + NearVector: &pb.NearVector{ + Certainty: &one, + Distance: &one, + Vectors: []*pb.Vectors{ + { + VectorBytes: byteops.Fp32SliceOfSlicesToBytes([][]float32{ + {1, 2, 3}, + {11, 22, 33}, + {111, 222, 333}, + }), + Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32, + }, + }, + }, + TargetVectors: []string{"custom_colbert"}, + }, + }, + out: dto.GetParams{ + ClassName: multiVecClassWithColBERT, + Pagination: defaultPagination, + Properties: search.SelectProperties{}, + AdditionalProperties: additional.Properties{Vectors: []string{"custom", "first", "second", "custom_colbert"}, Vector: true, NoProps: true}, + 
HybridSearch: &searchparams.HybridSearch{ + Alpha: 1.0, + Query: "nearvecquery", + FusionAlgorithm: 1, + NearVectorParams: &searchparams.NearVector{ + Vectors: []models.Vector{[][]float32{{1, 2, 3}, {11, 22, 33}, {111, 222, 333}}}, + Certainty: 1.0, + Distance: 1.0, + WithDistance: true, + TargetVectors: []string{"custom_colbert"}, + }, + TargetVectors: []string{"custom_colbert"}, + }, + }, + error: false, + }, + { + name: "nearvector with fp32 vectors", + req: &pb.SearchRequest{ + Collection: multiVecClass, + Metadata: &pb.MetadataRequest{Vector: true}, + Properties: &pb.PropertiesRequest{}, + NearVector: &pb.NearVector{ + Distance: &one, + Vectors: []*pb.Vectors{ + { + VectorBytes: byteops.Fp32SliceToBytes([]float32{1, 2, 3}), + Type: pb.Vectors_VECTOR_TYPE_SINGLE_FP32, + }, + }, + TargetVectors: []string{"custom"}, + }, + }, + out: dto.GetParams{ + ClassName: multiVecClass, + Pagination: defaultPagination, + Properties: search.SelectProperties{}, + AdditionalProperties: additional.Properties{Vectors: []string{"custom", "first", "second"}, Vector: true, NoProps: true}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1, 2, 3}}, + Distance: 1.0, + WithDistance: true, + TargetVectors: []string{"custom"}, + }, + }, + error: false, + }, + { + name: "nearvector with colbert fp32 vectors", + req: &pb.SearchRequest{ + Collection: multiVecClassWithColBERT, + Metadata: &pb.MetadataRequest{Vector: true}, + Properties: &pb.PropertiesRequest{}, + NearVector: &pb.NearVector{ + Certainty: &one, + Vectors: []*pb.Vectors{ + { + VectorBytes: byteops.Fp32SliceOfSlicesToBytes([][]float32{{1, 2, 3}}), + Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32, + }, + }, + TargetVectors: []string{"custom_colbert"}, + }, + }, + out: dto.GetParams{ + ClassName: multiVecClassWithColBERT, + Pagination: defaultPagination, + Properties: search.SelectProperties{}, + AdditionalProperties: additional.Properties{Vectors: []string{"custom", "first", "second", "custom_colbert"}, 
Vector: true, NoProps: true}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[][]float32{{1, 2, 3}}}, + Certainty: 1.0, + TargetVectors: []string{"custom_colbert"}, + }, + }, + error: false, + }, + { + name: "near text wrong uuid format", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + NearText: &pb.NearTextSearch{ + Query: []string{"first"}, + MoveTo: &pb.NearTextSearch_Move{Force: 0.5, Uuids: []string{"not a uuid"}}, + }, + }, + out: dto.GetParams{}, + error: true, + }, + { + name: "No classname", + req: &pb.SearchRequest{}, + out: dto.GetParams{}, + error: true, + }, + { + name: "No return values given", + req: &pb.SearchRequest{Collection: classname}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, Properties: defaultTestClassProps, + }, + error: false, + }, + { + name: "Empty return properties given", + req: &pb.SearchRequest{Collection: classname, Properties: &pb.PropertiesRequest{}}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, Properties: search.SelectProperties{}, AdditionalProperties: additional.Properties{ + NoProps: true, + }, + }, + error: false, + }, + { + name: "Empty return properties given with new default logic", + req: &pb.SearchRequest{Uses_123Api: true, Collection: classname, Properties: &pb.PropertiesRequest{}}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, Properties: search.SelectProperties{}, AdditionalProperties: additional.Properties{ + NoProps: true, + }, + }, + error: false, + }, + { + name: "No return values given for dot distance", + req: &pb.SearchRequest{Collection: dotClass}, + out: dto.GetParams{ + ClassName: dotClass, Pagination: defaultPagination, Properties: search.SelectProperties{{Name: "something", IsPrimitive: true}}, + }, + error: false, + }, + { + name: "Metadata return values", + req: &pb.SearchRequest{Collection: classname, Metadata: &pb.MetadataRequest{Vector: 
true, Certainty: false, IsConsistent: true}}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{ + Vector: true, + NoProps: false, + IsConsistent: true, + }, + }, + error: false, + }, + { + name: "Metadata ID only query", + req: &pb.SearchRequest{Collection: classname, Properties: &pb.PropertiesRequest{}, Metadata: &pb.MetadataRequest{Uuid: true}}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: search.SelectProperties{}, + AdditionalProperties: additional.Properties{ + ID: true, + NoProps: true, + }, + }, + error: false, + }, + { + name: "Metadata ID only query using new default logic", + req: &pb.SearchRequest{Uses_123Api: true, Collection: classname, Properties: &pb.PropertiesRequest{}, Metadata: &pb.MetadataRequest{Uuid: true}}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: search.SelectProperties{}, + AdditionalProperties: additional.Properties{ + ID: true, + NoProps: true, + }, + }, + error: false, + }, + { + name: "Properties return all nonref values", + req: &pb.SearchRequest{Collection: classname}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, Properties: defaultTestClassProps, + }, + error: false, + }, + { + name: "Vectors returns all named vectors", + req: &pb.SearchRequest{ + Collection: multiVecClass, + Metadata: &pb.MetadataRequest{Vector: true}, + Properties: &pb.PropertiesRequest{}, + NearVector: &pb.NearVector{ + Vector: []float32{1, 2, 3}, + TargetVectors: []string{"custom"}, + }, + }, + out: dto.GetParams{ + ClassName: multiVecClass, + Pagination: defaultPagination, + Properties: search.SelectProperties{}, + AdditionalProperties: additional.Properties{Vectors: []string{"custom", "first", "second"}, Vector: true, NoProps: true}, + NearVector: &searchparams.NearVector{ + TargetVectors: []string{"custom"}, + Vectors: 
[]models.Vector{[]float32{1, 2, 3}}, + }, + }, + error: false, + }, + { + name: "Vectors returns all named fp32 vectors", + req: &pb.SearchRequest{ + Collection: multiVecClass, + Metadata: &pb.MetadataRequest{Vector: true}, + Properties: &pb.PropertiesRequest{}, + NearVector: &pb.NearVector{ + Vectors: []*pb.Vectors{ + {VectorBytes: byteops.Fp32SliceToBytes([]float32{1, 2, 3})}, + }, + TargetVectors: []string{"custom"}, + }, + }, + out: dto.GetParams{ + ClassName: multiVecClass, + Pagination: defaultPagination, + Properties: search.SelectProperties{}, + AdditionalProperties: additional.Properties{Vectors: []string{"custom", "first", "second"}, Vector: true, NoProps: true}, + NearVector: &searchparams.NearVector{ + TargetVectors: []string{"custom"}, + Vectors: []models.Vector{[]float32{1, 2, 3}}, + }, + }, + error: false, + }, + { + name: "Vectors returns all named colbert fp32 vectors", + req: &pb.SearchRequest{ + Collection: multiVecClassWithColBERT, + Metadata: &pb.MetadataRequest{Vector: true}, + Properties: &pb.PropertiesRequest{}, + NearVector: &pb.NearVector{ + Vectors: []*pb.Vectors{ + {VectorBytes: byteops.Fp32SliceOfSlicesToBytes([][]float32{{1, 2, 3}, {1, 2, 3}}), Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32}, + }, + TargetVectors: []string{"custom_colbert"}, + }, + }, + out: dto.GetParams{ + ClassName: multiVecClassWithColBERT, + Pagination: defaultPagination, + Properties: search.SelectProperties{}, + AdditionalProperties: additional.Properties{Vectors: []string{"custom", "first", "second", "custom_colbert"}, Vector: true, NoProps: true}, + NearVector: &searchparams.NearVector{ + TargetVectors: []string{"custom_colbert"}, + Vectors: []models.Vector{[][]float32{{1, 2, 3}, {1, 2, 3}}}, + }, + }, + error: false, + }, + { + name: "Should error when passed ColBERT vectors when normal vectors are only supported", + req: &pb.SearchRequest{ + Collection: multiVecClass, + Metadata: &pb.MetadataRequest{Vector: true}, + Properties: &pb.PropertiesRequest{}, + 
NearVector: &pb.NearVector{ + Vectors: []*pb.Vectors{ + {VectorBytes: byteops.Fp32SliceOfSlicesToBytes([][]float32{{1, 2, 3}, {1, 2, 3}}), Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32}, + }, + TargetVectors: []string{"custom"}, + }, + }, + error: true, + }, + { + name: "Vectors throws error if no target vectors are given", + req: &pb.SearchRequest{ + Collection: multiVecClass, + Metadata: &pb.MetadataRequest{Vector: true}, + Properties: &pb.PropertiesRequest{}, + NearVector: &pb.NearVector{ + Vector: []float32{1, 2, 3}, + }, + }, + out: dto.GetParams{}, + error: true, + }, + { + name: "Vectors does not throw error if more than one target vectors are given", + req: &pb.SearchRequest{ + Collection: multiVecClass, + Metadata: &pb.MetadataRequest{Vector: true}, + Properties: &pb.PropertiesRequest{}, + NearVector: &pb.NearVector{ + Vector: []float32{1, 2, 3}, + TargetVectors: []string{"custom", "first"}, + }, + }, + out: dto.GetParams{ + ClassName: multiVecClass, + Pagination: defaultPagination, + Properties: search.SelectProperties{}, + AdditionalProperties: additional.Properties{Vectors: []string{"custom", "first", "second"}, Vector: true, NoProps: true}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1, 2, 3}, []float32{1, 2, 3}}, + TargetVectors: []string{"custom", "first"}, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum}, + }, error: false, + }, + { + name: "Properties return all nonref values with new default logic", + req: &pb.SearchRequest{Uses_123Api: true, Collection: classname, Properties: &pb.PropertiesRequest{ReturnAllNonrefProperties: true}}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, Properties: defaultTestClassProps, + }, + error: false, + }, + { + name: "Properties return all nonref values with ref and specific props using new default logic", + req: &pb.SearchRequest{Uses_123Api: true, Collection: classname, Properties: &pb.PropertiesRequest{ + 
ReturnAllNonrefProperties: true, + RefProperties: []*pb.RefPropertiesRequest{{ + ReferenceProperty: "ref", + TargetCollection: refClass1, + Metadata: &pb.MetadataRequest{Vector: true, Certainty: false}, + Properties: &pb.PropertiesRequest{NonRefProperties: []string{"something"}}, + }}, + }}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, Properties: search.SelectProperties{ + {Name: "name", IsPrimitive: true}, + {Name: "number", IsPrimitive: true}, + {Name: "floats", IsPrimitive: true}, + {Name: "uuid", IsPrimitive: true}, + {Name: "ref", IsPrimitive: false, Refs: []search.SelectClass{ + { + ClassName: refClass1, + RefProperties: search.SelectProperties{{Name: "something", IsPrimitive: true}}, + AdditionalProperties: additional.Properties{Vector: true}, + }, + }}, + }, + }, + error: false, + }, + { + name: "Properties return all nonref values with ref and all nonref props using new default logic", + req: &pb.SearchRequest{Uses_123Api: true, Collection: classname, Properties: &pb.PropertiesRequest{ + ReturnAllNonrefProperties: true, + RefProperties: []*pb.RefPropertiesRequest{{ + ReferenceProperty: "ref", + TargetCollection: refClass1, + Metadata: &pb.MetadataRequest{Vector: true, Certainty: false}, + Properties: &pb.PropertiesRequest{ReturnAllNonrefProperties: true}, + }}, + }}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, Properties: search.SelectProperties{ + {Name: "name", IsPrimitive: true}, + {Name: "number", IsPrimitive: true}, + {Name: "floats", IsPrimitive: true}, + {Name: "uuid", IsPrimitive: true}, + {Name: "ref", IsPrimitive: false, Refs: []search.SelectClass{ + { + ClassName: refClass1, + RefProperties: search.SelectProperties{ + {Name: "something", IsPrimitive: true}, + {Name: "somethings", IsPrimitive: true}, + }, + AdditionalProperties: additional.Properties{Vector: true}, + }, + }}, + }, + }, + error: false, + }, + { + name: "Properties return values only ref", + req: 
&pb.SearchRequest{Collection: classname, Properties: &pb.PropertiesRequest{ + RefProperties: []*pb.RefPropertiesRequest{ + { + ReferenceProperty: "ref", + TargetCollection: refClass1, + Metadata: &pb.MetadataRequest{Vector: true, Certainty: false}, + Properties: &pb.PropertiesRequest{NonRefProperties: []string{"something"}}, + }, + }, + }}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, Properties: search.SelectProperties{{Name: "ref", IsPrimitive: false, Refs: []search.SelectClass{{ClassName: refClass1, RefProperties: search.SelectProperties{{Name: "something", IsPrimitive: true}}, AdditionalProperties: additional.Properties{ + Vector: true, + }}}}}, + }, + error: false, + }, + { + name: "Properties return values only ref using new default logic", + req: &pb.SearchRequest{Uses_123Api: true, Collection: classname, Properties: &pb.PropertiesRequest{ + RefProperties: []*pb.RefPropertiesRequest{ + { + ReferenceProperty: "ref", + TargetCollection: refClass1, + Metadata: &pb.MetadataRequest{Vector: true, Certainty: false}, + Properties: &pb.PropertiesRequest{NonRefProperties: []string{"something"}}, + }, + }, + }}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, Properties: search.SelectProperties{{Name: "ref", IsPrimitive: false, Refs: []search.SelectClass{{ClassName: refClass1, RefProperties: search.SelectProperties{{Name: "something", IsPrimitive: true}}, AdditionalProperties: additional.Properties{ + Vector: true, + }}}}}, + }, + error: false, + }, + { + name: "Properties return values non-ref", + req: &pb.SearchRequest{Collection: classname, Properties: &pb.PropertiesRequest{NonRefProperties: []string{"name", "CapitalizedName"}}}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, Properties: search.SelectProperties{{Name: "name", IsPrimitive: true}, {Name: "capitalizedName", IsPrimitive: true}}, + }, + error: false, + }, + { + name: "Properties return values non-ref with new 
default logic", + req: &pb.SearchRequest{Uses_123Api: true, Collection: classname, Properties: &pb.PropertiesRequest{NonRefProperties: []string{"name", "CapitalizedName"}}}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, Properties: search.SelectProperties{{Name: "name", IsPrimitive: true}, {Name: "capitalizedName", IsPrimitive: true}}, + }, + error: false, + }, + { + name: "ref returns no values given", + req: &pb.SearchRequest{Collection: classname, Properties: &pb.PropertiesRequest{RefProperties: []*pb.RefPropertiesRequest{{ReferenceProperty: "ref", TargetCollection: refClass1}}}}, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, Properties: search.SelectProperties{{Name: "ref", IsPrimitive: false, Refs: []search.SelectClass{{ClassName: refClass1, RefProperties: search.SelectProperties{{Name: "something", IsPrimitive: true}, {Name: "somethings", IsPrimitive: true}}}}}}, + }, + error: false, + }, + { + name: "Properties return values multi-ref (no linked class with error)", + req: &pb.SearchRequest{Collection: classname, Properties: &pb.PropertiesRequest{RefProperties: []*pb.RefPropertiesRequest{{ReferenceProperty: "multiRef", Metadata: &pb.MetadataRequest{Vector: true, Certainty: false}, Properties: &pb.PropertiesRequest{NonRefProperties: []string{"something"}}}}}}, + out: dto.GetParams{}, + error: true, + }, + { + name: "Properties return values multi-ref", + req: &pb.SearchRequest{Collection: classname, Properties: &pb.PropertiesRequest{RefProperties: []*pb.RefPropertiesRequest{ + {ReferenceProperty: "multiRef", TargetCollection: refClass1, Metadata: &pb.MetadataRequest{Vector: true, Certainty: false}, Properties: &pb.PropertiesRequest{NonRefProperties: []string{"something"}}}, + {ReferenceProperty: "MultiRef", TargetCollection: refClass2, Metadata: &pb.MetadataRequest{Uuid: true}, Properties: &pb.PropertiesRequest{NonRefProperties: []string{"Else"}}}, + }}}, + out: dto.GetParams{ + ClassName: 
classname, Pagination: defaultPagination, Properties: search.SelectProperties{ + {Name: "multiRef", IsPrimitive: false, Refs: []search.SelectClass{{ClassName: refClass1, RefProperties: search.SelectProperties{{Name: "something", IsPrimitive: true}}, AdditionalProperties: additional.Properties{Vector: true}}}}, + {Name: "multiRef", IsPrimitive: false, Refs: []search.SelectClass{{ClassName: refClass2, RefProperties: search.SelectProperties{{Name: "else", IsPrimitive: true}}, AdditionalProperties: additional.Properties{ID: true}}}}, + }, + }, + error: false, + }, + { + name: "hybrid ranked", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true, Certainty: false}, + HybridSearch: &pb.Hybrid{Query: "query", FusionType: pb.Hybrid_FUSION_TYPE_RANKED, Alpha: 0.75, Properties: []string{"name", "CapitalizedName"}}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, HybridSearch: &searchparams.HybridSearch{Query: "query", FusionAlgorithm: common_filters.HybridRankedFusion, Alpha: 0.75, Properties: []string{"name", "capitalizedName"}}, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + }, + error: false, + }, + { + name: "hybrid ranked groupby", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true, Certainty: false}, + GroupBy: &pb.GroupBy{Path: []string{"name"}, NumberOfGroups: 2, ObjectsPerGroup: 3}, + HybridSearch: &pb.Hybrid{Query: "query", FusionType: pb.Hybrid_FUSION_TYPE_RANKED, Alpha: 0.75, Properties: []string{"name", "CapitalizedName"}}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, HybridSearch: &searchparams.HybridSearch{Query: "query", FusionAlgorithm: common_filters.HybridRankedFusion, Alpha: 0.75, Properties: []string{"name", "capitalizedName"}}, + GroupBy: &searchparams.GroupBy{Property: "name", Groups: 2, ObjectsPerGroup: 3, Properties: 
defaultTestClassProps}, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false, Group: true}, + }, + error: false, + }, + { + name: "hybrid targetvectors", + req: &pb.SearchRequest{ + Collection: multiVecClass, Metadata: &pb.MetadataRequest{Vector: true, Certainty: false}, + HybridSearch: &pb.Hybrid{TargetVectors: []string{"first"}, Query: "query", FusionType: pb.Hybrid_FUSION_TYPE_RANKED, Alpha: 0.75, Properties: []string{"first"}}, + }, + out: dto.GetParams{ + ClassName: multiVecClass, Pagination: defaultPagination, HybridSearch: &searchparams.HybridSearch{TargetVectors: []string{"first"}, Query: "query", FusionAlgorithm: common_filters.HybridRankedFusion, Alpha: 0.75, Properties: []string{"first"}}, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{Vectors: []string{"custom", "first", "second"}, NoProps: false, Vector: true}, + }, + error: false, + }, + { + name: "hybrid relative", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true, Certainty: false}, + HybridSearch: &pb.Hybrid{Query: "query", FusionType: pb.Hybrid_FUSION_TYPE_RELATIVE_SCORE}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, HybridSearch: &searchparams.HybridSearch{Query: "query", FusionAlgorithm: common_filters.HybridRelativeScoreFusion}, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + }, + error: false, + }, + { + name: "hybrid default", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true, Certainty: false}, + HybridSearch: &pb.Hybrid{Query: "query"}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, HybridSearch: &searchparams.HybridSearch{Query: "query", FusionAlgorithm: common_filters.HybridRelativeScoreFusion}, + Properties: defaultTestClassProps, + AdditionalProperties: 
additional.Properties{Vector: true, NoProps: false}, + }, + error: false, + }, + + { + name: "bm25", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Bm25Search: &pb.BM25{Query: "query", Properties: []string{"name", "CapitalizedName"}}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + KeywordRanking: &searchparams.KeywordRanking{Query: "query", Properties: []string{"name", "capitalizedName"}, Type: "bm25"}, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + }, + error: false, + }, + { + name: "bm25 groupby", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + GroupBy: &pb.GroupBy{Path: []string{"name"}, NumberOfGroups: 2, ObjectsPerGroup: 3}, + Bm25Search: &pb.BM25{Query: "query", Properties: []string{"name", "CapitalizedName"}}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + KeywordRanking: &searchparams.KeywordRanking{Query: "query", Properties: []string{"name", "capitalizedName"}, Type: "bm25"}, + GroupBy: &searchparams.GroupBy{Property: "name", Groups: 2, ObjectsPerGroup: 3, Properties: defaultTestClassProps}, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false, Group: true}, + }, + error: false, + }, + { + name: "filter simple", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{ + Operator: pb.Filters_OPERATOR_EQUAL, + TestValue: &pb.Filters_ValueText{ValueText: "test"}, + On: []string{"name"}, + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{Class: schema.ClassName(classname), 
Property: "name"}, + Operator: filters.OperatorEqual, + Value: &filters.Value{Value: "test", Type: schema.DataTypeText}, + }, + }, + }, + error: false, + }, + { + name: "filter simple (new type)", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{Operator: pb.Filters_OPERATOR_EQUAL, TestValue: &pb.Filters_ValueText{ValueText: "test"}, Target: &pb.FilterTarget{Target: &pb.FilterTarget_Property{Property: "name"}}}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{Class: schema.ClassName(classname), Property: "name"}, + Operator: filters.OperatorEqual, + Value: &filters.Value{Value: "test", Type: schema.DataTypeText}, + }, + }, + }, + error: false, + }, + { + name: "filter uuid", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{Operator: pb.Filters_OPERATOR_EQUAL, TestValue: &pb.Filters_ValueText{ValueText: UUID3}, On: []string{"uuid"}}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{Class: schema.ClassName(classname), Property: "uuid"}, + Operator: filters.OperatorEqual, + Value: &filters.Value{Value: UUID3, Type: schema.DataTypeText}, + }, + }, + }, + error: false, + }, + { + name: "filter or", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{Operator: pb.Filters_OPERATOR_OR, Filters: []*pb.Filters{ + {Operator: pb.Filters_OPERATOR_EQUAL, TestValue: &pb.Filters_ValueText{ValueText: "test"}, On: []string{"name"}}, + 
{Operator: pb.Filters_OPERATOR_NOT_EQUAL, TestValue: &pb.Filters_ValueText{ValueText: "other"}, On: []string{"name"}}, + }}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Value: &filters.Value{Value: "test", Type: schema.DataTypeText}, + On: &filters.Path{Class: schema.ClassName(classname), Property: "name"}, + Operator: filters.OperatorEqual, + }, + { + Value: &filters.Value{Value: "other", Type: schema.DataTypeText}, + On: &filters.Path{Class: schema.ClassName(classname), Property: "name"}, + Operator: filters.OperatorNotEqual, + }, + }, + }, + }, + }, + error: false, + }, + { + name: "filter and", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{Operator: pb.Filters_OPERATOR_AND, Filters: []*pb.Filters{ + {Operator: pb.Filters_OPERATOR_EQUAL, TestValue: &pb.Filters_ValueText{ValueText: "test"}, On: []string{"name"}}, + {Operator: pb.Filters_OPERATOR_NOT_EQUAL, TestValue: &pb.Filters_ValueText{ValueText: "other"}, On: []string{"name"}}, + }}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Value: &filters.Value{Value: "test", Type: schema.DataTypeText}, + On: &filters.Path{Class: schema.ClassName(classname), Property: "name"}, + Operator: filters.OperatorEqual, + }, + { + Value: &filters.Value{Value: "other", Type: schema.DataTypeText}, + On: &filters.Path{Class: schema.ClassName(classname), Property: "name"}, + Operator: filters.OperatorNotEqual, + }, + 
}, + }, + }, + }, + error: false, + }, + { + name: "filter not", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{Operator: pb.Filters_OPERATOR_NOT, Filters: []*pb.Filters{ + {Operator: pb.Filters_OPERATOR_EQUAL, TestValue: &pb.Filters_ValueText{ValueText: "test"}, On: []string{"name"}}, + }}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorNot, + Operands: []filters.Clause{ + { + Value: &filters.Value{Value: "test", Type: schema.DataTypeText}, + On: &filters.Path{Class: schema.ClassName(classname), Property: "name"}, + Operator: filters.OperatorEqual, + }, + }, + }, + }, + }, + error: false, + }, + { + name: "filter reference", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{Operator: pb.Filters_OPERATOR_LESS_THAN, TestValue: &pb.Filters_ValueText{ValueText: "test"}, On: []string{"ref", refClass1, "something"}}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: "ref", + Child: &filters.Path{Class: schema.ClassName(refClass1), Property: "something"}, + }, + Operator: filters.OperatorLessThan, + Value: &filters.Value{Value: "test", Type: schema.DataTypeText}, + }, + }, + }, + error: false, + }, + { + name: "filter reference (new filters)", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{Operator: pb.Filters_OPERATOR_LESS_THAN, TestValue: 
&pb.Filters_ValueText{ValueText: "test"}, Target: &pb.FilterTarget{Target: &pb.FilterTarget_SingleTarget{SingleTarget: &pb.FilterReferenceSingleTarget{On: "ref", Target: &pb.FilterTarget{Target: &pb.FilterTarget_Property{Property: "something"}}}}}}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: "ref", + Child: &filters.Path{Class: schema.ClassName(refClass1), Property: "something"}, + }, + Operator: filters.OperatorLessThan, + Value: &filters.Value{Value: "test", Type: schema.DataTypeText}, + }, + }, + }, + error: false, + }, + { + name: "nested ref", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{Operator: pb.Filters_OPERATOR_LESS_THAN, TestValue: &pb.Filters_ValueText{ValueText: "test"}, On: []string{"ref", refClass1, "ref2", refClass2, "ref3", refClass2, "else"}}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: "ref", + Child: &filters.Path{ + Class: schema.ClassName(refClass1), + Property: "ref2", + Child: &filters.Path{ + Class: schema.ClassName(refClass2), + Property: "ref3", + Child: &filters.Path{ + Class: schema.ClassName(refClass2), + Property: "else", + }, + }, + }, + }, + Operator: filters.OperatorLessThan, + Value: &filters.Value{Value: "test", Type: schema.DataTypeText}, + }, + }, + }, + error: false, + }, + { + name: "filter reference on array prop with contains", + req: &pb.SearchRequest{ + Collection: classname, Metadata: 
&pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{Operator: pb.Filters_OPERATOR_CONTAINS_ANY, TestValue: &pb.Filters_ValueTextArray{ValueTextArray: &pb.TextArray{Values: []string{"text"}}}, On: []string{"ref", refClass1, "somethings"}}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: "ref", + Child: &filters.Path{ + Class: schema.ClassName(refClass1), + Property: "somethings", + }, + }, + Operator: filters.ContainsAny, + Value: &filters.Value{Value: []string{"text"}, Type: schema.DataTypeText}, + }, + }, + }, + error: false, + }, + { + name: "filter reference", + req: &pb.SearchRequest{ + Collection: classname, + Filters: &pb.Filters{ + Operator: pb.Filters_OPERATOR_LESS_THAN, + TestValue: &pb.Filters_ValueText{ValueText: "test"}, + On: []string{"ref", refClass1}, // two values do not work, property is missing + }, + }, + out: dto.GetParams{}, + error: true, + }, + { + name: "length filter ref", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{ + Operator: pb.Filters_OPERATOR_LESS_THAN, + TestValue: &pb.Filters_ValueInt{ValueInt: 3}, + On: []string{"ref", refClass1, "len(something)"}, + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: "ref", + Child: &filters.Path{ + Class: schema.ClassName(refClass1), + Property: "len(something)", + }, + }, + Operator: filters.OperatorLessThan, + Value: &filters.Value{Value: 3, Type: schema.DataTypeInt}, + }, 
+ }, + }, + error: false, + }, + { + name: "count filter single target ref old", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{ + Operator: pb.Filters_OPERATOR_LESS_THAN, + TestValue: &pb.Filters_ValueInt{ValueInt: 3}, + On: []string{"ref"}, + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: "ref", + }, + Operator: filters.OperatorLessThan, + Value: &filters.Value{Value: 3, Type: schema.DataTypeInt}, + }, + }, + }, + error: false, + }, + { + name: "count filter single target ref new", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{ + Operator: pb.Filters_OPERATOR_LESS_THAN, + TestValue: &pb.Filters_ValueInt{ValueInt: 3}, + Target: &pb.FilterTarget{Target: &pb.FilterTarget_Count{Count: &pb.FilterReferenceCount{On: "ref"}}}, + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: "ref", + }, + Operator: filters.OperatorLessThan, + Value: &filters.Value{Value: 3, Type: schema.DataTypeInt}, + }, + }, + }, + error: false, + }, + { + name: "count filter multi target ref old", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{ + Operator: pb.Filters_OPERATOR_LESS_THAN, + TestValue: &pb.Filters_ValueInt{ValueInt: 3}, + On: []string{"multiRef"}, + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: 
defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: "multiRef", + }, + Operator: filters.OperatorLessThan, + Value: &filters.Value{Value: 3, Type: schema.DataTypeInt}, + }, + }, + }, + error: false, + }, + { + name: "count filter multi target ref new", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{ + Operator: pb.Filters_OPERATOR_LESS_THAN, + TestValue: &pb.Filters_ValueInt{ValueInt: 3}, + Target: &pb.FilterTarget{Target: &pb.FilterTarget_Count{Count: &pb.FilterReferenceCount{ + On: "multiRef", + }}}, + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: "multiRef", + }, + Operator: filters.OperatorLessThan, + Value: &filters.Value{Value: 3, Type: schema.DataTypeInt}, + }, + }, + }, + error: false, + }, + { + name: "length filter", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{ + Operator: pb.Filters_OPERATOR_LESS_THAN, + TestValue: &pb.Filters_ValueInt{ValueInt: 3}, + On: []string{"len(name)"}, + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: "len(name)", + }, + Operator: filters.OperatorLessThan, + Value: &filters.Value{Value: 3, Type: schema.DataTypeInt}, + }, + }, + 
}, + error: false, + }, + { + name: "contains all filter with int value on float prop", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{ + Operator: pb.Filters_OPERATOR_CONTAINS_ALL, + TestValue: &pb.Filters_ValueIntArray{ValueIntArray: &pb.IntArray{Values: []int64{3}}}, + On: []string{"floats"}, + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: "floats", + }, + Operator: filters.ContainsAll, + Value: &filters.Value{Value: []float64{3}, Type: schema.DataTypeNumber}, + }, + }, + }, + error: false, + }, + { + name: "contains none filter with text value", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{ + Operator: pb.Filters_OPERATOR_CONTAINS_NONE, + TestValue: &pb.Filters_ValueTextArray{ValueTextArray: &pb.TextArray{Values: []string{"name1", "name2"}}}, + On: []string{"name"}, + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: "name", + }, + Operator: filters.ContainsNone, + Value: &filters.Value{Value: []string{"name1", "name2"}, Type: schema.DataTypeText}, + }, + }, + }, + error: false, + }, + + { + name: "filter reference on array prop with contains", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Filters: &pb.Filters{Operator: pb.Filters_OPERATOR_CONTAINS_ANY, TestValue: &pb.Filters_ValueTextArray{ValueTextArray: 
&pb.TextArray{Values: []string{"text"}}}, On: []string{"ref", refClass1, "somethings"}}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: "ref", + Child: &filters.Path{ + Class: schema.ClassName(refClass1), + Property: "somethings", + }, + }, + Operator: filters.ContainsAny, + Value: &filters.Value{Value: []string{"text"}, Type: schema.DataTypeText}, + }, + }, + }, + error: false, + }, + + { + name: "metadata filter id", + req: &pb.SearchRequest{ + Collection: classname, + Filters: &pb.Filters{ + Operator: pb.Filters_OPERATOR_EQUAL, + TestValue: &pb.Filters_ValueText{ValueText: UUID4}, + On: []string{filters.InternalPropID}, + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: filters.InternalPropID, + }, + Operator: filters.OperatorEqual, + Value: &filters.Value{Value: UUID4, Type: schema.DataTypeText}, + }, + }, + }, + error: false, + }, + { + name: "metadata filter time", + req: &pb.SearchRequest{ + Collection: classname, + Filters: &pb.Filters{ + Operator: pb.Filters_OPERATOR_EQUAL, + TestValue: &pb.Filters_ValueText{ValueText: "2022-03-18T20:26:34.586-05:00"}, + On: []string{filters.InternalPropCreationTimeUnix}, + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{NoProps: false}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName(classname), + Property: 
filters.InternalPropCreationTimeUnix, + }, + Operator: filters.OperatorEqual, + Value: &filters.Value{Value: "2022-03-18T20:26:34.586-05:00", Type: schema.DataTypeDate}, + }, + }, + }, + error: false, + }, + { + name: "near text search", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + NearText: &pb.NearTextSearch{ + Query: []string{"first and", "second", "query"}, + MoveTo: &pb.NearTextSearch_Move{Force: 0.5, Concepts: []string{"first", "and second"}, Uuids: []string{UUID3, UUID4}}, + MoveAway: &pb.NearTextSearch_Move{Force: 0.3, Concepts: []string{"second to last", "really last"}, Uuids: []string{UUID4}}, + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + ModuleParams: map[string]interface{}{ + "nearText": &nearText2.NearTextParams{ + Values: []string{"first and", "second", "query"}, + MoveTo: nearText2.ExploreMove{ + Force: 0.5, + Values: []string{"first", "and second"}, + Objects: []nearText2.ObjectMove{ + {ID: UUID3, Beacon: crossref.NewLocalhost(classname, UUID3).String()}, + {ID: UUID4, Beacon: crossref.NewLocalhost(classname, UUID4).String()}, + }, + }, + MoveAwayFrom: nearText2.ExploreMove{ + Force: 0.3, + Values: []string{"second to last", "really last"}, + Objects: []nearText2.ObjectMove{ + {ID: UUID4, Beacon: crossref.NewLocalhost(classname, UUID4).String()}, + }, + }, + Limit: 10, // default + }, + }, + }, + error: false, + }, + { + name: "near audio search", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + NearAudio: &pb.NearAudioSearch{ + Audio: "audio file", + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + ModuleParams: map[string]interface{}{ + 
"nearAudio": &nearAudio.NearAudioParams{ + Audio: "audio file", + }, + }, + }, + error: false, + }, + { + name: "near video search", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + NearVideo: &pb.NearVideoSearch{ + Video: "video file", + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + ModuleParams: map[string]interface{}{ + "nearVideo": &nearVideo.NearVideoParams{ + Video: "video file", + }, + }, + }, + error: false, + }, + { + name: "near image search", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + NearImage: &pb.NearImageSearch{ + Image: "image file", + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + ModuleParams: map[string]interface{}{ + "nearImage": &nearImage.NearImageParams{ + Image: "image file", + }, + }, + }, + error: false, + }, + { + name: "near depth search", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + NearDepth: &pb.NearDepthSearch{ + Depth: "depth file", + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + ModuleParams: map[string]interface{}{ + "nearDepth": &nearDepth.NearDepthParams{ + Depth: "depth file", + }, + }, + }, + error: false, + }, + { + name: "near thermal search", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + NearThermal: &pb.NearThermalSearch{ + Thermal: "thermal file", + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: 
defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + ModuleParams: map[string]interface{}{ + "nearThermal": &nearThermal.NearThermalParams{ + Thermal: "thermal file", + }, + }, + }, + error: false, + }, + { + name: "near IMU search", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + NearImu: &pb.NearIMUSearch{ + Imu: "IMU file", + }, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + ModuleParams: map[string]interface{}{ + "nearIMU": &nearImu.NearIMUParams{ + IMU: "IMU file", + }, + }, + }, + error: false, + }, + { + name: "Consistency", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + ConsistencyLevel: &quorum, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{Vector: true, NoProps: false}, + ReplicationProperties: &additional.ReplicationProperties{ConsistencyLevel: "QUORUM"}, + }, + error: false, + }, + { + name: "Generative", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + Generative: &pb.GenerativeSearch{SingleResponsePrompt: someString1, GroupedResponseTask: someString2, GroupedProperties: []string{"one", "two"}}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: append(defaultTestClassProps, []search.SelectProperty{{Name: "one", IsPrimitive: true}, {Name: "two", IsPrimitive: true}}...), + AdditionalProperties: additional.Properties{ + Vector: true, + NoProps: false, + ModuleParams: map[string]interface{}{ + "generate": &generate.Params{Prompt: &someString1, Task: &someString2, Properties: []string{"one", "two"}, PropertiesToExtract: []string{"one", "two"}}, + }, + 
}, + }, + error: false, + }, + { + name: "Generative without properties", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, Properties: &pb.PropertiesRequest{NonRefProperties: []string{}}, + Generative: &pb.GenerativeSearch{GroupedResponseTask: someString2}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{ + Vector: true, + NoProps: false, + ModuleParams: map[string]interface{}{ + "generate": &generate.Params{Task: &someString2, PropertiesToExtract: []string{"name", "number", "floats", "uuid"}}, + }, + }, + }, + error: false, + }, + { + name: "Sort", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + SortBy: []*pb.SortBy{{Ascending: false, Path: []string{"name"}}}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: defaultTestClassProps, + AdditionalProperties: additional.Properties{ + Vector: true, + NoProps: false, + }, + Sort: []filters.Sort{{Order: "desc", Path: []string{"name"}}}, + }, + error: false, + }, + { + name: "Sort and vector search", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + SortBy: []*pb.SortBy{{Ascending: false, Path: []string{"name"}}}, + NearVector: &pb.NearVector{Vector: []float32{1, 2, 3}}, + }, + out: dto.GetParams{}, + error: true, + }, + { + name: "group by normal prop", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + GroupBy: &pb.GroupBy{Path: []string{"name"}, NumberOfGroups: 2, ObjectsPerGroup: 3}, + NearVector: &pb.NearVector{Vector: []float32{1, 2, 3}}, + Properties: &pb.PropertiesRequest{}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: search.SelectProperties{{Name: "name", IsPrimitive: true, IsObject: false}}, + 
AdditionalProperties: additional.Properties{ + Vector: true, + NoProps: false, + Group: true, + }, + NearVector: &searchparams.NearVector{Vectors: []models.Vector{[]float32{1, 2, 3}}}, + GroupBy: &searchparams.GroupBy{Groups: 2, ObjectsPerGroup: 3, Property: "name", Properties: search.SelectProperties{{Name: "name", IsPrimitive: true, IsObject: false}}}, + }, + error: false, + }, + { + name: "group by ref prop", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + GroupBy: &pb.GroupBy{Path: []string{"ref"}, NumberOfGroups: 2, ObjectsPerGroup: 3}, + NearVector: &pb.NearVector{Vector: []float32{1, 2, 3}}, + Properties: &pb.PropertiesRequest{}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: search.SelectProperties{{Name: "ref", IsPrimitive: false, IsObject: false}}, + AdditionalProperties: additional.Properties{ + Vector: true, + NoProps: false, + Group: true, + }, + NearVector: &searchparams.NearVector{Vectors: []models.Vector{[]float32{1, 2, 3}}}, + GroupBy: &searchparams.GroupBy{Groups: 2, ObjectsPerGroup: 3, Property: "ref", Properties: search.SelectProperties{{Name: "ref", IsPrimitive: false, IsObject: false}}}, + }, + error: false, + }, + { + name: "group by ref prop with fp32 vectors", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + GroupBy: &pb.GroupBy{Path: []string{"ref"}, NumberOfGroups: 2, ObjectsPerGroup: 3}, + NearVector: &pb.NearVector{ + Vectors: []*pb.Vectors{ + {VectorBytes: byteops.Fp32SliceToBytes([]float32{1, 2, 3})}, + }, + }, + Properties: &pb.PropertiesRequest{}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: search.SelectProperties{{Name: "ref", IsPrimitive: false, IsObject: false}}, + AdditionalProperties: additional.Properties{ + Vector: true, + NoProps: false, + Group: true, + }, + NearVector: &searchparams.NearVector{Vectors: 
[]models.Vector{[]float32{1, 2, 3}}}, + GroupBy: &searchparams.GroupBy{Groups: 2, ObjectsPerGroup: 3, Property: "ref", Properties: search.SelectProperties{{Name: "ref", IsPrimitive: false, IsObject: false}}}, + }, + error: false, + }, + { + name: "should error group by ref prop with unsupported colbert fp32 vectors", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + GroupBy: &pb.GroupBy{Path: []string{"ref"}, NumberOfGroups: 2, ObjectsPerGroup: 3}, + NearVector: &pb.NearVector{ + Vectors: []*pb.Vectors{ + {VectorBytes: byteops.Fp32SliceOfSlicesToBytes([][]float32{{1, 2, 3}, {11, 22, 33}, {111, 222, 333}}), Index: 0, Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32}, + }, + }, + Properties: &pb.PropertiesRequest{}, + }, + error: true, + }, + { + name: "group by with too long path", + req: &pb.SearchRequest{ + Collection: classname, Metadata: &pb.MetadataRequest{Vector: true}, + GroupBy: &pb.GroupBy{Path: []string{"ref", "Class"}, NumberOfGroups: 2, ObjectsPerGroup: 3}, + NearVector: &pb.NearVector{Vector: []float32{1, 2, 3}}, + }, + out: dto.GetParams{}, + error: true, + }, + { + name: "Object properties return", + req: &pb.SearchRequest{ + Collection: objClass, + Properties: &pb.PropertiesRequest{ + ObjectProperties: []*pb.ObjectPropertiesRequest{ + { + PropName: "something", + PrimitiveProperties: []string{"name"}, + ObjectProperties: []*pb.ObjectPropertiesRequest{ + { + PropName: "else", + PrimitiveProperties: []string{"name"}, + }, + { + PropName: "elses", + PrimitiveProperties: []string{"name"}, + }, + }, + }, + }, + }, + }, + out: dto.GetParams{ + ClassName: objClass, Pagination: defaultPagination, + Properties: search.SelectProperties{ + { + Name: "something", IsPrimitive: false, IsObject: true, + Props: search.SelectProperties{ + {Name: "name", IsPrimitive: true}, + { + Name: "else", IsPrimitive: false, IsObject: true, + Props: search.SelectProperties{{ + Name: "name", IsPrimitive: true, + }}, + }, + { + Name: "elses", 
IsPrimitive: false, IsObject: true, + Props: search.SelectProperties{{ + Name: "name", IsPrimitive: true, + }}, + }, + }, + }, + }, + }, + }, + { + name: "Empty return values given nested", + req: &pb.SearchRequest{Collection: objClass}, + out: dto.GetParams{ + ClassName: objClass, Pagination: defaultPagination, + Properties: search.SelectProperties{ + { + Name: "something", IsPrimitive: false, IsObject: true, + Props: search.SelectProperties{ + {Name: "name", IsPrimitive: true}, + { + Name: "else", IsPrimitive: false, IsObject: true, + Props: search.SelectProperties{{ + Name: "name", IsPrimitive: true, + }}, + }, + { + Name: "elses", IsPrimitive: false, IsObject: true, + Props: search.SelectProperties{{ + Name: "name", IsPrimitive: true, + }}, + }, + }, + }, + }, + }, + error: false, + }, + { + name: "No return values given nested with new default logic", + req: &pb.SearchRequest{Uses_123Api: true, Collection: objClass, Properties: &pb.PropertiesRequest{ReturnAllNonrefProperties: true}}, + out: dto.GetParams{ + ClassName: objClass, Pagination: defaultPagination, + Properties: search.SelectProperties{ + { + Name: "something", IsPrimitive: false, IsObject: true, + Props: search.SelectProperties{ + {Name: "name", IsPrimitive: true}, + { + Name: "else", IsPrimitive: false, IsObject: true, + Props: search.SelectProperties{{ + Name: "name", IsPrimitive: true, + }}, + }, + { + Name: "elses", IsPrimitive: false, IsObject: true, + Props: search.SelectProperties{{ + Name: "name", IsPrimitive: true, + }}, + }, + }, + }, + }, + }, + error: false, + }, + { + name: "Rerank without query", + req: &pb.SearchRequest{ + Collection: classname, + Rerank: &pb.Rerank{Property: someString1}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: append(defaultTestClassProps, search.SelectProperty{Name: someString1, IsPrimitive: true}), + AdditionalProperties: additional.Properties{ + NoProps: false, + ModuleParams: 
map[string]interface{}{"rerank": &rank.Params{Property: &someString1}}, + }, + }, + error: false, + }, + { + name: "Rerank with query", + req: &pb.SearchRequest{ + Collection: classname, + Rerank: &pb.Rerank{Property: someString1, Query: &someString2}, + }, + out: dto.GetParams{ + ClassName: classname, Pagination: defaultPagination, + Properties: append(defaultTestClassProps, search.SelectProperty{Name: someString1, IsPrimitive: true}), + AdditionalProperties: additional.Properties{ + NoProps: false, + ModuleParams: map[string]interface{}{"rerank": &rank.Params{Property: &someString1, Query: &someString2}}, + }, + }, + error: false, + }, + + { + name: "Target vector join min", + req: &pb.SearchRequest{ + Collection: multiVecClass, + NearVector: &pb.NearVector{ + VectorBytes: byteVector([]float32{1, 2, 3}), + Targets: &pb.Targets{TargetVectors: []string{"first", "second"}, Combination: pb.CombinationMethod_COMBINATION_METHOD_TYPE_MIN}, + }, + }, + out: dto.GetParams{ + ClassName: multiVecClass, Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: false, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum, Weights: []float32{0, 0}}, + NearVector: &searchparams.NearVector{Vectors: []models.Vector{[]float32{1, 2, 3}, []float32{1, 2, 3}}, TargetVectors: []string{"first", "second"}}, + }, + error: false, + }, + { + name: "Target vector join avg", + req: &pb.SearchRequest{ + Collection: multiVecClass, + NearVector: &pb.NearVector{ + VectorBytes: byteVector([]float32{1, 2, 3}), + Targets: &pb.Targets{TargetVectors: []string{"first", "second"}, Combination: pb.CombinationMethod_COMBINATION_METHOD_TYPE_AVERAGE}, + }, + }, + out: dto.GetParams{ + ClassName: multiVecClass, Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: false, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Average, Weights: 
[]float32{0.5, 0.5}}, + NearVector: &searchparams.NearVector{Vectors: []models.Vector{[]float32{1, 2, 3}, []float32{1, 2, 3}}, TargetVectors: []string{"first", "second"}}, + }, + error: false, + }, + { + name: "Target vector join manual weights", + req: &pb.SearchRequest{ + Collection: multiVecClass, + NearVector: &pb.NearVector{ + VectorBytes: byteVector([]float32{1, 2, 3}), + Targets: &pb.Targets{TargetVectors: []string{"first", "second"}, Combination: pb.CombinationMethod_COMBINATION_METHOD_TYPE_MANUAL, WeightsForTargets: []*pb.WeightsForTarget{{Target: "first", Weight: 0.1}, {Target: "second", Weight: 0.8}}}, + }, + }, + out: dto.GetParams{ + ClassName: multiVecClass, Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: false, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.ManualWeights, Weights: []float32{0.1, 0.8}}, + NearVector: &searchparams.NearVector{Vectors: []models.Vector{[]float32{1, 2, 3}, []float32{1, 2, 3}}, TargetVectors: []string{"first", "second"}}, + }, + error: false, + }, + { + name: "Target vector join manual weights missing", + req: &pb.SearchRequest{ + Collection: classname, + NearVector: &pb.NearVector{ + VectorBytes: byteVector([]float32{1, 2, 3}), + Targets: &pb.Targets{TargetVectors: []string{"first", "second"}, Combination: pb.CombinationMethod_COMBINATION_METHOD_TYPE_MANUAL, WeightsForTargets: []*pb.WeightsForTarget{{Target: "first", Weight: 0.1}}}, + }, + }, + out: dto.GetParams{}, + error: true, + }, + { + name: "Target vector join manual weights non-existing", + req: &pb.SearchRequest{ + Collection: classname, + NearVector: &pb.NearVector{ + VectorBytes: byteVector([]float32{1, 2, 3}), + Targets: &pb.Targets{TargetVectors: []string{"first", "second"}, Combination: pb.CombinationMethod_COMBINATION_METHOD_TYPE_MANUAL, WeightsForTargets: []*pb.WeightsForTarget{{Target: "nonExistent", Weight: 0.1}}}, + }, + }, + out: dto.GetParams{}, + error: 
true, + }, + { + name: "Target vector does not exist", + req: &pb.SearchRequest{ + Collection: classname, + NearVector: &pb.NearVector{ + VectorBytes: byteVector([]float32{1, 2, 3}), + Targets: &pb.Targets{TargetVectors: []string{"first", "IdoNotExist"}, Combination: pb.CombinationMethod_COMBINATION_METHOD_TYPE_SUM}, + }, + }, + out: dto.GetParams{}, + error: true, + }, + { + name: "Near vector with targets per vector", + req: &pb.SearchRequest{ + Collection: multiVecClass, + NearVector: &pb.NearVector{ + Targets: &pb.Targets{TargetVectors: []string{"first", "second"}, Combination: pb.CombinationMethod_COMBINATION_METHOD_TYPE_SUM}, + VectorPerTarget: map[string][]byte{"first": byteVector([]float32{1, 2, 3}), "second": byteVector([]float32{1, 2, 3, 4})}, + }, + }, + out: dto.GetParams{ + ClassName: multiVecClass, Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: false, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Sum, Weights: []float32{1, 1}}, + NearVector: &searchparams.NearVector{Vectors: []models.Vector{[]float32{1, 2, 3}, []float32{1, 2, 3, 4}}, TargetVectors: []string{"first", "second"}}, + }, + error: false, + }, + { + name: "Near vector with vector and targets per vector", + req: &pb.SearchRequest{ + Collection: multiVecClass, + NearVector: &pb.NearVector{ + VectorBytes: byteVector([]float32{1, 2, 3}), + Targets: &pb.Targets{TargetVectors: []string{"first", "second"}, Combination: pb.CombinationMethod_COMBINATION_METHOD_TYPE_SUM}, + VectorPerTarget: map[string][]byte{"first": byteVector([]float32{1, 2, 3}), "second": byteVector([]float32{1, 2, 3, 4})}, + }, + }, + out: dto.GetParams{}, + error: true, + }, + { + name: "Near vector with targets per vector and wrong target vectors", + req: &pb.SearchRequest{ + Collection: multiVecClass, + NearVector: &pb.NearVector{ + VectorBytes: byteVector([]float32{1, 2, 3}), + Targets: &pb.Targets{TargetVectors: []string{"first"}, 
Combination: pb.CombinationMethod_COMBINATION_METHOD_TYPE_SUM}, + VectorPerTarget: map[string][]byte{"first": byteVector([]float32{1, 2, 3}), "second": byteVector([]float32{1, 2, 3, 4})}, + }, + }, + out: dto.GetParams{}, + error: true, + }, + { + name: "Near vector with single named vector config and no target", + req: &pb.SearchRequest{ + Collection: singleNamedVecClass, + NearVector: &pb.NearVector{ + VectorBytes: byteVector([]float32{1, 2, 3}), + }, + }, + out: dto.GetParams{ + ClassName: singleNamedVecClass, Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: false, + }, + NearVector: &searchparams.NearVector{Vectors: []models.Vector{[]float32{1, 2, 3}}, TargetVectors: []string{"default"}}, + }, + error: false, + }, + { + name: "Dont disable certainty for compatible parameters", + req: &pb.SearchRequest{ + Collection: singleNamedVecClass, + NearVector: &pb.NearVector{ + VectorBytes: byteVector([]float32{1, 2, 3}), + }, + Metadata: &pb.MetadataRequest{Certainty: true}, + }, + out: dto.GetParams{ + ClassName: singleNamedVecClass, Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: false, Certainty: true, + }, + NearVector: &searchparams.NearVector{Vectors: []models.Vector{[]float32{1, 2, 3}}, TargetVectors: []string{"default"}}, + }, + error: false, + }, + { + name: "Disable certainty for incompatible parameters", + req: &pb.SearchRequest{ + Collection: multiVecClass, + NearVector: &pb.NearVector{ + Targets: &pb.Targets{TargetVectors: []string{"first", "second"}}, + VectorPerTarget: map[string][]byte{"first": byteVector([]float32{1, 2, 3}), "second": byteVector([]float32{1, 2, 3, 4})}, + }, + Metadata: &pb.MetadataRequest{Certainty: true}, + }, + out: dto.GetParams{ + ClassName: multiVecClass, Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: 
false, Certainty: false, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum, Weights: []float32{0, 0}}, + NearVector: &searchparams.NearVector{Vectors: []models.Vector{[]float32{1, 2, 3}, []float32{1, 2, 3, 4}}, TargetVectors: []string{"first", "second"}}, + }, + error: false, + }, + { + name: "Multi vector input", + req: &pb.SearchRequest{ + Collection: multiVecClass, + NearVector: &pb.NearVector{ + Targets: &pb.Targets{TargetVectors: []string{"first", "first", "second"}}, + VectorForTargets: []*pb.VectorForTarget{{Name: "first", VectorBytes: byteVector([]float32{1, 2, 3})}, {Name: "first", VectorBytes: byteVector([]float32{2, 3, 4})}, {Name: "second", VectorBytes: byteVector([]float32{1, 2, 3, 4})}}, + }, + Metadata: &pb.MetadataRequest{Certainty: true}, + }, + out: dto.GetParams{ + ClassName: multiVecClass, Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: false, Certainty: false, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum, Weights: []float32{0, 0, 0}}, + NearVector: &searchparams.NearVector{Vectors: []models.Vector{[]float32{1, 2, 3}, []float32{2, 3, 4}, []float32{1, 2, 3, 4}}, TargetVectors: []string{"first", "first", "second"}}, + }, + error: false, + }, + { + name: "Multi vector input with weights", + req: &pb.SearchRequest{ + Collection: multiVecClass, + NearVector: &pb.NearVector{ + Targets: &pb.Targets{TargetVectors: []string{"first", "first", "second"}, Combination: pb.CombinationMethod_COMBINATION_METHOD_TYPE_MANUAL, WeightsForTargets: []*pb.WeightsForTarget{{Target: "first", Weight: 0.1}, {Target: "first", Weight: 0.9}, {Target: "second", Weight: 0.5}}}, + VectorForTargets: []*pb.VectorForTarget{{Name: "first", VectorBytes: byteVector([]float32{1, 2, 3})}, {Name: "first", VectorBytes: byteVector([]float32{2, 3, 4})}, {Name: "second", VectorBytes: byteVector([]float32{1, 2, 3, 4})}}, + }, + Metadata: 
&pb.MetadataRequest{Certainty: true}, + }, + out: dto.GetParams{ + ClassName: multiVecClass, Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: false, Certainty: false, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.ManualWeights, Weights: []float32{0.1, 0.9, 0.5}}, + NearVector: &searchparams.NearVector{Vectors: []models.Vector{[]float32{1, 2, 3}, []float32{2, 3, 4}, []float32{1, 2, 3, 4}}, TargetVectors: []string{"first", "first", "second"}}, + }, + error: false, + }, + { + name: "Multi vector input with fp32 vectors", + req: &pb.SearchRequest{ + Collection: multiVecClass, + NearVector: &pb.NearVector{ + Targets: &pb.Targets{TargetVectors: []string{"first", "first", "second"}}, + VectorForTargets: []*pb.VectorForTarget{ + { + Name: "first", + Vectors: []*pb.Vectors{ + { + Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32, + VectorBytes: byteVectorMulti([][]float32{{1, 2, 3}, {2, 3, 4}}), + }, + }, + }, + { + Name: "second", + Vectors: []*pb.Vectors{ + { + Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32, + VectorBytes: byteVectorMulti([][]float32{{1, 2, 3, 4}}), + }, + }, + }, + }, + }, + Metadata: &pb.MetadataRequest{Certainty: true}, + }, + out: dto.GetParams{ + ClassName: multiVecClass, Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: false, Certainty: false, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum, Weights: []float32{0, 0, 0}}, + NearVector: &searchparams.NearVector{Vectors: []models.Vector{[]float32{1, 2, 3}, []float32{2, 3, 4}, []float32{1, 2, 3, 4}}, TargetVectors: []string{"first", "first", "second"}}, + }, + error: false, + }, + { + name: "Multi vector input with mix of fp32 vectors", + req: &pb.SearchRequest{ + Collection: regularWithColBERTClass, + NearVector: &pb.NearVector{ + Targets: &pb.Targets{TargetVectors: []string{"regular_no_type", "regular_unspecified", 
"regular_fp32", "regular_fp32_and_name"}}, + VectorForTargets: []*pb.VectorForTarget{ + {Name: "regular_no_type", Vectors: []*pb.Vectors{{VectorBytes: byteVector([]float32{1, 2, 3})}}}, + {Name: "regular_unspecified", Vectors: []*pb.Vectors{{VectorBytes: byteVector([]float32{11, 22, 33}), Type: pb.Vectors_VECTOR_TYPE_UNSPECIFIED}}}, + {Name: "regular_fp32", Vectors: []*pb.Vectors{{VectorBytes: byteVector([]float32{111, 222, 333}), Type: pb.Vectors_VECTOR_TYPE_SINGLE_FP32}}}, + {Name: "regular_fp32_and_name", Vectors: []*pb.Vectors{{VectorBytes: byteVector([]float32{1, 2, 3}), Type: pb.Vectors_VECTOR_TYPE_SINGLE_FP32, Name: "regular_fp32_and_name"}}}, + }, + }, + Metadata: &pb.MetadataRequest{Certainty: true}, + }, + out: dto.GetParams{ + ClassName: regularWithColBERTClass, Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: false, Certainty: false, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum, Weights: []float32{0, 0, 0, 0}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1, 2, 3}, []float32{11, 22, 33}, []float32{111, 222, 333}, []float32{1, 2, 3}}, + TargetVectors: []string{"regular_no_type", "regular_unspecified", "regular_fp32", "regular_fp32_and_name"}, + }, + }, + error: false, + }, + { + name: "Multi vector input with mix of fp32 and colbert fp32 vectors", + req: &pb.SearchRequest{ + Collection: regularWithColBERTClass, + NearVector: &pb.NearVector{ + Targets: &pb.Targets{TargetVectors: []string{"regular_no_type", "regular_unspecified", "regular_fp32", "regular_fp32_and_name", "colbert_fp32"}}, + VectorForTargets: []*pb.VectorForTarget{ + {Name: "regular_no_type", Vectors: []*pb.Vectors{{VectorBytes: byteVector([]float32{1, 2, 3})}}}, + {Name: "regular_unspecified", Vectors: []*pb.Vectors{{VectorBytes: byteVector([]float32{11, 22, 33}), Type: pb.Vectors_VECTOR_TYPE_UNSPECIFIED}}}, + {Name: "regular_fp32", Vectors: 
[]*pb.Vectors{{VectorBytes: byteVector([]float32{111, 222, 333}), Type: pb.Vectors_VECTOR_TYPE_SINGLE_FP32}}}, + {Name: "regular_fp32_and_name", Vectors: []*pb.Vectors{{VectorBytes: byteVector([]float32{1, 2, 3}), Type: pb.Vectors_VECTOR_TYPE_SINGLE_FP32, Name: "regular_fp32_and_name"}}}, + {Name: "colbert_fp32", Vectors: []*pb.Vectors{ + {VectorBytes: byteVectorMulti([][]float32{{1, 2, 3}, {1, 2, 3}}), Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32, Name: "colbert_fp32"}, + }}, + }, + }, + Metadata: &pb.MetadataRequest{Certainty: true}, + }, + out: dto.GetParams{ + ClassName: regularWithColBERTClass, Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: false, Certainty: false, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum, Weights: []float32{0, 0, 0, 0, 0}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1, 2, 3}, []float32{11, 22, 33}, []float32{111, 222, 333}, []float32{1, 2, 3}, [][]float32{{1, 2, 3}, {1, 2, 3}}}, + TargetVectors: []string{"regular_no_type", "regular_unspecified", "regular_fp32", "regular_fp32_and_name", "colbert_fp32"}, + }, + }, + error: false, + }, + { + name: "Multi vector input with only colbert fp32 vectors", + req: &pb.SearchRequest{ + Collection: regularWithColBERTClass, + NearVector: &pb.NearVector{ + Targets: &pb.Targets{TargetVectors: []string{"colbert_fp32", "colbert_fp32_2"}}, + VectorForTargets: []*pb.VectorForTarget{ + {Name: "colbert_fp32", Vectors: []*pb.Vectors{ + {VectorBytes: byteVectorMulti([][]float32{{1, 2, 3}, {1, 2, 3}}), Index: 0, Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32, Name: "colbert_fp32"}, + }}, + {Name: "colbert_fp32_2", Vectors: []*pb.Vectors{ + {VectorBytes: byteVectorMulti([][]float32{{11, 22, 33}, {11, 22, 33}}), Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32}, + }}, + }, + }, + Metadata: &pb.MetadataRequest{Certainty: true}, + }, + out: dto.GetParams{ + ClassName: regularWithColBERTClass, 
Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: false, Certainty: false, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum, Weights: []float32{0, 0}}, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[][]float32{{1, 2, 3}, {1, 2, 3}}, [][]float32{{11, 22, 33}, {11, 22, 33}}}, + TargetVectors: []string{"colbert_fp32", "colbert_fp32_2"}, + }, + }, + error: false, + }, + { + name: "mixed vector input near vector targeting legacy vector", + req: &pb.SearchRequest{ + Collection: mixedVectorsClass, + NearVector: &pb.NearVector{ + Vector: []float32{1, 2, 3}, + Certainty: ptr(0.6), + }, + Metadata: &pb.MetadataRequest{Certainty: true}, + }, + out: dto.GetParams{ + ClassName: mixedVectorsClass, + Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: false, Certainty: true, + }, + NearVector: &searchparams.NearVector{ + Vectors: []models.Vector{[]float32{1, 2, 3}}, + TargetVectors: nil, + Certainty: 0.6, + }, + }, + error: false, + }, + { + name: "mixed vector input near vector targeting named vector", + req: &pb.SearchRequest{ + Collection: mixedVectorsClass, + NearVector: &pb.NearVector{ + Vector: []float32{1, 2, 3}, + Targets: &pb.Targets{ + TargetVectors: []string{"first_vec"}, + }, + Certainty: ptr(0.6), + }, + Metadata: &pb.MetadataRequest{Certainty: true}, + }, + out: dto.GetParams{ + ClassName: mixedVectorsClass, + Pagination: defaultPagination, + Properties: defaultNamedVecProps, + AdditionalProperties: additional.Properties{ + NoProps: false, Certainty: true, + }, + NearVector: &searchparams.NearVector{ + TargetVectors: []string{"first_vec"}, + Vectors: []models.Vector{[]float32{1, 2, 3}}, + Certainty: 0.6, + }, + TargetVectorCombination: &dto.TargetCombination{Type: dto.Minimum, Weights: []float32{0}}, + }, + error: false, + }, + } + + parser := NewParser(false, getClass, 
getAlias) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + out, err := parser.Search(tt.req, &config.Config{QueryDefaults: config.QueryDefaults{Limit: 10}}) + if tt.error { + require.NotNil(t, err) + } else { + require.Nil(t, err) + // The order of vector names in slice is non-deterministic, + // causing this test to be flaky. Sort first, no more flake + sortNamedVecs(tt.out.AdditionalProperties.Vectors) + sortNamedVecs(out.AdditionalProperties.Vectors) + require.EqualValues(t, tt.out.Properties, out.Properties) + require.EqualValues(t, tt.out, out) + } + }) + } +} + +func getClass(name string) (*models.Class, error) { + class := scheme.GetClass(name) + if class == nil { + return nil, fmt.Errorf("class %s not found", name) + } + return class, nil +} + +func getAlias(name string) string { + return "" +} + +func sortNamedVecs(vecs []string) { + sort.Slice(vecs, func(i, j int) bool { + return vecs[i] < vecs[j] + }) +} + +func ptr[T any](t T) *T { + return &t +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/prepare_aggregate_reply.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/prepare_aggregate_reply.go new file mode 100644 index 0000000000000000000000000000000000000000..f78e82d9ef8a68a95402f0ef3fa65a0b6a7f6084 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/prepare_aggregate_reply.go @@ -0,0 +1,359 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "fmt" + + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/schema" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +type AggregateReplier struct { + authorizedGetDataTypeOfProp func(string) (string, error) +} + +func NewAggregateReplier(authorizedGetClass classGetterWithAuthzFunc, params *aggregation.Params) *AggregateReplier { + return &AggregateReplier{ + authorizedGetDataTypeOfProp: func(propName string) (string, error) { + class, err := authorizedGetClass(string(params.ClassName)) + if err != nil { + return "", fmt.Errorf("get class: %w", err) + } + schemaProp, err := schema.GetPropertyByName(class, propName) + if err != nil { + return "", fmt.Errorf("get property by name: %w", err) + } + return string(schema.DataType(schemaProp.DataType[0])), nil + }, + } +} + +func (r *AggregateReplier) Aggregate(res interface{}, isGroupby bool) (*pb.AggregateReply, error) { + var groups []*pb.AggregateReply_Group + + if res != nil { + result, ok := res.(*aggregation.Result) + if !ok { + return nil, fmt.Errorf("unexpected aggregate result type: %T", res) + } + + if !isGroupby { + if len(result.Groups) == 0 { + return &pb.AggregateReply{}, fmt.Errorf("no groups found in aggregate result") + } + group := result.Groups[0] + count := int64(group.Count) + aggregations, err := r.parseAggregatedProperties(group.Properties) + if err != nil { + return nil, fmt.Errorf("aggregations: %w", err) + } + return &pb.AggregateReply{Result: &pb.AggregateReply_SingleResult{SingleResult: &pb.AggregateReply_Single{ + ObjectsCount: &count, + Aggregations: aggregations, + }}}, nil + } + + if len(result.Groups) > 0 { + groups = make([]*pb.AggregateReply_Group, len(result.Groups)) + for i := range result.Groups { + count := int64(result.Groups[i].Count) + aggregations, err := r.parseAggregatedProperties(result.Groups[i].Properties) + if err != nil { + return nil, 
fmt.Errorf("aggregations: %w", err) + } + groupedBy, err := r.parseAggregateGroupedBy(result.Groups[i].GroupedBy) + if err != nil { + return nil, fmt.Errorf("groupedBy: %w", err) + } + groups[i] = &pb.AggregateReply_Group{ + ObjectsCount: &count, + Aggregations: aggregations, + GroupedBy: groupedBy, + } + } + return &pb.AggregateReply{Result: &pb.AggregateReply_GroupedResults{GroupedResults: &pb.AggregateReply_Grouped{Groups: groups}}}, nil + } + } + return &pb.AggregateReply{}, nil +} + +func (r *AggregateReplier) parseAggregateGroupedBy(in *aggregation.GroupedBy) (*pb.AggregateReply_Group_GroupedBy, error) { + if in != nil { + switch val := in.Value.(type) { + case string: + return &pb.AggregateReply_Group_GroupedBy{ + Path: in.Path, + Value: &pb.AggregateReply_Group_GroupedBy_Text{Text: val}, + }, nil + case bool: + return &pb.AggregateReply_Group_GroupedBy{ + Path: in.Path, + Value: &pb.AggregateReply_Group_GroupedBy_Boolean{Boolean: val}, + }, nil + case float64: + return &pb.AggregateReply_Group_GroupedBy{ + Path: in.Path, + Value: &pb.AggregateReply_Group_GroupedBy_Number{Number: val}, + }, nil + case int64: + return &pb.AggregateReply_Group_GroupedBy{ + Path: in.Path, + Value: &pb.AggregateReply_Group_GroupedBy_Int{Int: val}, + }, nil + case []string: + return &pb.AggregateReply_Group_GroupedBy{ + Path: in.Path, + Value: &pb.AggregateReply_Group_GroupedBy_Texts{Texts: &pb.TextArray{Values: val}}, + }, nil + case []bool: + return &pb.AggregateReply_Group_GroupedBy{ + Path: in.Path, + Value: &pb.AggregateReply_Group_GroupedBy_Booleans{Booleans: &pb.BooleanArray{Values: val}}, + }, nil + case []float64: + return &pb.AggregateReply_Group_GroupedBy{ + Path: in.Path, + Value: &pb.AggregateReply_Group_GroupedBy_Numbers{Numbers: &pb.NumberArray{Values: val}}, + }, nil + case []int64: + return &pb.AggregateReply_Group_GroupedBy{ + Path: in.Path, + Value: &pb.AggregateReply_Group_GroupedBy_Ints{Ints: &pb.IntArray{Values: val}}, + }, nil + default: + return nil, 
fmt.Errorf("unrecognized grouped by value type: %T", in.Value) + } + } + return nil, nil +} + +func (r *AggregateReplier) parseAggregatedProperties(in map[string]aggregation.Property) (*pb.AggregateReply_Aggregations, error) { + var aggregations *pb.AggregateReply_Aggregations + if len(in) > 0 { + propertyAggregations := []*pb.AggregateReply_Aggregations_Aggregation{} + for name, property := range in { + aggregationResult, err := r.parseAggregationResult(name, property) + if err != nil { + return nil, fmt.Errorf("parse aggregation property: %w", err) + } + propertyAggregations = append(propertyAggregations, aggregationResult) + } + aggregations = &pb.AggregateReply_Aggregations{ + Aggregations: propertyAggregations, + } + } + return aggregations, nil +} + +func (r *AggregateReplier) parseAggregationResult(propertyName string, property aggregation.Property) (*pb.AggregateReply_Aggregations_Aggregation, error) { + switch property.Type { + case aggregation.PropertyTypeNumerical: + dataType, err := r.authorizedGetDataTypeOfProp(propertyName) + if err != nil { + return nil, fmt.Errorf("get data type of property: %w", err) + } + switch dataType { + case "int", "int[]": + integerAggregation, err := parseIntegerAggregation(property.SchemaType, property.NumericalAggregations) + if err != nil { + return nil, fmt.Errorf("parse integer aggregation: %w", err) + } + return &pb.AggregateReply_Aggregations_Aggregation{ + Property: propertyName, + Aggregation: &pb.AggregateReply_Aggregations_Aggregation_Int{Int: integerAggregation}, + }, nil + default: + numericalAggregation, err := parseNumericalAggregation(property.SchemaType, property.NumericalAggregations) + if err != nil { + return nil, fmt.Errorf("parse numerical aggregation: %w", err) + } + return &pb.AggregateReply_Aggregations_Aggregation{ + Property: propertyName, + Aggregation: &pb.AggregateReply_Aggregations_Aggregation_Number_{Number: numericalAggregation}, + }, nil + } + case aggregation.PropertyTypeText: + 
textAggregation := parseTextAggregation(property.SchemaType, property.TextAggregation) + return &pb.AggregateReply_Aggregations_Aggregation{ + Property: propertyName, + Aggregation: &pb.AggregateReply_Aggregations_Aggregation_Text_{Text: textAggregation}, + }, nil + case aggregation.PropertyTypeBoolean: + booleanAggregation := parseBooleanAggregation(property.SchemaType, property.BooleanAggregation) + return &pb.AggregateReply_Aggregations_Aggregation{ + Property: propertyName, + Aggregation: &pb.AggregateReply_Aggregations_Aggregation_Boolean_{Boolean: booleanAggregation}, + }, nil + case aggregation.PropertyTypeDate: + dateAggregation, err := parseDateAggregation(property.SchemaType, property.DateAggregations) + if err != nil { + return nil, fmt.Errorf("parse date aggregation: %w", err) + } + return &pb.AggregateReply_Aggregations_Aggregation{ + Property: propertyName, + Aggregation: &pb.AggregateReply_Aggregations_Aggregation_Date_{Date: dateAggregation}, + }, nil + case aggregation.PropertyTypeReference: + referenceAggregation := parseReferenceAggregation(property.SchemaType, property.ReferenceAggregation) + return &pb.AggregateReply_Aggregations_Aggregation{ + Property: propertyName, + Aggregation: &pb.AggregateReply_Aggregations_Aggregation_Reference_{Reference: referenceAggregation}, + }, nil + default: + return nil, fmt.Errorf("unknown property type: %s", property.Type) + } +} + +func parseNumericalAggregation(schemaType string, in map[string]interface{}) (*pb.AggregateReply_Aggregations_Aggregation_Number, error) { + var number *pb.AggregateReply_Aggregations_Aggregation_Number + if len(in) > 0 { + number = &pb.AggregateReply_Aggregations_Aggregation_Number{} + number.Type = &schemaType + for name, value := range in { + switch val := value.(type) { + case float64: + switch name { + case aggregation.CountAggregator.String(): + number.Count = ptInt64(val) + case aggregation.MeanAggregator.String(): + number.Mean = &val + case 
aggregation.MedianAggregator.String(): + number.Median = &val + case aggregation.ModeAggregator.String(): + number.Mode = &val + case aggregation.MaximumAggregator.String(): + number.Maximum = &val + case aggregation.MinimumAggregator.String(): + number.Minimum = &val + case aggregation.SumAggregator.String(): + number.Sum = &val + default: + return nil, fmt.Errorf("unknown numerical value aggregation type: %s", name) + } + default: + return nil, fmt.Errorf("unknown numerical value type: %T", value) + } + } + } + return number, nil +} + +func parseIntegerAggregation(schemaType string, in map[string]interface{}) (*pb.AggregateReply_Aggregations_Aggregation_Integer, error) { + var number *pb.AggregateReply_Aggregations_Aggregation_Integer + if len(in) > 0 { + number = &pb.AggregateReply_Aggregations_Aggregation_Integer{} + number.Type = &schemaType + for name, value := range in { + switch val := value.(type) { + case float64: + switch name { + case aggregation.CountAggregator.String(): + number.Count = ptInt64(val) + case aggregation.MeanAggregator.String(): + number.Mean = &val + case aggregation.MedianAggregator.String(): + number.Median = &val + case aggregation.ModeAggregator.String(): + number.Mode = ptInt64(val) + case aggregation.MaximumAggregator.String(): + number.Maximum = ptInt64(val) + case aggregation.MinimumAggregator.String(): + number.Minimum = ptInt64(val) + case aggregation.SumAggregator.String(): + number.Sum = ptInt64(val) + default: + return nil, fmt.Errorf("unknown integer value aggregation type: %s", name) + } + default: + return nil, fmt.Errorf("unknown integer value type: %T", value) + } + } + } + return number, nil +} + +func parseTextAggregation(schemaType string, in aggregation.Text) *pb.AggregateReply_Aggregations_Aggregation_Text { + var topOccurences *pb.AggregateReply_Aggregations_Aggregation_Text_TopOccurrences + if len(in.Items) > 0 { + items := make([]*pb.AggregateReply_Aggregations_Aggregation_Text_TopOccurrences_TopOccurrence, 
len(in.Items)) + for i := range in.Items { + items[i] = &pb.AggregateReply_Aggregations_Aggregation_Text_TopOccurrences_TopOccurrence{ + Value: in.Items[i].Value, + Occurs: int64(in.Items[i].Occurs), + } + } + topOccurences = &pb.AggregateReply_Aggregations_Aggregation_Text_TopOccurrences{Items: items} + } + return &pb.AggregateReply_Aggregations_Aggregation_Text{ + Count: ptInt64(in.Count), + Type: &schemaType, + TopOccurences: topOccurences, + } +} + +func parseBooleanAggregation(schemaType string, in aggregation.Boolean) *pb.AggregateReply_Aggregations_Aggregation_Boolean { + // TODO: check if it was requested + return &pb.AggregateReply_Aggregations_Aggregation_Boolean{ + Count: ptInt64(in.Count), + Type: &schemaType, + TotalTrue: ptInt64(in.TotalTrue), + TotalFalse: ptInt64(in.TotalFalse), + PercentageTrue: &in.PercentageTrue, + PercentageFalse: &in.PercentageFalse, + } +} + +func parseDateAggregation(schemaType string, in map[string]interface{}) (*pb.AggregateReply_Aggregations_Aggregation_Date, error) { + var date *pb.AggregateReply_Aggregations_Aggregation_Date + if len(in) > 0 { + date = &pb.AggregateReply_Aggregations_Aggregation_Date{} + date.Type = &schemaType + for name, value := range in { + switch val := value.(type) { + case int64: + if name == aggregation.CountAggregator.String() { + date.Count = &val + } + case string: + switch name { + case aggregation.MedianAggregator.String(): + date.Median = &val + case aggregation.ModeAggregator.String(): + date.Mode = &val + case aggregation.MaximumAggregator.String(): + date.Maximum = &val + case aggregation.MinimumAggregator.String(): + date.Minimum = &val + default: + return nil, fmt.Errorf("unknown date value aggregation type: %s", name) + } + default: + return nil, fmt.Errorf("unknown date value type: %T", value) + } + } + } + return date, nil +} + +func parseReferenceAggregation(schemaType string, in aggregation.Reference) *pb.AggregateReply_Aggregations_Aggregation_Reference { + return 
&pb.AggregateReply_Aggregations_Aggregation_Reference{ + Type: &schemaType, + PointingTo: in.PointingTo, + } +} + +func ptInt64[T int | float64](in T) *int64 { + out := int64(in) + return &out +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/prepare_aggregate_reply_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/prepare_aggregate_reply_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b2ee2df7b89e5f31cf1ddc808acd6889eeee1be7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/prepare_aggregate_reply_test.go @@ -0,0 +1,66 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/aggregation" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +func TestGRPCAggregateReply(t *testing.T) { + tests := []struct { + name string + res interface{} + outRes *pb.AggregateReply + wantError error + }{ + { + name: "meta count", + res: &aggregation.Result{ + Groups: []aggregation.Group{ + { + Count: 11, + }, + }, + }, + outRes: &pb.AggregateReply{ + Result: &pb.AggregateReply_GroupedResults{ + GroupedResults: &pb.AggregateReply_Grouped{ + Groups: []*pb.AggregateReply_Group{ + { + ObjectsCount: ptInt64(11), + }, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + replier := NewAggregateReplier(nil, nil) + result, err := replier.Aggregate(tt.res, true) + if tt.wantError != nil { + require.Error(t, err) + assert.EqualError(t, tt.wantError, err.Error()) + } else { + require.NoError(t, err) + require.NotNil(t, result) + 
assert.Equal(t, tt.outRes, result) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/prepare_reply.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/prepare_reply.go new file mode 100644 index 0000000000000000000000000000000000000000..964dfc3a971e8a461eff62f5b300e6fe4d5ba268 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/prepare_reply.go @@ -0,0 +1,604 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "fmt" + "math/big" + "strings" + "time" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/handlers/grpc/v1/generative" + "github.com/weaviate/weaviate/usecases/byteops" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + generate "github.com/weaviate/weaviate/usecases/modulecomponents/additional/generate" + additionalModels "github.com/weaviate/weaviate/usecases/modulecomponents/additional/models" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/search" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +type mapper interface { + NewPrimitiveValue(v interface{}, dt schema.DataType) (*pb.Value, error) + NewNestedValue(v interface{}, dt schema.DataType, parent schema.PropertyInterface, prop search.SelectProperty) (*pb.Value, error) + NewNilValue() *pb.Value +} + +type generativeReplier interface { + Extract(_additional map[string]any, params any, metadata *pb.MetadataResult) (*pb.GenerativeResult, *pb.GenerativeResult, string, error) +} + +type Replier struct { + generative 
generativeReplier + mapper mapper + logger logrus.FieldLogger +} + +type generativeQueryParams interface { + ProviderName() string + ReturnMetadataForSingle() bool + ReturnMetadataForGrouped() bool + ReturnDebugForSingle() bool + ReturnDebugForGrouped() bool +} + +func NewReplier( + uses127 bool, + generativeQueryParams generativeQueryParams, + logger logrus.FieldLogger, +) *Replier { + return &Replier{ + generative: generative.NewReplier(logger, generativeQueryParams, uses127), + mapper: &Mapper{}, + logger: logger, + } +} + +func (r *Replier) Search(res []interface{}, start time.Time, searchParams dto.GetParams, scheme schema.Schema) (*pb.SearchReply, error) { + tookSeconds := float64(time.Since(start)) / float64(time.Second) + out := &pb.SearchReply{ + Took: float32(tookSeconds), + GenerativeGroupedResult: new(string), // pointer to empty string + } + + if searchParams.GroupBy != nil { + out.GroupByResults = make([]*pb.GroupByResult, len(res)) + for i, raw := range res { + group, generativeGroupResponse, err := r.extractGroup(raw, searchParams, scheme) + if err != nil { + return nil, err + } + if generativeGroupResponse != "" { + out.GenerativeGroupedResult = &generativeGroupResponse + } + out.GroupByResults[i] = group + } + } else { + objects, generativeGroupedResult, generativeGroupedResults, err := r.extractObjectsToResults(res, searchParams, scheme, false) + if err != nil { + return nil, err + } + out.GenerativeGroupedResult = &generativeGroupedResult + out.GenerativeGroupedResults = generativeGroupedResults + out.Results = objects + } + return out, nil +} + +func (r *Replier) extractObjectsToResults(res []interface{}, searchParams dto.GetParams, scheme schema.Schema, fromGroup bool) ([]*pb.SearchResult, string, *pb.GenerativeResult, error) { + results := make([]*pb.SearchResult, len(res)) + generativeGroupResultsReturnDeprecated := "" + var generativeGroupResults *pb.GenerativeResult + for i, raw := range res { + asMap, ok := raw.(map[string]interface{}) + 
if !ok { + return nil, "", nil, fmt.Errorf("could not parse returns %v", raw) + } + firstObject := i == 0 + + var props *pb.PropertiesResult + var err error + + props, err = r.extractPropertiesAnswer(scheme, asMap, searchParams.Properties, searchParams.ClassName, searchParams.Alias, searchParams.AdditionalProperties) + if err != nil { + return nil, "", nil, err + } + + additionalProps, err := r.extractAdditionalProps(asMap, searchParams.AdditionalProperties, firstObject, fromGroup) + if err != nil { + return nil, "", nil, err + } + + if generativeGroupResultsReturnDeprecated == "" && additionalProps.GenerativeGroupedDeprecated != "" { + generativeGroupResultsReturnDeprecated = additionalProps.GenerativeGroupedDeprecated + } + if generativeGroupResults == nil && r.isGenerativeGroupedPresent(additionalProps.GenerativeGrouped) { + generativeGroupResults = additionalProps.GenerativeGrouped + } + + result := &pb.SearchResult{ + Properties: props, + Metadata: additionalProps.Metadata, + Generative: additionalProps.GenerativeSingle, + } + + results[i] = result + } + return results, generativeGroupResultsReturnDeprecated, generativeGroupResults, nil +} + +func (r *Replier) isGenerativeGroupedPresent(grouped *pb.GenerativeResult) bool { + if grouped != nil && len(grouped.Values) > 0 && + (len(grouped.Values[0].Result) > 0 || grouped.Values[0].Debug != nil || grouped.Values[0].Metadata != nil) { + return true + } + return false +} + +func idToByte(idRaw interface{}) ([]byte, string, error) { + idStrfmt, ok := idRaw.(strfmt.UUID) + if !ok { + return nil, "", errors.New("could not extract format id in additional prop") + } + idStrfmtStr := idStrfmt.String() + hexInteger, success := new(big.Int).SetString(strings.ReplaceAll(idStrfmtStr, "-", ""), 16) + if !success { + return nil, "", fmt.Errorf("failed to parse hex string to integer") + } + return hexInteger.Bytes(), idStrfmtStr, nil +} + +func (r *Replier) extractAdditionalProps(asMap map[string]any, additionalPropsParams 
additional.Properties, firstObject, fromGroup bool) (*additionalProps, error) { + generativeSearchRaw, generativeSearchEnabled := additionalPropsParams.ModuleParams["generate"] + _, rerankEnabled := additionalPropsParams.ModuleParams["rerank"] + + addProps := &additionalProps{Metadata: &pb.MetadataResult{}} + if additionalPropsParams.ID && !generativeSearchEnabled && !rerankEnabled && !fromGroup { + idRaw, ok := asMap["id"] + if !ok { + return nil, errors.New("could not extract get id in additional prop") + } + + idToBytes, idAsString, err := idToByte(idRaw) + if err != nil { + return nil, errors.Wrap(err, "could not extract format id in additional prop") + } + addProps.Metadata.Id = idAsString + addProps.Metadata.IdAsBytes = idToBytes + } + _, ok := asMap["_additional"] + if !ok { + return addProps, nil + } + + var additionalPropertiesMap map[string]interface{} + if !fromGroup { + additionalPropertiesMap = asMap["_additional"].(map[string]interface{}) + } else { + addPropertiesGroup := asMap["_additional"].(*additional.GroupHitAdditional) + additionalPropertiesMap = make(map[string]interface{}, 3) + additionalPropertiesMap["id"] = addPropertiesGroup.ID + additionalPropertiesMap["vector"] = addPropertiesGroup.Vector + additionalPropertiesMap["vectors"] = addPropertiesGroup.Vectors + additionalPropertiesMap["distance"] = addPropertiesGroup.Distance + } + // id is part of the _additional map in case of generative search, group, & rerank - don't aks me why + if additionalPropsParams.ID && (generativeSearchEnabled || fromGroup || rerankEnabled) { + idRaw, ok := additionalPropertiesMap["id"] + if !ok { + return nil, errors.New("could not extract get id generative in additional prop") + } + + idToBytes, idAsString, err := idToByte(idRaw) + if err != nil { + return nil, errors.Wrap(err, "could not extract format id in additional prop") + } + addProps.Metadata.Id = idAsString + addProps.Metadata.IdAsBytes = idToBytes + } + + if generativeSearchEnabled { + 
singleGenerativeResult, groupedGenerativeResult, groupedDeprecated, err := r.generative.Extract(additionalPropertiesMap, generativeSearchRaw, addProps.Metadata) + if err != nil { + return nil, err + } + addProps.GenerativeSingle = singleGenerativeResult + addProps.GenerativeGrouped = groupedGenerativeResult + addProps.GenerativeGroupedDeprecated = groupedDeprecated + } + + if rerankEnabled { + rerank, ok := additionalPropertiesMap["rerank"] + if !ok { + return nil, errors.New("No results for rerank despite a search request. Is a the rerank module enabled?") + } + + rerankFmt, ok := rerank.([]*additionalModels.RankResult) + if !ok { + return nil, errors.New("could not cast rerank result additional prop") + } + addProps.Metadata.RerankScore = *rerankFmt[0].Score + addProps.Metadata.RerankScorePresent = true + } + + // additional properties are only present for certain searches/configs => don't return an error if not available + if additionalPropsParams.Vector { + vector, ok := additionalPropertiesMap["vector"] + if ok { + vectorfmt, ok2 := vector.([]float32) + if ok2 { + addProps.Metadata.Vector = vectorfmt // deprecated, remove in a bit + addProps.Metadata.VectorBytes = byteops.Fp32SliceToBytes(vectorfmt) + } + } + } + + if len(additionalPropsParams.Vectors) > 0 { + vectors, ok := additionalPropertiesMap["vectors"] + if ok { + vectorfmt, ok2 := vectors.(map[string]models.Vector) + if !ok2 { + // needed even though the types are identical, may have been created differently in core behind the interface{} + // e.g. 
for group hits + vectorfmt, ok2 = vectors.(models.Vectors) + } + if ok2 { + addProps.Metadata.Vectors = make([]*pb.Vectors, 0, len(additionalPropsParams.Vectors)) + for _, name := range additionalPropsParams.Vectors { + vector := vectorfmt[name] + switch vec := vector.(type) { + case []float32: + if len(vec) != 0 { + addProps.Metadata.Vectors = append(addProps.Metadata.Vectors, &pb.Vectors{ + VectorBytes: byteops.Fp32SliceToBytes(vec), + Name: name, + Type: pb.Vectors_VECTOR_TYPE_SINGLE_FP32, + }) + } + case [][]float32: + if len(vec) != 0 { + addProps.Metadata.Vectors = append(addProps.Metadata.Vectors, &pb.Vectors{ + VectorBytes: byteops.Fp32SliceOfSlicesToBytes(vec), + Name: name, + Type: pb.Vectors_VECTOR_TYPE_MULTI_FP32, + }) + } + default: + // do nothing + } + } + } + } + } + + if additionalPropsParams.Certainty { + addProps.Metadata.CertaintyPresent = false + certainty, ok := additionalPropertiesMap["certainty"] + if ok { + certaintyfmt, ok2 := certainty.(float64) + if ok2 { + addProps.Metadata.Certainty = float32(certaintyfmt) + addProps.Metadata.CertaintyPresent = true + } + } + } + + if additionalPropsParams.Distance { + addProps.Metadata.DistancePresent = false + distance, ok := additionalPropertiesMap["distance"] + if ok { + distancefmt, ok2 := distance.(float32) + if ok2 { + addProps.Metadata.Distance = distancefmt + addProps.Metadata.DistancePresent = true + } + } + } + + if additionalPropsParams.CreationTimeUnix { + addProps.Metadata.CreationTimeUnixPresent = false + creationtime, ok := additionalPropertiesMap["creationTimeUnix"] + if ok { + creationtimefmt, ok2 := creationtime.(int64) + if ok2 { + addProps.Metadata.CreationTimeUnix = creationtimefmt + addProps.Metadata.CreationTimeUnixPresent = true + } + } + } + + if additionalPropsParams.LastUpdateTimeUnix { + addProps.Metadata.LastUpdateTimeUnixPresent = false + lastUpdateTime, ok := additionalPropertiesMap["lastUpdateTimeUnix"] + if ok { + lastUpdateTimefmt, ok2 := lastUpdateTime.(int64) + if 
ok2 { + addProps.Metadata.LastUpdateTimeUnix = lastUpdateTimefmt + addProps.Metadata.LastUpdateTimeUnixPresent = true + } + } + } + + if additionalPropsParams.ExplainScore { + addProps.Metadata.ExplainScorePresent = false + explainScore, ok := additionalPropertiesMap["explainScore"] + if ok { + explainScorefmt, ok2 := explainScore.(string) + if ok2 { + addProps.Metadata.ExplainScore = explainScorefmt + addProps.Metadata.ExplainScorePresent = true + } + } + } + + if additionalPropsParams.Score { + addProps.Metadata.ScorePresent = false + score, ok := additionalPropertiesMap["score"] + if ok { + scorefmt, ok2 := score.(float32) + if ok2 { + addProps.Metadata.Score = scorefmt + addProps.Metadata.ScorePresent = true + } + } + } + + if additionalPropsParams.IsConsistent { + isConsistent, ok := additionalPropertiesMap["isConsistent"] + if ok { + isConsistentfmt, ok2 := isConsistent.(bool) + if ok2 { + addProps.Metadata.IsConsistent = &isConsistentfmt + addProps.Metadata.IsConsistentPresent = true + } + } + } + + return addProps, nil +} + +func (r *Replier) extractGroup(raw any, searchParams dto.GetParams, scheme schema.Schema) (*pb.GroupByResult, string, error) { + generativeSearchRaw, generativeSearchEnabled := searchParams.AdditionalProperties.ModuleParams["generate"] + _, rerankEnabled := searchParams.AdditionalProperties.ModuleParams["rerank"] + asMap, ok := raw.(map[string]interface{}) + if !ok { + return nil, "", fmt.Errorf("cannot parse result %v", raw) + } + add, ok := asMap["_additional"] + if !ok { + return nil, "", fmt.Errorf("_additional is required for groups %v", asMap) + } + addProps, ok := add.(models.AdditionalProperties) + if !ok { + addProps, ok = add.(map[string]interface{}) + } + if !ok { + return nil, "", fmt.Errorf("cannot parse _additional %v", add) + } + groupRaw, ok := addProps["group"] + if !ok { + return nil, "", fmt.Errorf("group is not present %v", addProps) + } + group, ok := groupRaw.(*additional.Group) + if !ok { + return nil, "", 
fmt.Errorf("cannot parse _additional %v", groupRaw) + } + + ret := &pb.GroupByResult{ + Name: group.GroupedBy.Value, + MaxDistance: group.MaxDistance, + MinDistance: group.MinDistance, + NumberOfObjects: int64(group.Count), + } + + groupedGenerativeResults := "" + if generativeSearchEnabled { + generateFmt, err := extractGenerateResult(addProps) + if err != nil { + return nil, "", err + } + + generativeSearch, ok := generativeSearchRaw.(*generate.Params) + if !ok { + return nil, "", errors.New("could not cast generative search params") + } + if generativeSearch.Prompt != nil && generateFmt.SingleResult == nil { + return nil, "", errors.New("No results for generative search despite a search request. Is a generative module enabled?") + } + + if generateFmt.Error != nil { + return nil, "", generateFmt.Error + } + + if generateFmt.SingleResult != nil && *generateFmt.SingleResult != "" { + ret.Generative = &pb.GenerativeReply{Result: *generateFmt.SingleResult} + } + + // grouped results are only added to the first object for GQL reasons + // however, reranking can result in a different order, so we need to check every object + // recording the result if it's present assuming that it is at least somewhere and will be caught + if generateFmt.GroupedResult != nil && *generateFmt.GroupedResult != "" { + groupedGenerativeResults = *generateFmt.GroupedResult + } + } + + if rerankEnabled { + rerankRaw, ok := addProps["rerank"] + if !ok { + return nil, "", fmt.Errorf("rerank is not present %v", addProps) + } + + rerank, ok := rerankRaw.([]*additionalModels.RankResult) + if !ok { + return nil, "", fmt.Errorf("cannot parse rerank %v", rerankRaw) + } + ret.Rerank = &pb.RerankReply{ + Score: *rerank[0].Score, + } + } + + // group results does not support more additional properties + searchParams.AdditionalProperties = additional.Properties{ + ID: searchParams.AdditionalProperties.ID, + Vector: searchParams.AdditionalProperties.Vector, + Vectors: 
searchParams.AdditionalProperties.Vectors, + Distance: searchParams.AdditionalProperties.Distance, + } + + // group objects are returned as a different type than normal results ([]map[string]interface{} vs []interface). As + // the normal path is used much more often than groupBy, convert the []map[string]interface{} to []interface{}, even + // though we cast it to map[string]interface{} in the extraction function. + // This way we only do a copy for groupBy and not for the standard code-path which is used more often + returnObjectsUntyped := make([]interface{}, len(group.Hits)) + for i := range returnObjectsUntyped { + returnObjectsUntyped[i] = group.Hits[i] + } + + objects, _, _, err := r.extractObjectsToResults(returnObjectsUntyped, searchParams, scheme, true) + if err != nil { + return nil, "", errors.Wrap(err, "extracting hits from group") + } + + ret.Objects = objects + + return ret, groupedGenerativeResults, nil +} + +func (r *Replier) extractPropertiesAnswer(scheme schema.Schema, results map[string]interface{}, properties search.SelectProperties, className, alias string, additionalPropsParams additional.Properties) (*pb.PropertiesResult, error) { + nonRefProps := &pb.Properties{ + Fields: make(map[string]*pb.Value, 0), + } + refProps := make([]*pb.RefPropertiesResult, 0) + for _, prop := range properties { + propRaw, ok := results[prop.Name] + + if !ok { + if prop.IsPrimitive || prop.IsObject { + nonRefProps.Fields[prop.Name] = r.mapper.NewNilValue() + } + continue + } + if prop.IsPrimitive { + class := scheme.GetClass(className) + if class == nil { + return nil, fmt.Errorf("could not find class %s in schema", className) + } + dataType, err := schema.GetPropertyDataType(class, prop.Name) + if err != nil { + return nil, errors.Wrap(err, "getting primitive property datatype") + } + value, err := r.mapper.NewPrimitiveValue(propRaw, *dataType) + if err != nil { + return nil, errors.Wrapf(err, "creating primitive value for %v", prop.Name) + } + 
nonRefProps.Fields[prop.Name] = value + continue + } + if prop.IsObject { + class := scheme.GetClass(className) + if class == nil { + return nil, fmt.Errorf("could not find class %s in schema", className) + } + nested, err := schema.GetPropertyByName(class, prop.Name) + if err != nil { + return nil, errors.Wrap(err, "getting nested property") + } + value, err := r.mapper.NewNestedValue(propRaw, schema.DataType(nested.DataType[0]), &Property{Property: nested}, prop) + if err != nil { + return nil, errors.Wrap(err, "creating object value") + } + nonRefProps.Fields[prop.Name] = value + continue + } + refs, ok := propRaw.([]interface{}) + if !ok { + continue + } + extractedRefProps := make([]*pb.PropertiesResult, 0, len(refs)) + for _, ref := range refs { + refLocal, ok := ref.(search.LocalRef) + if !ok { + continue + } + extractedRefProp, err := r.extractPropertiesAnswer(scheme, refLocal.Fields, prop.Refs[0].RefProperties, refLocal.Class, "", additionalPropsParams) + if err != nil { + continue + } + additionalProps, err := r.extractAdditionalProps(refLocal.Fields, prop.Refs[0].AdditionalProperties, false, false) + if err != nil { + return nil, err + } + if additionalProps == nil { + return nil, fmt.Errorf("additional props are nil somehow") + } + extractedRefProp.Metadata = additionalProps.Metadata + extractedRefProps = append(extractedRefProps, extractedRefProp) + } + + refProp := pb.RefPropertiesResult{PropName: prop.Name, Properties: extractedRefProps} + refProps = append(refProps, &refProp) + } + props := pb.PropertiesResult{} + if len(nonRefProps.Fields) != 0 { + props.NonRefProps = nonRefProps + } + if len(refProps) != 0 { + props.RefProps = refProps + } + props.RefPropsRequested = properties.HasRefs() + if alias != "" { + props.TargetCollection = alias + } else { + props.TargetCollection = className + } + return &props, nil +} + +func extractGenerateResult(additionalPropertiesMap map[string]interface{}) (*additionalModels.GenerateResult, error) { + generateFmt 
:= &additionalModels.GenerateResult{} + if generate, ok := additionalPropertiesMap["generate"]; ok { + generateParams, ok := generate.(map[string]interface{}) + if !ok { + return nil, errors.New("could not cast generative result additional prop") + } + if generateParams["singleResult"] != nil { + if singleResult, ok := generateParams["singleResult"].(*string); ok { + generateFmt.SingleResult = singleResult + } + } + if generateParams["groupedResult"] != nil { + if groupedResult, ok := generateParams["groupedResult"].(*string); ok { + generateFmt.GroupedResult = groupedResult + } + } + if generateParams["error"] != nil { + if err, ok := generateParams["error"].(error); ok { + generateFmt.Error = err + } + } + } + return generateFmt, nil +} + +type additionalProps struct { + Metadata *pb.MetadataResult + GenerativeSingle *pb.GenerativeResult + GenerativeGrouped *pb.GenerativeResult + GenerativeGroupedDeprecated string +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/prepare_reply_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/prepare_reply_test.go new file mode 100644 index 0000000000000000000000000000000000000000..25a9b57230b5195016bda68d6941f2c247712fb8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/prepare_reply_test.go @@ -0,0 +1,1374 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "encoding/binary" + "math" + "math/big" + "strings" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/usecases/byteops" + + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/usecases/modulecomponents/additional/generate" + addModels "github.com/weaviate/weaviate/usecases/modulecomponents/additional/models" +) + +const ( + UUID1 = strfmt.UUID("a4de3ca0-6975-464f-b23b-adddd83630d7") + UUID2 = strfmt.UUID("7e10ec81-a26d-4ac7-8264-3e3e05397ddc") +) + +func byteVector(vec []float32) []byte { + vector := make([]byte, len(vec)*4) + + for i := 0; i < len(vec); i++ { + binary.LittleEndian.PutUint32(vector[i*4:i*4+4], math.Float32bits(vec[i])) + } + + return vector +} + +func byteVectorMulti(mat [][]float32) []byte { + matrix := make([]byte, 2) + binary.LittleEndian.PutUint16(matrix, uint16(len(mat[0]))) + for _, vec := range mat { + matrix = append(matrix, byteVector(vec)...) 
+ } + return matrix +} + +func idByte(id string) []byte { + hexInteger, _ := new(big.Int).SetString(strings.ReplaceAll(id, "-", ""), 16) + return hexInteger.Bytes() +} + +func TestGRPCReply(t *testing.T) { + allAdditional := dto.GetParams{AdditionalProperties: additional.Properties{ + Vector: true, + Certainty: true, + ID: true, + Distance: true, + CreationTimeUnix: true, + LastUpdateTimeUnix: true, + ExplainScore: true, + Score: true, + IsConsistent: true, + }} + truePointer := true + + someFloat64 := float64(0.1) + refClass1 := "RefClass1" + refClass2 := "RefClass2" + className := "className" + objClass := "objClass" + NamedVecClass := "NamedVecs" + scheme := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: className, + Properties: []*models.Property{ + {Name: "word", DataType: schema.DataTypeText.PropString()}, + {Name: "other", DataType: []string{"int"}}, + {Name: "age", DataType: []string{"int"}}, + {Name: "nums", DataType: schema.DataTypeIntArray.PropString()}, + {Name: "ref", DataType: []string{refClass1}}, + {Name: "multiRef", DataType: []string{refClass1, refClass2}}, + { + Name: "nested", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + {Name: "text", DataType: schema.DataTypeText.PropString()}, + {Name: "text2", DataType: schema.DataTypeText.PropString()}, + }, + }, + }, + }, + { + Class: refClass1, + Properties: []*models.Property{ + {Name: "something", DataType: schema.DataTypeText.PropString()}, + {Name: "nums", DataType: schema.DataTypeIntArray.PropString()}, + {Name: "ref2", DataType: []string{refClass2}}, + }, + }, + { + Class: refClass2, + Properties: []*models.Property{ + {Name: "else", DataType: schema.DataTypeText.PropString()}, + {Name: "ref3", DataType: []string{refClass2}}, + }, + }, + { + Class: NamedVecClass, + Properties: []*models.Property{ + {Name: "name", DataType: schema.DataTypeText.PropString()}, + }, + VectorConfig: map[string]models.VectorConfig{ + 
"custom": { + VectorIndexType: "hnsw", + Vectorizer: map[string]interface{}{"none": map[string]interface{}{}}, + }, + "first": { + VectorIndexType: "flat", + Vectorizer: map[string]interface{}{"text2vec-contextionary": map[string]interface{}{}}, + }, + }, + }, + { + Class: objClass, + Properties: []*models.Property{ + { + Name: "something", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "names", + DataType: schema.DataTypeTextArray.PropString(), + }, + { + Name: "else", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "names", + DataType: schema.DataTypeTextArray.PropString(), + }, + }, + }, + { + Name: "objs", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{{ + Name: "name", + DataType: schema.DataTypeText.PropString(), + }}, + }, + }, + }, + }, + }, + }, + }, + } + + tests := []struct { + name string + res []any + searchParams dto.GetParams // only a few things are needed to control what is returned + outSearch []*pb.SearchResult + outGenerative string + outGroup []*pb.GroupByResult + hasError bool + }{ + { + name: "vector only", + res: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{"vector": []float32{1}}, + }, + map[string]interface{}{ + "_additional": map[string]interface{}{"vector": []float32{2}}, + }, + }, + searchParams: dto.GetParams{AdditionalProperties: additional.Properties{Vector: true}}, + outSearch: []*pb.SearchResult{ + {Metadata: &pb.MetadataResult{Vector: []float32{1}, VectorBytes: byteVector([]float32{1})}, Properties: &pb.PropertiesResult{}}, + {Metadata: &pb.MetadataResult{Vector: []float32{2}, VectorBytes: byteVector([]float32{2})}, Properties: &pb.PropertiesResult{}}, + }, + }, + { + name: "named vector 
only", + res: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{"vectors": map[string]models.Vector{"custom": []float32{1}, "first": []float32{2}}}, + }, + }, + searchParams: dto.GetParams{AdditionalProperties: additional.Properties{Vectors: []string{"custom", "first"}}}, + outSearch: []*pb.SearchResult{ + {Metadata: &pb.MetadataResult{Vectors: []*pb.Vectors{ + {Name: "custom", VectorBytes: byteVector([]float32{1}), Type: pb.Vectors_VECTOR_TYPE_SINGLE_FP32}, + {Name: "first", VectorBytes: byteVector([]float32{2}), Type: pb.Vectors_VECTOR_TYPE_SINGLE_FP32}, + }}, Properties: &pb.PropertiesResult{}}, + }, + }, + { + name: "all additional", + res: []interface{}{ + map[string]interface{}{ + "id": UUID1, + "_additional": map[string]interface{}{ + "vector": []float32{1}, + "certainty": 0.4, + "distance": float32(0.01), + "creationTimeUnix": int64(123), + "lastUpdateTimeUnix": int64(345), + "explainScore": "other text", + "score": float32(0.25), + "isConsistent": true, + }, + }, + map[string]interface{}{ + "id": UUID2, + "_additional": map[string]interface{}{ + "vector": []float32{2}, + "certainty": 0.5, + "distance": float32(0.1), + "creationTimeUnix": int64(456), + "lastUpdateTimeUnix": int64(789), + "explainScore": "some text", + "score": float32(0.45), + "isConsistent": true, + }, + }, + }, + searchParams: allAdditional, + outSearch: []*pb.SearchResult{ + { + Metadata: &pb.MetadataResult{ + Vector: []float32{1}, + Id: string(UUID1), + Certainty: 0.4, + CertaintyPresent: true, + Distance: 0.01, + DistancePresent: true, + CreationTimeUnix: 123, + CreationTimeUnixPresent: true, + LastUpdateTimeUnix: 345, + LastUpdateTimeUnixPresent: true, + ExplainScore: "other text", + ExplainScorePresent: true, + Score: 0.25, + ScorePresent: true, + IsConsistent: &truePointer, + IsConsistentPresent: true, + VectorBytes: byteVector([]float32{1}), + IdAsBytes: idByte(string(UUID1)), + }, + Properties: &pb.PropertiesResult{}, + }, + { + Metadata: 
&pb.MetadataResult{ + Vector: []float32{2}, + Id: string(UUID2), + Certainty: 0.5, + CertaintyPresent: true, + Distance: 0.1, + DistancePresent: true, + CreationTimeUnix: 456, + CreationTimeUnixPresent: true, + LastUpdateTimeUnix: 789, + LastUpdateTimeUnixPresent: true, + ExplainScore: "some text", + ExplainScorePresent: true, + Score: 0.45, + ScorePresent: true, + IsConsistent: &truePointer, + IsConsistentPresent: true, + VectorBytes: byteVector([]float32{2}), + IdAsBytes: idByte(string(UUID2)), + }, + Properties: &pb.PropertiesResult{}, + }, + }, + }, + { + name: "primitive properties", + res: []interface{}{ + map[string]interface{}{ + "word": "word", + "age": float64(21), + }, + map[string]interface{}{ + "word": "other", + "age": float64(26), + }, + }, + searchParams: dto.GetParams{ + ClassName: className, + Properties: search.SelectProperties{{Name: "word", IsPrimitive: true}, {Name: "age", IsPrimitive: true}}, + }, + outSearch: []*pb.SearchResult{ + { + Metadata: &pb.MetadataResult{}, + Properties: &pb.PropertiesResult{ + TargetCollection: className, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "word": {Kind: &pb.Value_TextValue{TextValue: "word"}}, + "age": {Kind: &pb.Value_IntValue{IntValue: 21}}, + }, + }, + RefProps: []*pb.RefPropertiesResult{}, + RefPropsRequested: false, + }, + }, + { + Metadata: &pb.MetadataResult{}, + Properties: &pb.PropertiesResult{ + TargetCollection: className, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "word": {Kind: &pb.Value_TextValue{TextValue: "other"}}, + "age": {Kind: &pb.Value_IntValue{IntValue: 26}}, + }, + }, + RefProps: []*pb.RefPropertiesResult{}, + RefPropsRequested: false, + }, + }, + }, + }, + { + name: "request property with nil value", + res: []interface{}{ + map[string]interface{}{ + "word": "word", + }, + }, + searchParams: dto.GetParams{ + ClassName: className, + Properties: search.SelectProperties{ + {Name: "word", IsPrimitive: true}, + {Name: "age", IsPrimitive: true}, 
+ {Name: "nested", IsPrimitive: false, IsObject: true, Props: []search.SelectProperty{{Name: "text", IsPrimitive: true}}}, + }, + }, + outSearch: []*pb.SearchResult{ + { + Metadata: &pb.MetadataResult{}, + Properties: &pb.PropertiesResult{ + TargetCollection: className, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "word": {Kind: &pb.Value_TextValue{TextValue: "word"}}, + "age": {Kind: &pb.Value_NullValue{}}, + "nested": {Kind: &pb.Value_NullValue{}}, + }, + }, + RefProps: []*pb.RefPropertiesResult{}, + RefPropsRequested: false, + }, + }, + }, + }, + { + name: "array properties", + res: []interface{}{ + map[string]interface{}{"nums": []float64{1, 2, 3}}, // ints are encoded as float64 in json + }, + searchParams: dto.GetParams{ + ClassName: className, + Properties: search.SelectProperties{{Name: "nums", IsPrimitive: true}}, + }, + outSearch: []*pb.SearchResult{ + { + Metadata: &pb.MetadataResult{}, + Properties: &pb.PropertiesResult{ + TargetCollection: className, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "nums": {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_IntValues{IntValues: &pb.IntValues{Values: byteops.IntsToByteVector([]float64{1, 2, 3})}}, + }}}, + }, + }, + }, + }, + }, + }, + { + name: "nested object properties", + res: []interface{}{ + map[string]interface{}{ + "something": map[string]interface{}{ + "name": "Bob", + "names": []string{"Jo", "Jill"}, + "else": map[string]interface{}{ + "name": "Bill", + "names": []string{"Jo", "Jill"}, + }, + "objs": []interface{}{ + map[string]interface{}{"name": "Bill"}, + }, + }, + }, + }, + searchParams: dto.GetParams{ + ClassName: objClass, + Properties: search.SelectProperties{{ + Name: "something", + IsPrimitive: false, + IsObject: true, + Props: []search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + { + Name: "names", + IsPrimitive: true, + }, + { + Name: "else", + IsPrimitive: false, + IsObject: true, + Props: 
[]search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + { + Name: "names", + IsPrimitive: true, + }, + }, + }, + { + Name: "objs", + IsPrimitive: false, + IsObject: true, + Props: []search.SelectProperty{{ + Name: "name", + IsPrimitive: true, + }}, + }, + }, + }}, + }, + outSearch: []*pb.SearchResult{ + { + Metadata: &pb.MetadataResult{}, + Properties: &pb.PropertiesResult{ + TargetCollection: objClass, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "something": {Kind: &pb.Value_ObjectValue{ + ObjectValue: &pb.Properties{ + Fields: map[string]*pb.Value{ + "name": {Kind: &pb.Value_TextValue{TextValue: "Bob"}}, + "names": {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_TextValues{TextValues: &pb.TextValues{Values: []string{"Jo", "Jill"}}}, + }}}, + "else": {Kind: &pb.Value_ObjectValue{ + ObjectValue: &pb.Properties{ + Fields: map[string]*pb.Value{ + "name": {Kind: &pb.Value_TextValue{TextValue: "Bill"}}, + "names": {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_TextValues{TextValues: &pb.TextValues{Values: []string{"Jo", "Jill"}}}, + }}}, + }, + }, + }}, + "objs": {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_ObjectValues{ObjectValues: &pb.ObjectValues{Values: []*pb.Properties{{ + Fields: map[string]*pb.Value{ + "name": {Kind: &pb.Value_TextValue{TextValue: "Bill"}}, + }, + }}}}, + }}}, + }, + }, + }}, + }, + }, + }, + }, + }, + }, + { + name: "nested object properties with missing values", + res: []interface{}{ + map[string]interface{}{ + "something": map[string]interface{}{ + "name": "Bob", + "names": []string{"Jo", "Jill"}, + "else": map[string]interface{}{ + "names": []string{"Jo", "Jill"}, + }, + "objs": []interface{}{ + map[string]interface{}{"name": "Bill"}, + }, + }, + }, + }, + searchParams: dto.GetParams{ + ClassName: objClass, + Properties: search.SelectProperties{{ + Name: "something", + IsPrimitive: false, + IsObject: true, + Props: 
[]search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + { + Name: "names", + IsPrimitive: true, + }, + { + Name: "else", + IsPrimitive: false, + IsObject: true, + Props: []search.SelectProperty{ + { + Name: "name", + IsPrimitive: true, + }, + { + Name: "names", + IsPrimitive: true, + }, + }, + }, + { + Name: "objs", + IsPrimitive: false, + IsObject: true, + Props: []search.SelectProperty{{ + Name: "name", + IsPrimitive: true, + }}, + }, + }, + }}, + }, + outSearch: []*pb.SearchResult{ + { + Metadata: &pb.MetadataResult{}, + Properties: &pb.PropertiesResult{ + TargetCollection: objClass, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "something": {Kind: &pb.Value_ObjectValue{ + ObjectValue: &pb.Properties{ + Fields: map[string]*pb.Value{ + "name": {Kind: &pb.Value_TextValue{TextValue: "Bob"}}, + "names": {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_TextValues{TextValues: &pb.TextValues{Values: []string{"Jo", "Jill"}}}, + }}}, + "else": {Kind: &pb.Value_ObjectValue{ + ObjectValue: &pb.Properties{ + Fields: map[string]*pb.Value{ + "names": {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_TextValues{TextValues: &pb.TextValues{Values: []string{"Jo", "Jill"}}}, + }}}, + }, + }, + }}, + "objs": {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: &pb.ListValue_ObjectValues{ObjectValues: &pb.ObjectValues{Values: []*pb.Properties{{ + Fields: map[string]*pb.Value{ + "name": {Kind: &pb.Value_TextValue{TextValue: "Bill"}}, + }, + }}}}, + }}}, + }, + }, + }}, + }, + }, + }, + }, + }, + }, + { + name: "primitive and ref properties with no references", + res: []interface{}{ + map[string]interface{}{ + "word": "word", + }, + map[string]interface{}{ + "word": "other", + }, + }, + searchParams: dto.GetParams{ + ClassName: className, + Properties: search.SelectProperties{ + {Name: "word", IsPrimitive: true}, + {Name: "ref", IsPrimitive: false, Refs: []search.SelectClass{ + { + 
ClassName: refClass1, + RefProperties: search.SelectProperties{{Name: "something", IsPrimitive: true}}, + AdditionalProperties: additional.Properties{Vector: true}, + }, + }}, + }, + }, + outSearch: []*pb.SearchResult{ + { + Metadata: &pb.MetadataResult{}, + Properties: &pb.PropertiesResult{ + TargetCollection: className, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "word": {Kind: &pb.Value_TextValue{TextValue: "word"}}, + }, + }, + RefProps: []*pb.RefPropertiesResult{}, + RefPropsRequested: true, + }, + }, + { + Metadata: &pb.MetadataResult{}, + Properties: &pb.PropertiesResult{ + TargetCollection: className, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "word": {Kind: &pb.Value_TextValue{TextValue: "other"}}, + }, + }, + RefProps: []*pb.RefPropertiesResult{}, + RefPropsRequested: true, + }, + }, + }, + }, + { + name: "primitive and ref properties", + res: []interface{}{ + map[string]interface{}{ + "word": "word", + "ref": []interface{}{ + search.LocalRef{ + Class: refClass1, + Fields: map[string]interface{}{ + "something": "other", + "_additional": map[string]interface{}{"vector": []float32{3}}, + }, + }, + }, + }, + map[string]interface{}{ + "word": "other", + "ref": []interface{}{ + search.LocalRef{ + Class: refClass1, + Fields: map[string]interface{}{ + "something": "thing", + "_additional": map[string]interface{}{"vector": []float32{4}}, + }, + }, + }, + }, + }, + searchParams: dto.GetParams{ + ClassName: className, + Properties: search.SelectProperties{ + {Name: "word", IsPrimitive: true}, + {Name: "ref", IsPrimitive: false, Refs: []search.SelectClass{ + { + ClassName: refClass1, + RefProperties: search.SelectProperties{{Name: "something", IsPrimitive: true}}, + AdditionalProperties: additional.Properties{Vector: true}, + }, + }}, + }, + }, + outSearch: []*pb.SearchResult{ + { + Metadata: &pb.MetadataResult{}, + Properties: &pb.PropertiesResult{ + TargetCollection: className, + NonRefProps: &pb.Properties{ + Fields: 
map[string]*pb.Value{ + "word": {Kind: &pb.Value_TextValue{TextValue: "word"}}, + }, + }, + RefProps: []*pb.RefPropertiesResult{{ + PropName: "ref", + Properties: []*pb.PropertiesResult{ + { + TargetCollection: refClass1, + Metadata: &pb.MetadataResult{Vector: []float32{3}, VectorBytes: byteVector([]float32{3})}, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "something": {Kind: &pb.Value_TextValue{TextValue: "other"}}, + }, + }, + }, + }, + }}, + RefPropsRequested: true, + }, + }, + { + Metadata: &pb.MetadataResult{}, + Properties: &pb.PropertiesResult{ + TargetCollection: className, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "word": {Kind: &pb.Value_TextValue{TextValue: "other"}}, + }, + }, + RefProps: []*pb.RefPropertiesResult{{ + PropName: "ref", + Properties: []*pb.PropertiesResult{ + { + TargetCollection: refClass1, + Metadata: &pb.MetadataResult{Vector: []float32{4}, VectorBytes: byteVector([]float32{4})}, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "something": {Kind: &pb.Value_TextValue{TextValue: "thing"}}, + }, + }, + }, + }, + }}, + RefPropsRequested: true, + }, + }, + }, + }, + { + name: "nested ref properties", + res: []interface{}{ + map[string]interface{}{ + "word": "word", + "ref": []interface{}{ + search.LocalRef{ + Class: refClass1, + Fields: map[string]interface{}{ + "something": "other", + "ref2": []interface{}{ + search.LocalRef{ + Class: refClass2, + Fields: map[string]interface{}{ + "else": "thing", + }, + }, + }, + }, + }, + }, + }, + }, + searchParams: dto.GetParams{ + ClassName: className, + Properties: search.SelectProperties{ + {Name: "word", IsPrimitive: true}, + { + Name: "ref", IsPrimitive: false, Refs: []search.SelectClass{ + { + ClassName: refClass1, + RefProperties: search.SelectProperties{ + {Name: "something", IsPrimitive: true}, + { + Name: "ref2", IsPrimitive: false, Refs: []search.SelectClass{{ + ClassName: refClass2, + RefProperties: search.SelectProperties{{Name: 
"else", IsPrimitive: true}}, + }}, + }, + }, + }, + }, + }, + }, + }, + outSearch: []*pb.SearchResult{ + { + Metadata: &pb.MetadataResult{}, + Properties: &pb.PropertiesResult{ + TargetCollection: className, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "word": {Kind: &pb.Value_TextValue{TextValue: "word"}}, + }, + }, + RefProps: []*pb.RefPropertiesResult{{ + PropName: "ref", + Properties: []*pb.PropertiesResult{ + { + TargetCollection: refClass1, + Metadata: &pb.MetadataResult{}, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "something": {Kind: &pb.Value_TextValue{TextValue: "other"}}, + }, + }, + RefProps: []*pb.RefPropertiesResult{{ + PropName: "ref2", + Properties: []*pb.PropertiesResult{{ + TargetCollection: refClass2, + Metadata: &pb.MetadataResult{}, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "else": {Kind: &pb.Value_TextValue{TextValue: "thing"}}, + }, + }, + RefProps: []*pb.RefPropertiesResult{}, + RefPropsRequested: false, + }}, + }}, + RefPropsRequested: true, + }, + }, + }}, + RefPropsRequested: true, + }, + }, + }, + }, + { + name: "nested ref properties with no references", + res: []interface{}{ + map[string]interface{}{ + "word": "word", + "ref": []interface{}{ + search.LocalRef{ + Class: refClass1, + Fields: map[string]interface{}{ + "something": "other", + }, + }, + }, + }, + }, + searchParams: dto.GetParams{ + ClassName: className, + Properties: search.SelectProperties{ + {Name: "word", IsPrimitive: true}, + { + Name: "ref", IsPrimitive: false, Refs: []search.SelectClass{ + { + ClassName: refClass1, + RefProperties: search.SelectProperties{ + {Name: "something", IsPrimitive: true}, + { + Name: "ref2", IsPrimitive: false, Refs: []search.SelectClass{{ + ClassName: refClass2, + RefProperties: search.SelectProperties{{Name: "else", IsPrimitive: true}}, + }}, + }, + }, + }, + }, + }, + }, + }, + outSearch: []*pb.SearchResult{ + { + Metadata: &pb.MetadataResult{}, + Properties: 
&pb.PropertiesResult{ + TargetCollection: className, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "word": {Kind: &pb.Value_TextValue{TextValue: "word"}}, + }, + }, + RefProps: []*pb.RefPropertiesResult{{ + PropName: "ref", + Properties: []*pb.PropertiesResult{ + { + TargetCollection: refClass1, + Metadata: &pb.MetadataResult{}, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "something": {Kind: &pb.Value_TextValue{TextValue: "other"}}, + }, + }, + RefProps: []*pb.RefPropertiesResult{}, + RefPropsRequested: true, + }, + }, + }}, + RefPropsRequested: true, + }, + }, + }, + }, + { + name: "primitive and ref array properties", + res: []interface{}{ + map[string]interface{}{ + "word": "word", + "ref": []interface{}{ + search.LocalRef{ + Class: refClass1, + Fields: map[string]interface{}{ + "nums": []float64{1, 2, 3}, // ints are encoded as float64 in json + "_additional": map[string]interface{}{"vector": []float32{3}}, + }, + }, + }, + }, + }, + searchParams: dto.GetParams{ + ClassName: className, + Properties: search.SelectProperties{ + {Name: "word", IsPrimitive: true}, + {Name: "ref", IsPrimitive: false, Refs: []search.SelectClass{ + { + ClassName: refClass1, + RefProperties: search.SelectProperties{{Name: "nums", IsPrimitive: true}}, + AdditionalProperties: additional.Properties{Vector: true}, + }, + }}, + }, + }, + outSearch: []*pb.SearchResult{ + { + Metadata: &pb.MetadataResult{}, + Properties: &pb.PropertiesResult{ + TargetCollection: className, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "word": {Kind: &pb.Value_TextValue{TextValue: "word"}}, + }, + }, + RefProps: []*pb.RefPropertiesResult{{ + PropName: "ref", + Properties: []*pb.PropertiesResult{ + { + TargetCollection: refClass1, + Metadata: &pb.MetadataResult{Vector: []float32{3}, VectorBytes: byteVector([]float32{3})}, + NonRefProps: &pb.Properties{ + Fields: map[string]*pb.Value{ + "nums": {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ + Kind: 
&pb.ListValue_IntValues{IntValues: &pb.IntValues{Values: byteops.IntsToByteVector([]float64{1, 2, 3})}}, + }}}, + }, + }, + }, + }, + }}, + RefPropsRequested: true, + }, + }, + }, + }, + { + name: "generative single only with ID", + res: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "id": UUID1, // different place for generative + "generate": map[string]interface{}{ + "singleResult": &refClass1, // just use some string + }, + }, + }, + map[string]interface{}{ + "_additional": map[string]interface{}{ + "id": UUID2, + "generate": map[string]interface{}{ + "singleResult": &refClass2, // just use some string + }, + }, + }, + }, + searchParams: dto.GetParams{AdditionalProperties: additional.Properties{ + ID: true, + ModuleParams: map[string]interface{}{ + "generate": &generate.Params{ + Prompt: &refClass1, + }, + }, + }}, + outSearch: []*pb.SearchResult{ + { + Metadata: &pb.MetadataResult{ + Id: string(UUID1), + Generative: refClass1, + GenerativePresent: true, + IdAsBytes: idByte(UUID1.String()), + }, + Properties: &pb.PropertiesResult{}, + }, + { + Metadata: &pb.MetadataResult{ + Id: string(UUID2), + Generative: refClass2, + GenerativePresent: true, + IdAsBytes: idByte(UUID2.String()), + }, + Properties: &pb.PropertiesResult{}, + }, + }, + }, + { + name: "generative single only without ID", + res: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ // different place for generative + "generate": map[string]interface{}{ + "singleResult": &refClass1, // just use some string + }, + }, + }, + map[string]interface{}{ + "_additional": map[string]interface{}{ + "generate": map[string]interface{}{ + "singleResult": &refClass2, // just use some string + }, + }, + }, + }, + searchParams: dto.GetParams{AdditionalProperties: additional.Properties{ + ModuleParams: map[string]interface{}{ + "generate": &generate.Params{ + Prompt: &refClass1, + }, + }, + }}, + outSearch: []*pb.SearchResult{ + { + Metadata: 
&pb.MetadataResult{ + Generative: refClass1, + GenerativePresent: true, + }, + Properties: &pb.PropertiesResult{}, + }, + { + Metadata: &pb.MetadataResult{ + Generative: refClass2, + GenerativePresent: true, + }, + Properties: &pb.PropertiesResult{}, + }, + }, + }, + { + name: "generative with error", + res: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ // different place for generative + "generate": map[string]interface{}{ + "error": errors.New("error"), + }, + }, + }, + }, + searchParams: dto.GetParams{AdditionalProperties: additional.Properties{ + ModuleParams: map[string]interface{}{ + "generate": &generate.Params{ + Prompt: &refClass1, + }, + }, + }}, + hasError: true, + }, + { + name: "generative group only", + res: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "id": UUID1, // different place for generative + "generate": map[string]interface{}{ + "groupedResult": &refClass1, // just use some string + }, + }, + }, + map[string]interface{}{ + "_additional": map[string]interface{}{ + "id": UUID2, + "generate": map[string]interface{}{}, + }, + }, + }, + searchParams: dto.GetParams{AdditionalProperties: additional.Properties{ + ID: true, + ModuleParams: map[string]interface{}{ + "generate": &generate.Params{ + Task: &refClass1, + }, + }, + }}, + outSearch: []*pb.SearchResult{ + { + Metadata: &pb.MetadataResult{ + Id: string(UUID1), + IdAsBytes: idByte(UUID1.String()), + }, + Properties: &pb.PropertiesResult{}, + }, + { + Metadata: &pb.MetadataResult{ + Id: string(UUID2), + IdAsBytes: idByte(UUID2.String()), + }, + Properties: &pb.PropertiesResult{}, + }, + }, + outGenerative: refClass1, + }, + { + name: "group by", + res: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "id": UUID2, + "group": &additional.Group{ + ID: 1, + MinDistance: 0.1, + MaxDistance: 0.2, + Count: 3, + GroupedBy: &additional.GroupedBy{Value: "GroupByValue1", Path: 
[]string{"some_prop"}}, + Hits: []map[string]interface{}{ + { + "word": "word", + "ref": []interface{}{ + search.LocalRef{ + Class: refClass1, + Fields: map[string]interface{}{ + "something": "other", + "_additional": map[string]interface{}{"vector": []float32{2}, "id": UUID1}, + }, + }, + }, + "_additional": &additional.GroupHitAdditional{Vector: []float32{3}, ID: UUID2}, + }, + { + "word": "other", + "_additional": &additional.GroupHitAdditional{Vector: []float32{4}, ID: UUID1}, + }, + }, + }, + }, + }, + }, + searchParams: dto.GetParams{AdditionalProperties: additional.Properties{ + ID: true, + Vector: true, + }, GroupBy: &searchparams.GroupBy{Groups: 3, ObjectsPerGroup: 4, Property: "name"}}, + outGroup: []*pb.GroupByResult{{ + Name: "GroupByValue1", + MaxDistance: 0.2, + MinDistance: 0.1, + NumberOfObjects: 3, + Objects: []*pb.SearchResult{ + { + Properties: &pb.PropertiesResult{ + NonRefProps: &pb.Properties{Fields: map[string]*pb.Value{"word": {Kind: &pb.Value_TextValue{TextValue: "word"}}}}, + RefProps: []*pb.RefPropertiesResult{ + { + PropName: "other", + Properties: []*pb.PropertiesResult{ + { + NonRefProps: &pb.Properties{Fields: map[string]*pb.Value{"something": {Kind: &pb.Value_TextValue{TextValue: "other"}}}}, + Metadata: &pb.MetadataResult{Vector: []float32{2}, Id: UUID1.String()}, + }, + }, + }, + }, + RefPropsRequested: true, + }, + Metadata: &pb.MetadataResult{ + Id: string(UUID2), + Vector: []float32{3}, + }, + }, + { + Properties: &pb.PropertiesResult{ + NonRefProps: &pb.Properties{Fields: map[string]*pb.Value{"word": {Kind: &pb.Value_TextValue{TextValue: "other"}}}}, + }, + Metadata: &pb.MetadataResult{ + Id: string(UUID1), + Vector: []float32{4}, + }, + }, + }, + }}, + }, + { + name: "rerank only", + res: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "id": UUID1, + "rerank": []*addModels.RankResult{{Score: &someFloat64}}, + }, + }, + }, + searchParams: dto.GetParams{AdditionalProperties: 
additional.Properties{ + ID: true, + ModuleParams: map[string]interface{}{"rerank": "must be present for extraction"}, + }}, + outSearch: []*pb.SearchResult{ + { + Metadata: &pb.MetadataResult{ + Id: string(UUID1), + RerankScore: someFloat64, + RerankScorePresent: true, + IdAsBytes: idByte(UUID1.String()), + }, + Properties: &pb.PropertiesResult{}, + }, + }, + }, + { + name: "generate, group by, & rerank", + res: []interface{}{ + map[string]interface{}{ + "_additional": map[string]interface{}{ + "id": UUID2, + "generate": map[string]interface{}{ + "singleResult": &refClass1, + "groupedResult": &refClass2, + }, + "rerank": []*addModels.RankResult{{Score: &someFloat64}}, + "group": &additional.Group{ + ID: 1, + MinDistance: 0.1, + MaxDistance: 0.2, + Count: 3, + GroupedBy: &additional.GroupedBy{Value: "GroupByValue1", Path: []string{"some_prop"}}, + Hits: []map[string]interface{}{ + { + "word": "word", + "ref": []interface{}{ + search.LocalRef{ + Class: refClass1, + Fields: map[string]interface{}{ + "something": "other", + "_additional": map[string]interface{}{"vector": []float32{2}, "id": UUID1}, + }, + }, + }, + "_additional": &additional.GroupHitAdditional{Vector: []float32{3}, ID: UUID2}, + }, + { + "word": "other", + "_additional": &additional.GroupHitAdditional{Vector: []float32{4}, ID: UUID1}, + }, + }, + }, + }, + }, + }, + searchParams: dto.GetParams{ + AdditionalProperties: additional.Properties{ + ID: true, + Vector: true, + ModuleParams: map[string]interface{}{ + "generate": &generate.Params{ + Prompt: &refClass1, + Task: &refClass2, + }, + "rerank": "must be present for extraction", + }, + }, + GroupBy: &searchparams.GroupBy{Groups: 3, ObjectsPerGroup: 4, Property: "name"}, + }, + outGroup: []*pb.GroupByResult{{ + Name: "GroupByValue1", + MaxDistance: 0.2, + MinDistance: 0.1, + NumberOfObjects: 3, + Generative: &pb.GenerativeReply{Result: refClass1}, + Rerank: &pb.RerankReply{Score: someFloat64}, + Objects: []*pb.SearchResult{ + { + Properties: 
&pb.PropertiesResult{ + NonRefProps: &pb.Properties{Fields: map[string]*pb.Value{"word": {Kind: &pb.Value_TextValue{TextValue: "word"}}}}, + RefProps: []*pb.RefPropertiesResult{ + { + PropName: "other", + Properties: []*pb.PropertiesResult{ + { + NonRefProps: &pb.Properties{Fields: map[string]*pb.Value{"something": {Kind: &pb.Value_TextValue{TextValue: "other"}}}}, + Metadata: &pb.MetadataResult{Vector: []float32{2}, Id: UUID1.String()}, + }, + }, + }, + }, + RefPropsRequested: true, + }, + Metadata: &pb.MetadataResult{ + Id: string(UUID2), + Vector: []float32{3}, + }, + }, + { + Properties: &pb.PropertiesResult{ + NonRefProps: &pb.Properties{Fields: map[string]*pb.Value{"word": {Kind: &pb.Value_TextValue{TextValue: "other"}}}}, + }, + Metadata: &pb.MetadataResult{ + Id: string(UUID1), + Vector: []float32{4}, + }, + }, + }, + }}, + outGenerative: refClass2, + }, + } + for _, tt := range tests { + replier := NewReplier(false, fakeGenerativeParams{}, nil) + t.Run(tt.name, func(t *testing.T) { + out, err := replier.Search(tt.res, time.Now(), tt.searchParams, scheme) + if tt.hasError { + require.NotNil(t, err) + } else { + require.Nil(t, err) + for i := range tt.outSearch { + require.Equal(t, tt.outSearch[i].Properties.String(), out.Results[i].Properties.String()) + // order of the vectors is not guaranteed, doesn't matter for results + vectorsOut := out.Results[i].Metadata.Vectors + vectorsExpected := tt.outSearch[i].Metadata.Vectors + require.ElementsMatch(t, vectorsOut, vectorsExpected) + + out.Results[i].Metadata.Vectors = nil + tt.outSearch[i].Metadata.Vectors = nil + require.Equal(t, tt.outSearch[i].Metadata.String(), out.Results[i].Metadata.String()) + } + require.Equal(t, tt.outGenerative, *out.GenerativeGroupedResult) + } + }) + } +} + +type fakeGenerativeParams struct{} + +func (f fakeGenerativeParams) ProviderName() string { + return "" +} + +func (f fakeGenerativeParams) ReturnMetadataForSingle() bool { + return false +} + +func (f fakeGenerativeParams) 
ReturnMetadataForGrouped() bool { + return false +} + +func (f fakeGenerativeParams) ReturnDebugForSingle() bool { + return false +} + +func (f fakeGenerativeParams) ReturnDebugForGrouped() bool { + return false +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/service.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/service.go new file mode 100644 index 0000000000000000000000000000000000000000..6f7478469f9f25933bcb10cc67a42672acca5475 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/service.go @@ -0,0 +1,429 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "context" + "fmt" + "runtime" + "time" + + "github.com/google/uuid" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/handlers/grpc/v1/batch" + restCtx "github.com/weaviate/weaviate/adapters/handlers/rest/context" + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/weaviate/weaviate/usecases/config" + + "github.com/weaviate/weaviate/usecases/objects" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/schema" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" + "github.com/weaviate/weaviate/usecases/auth/authentication/composer" + schemaManager "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/traverser" +) + +var _NUMCPU = runtime.GOMAXPROCS(0) + +type Service struct { + pb.UnimplementedWeaviateServer + traverser *traverser.Traverser + authComposer composer.TokenFunc + 
allowAnonymousAccess bool + schemaManager *schemaManager.Manager + batchManager *objects.BatchManager + config *config.Config + authorizer authorization.Authorizer + logger logrus.FieldLogger + + authenticator *authHandler + batchHandler *batch.Handler + batchQueuesHandler *batch.QueuesHandler +} + +func NewService(traverser *traverser.Traverser, authComposer composer.TokenFunc, + allowAnonymousAccess bool, schemaManager *schemaManager.Manager, + batchManager *objects.BatchManager, config *config.Config, authorization authorization.Authorizer, + logger logrus.FieldLogger, shutdown *batch.Shutdown, +) *Service { + authenticator := NewAuthHandler(allowAnonymousAccess, authComposer) + internalQueue := batch.NewBatchInternalQueue() + batchWriteQueues := batch.NewBatchWriteQueues() + batchReadQueues := batch.NewBatchReadQueues() + + batchHandler := batch.NewHandler(authorization, batchManager, logger, authenticator, schemaManager) + batchQueuesHandler := batch.NewQueuesHandler(shutdown.HandlersCtx, shutdown.SendWg, shutdown.StreamWg, shutdown.ShutdownFinished, batchWriteQueues, batchReadQueues, logger) + + numWorkers := _NUMCPU + batch.StartBatchWorkers(shutdown.WorkersCtx, shutdown.WorkersWg, numWorkers, internalQueue, batchReadQueues, batchHandler, logger) + batch.StartScheduler(shutdown.SchedulerCtx, shutdown.SchedulerWg, batchWriteQueues, internalQueue, logger) + + return &Service{ + traverser: traverser, + authComposer: authComposer, + allowAnonymousAccess: allowAnonymousAccess, + schemaManager: schemaManager, + batchManager: batchManager, + config: config, + logger: logger, + authorizer: authorization, + authenticator: authenticator, + batchHandler: batchHandler, + batchQueuesHandler: batchQueuesHandler, + } +} + +func (s *Service) Aggregate(ctx context.Context, req *pb.AggregateRequest) (*pb.AggregateReply, error) { + var result *pb.AggregateReply + var errInner error + + if class := s.schemaManager.ResolveAlias(req.Collection); class != "" { + req.Collection = 
class + } + + if err := enterrors.GoWrapperWithBlock(func() { + result, errInner = s.aggregate(ctx, req) + }, s.logger); err != nil { + return nil, err + } + + return result, errInner +} + +func (s *Service) aggregate(ctx context.Context, req *pb.AggregateRequest) (*pb.AggregateReply, error) { + before := time.Now() + + principal, err := s.authenticator.PrincipalFromContext(ctx) + if err != nil { + return nil, fmt.Errorf("extract auth: %w", err) + } + ctx = restCtx.AddPrincipalToContext(ctx, principal) + + parser := NewAggregateParser( + s.classGetterWithAuthzFunc(ctx, principal, req.Tenant), + ) + + params, err := parser.Aggregate(req) + if err != nil { + return nil, fmt.Errorf("parse params: %w", err) + } + + res, err := s.traverser.Aggregate(restCtx.AddPrincipalToContext(ctx, principal), principal, params) + if err != nil { + return nil, fmt.Errorf("aggregate: %w", err) + } + + replier := NewAggregateReplier( + s.classGetterWithAuthzFunc(ctx, principal, req.Tenant), + params, + ) + reply, err := replier.Aggregate(res, params.GroupBy != nil) + if err != nil { + return nil, fmt.Errorf("prepare reply: %w", err) + } + + reply.Took = float32(time.Since(before).Seconds()) + return reply, nil +} + +func (s *Service) TenantsGet(ctx context.Context, req *pb.TenantsGetRequest) (*pb.TenantsGetReply, error) { + before := time.Now() + + if class := s.schemaManager.ResolveAlias(req.Collection); class != "" { + req.Collection = class + } + + principal, err := s.authenticator.PrincipalFromContext(ctx) + if err != nil { + return nil, fmt.Errorf("extract auth: %w", err) + } + ctx = restCtx.AddPrincipalToContext(ctx, principal) + + retTenants, err := s.tenantsGet(ctx, principal, req) + if err != nil { + return nil, fmt.Errorf("get tenants: %w", err) + } + + result := &pb.TenantsGetReply{ + Took: float32(time.Since(before).Seconds()), + Tenants: retTenants, + } + return result, nil +} + +func (s *Service) BatchDelete(ctx context.Context, req *pb.BatchDeleteRequest) 
(*pb.BatchDeleteReply, error) { + var result *pb.BatchDeleteReply + var errInner error + + if err := enterrors.GoWrapperWithBlock(func() { + result, errInner = s.batchDelete(ctx, req) + }, s.logger); err != nil { + return nil, err + } + + return result, errInner +} + +func (s *Service) batchDelete(ctx context.Context, req *pb.BatchDeleteRequest) (*pb.BatchDeleteReply, error) { + before := time.Now() + principal, err := s.authenticator.PrincipalFromContext(ctx) + if err != nil { + return nil, fmt.Errorf("extract auth: %w", err) + } + ctx = restCtx.AddPrincipalToContext(ctx, principal) + + replicationProperties := extractReplicationProperties(req.ConsistencyLevel) + + tenant := "" + if req.Tenant != nil { + tenant = *req.Tenant + } + + if err := s.authorizer.Authorize(ctx, principal, authorization.DELETE, authorization.ShardsData(req.Collection, tenant)...); err != nil { + return nil, err + } + + params, err := batchDeleteParamsFromProto(req, s.classGetterWithAuthzFunc(ctx, principal, tenant)) + if err != nil { + return nil, fmt.Errorf("batch delete params: %w", err) + } + + response, err := s.batchManager.DeleteObjectsFromGRPCAfterAuth(ctx, principal, params, replicationProperties, tenant) + if err != nil { + return nil, fmt.Errorf("batch delete: %w", err) + } + + result, err := batchDeleteReplyFromObjects(response, req.Verbose) + if err != nil { + return nil, fmt.Errorf("batch delete reply: %w", err) + } + result.Took = float32(time.Since(before).Seconds()) + + return result, nil +} + +// BatchObjects handles end-to-end batch object creation. It accepts N objects in the request and forwards them to the internal +// batch objects logic. It blocks until a response is retrieved from the internal APIs whereupon it returns the response to the client. +// +// It is intended to be used in isolation and therefore is not dependent on BatchSend/BatchStream. 
+func (s *Service) BatchObjects(ctx context.Context, req *pb.BatchObjectsRequest) (*pb.BatchObjectsReply, error) { + var result *pb.BatchObjectsReply + var errInner error + + if err := enterrors.GoWrapperWithBlock(func() { + result, errInner = s.batchHandler.BatchObjects(ctx, req) + }, s.logger); err != nil { + return nil, err + } + + return result, errInner +} + +// BatchObjects handles end-to-end batch reference creation. It accepts N references in the request and forwards them to the internal +// batch references logic. It blocks until a response is retrieved from the internal APIs whereupon it returns the response to the client. +// +// It is intended to be used in isolation and therefore is not dependent on BatchSend/BatchStream. +func (s *Service) BatchReferences(ctx context.Context, req *pb.BatchReferencesRequest) (*pb.BatchReferencesReply, error) { + var result *pb.BatchReferencesReply + var errInner error + + if err := enterrors.GoWrapperWithBlock(func() { + result, errInner = s.batchHandler.BatchReferences(ctx, req) + }, s.logger); err != nil { + return nil, err + } + + return result, errInner +} + +// BatchSend is similar in concept to the BatchObjects and BatchReferences methods in that it accepts N objects or references +// in a single gRPC invocation call. However, it differs in that it does not wait for the objects/references to be fully +// inserted into the database before returning a response. Instead, it simply adds the objects/references to the internal +// queueing system and then returns immediately. +// +// In addition, in order to assign the objects/references to the correct internal queue, it requires the stream ID to be +// specified in the request. This stream ID is only available to clients once they have opened a stream using the BatchStream method. +// +// This method therefore does not work in isolation, it has to be used in conjunction with other methods. 
+// It should be used as part of the automatic batching process provided in clients. +func (s *Service) BatchSend(ctx context.Context, req *pb.BatchSendRequest) (*pb.BatchSendReply, error) { + var result *pb.BatchSendReply + var errInner error + + if err := enterrors.GoWrapperWithBlock(func() { + result, errInner = s.batchQueuesHandler.Send(ctx, req) + }, s.logger); err != nil { + return nil, err + } + + return result, errInner +} + +// BatchStream defines a UnaryStream gRPC method whereby the server streams messages back to the client in order to +// asynchronously report on any errors that have occurred during the automatic batching process. +// +// The initial request contains the consistency level that is desired when batch inserting in this processing context. +// +// The first message send to the client contains the stream ID for the overall stream. All subsequent messages, besides the final one, +// correspond to errors emitted by the internal batching APIs, e.g. validation errors of the objects/references. The final +// message sent to the client is a confirmation that the batch processing has completed successfully and that the client can hangup. +// +// In addition, there is also the shutdown logic that is sent via the stream from the server to the client. In the event that +// the node handling the batch processing must be shutdown, e.g. there's a rolling restart occurring on the cluster, then the +// stream will notify the client that it is shutting down allowing for all the internal queues to be drained and waited on. Once the final +// shutdown message is sent and received by the client, the client can then safely hangup and reconnect to the cluster in an effort to +// reconnect to a different available node. At that point, the batching process resumes on the other node as if nothing happened. +// +// It should be used as part of the automatic batching process provided in clients. 
+func (s *Service) BatchStream(req *pb.BatchStreamRequest, stream pb.Weaviate_BatchStreamServer) error { + id, err := uuid.NewRandom() + if err != nil { + return fmt.Errorf("stream ID generation failed: %w", err) + } + streamId := id.String() + s.batchQueuesHandler.Setup(streamId, req) + defer s.batchQueuesHandler.Teardown(streamId) + return s.batchQueuesHandler.Stream(stream.Context(), streamId, stream) +} + +func (s *Service) Search(ctx context.Context, req *pb.SearchRequest) (*pb.SearchReply, error) { + var result *pb.SearchReply + var errInner error + + if class := s.schemaManager.ResolveAlias(req.Collection); class != "" { + req.Collection = class + } + + if err := enterrors.GoWrapperWithBlock(func() { + result, errInner = s.search(ctx, req) + }, s.logger); err != nil { + return nil, err + } + + return result, errInner +} + +func (s *Service) search(ctx context.Context, req *pb.SearchRequest) (*pb.SearchReply, error) { + before := time.Now() + + principal, err := s.authenticator.PrincipalFromContext(ctx) + if err != nil { + return nil, fmt.Errorf("extract auth: %w", err) + } + ctx = restCtx.AddPrincipalToContext(ctx, principal) + + parser := NewParser( + req.Uses_127Api, + s.classGetterWithAuthzFunc(ctx, principal, req.Tenant), + s.aliasGetter(), + ) + replier := NewReplier( + req.Uses_127Api, + parser.generative, + s.logger, + ) + + searchParams, err := parser.Search(req, s.config) + if err != nil { + return nil, err + } + + if err := s.validateClassAndProperty(searchParams); err != nil { + return nil, err + } + + res, err := s.traverser.GetClass(restCtx.AddPrincipalToContext(ctx, principal), principal, searchParams) + if err != nil { + return nil, err + } + + scheme := s.schemaManager.GetSchemaSkipAuth() + return replier.Search(res, before, searchParams, scheme) +} + +func (s *Service) validateClassAndProperty(searchParams dto.GetParams) error { + class := s.schemaManager.ReadOnlyClass(searchParams.ClassName) + if class == nil { + return fmt.Errorf("could 
not find class %s in schema", searchParams.ClassName) + } + + for _, prop := range searchParams.Properties { + _, err := schema.GetPropertyByName(class, prop.Name) + if err != nil { + return err + } + } + + return nil +} + +type classGetterWithAuthzFunc func(string) (*models.Class, error) + +func (s *Service) classGetterWithAuthzFunc(ctx context.Context, principal *models.Principal, tenant string) classGetterWithAuthzFunc { + authorizedCollections := map[string]*models.Class{} + + return func(name string) (*models.Class, error) { + classTenantName := name + "#" + tenant + class, ok := authorizedCollections[classTenantName] + if !ok { + resources := authorization.CollectionsData(name) + if tenant != "" { + resources = authorization.ShardsData(name, tenant) + } + // having data access is enough for querying as we dont leak any info from the collection config that you cannot get via data access anyways + if err := s.authorizer.Authorize(ctx, principal, authorization.READ, resources...); err != nil { + return nil, err + } + class = s.schemaManager.ReadOnlyClass(name) + authorizedCollections[name] = class + } + if class == nil { + return nil, fmt.Errorf("could not find class %s in schema", name) + } + return class, nil + } +} + +type aliasGetter func(string) string + +func (s *Service) aliasGetter() aliasGetter { + return func(name string) string { + if cls := s.schemaManager.ResolveAlias(name); cls != "" { + return name // name is an alias + } + return "" + } +} + +func extractReplicationProperties(level *pb.ConsistencyLevel) *additional.ReplicationProperties { + if level == nil { + return nil + } + + switch *level { + case pb.ConsistencyLevel_CONSISTENCY_LEVEL_ONE: + return &additional.ReplicationProperties{ConsistencyLevel: "ONE"} + case pb.ConsistencyLevel_CONSISTENCY_LEVEL_QUORUM: + return &additional.ReplicationProperties{ConsistencyLevel: "QUORUM"} + case pb.ConsistencyLevel_CONSISTENCY_LEVEL_ALL: + return &additional.ReplicationProperties{ConsistencyLevel: 
"ALL"} + default: + return nil + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/tenants.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/tenants.go new file mode 100644 index 0000000000000000000000000000000000000000..40bc534cd41cf046d68474c0cd00bc9cb96ac98c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/tenants.go @@ -0,0 +1,71 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "context" + "fmt" + + "github.com/weaviate/weaviate/entities/models" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +func (s *Service) tenantsGet(ctx context.Context, principal *models.Principal, req *pb.TenantsGetRequest) ([]*pb.Tenant, error) { + if req.Collection == "" { + return nil, fmt.Errorf("missing collection %s", req.Collection) + } + + var err error + var tenants []*models.Tenant + if req.Params == nil { + tenants, err = s.schemaManager.GetConsistentTenants(ctx, principal, req.Collection, true, []string{}) + if err != nil { + return nil, err + } + } else { + switch req.GetParams().(type) { + case *pb.TenantsGetRequest_Names: + requestedNames := req.GetNames().GetValues() + if len(requestedNames) == 0 { + return nil, fmt.Errorf("must specify at least one tenant name") + } + tenants, err = s.schemaManager.GetConsistentTenants(ctx, principal, req.Collection, true, requestedNames) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown tenant parameter %v", req.Params) + } + } + + retTenants := make([]*pb.Tenant, len(tenants)) + for i, tenant := range tenants { + tenantGRPC, err := tenantToGRPC(tenant) + if err != nil { + return nil, err + } + retTenants[i] = tenantGRPC + } + return 
retTenants, nil +} + +func tenantToGRPC(tenant *models.Tenant) (*pb.Tenant, error) { + status, ok := pb.TenantActivityStatus_value[fmt.Sprintf("TENANT_ACTIVITY_STATUS_%s", tenant.ActivityStatus)] + if !ok { + return nil, fmt.Errorf("unknown tenant activity status %s", tenant.ActivityStatus) + } + + return &pb.Tenant{ + Name: tenant.Name, + ActivityStatus: pb.TenantActivityStatus(status), + }, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/tenants_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/tenants_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2cdd83e31cecbb9a6b967ca61c3953fd3a23eeec --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/grpc/v1/tenants_test.go @@ -0,0 +1,60 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/cluster/types" + "github.com/weaviate/weaviate/entities/models" + pb "github.com/weaviate/weaviate/grpc/generated/protocol/v1" +) + +func TestGRPCTenants(t *testing.T) { + tests := []struct { + activityStatusGRPC pb.TenantActivityStatus + activityStatus string + }{ + { + activityStatusGRPC: pb.TenantActivityStatus_TENANT_ACTIVITY_STATUS_HOT, + activityStatus: models.TenantActivityStatusHOT, + }, + { + activityStatusGRPC: pb.TenantActivityStatus_TENANT_ACTIVITY_STATUS_COLD, + activityStatus: models.TenantActivityStatusCOLD, + }, + { + activityStatusGRPC: pb.TenantActivityStatus_TENANT_ACTIVITY_STATUS_FROZEN, + activityStatus: models.TenantActivityStatusFROZEN, + }, + { + activityStatusGRPC: pb.TenantActivityStatus_TENANT_ACTIVITY_STATUS_FREEZING, + activityStatus: types.TenantActivityStatusFREEZING, + }, + { + activityStatusGRPC: pb.TenantActivityStatus_TENANT_ACTIVITY_STATUS_UNFREEZING, + activityStatus: types.TenantActivityStatusUNFREEZING, + }, + } + for _, tt := range tests { + t.Run(tt.activityStatus, func(t *testing.T) { + tenantGRPC, err := tenantToGRPC(&models.Tenant{ + Name: "TestTenant", + ActivityStatus: tt.activityStatus, + }) + require.Nil(t, err) + require.Equal(t, "TestTenant", tenantGRPC.GetName()) + require.Equal(t, tt.activityStatusGRPC, tenantGRPC.GetActivityStatus()) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz.go new file mode 100644 index 0000000000000000000000000000000000000000..59b15b3461998b9718d9a59c46c743aa67acb6d6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz.go @@ -0,0 +1,1237 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| 
|\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "context" + "errors" + "fmt" + "regexp" + "slices" + "sort" + "strings" + + "github.com/weaviate/weaviate/usecases/auth/authentication" + + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + + "github.com/go-openapi/runtime/middleware" + "github.com/sirupsen/logrus" + + cerrors "github.com/weaviate/weaviate/adapters/handlers/rest/errors" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/authz" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/conv" + "github.com/weaviate/weaviate/usecases/auth/authorization/filter" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/monitoring" + schemaUC "github.com/weaviate/weaviate/usecases/schema" +) + +const ( + roleNameMaxLength = 64 + roleNameRegexCore = `[A-Za-z][-_0-9A-Za-z]{0,254}` +) + +var validateRoleNameRegex = regexp.MustCompile(`^` + roleNameRegexCore + `$`) + +type authZHandlers struct { + authorizer authorization.Authorizer + controller ControllerAndGetUsers + schemaReader schemaUC.SchemaGetter + logger logrus.FieldLogger + metrics *monitoring.PrometheusMetrics + apiKeysConfigs config.StaticAPIKey + oidcConfigs config.OIDC + rbacconfig rbacconf.Config +} + +type ControllerAndGetUsers interface { + authorization.Controller + GetUsers(userIds ...string) (map[string]*apikey.User, error) +} + +func SetupHandlers(api *operations.WeaviateAPI, controller ControllerAndGetUsers, schemaReader schemaUC.SchemaGetter, + apiKeysConfigs config.StaticAPIKey, oidcConfigs config.OIDC, rconfig 
rbacconf.Config, metrics *monitoring.PrometheusMetrics, authorizer authorization.Authorizer, logger logrus.FieldLogger, +) { + h := &authZHandlers{ + controller: controller, + authorizer: authorizer, + schemaReader: schemaReader, + rbacconfig: rconfig, + oidcConfigs: oidcConfigs, + apiKeysConfigs: apiKeysConfigs, + logger: logger, + metrics: metrics, + } + + // rbac role handlers + api.AuthzCreateRoleHandler = authz.CreateRoleHandlerFunc(h.createRole) + api.AuthzGetRolesHandler = authz.GetRolesHandlerFunc(h.getRoles) + api.AuthzGetRoleHandler = authz.GetRoleHandlerFunc(h.getRole) + api.AuthzDeleteRoleHandler = authz.DeleteRoleHandlerFunc(h.deleteRole) + api.AuthzAddPermissionsHandler = authz.AddPermissionsHandlerFunc(h.addPermissions) + api.AuthzRemovePermissionsHandler = authz.RemovePermissionsHandlerFunc(h.removePermissions) + api.AuthzHasPermissionHandler = authz.HasPermissionHandlerFunc(h.hasPermission) + + // rbac users handlers + api.AuthzGetRolesForUserHandler = authz.GetRolesForUserHandlerFunc(h.getRolesForUser) + api.AuthzGetUsersForRoleHandler = authz.GetUsersForRoleHandlerFunc(h.getUsersForRole) + api.AuthzGetUsersForRoleDeprecatedHandler = authz.GetUsersForRoleDeprecatedHandlerFunc(h.getUsersForRoleDeprecated) + api.AuthzAssignRoleToUserHandler = authz.AssignRoleToUserHandlerFunc(h.assignRoleToUser) + api.AuthzRevokeRoleFromUserHandler = authz.RevokeRoleFromUserHandlerFunc(h.revokeRoleFromUser) + api.AuthzGetRolesForUserDeprecatedHandler = authz.GetRolesForUserDeprecatedHandlerFunc(h.getRolesForUserDeprecated) + + // rbac group handlers + api.AuthzAssignRoleToGroupHandler = authz.AssignRoleToGroupHandlerFunc(h.assignRoleToGroup) + api.AuthzRevokeRoleFromGroupHandler = authz.RevokeRoleFromGroupHandlerFunc(h.revokeRoleFromGroup) + api.AuthzGetRolesForGroupHandler = authz.GetRolesForGroupHandlerFunc(h.getRolesForGroup) + api.AuthzGetGroupsHandler = authz.GetGroupsHandlerFunc(h.getGroups) + api.AuthzGetGroupsForRoleHandler = 
authz.GetGroupsForRoleHandlerFunc(h.getGroupsForRole) +} + +func (h *authZHandlers) authorizeRoleScopes(ctx context.Context, principal *models.Principal, originalVerb string, policies []authorization.Policy, roleName string) error { + // The error will be accumulated with each check. We first verify if the user has the necessary permissions. + // If not, we check for matching permissions and authorize each permission being added or removed from the role. + // NOTE: logic is inverted for error checks if err == nil + var err error + if err = h.authorizer.Authorize(ctx, principal, authorization.VerbWithScope(originalVerb, authorization.ROLE_SCOPE_ALL), authorization.Roles(roleName)...); err == nil { + return nil + } + + // Check if user can manage roles with matching permissions + if err = h.authorizer.Authorize(ctx, principal, authorization.VerbWithScope(originalVerb, authorization.ROLE_SCOPE_MATCH), authorization.Roles(roleName)...); err == nil { + // Verify user has all permissions they're trying to grant + var errs error + for _, policy := range policies { + if err := h.authorizer.AuthorizeSilent(ctx, principal, policy.Verb, policy.Resource); err != nil { + errs = errors.Join(errs, err) + } + } + return errs + } + + return fmt.Errorf("can only create roles with less or equal permissions as the current user: %w", err) +} + +func (h *authZHandlers) createRole(params authz.CreateRoleParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if *params.Body.Name == "" { + return authz.NewCreateRoleBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("role name is required"))) + } + + if err := validateRoleName(*params.Body.Name); err != nil { + return authz.NewCreateRoleBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("role name is invalid"))) + } + + if err := validatePermissions(true, params.Body.Permissions...); err != nil { + return 
authz.NewCreateRoleUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("role permissions are invalid: %w", err))) + } + + policies, err := conv.RolesToPolicies(params.Body) + if err != nil { + return authz.NewCreateRoleBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("invalid role: %w", err))) + } + + if slices.Contains(authorization.BuiltInRoles, *params.Body.Name) { + return authz.NewCreateRoleBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("you cannot create role with the same name as built-in role %s", *params.Body.Name))) + } + + if err := h.authorizeRoleScopes(ctx, principal, authorization.CREATE, policies[*params.Body.Name], *params.Body.Name); err != nil { + return authz.NewCreateRoleForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + roles, err := h.controller.GetRoles(*params.Body.Name) + if err != nil { + return authz.NewCreateRoleInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetRoles: %w", err))) + } + + if len(roles) > 0 { + return authz.NewCreateRoleConflict().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("role with name %s already exists", *params.Body.Name))) + } + + if err = h.controller.CreateRolesPermissions(policies); err != nil { + return authz.NewCreateRoleInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + h.logger.WithFields(logrus.Fields{ + "action": "create_role", + "component": authorization.ComponentName, + "user": principal.Username, + "roleName": params.Body.Name, + "permissions": params.Body.Permissions, + }).Info("role created") + + return authz.NewCreateRoleCreated() +} + +func (h *authZHandlers) addPermissions(params authz.AddPermissionsParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if slices.Contains(authorization.BuiltInRoles, params.ID) { + return 
authz.NewAddPermissionsBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("you can not update built-in role %s", params.ID))) + } + + if err := validatePermissions(false, params.Body.Permissions...); err != nil { + return authz.NewAddPermissionsBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("invalid permissions %w", err))) + } + + policies, err := conv.RolesToPolicies(&models.Role{ + Name: ¶ms.ID, + Permissions: params.Body.Permissions, + }) + if err != nil { + return authz.NewAddPermissionsBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("invalid permissions %w", err))) + } + + if err := h.authorizeRoleScopes(ctx, principal, authorization.UPDATE, policies[params.ID], params.ID); err != nil { + return authz.NewAddPermissionsForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + roles, err := h.controller.GetRoles(params.ID) + if err != nil { + return authz.NewAddPermissionsInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetRoles: %w", err))) + } + + if len(roles) == 0 { // i.e. 
new role + return authz.NewAddPermissionsNotFound() + } + + if err := h.controller.UpdateRolesPermissions(policies); err != nil { + return authz.NewAddPermissionsInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + h.logger.WithFields(logrus.Fields{ + "action": "add_permissions", + "component": authorization.ComponentName, + "user": principal.Username, + "roleName": params.ID, + "permissions": params.Body.Permissions, + }).Info("permissions added") + + return authz.NewAddPermissionsOK() +} + +func (h *authZHandlers) removePermissions(params authz.RemovePermissionsParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + // we don't validate permissions entity existence + // in case of the permissions gets removed after the entity got removed + // delete class ABC, then remove permissions on class ABC + if err := validatePermissions(false, params.Body.Permissions...); err != nil { + return authz.NewRemovePermissionsBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("invalid permissions %w", err))) + } + + if slices.Contains(authorization.BuiltInRoles, params.ID) { + return authz.NewRemovePermissionsBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("you cannot update built-in role %s", params.ID))) + } + + permissions, err := conv.PermissionToPolicies(params.Body.Permissions...) 
+ if err != nil { + return authz.NewRemovePermissionsBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("invalid permissions %w", err))) + } + // TODO-RBAC PermissionToPolicies has to be []Policy{} not slice of pointers + policies := map[string][]authorization.Policy{ + params.ID: {}, + } + for _, p := range permissions { + policies[params.ID] = append(policies[params.ID], *p) + } + + if err := h.authorizeRoleScopes(ctx, principal, authorization.UPDATE, policies[params.ID], params.ID); err != nil { + return authz.NewRemovePermissionsForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + role, err := h.controller.GetRoles(params.ID) + if err != nil { + return authz.NewRemovePermissionsInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetRoles: %w", err))) + } + + if len(role) == 0 { + return authz.NewRemovePermissionsNotFound() + } + + if err := h.controller.RemovePermissions(params.ID, permissions); err != nil { + return authz.NewRemovePermissionsInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("RemovePermissions: %w", err))) + } + + h.logger.WithFields(logrus.Fields{ + "action": "remove_permissions", + "component": authorization.ComponentName, + "user": principal.Username, + "roleName": params.ID, + "permissions": params.Body.Permissions, + }).Info("permissions removed") + + return authz.NewRemovePermissionsOK() +} + +func (h *authZHandlers) hasPermission(params authz.HasPermissionParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if params.Body == nil { + return authz.NewHasPermissionBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("permission is required"))) + } + + if err := validatePermissions(false, params.Body); err != nil { + return authz.NewHasPermissionBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("invalid permissions %w", err))) + } + + if err := h.authorizeRoleScopes(ctx, 
principal, authorization.READ, nil, params.ID); err != nil { + return authz.NewHasPermissionForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + policy, err := conv.PermissionToPolicies(params.Body) + if err != nil { + return authz.NewHasPermissionBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("invalid permissions %w", err))) + } + if len(policy) == 0 { + return authz.NewHasPermissionInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("unknown error occurred passing permission to policy"))) + } + + hasPermission, err := h.controller.HasPermission(params.ID, policy[0]) + if err != nil { + return authz.NewHasPermissionInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("HasPermission: %w", err))) + } + + return authz.NewHasPermissionOK().WithPayload(hasPermission) +} + +func (h *authZHandlers) getRoles(params authz.GetRolesParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + roles, err := h.controller.GetRoles() + if err != nil { + return authz.NewGetRolesInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetRoles: %w", err))) + } + + var response []*models.Role + for roleName, policies := range roles { + if roleName == authorization.Root && !slices.Contains(h.rbacconfig.RootUsers, principal.Username) { + continue + } + + perms, err := conv.PoliciesToPermission(policies...) 
+ if err != nil { + return authz.NewGetRolesInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("PoliciesToPermission: %w", err))) + } + response = append(response, &models.Role{ + Name: &roleName, + Permissions: perms, + }) + } + + // Filter roles based on authorization + resourceFilter := filter.New[*models.Role](h.authorizer, h.rbacconfig) + filteredRoles := resourceFilter.Filter( + ctx, + h.logger, + principal, + response, + authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_ALL), + func(role *models.Role) string { + return authorization.Roles(*role.Name)[0] + }, + ) + if len(filteredRoles) == 0 { + // try match if all was none + filteredRoles = resourceFilter.Filter( + ctx, + h.logger, + principal, + response, + authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_MATCH), + func(role *models.Role) string { + return authorization.Roles(*role.Name)[0] + }, + ) + } + + sortByName(filteredRoles) + + logFields := logrus.Fields{ + "action": "read_all_roles", + "component": authorization.ComponentName, + } + + if principal != nil { + logFields["user"] = principal.Username + } + + h.logger.WithFields(logFields).Info("roles requested") + + return authz.NewGetRolesOK().WithPayload(filteredRoles) +} + +func (h *authZHandlers) getRole(params authz.GetRoleParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if err := h.authorizeRoleScopes(ctx, principal, authorization.READ, nil, params.ID); err != nil { + return authz.NewGetRoleForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + roles, err := h.controller.GetRoles(params.ID) + if err != nil { + return authz.NewGetRoleInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetRoles: %w", err))) + } + if len(roles) == 0 { + return authz.NewGetRoleNotFound() + } + if len(roles) != 1 { + err := fmt.Errorf("expected one role but got %d", len(roles)) + return 
authz.NewGetRoleInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetRoles: %w", err))) + } + + perms, err := conv.PoliciesToPermission(roles[params.ID]...) + if err != nil { + return authz.NewGetRoleBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("PoliciesToPermission: %w", err))) + } + + h.logger.WithFields(logrus.Fields{ + "action": "read_role", + "component": authorization.ComponentName, + "user": principal.Username, + "role_id": params.ID, + }).Info("role requested") + + return authz.NewGetRoleOK().WithPayload(&models.Role{ + Name: ¶ms.ID, + Permissions: perms, + }) +} + +func (h *authZHandlers) deleteRole(params authz.DeleteRoleParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if slices.Contains(authorization.BuiltInRoles, params.ID) { + return authz.NewDeleteRoleBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("you can not delete built-in role %s", params.ID))) + } + + roles, err := h.controller.GetRoles(params.ID) + if err != nil { + h.logger.WithFields(logrus.Fields{ + "action": "delete_role", + "component": authorization.ComponentName, + "user": principal.Username, + "roleName": params.ID, + }).Info("role was already deleted") + return authz.NewDeleteRoleNoContent() + } + + if err := h.authorizeRoleScopes(ctx, principal, authorization.DELETE, roles[params.ID], params.ID); err != nil { + return authz.NewDeleteRoleForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if err := h.controller.DeleteRoles(params.ID); err != nil { + return authz.NewDeleteRoleInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("DeleteRoles: %w", err))) + } + + h.logger.WithFields(logrus.Fields{ + "action": "delete_role", + "component": authorization.ComponentName, + "user": principal.Username, + "roleName": params.ID, + }).Info("role deleted") + + return authz.NewDeleteRoleNoContent() +} + +func (h *authZHandlers) 
assignRoleToUser(params authz.AssignRoleToUserParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + for _, role := range params.Body.Roles { + if strings.TrimSpace(role) == "" { + return authz.NewAssignRoleToUserBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("one or more of the roles you want to assign is empty"))) + } + + if err := validateEnvVarRoles(role); err != nil { + return authz.NewAssignRoleToUserForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("assigning: %w", err))) + } + } + + if len(params.Body.Roles) == 0 { + return authz.NewAssignRoleToUserBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("roles can not be empty"))) + } + + if err := h.authorizer.Authorize(ctx, principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Users(params.ID)...); err != nil { + return authz.NewAssignRoleToUserForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + existedRoles, err := h.controller.GetRoles(params.Body.Roles...) 
+ if err != nil { + return authz.NewAssignRoleToUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetRoles: %w", err))) + } + + if len(existedRoles) != len(params.Body.Roles) { + return authz.NewAssignRoleToUserNotFound().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("one or more of the roles requested doesn't exist"))) + } + + userTypes, err := h.getUserTypesAndValidateExistence(params.ID, authentication.AuthType(params.Body.UserType)) + if err != nil { + return authz.NewAssignRoleToUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("user exists: %w", err))) + } + if userTypes == nil { + return authz.NewAssignRoleToUserNotFound().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("username to assign role to doesn't exist"))) + } + for _, userType := range userTypes { + if err := h.controller.AddRolesForUser(conv.UserNameWithTypeFromId(params.ID, userType), params.Body.Roles); err != nil { + return authz.NewAssignRoleToUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("AddRolesForUser: %w", err))) + } + } + + h.logger.WithFields(logrus.Fields{ + "action": "assign_roles", + "component": authorization.ComponentName, + "user": principal.Username, + "user_to_assign_roles_to": params.ID, + "roles": params.Body.Roles, + }).Info("roles assigned to user") + + return authz.NewAssignRoleToUserOK() +} + +func (h *authZHandlers) assignRoleToGroup(params authz.AssignRoleToGroupParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + for _, role := range params.Body.Roles { + if strings.TrimSpace(role) == "" { + return authz.NewAssignRoleToGroupBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("one or more of the roles you want to assign is empty"))) + } + + if err := validateEnvVarRoles(role); err != nil { + return 
authz.NewAssignRoleToGroupForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("assigning: %w", err))) + } + } + + if len(params.Body.Roles) == 0 { + return authz.NewAssignRoleToGroupBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("roles can not be empty"))) + } + + groupType, err := validateUserTypeInput(string(params.Body.GroupType)) + if err != nil || groupType != authentication.AuthTypeOIDC { + return authz.NewAssignRoleToGroupBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("unknown groupType: %v", params.Body.GroupType))) + } + + if err := h.authorizer.Authorize(ctx, principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Groups(groupType, params.ID)...); err != nil { + return authz.NewAssignRoleToGroupForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if err := h.validateRootGroup(params.ID); err != nil { + return authz.NewAssignRoleToGroupForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("assigning: %w", err))) + } + + existedRoles, err := h.controller.GetRoles(params.Body.Roles...) 
+ if err != nil { + return authz.NewAssignRoleToGroupInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetRoles: %w", err))) + } + + if len(existedRoles) != len(params.Body.Roles) && len(params.Body.Roles) > 0 { + return authz.NewAssignRoleToGroupNotFound() + } + + if err := h.controller.AddRolesForUser(conv.PrefixGroupName(params.ID), params.Body.Roles); err != nil { + return authz.NewAssignRoleToGroupInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("AddRolesForUser: %w", err))) + } + + h.logger.WithFields(logrus.Fields{ + "action": "assign_roles", + "component": authorization.ComponentName, + "user": principal.Username, + "group_to_assign_roles_to": params.ID, + "roles": params.Body.Roles, + }).Info("roles assigned to group") + + return authz.NewAssignRoleToGroupOK() +} + +// Delete this when 1.29 is not supported anymore +func (h *authZHandlers) getRolesForUserDeprecated(params authz.GetRolesForUserDeprecatedParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + ownUser := params.ID == principal.Username + + if !ownUser { + if err := h.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Users(params.ID)...); err != nil { + return authz.NewGetRolesForUserDeprecatedForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + } + + exists, err := h.userExistsDeprecated(params.ID) + if err != nil { + return authz.NewGetRolesForUserDeprecatedInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("user existence: %w", err))) + } + if !exists { + return authz.NewGetRolesForUserDeprecatedNotFound() + } + + existingRolesDB, err := h.controller.GetRolesForUserOrGroup(params.ID, authentication.AuthTypeDb, false) + if err != nil { + return authz.NewGetRolesForUserDeprecatedInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetUsersOrGroupsWithRoles: %w", err))) + } + existingRolesOIDC, err := 
h.controller.GetRolesForUserOrGroup(params.ID, authentication.AuthTypeOIDC, false) + if err != nil { + return authz.NewGetRolesForUserDeprecatedInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetUsersOrGroupsWithRoles: %w", err))) + } + + var response []*models.Role + foundRoles := map[string]struct{}{} + var authErr error + for _, existing := range []map[string][]authorization.Policy{existingRolesDB, existingRolesOIDC} { + for roleName, policies := range existing { + perms, err := conv.PoliciesToPermission(policies...) + if err != nil { + return authz.NewGetRolesForUserDeprecatedInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("PoliciesToPermission: %w", err))) + } + + if !ownUser { + if err := h.authorizeRoleScopes(ctx, principal, authorization.READ, nil, roleName); err != nil { + authErr = err + continue + } + } + + // no duplicates + if _, ok := foundRoles[roleName]; ok { + continue + } + + foundRoles[roleName] = struct{}{} + + response = append(response, &models.Role{ + Name: &roleName, + Permissions: perms, + }) + } + } + + if (len(existingRolesDB) != 0 || len(existingRolesOIDC) != 0) && len(response) == 0 { + return authz.NewGetRolesForUserDeprecatedForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(authErr)) + } + + sortByName(response) + + h.logger.WithFields(logrus.Fields{ + "action": "get_roles_for_user", + "component": authorization.ComponentName, + "user": principal.Username, + "user_to_get_roles_for": params.ID, + }).Info("roles requested") + + return authz.NewGetRolesForUserDeprecatedOK().WithPayload(response) +} + +func (h *authZHandlers) getRolesForUser(params authz.GetRolesForUserParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + ownUser := params.ID == principal.Username && params.UserType == string(principal.UserType) + + if !ownUser { + if err := h.authorizer.Authorize(ctx, principal, authorization.READ, 
authorization.Users(params.ID)...); err != nil { + return authz.NewGetRolesForUserForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + } + + includeFullRoles := params.IncludeFullRoles != nil && *params.IncludeFullRoles + + userType, err := validateUserTypeInput(params.UserType) + if err != nil { + return authz.NewGetRolesForUserBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("unknown userType: %v", params.UserType))) + } + + exists, err := h.userExists(params.ID, userType) + if err != nil { + return authz.NewGetRolesForUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("user existence: %w", err))) + } + if !exists { + return authz.NewGetRolesForUserNotFound() + } + + existingRoles, err := h.controller.GetRolesForUserOrGroup(params.ID, userType, false) + if err != nil { + return authz.NewGetRolesForUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetUsersOrGroupsWithRoles: %w", err))) + } + + var roles []*models.Role + var authErrs []error + for roleName, policies := range existingRoles { + perms, err := conv.PoliciesToPermission(policies...) 
+ if err != nil { + return authz.NewGetRolesForUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("PoliciesToPermission: %w", err))) + } + + role := &models.Role{Name: &roleName} + if includeFullRoles { + if !ownUser { + if err := h.authorizeRoleScopes(ctx, principal, authorization.READ, nil, roleName); err != nil { + authErrs = append(authErrs, err) + continue + } + } + role.Permissions = perms + } + roles = append(roles, role) + } + + if len(authErrs) > 0 { + return authz.NewGetRolesForUserForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.Join(authErrs...))) + } + + sortByName(roles) + + h.logger.WithFields(logrus.Fields{ + "action": "get_roles_for_user", + "component": authorization.ComponentName, + "user": principal.Username, + "user_to_get_roles_for": params.ID, + }).Info("roles requested") + + return authz.NewGetRolesForUserOK().WithPayload(roles) +} + +func (h *authZHandlers) getUsersForRole(params authz.GetUsersForRoleParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if err := validateEnvVarRoles(params.ID); err != nil && !slices.Contains(h.rbacconfig.RootUsers, principal.Username) { + return authz.NewGetUsersForRoleForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if err := h.authorizeRoleScopes(ctx, principal, authorization.READ, nil, params.ID); err != nil { + return authz.NewGetUsersForRoleForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + var response []*authz.GetUsersForRoleOKBodyItems0 + for _, userType := range []authentication.AuthType{authentication.AuthTypeOIDC, authentication.AuthTypeDb} { + users, err := h.controller.GetUsersOrGroupForRole(params.ID, userType, false) + if err != nil { + return authz.NewGetUsersForRoleInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetUsersOrGroupForRole: %w", err))) + } + + filteredUsers := make([]string, 0, len(users)) + for _, userName := range 
users { + if userName == principal.Username { + // own username + filteredUsers = append(filteredUsers, userName) + continue + } + if err := h.authorizer.AuthorizeSilent(ctx, principal, authorization.READ, authorization.Users(userName)...); err == nil { + filteredUsers = append(filteredUsers, userName) + } + } + slices.Sort(filteredUsers) + if userType == authentication.AuthTypeOIDC { + for _, userId := range filteredUsers { + response = append(response, &authz.GetUsersForRoleOKBodyItems0{UserID: userId, UserType: models.NewUserTypeOutput(models.UserTypeOutputOidc)}) + } + } else { + dynamicUsers, err := h.controller.GetUsers(filteredUsers...) + if err != nil { + return authz.NewGetUsersForRoleInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetUsers: %w", err))) + } + for _, userId := range filteredUsers { + if _, ok := dynamicUsers[userId]; ok { + response = append(response, &authz.GetUsersForRoleOKBodyItems0{UserID: userId, UserType: models.NewUserTypeOutput(models.UserTypeOutputDbUser)}) + } else { + response = append(response, &authz.GetUsersForRoleOKBodyItems0{UserID: userId, UserType: models.NewUserTypeOutput(models.UserTypeOutputDbEnvUser)}) + } + } + + } + + } + + h.logger.WithFields(logrus.Fields{ + "action": "get_users_for_role", + "component": authorization.ComponentName, + "user": principal.Username, + "role_to_get_users_for": params.ID, + }).Info("users requested") + + return authz.NewGetUsersForRoleOK().WithPayload(response) +} + +func (h *authZHandlers) getGroupsForRole(params authz.GetGroupsForRoleParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if err := validateEnvVarRoles(params.ID); err != nil && !slices.Contains(h.rbacconfig.RootUsers, principal.Username) { + return authz.NewGetGroupsForRoleForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if err := h.authorizeRoleScopes(ctx, principal, authorization.READ, nil, params.ID); err != nil { + 
return authz.NewGetGroupsForRoleForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + users, err := h.controller.GetUsersOrGroupForRole(params.ID, authentication.AuthTypeOIDC, true) + if err != nil { + return authz.NewGetGroupsForRoleInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetUsersOrGroupForRole: %w", err))) + } + + filteredUsers := make([]string, 0, len(users)) + for _, userName := range users { + if userName == principal.Username { + // own username + filteredUsers = append(filteredUsers, userName) + continue + } + if err := h.authorizer.AuthorizeSilent(ctx, principal, authorization.READ, authorization.Users(userName)...); err == nil { + filteredUsers = append(filteredUsers, userName) + } + } + slices.Sort(filteredUsers) + + // only OIDC groups so far + oidc := models.GroupTypeOidc + var response []*authz.GetGroupsForRoleOKBodyItems0 + for _, userId := range filteredUsers { + response = append(response, &authz.GetGroupsForRoleOKBodyItems0{GroupID: userId, GroupType: &oidc}) + } + + h.logger.WithFields(logrus.Fields{ + "action": "get_groups_for_role", + "component": authorization.ComponentName, + "user": principal.Username, + "role_to_get_users_for": params.ID, + }).Info("groups requested") + + return authz.NewGetGroupsForRoleOK().WithPayload(response) +} + +// Delete this when 1.29 is not supported anymore +func (h *authZHandlers) getUsersForRoleDeprecated(params authz.GetUsersForRoleDeprecatedParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if err := validateEnvVarRoles(params.ID); err != nil && !slices.Contains(h.rbacconfig.RootUsers, principal.Username) { + return authz.NewGetUsersForRoleForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if err := h.authorizeRoleScopes(ctx, principal, authorization.READ, nil, params.ID); err != nil { + return authz.NewGetUsersForRoleForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + 
foundUsers := map[string]struct{}{} // no duplicates + filteredUsers := make([]string, 0) + + for _, userType := range []authentication.AuthType{authentication.AuthTypeDb, authentication.AuthTypeOIDC} { + users, err := h.controller.GetUsersOrGroupForRole(params.ID, userType, false) + if err != nil { + return authz.NewGetUsersForRoleInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetUsersOrGroupForRole: %w", err))) + } + + for _, userName := range users { + if _, ok := foundUsers[userName]; ok { + continue + } + foundUsers[userName] = struct{}{} + + if userName == principal.Username { + // own username + filteredUsers = append(filteredUsers, userName) + continue + } + if err := h.authorizer.AuthorizeSilent(ctx, principal, authorization.READ, authorization.Users(userName)...); err == nil { + filteredUsers = append(filteredUsers, userName) + } + } + + } + + slices.Sort(filteredUsers) + + h.logger.WithFields(logrus.Fields{ + "action": "get_users_for_role", + "component": authorization.ComponentName, + "user": principal.Username, + "role_to_get_users_for": params.ID, + }).Info("users requested") + + return authz.NewGetUsersForRoleDeprecatedOK().WithPayload(filteredUsers) +} + +func (h *authZHandlers) revokeRoleFromUser(params authz.RevokeRoleFromUserParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + for _, role := range params.Body.Roles { + if strings.TrimSpace(role) == "" { + return authz.NewRevokeRoleFromUserBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("one or more of the roles you want to revoke is empty"))) + } + + if err := validateEnvVarRoles(role); err != nil { + return authz.NewRevokeRoleFromUserForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("revoking: %w", err))) + } + } + + if len(params.Body.Roles) == 0 { + return authz.NewRevokeRoleFromUserBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("roles can not be empty"))) + 
} + + if err := h.authorizer.Authorize(ctx, principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Users(params.ID)...); err != nil { + return authz.NewRevokeRoleFromUserForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + existedRoles, err := h.controller.GetRoles(params.Body.Roles...) + if err != nil { + return authz.NewRevokeRoleFromUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetRoles: %w", err))) + } + + if len(existedRoles) != len(params.Body.Roles) { + return authz.NewRevokeRoleFromUserNotFound().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("one or more of the request roles doesn't exist"))) + } + + userTypes, err := h.getUserTypesAndValidateExistence(params.ID, authentication.AuthType(params.Body.UserType)) + if err != nil { + return authz.NewRevokeRoleFromUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("user exists: %w", err))) + } + if userTypes == nil { + return authz.NewRevokeRoleFromUserNotFound().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("username to revoke role from doesn't exist"))) + } + for _, userType := range userTypes { + if err := h.controller.RevokeRolesForUser(conv.UserNameWithTypeFromId(params.ID, userType), params.Body.Roles...); err != nil { + return authz.NewRevokeRoleFromUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("AddRolesForUser: %w", err))) + } + } + + h.logger.WithFields(logrus.Fields{ + "action": "revoke_roles", + "component": authorization.ComponentName, + "user": principal.Username, + "user_to_assign_roles_to": params.ID, + "roles": params.Body.Roles, + }).Info("roles revoked from user") + + return authz.NewRevokeRoleFromUserOK() +} + +func (h *authZHandlers) revokeRoleFromGroup(params authz.RevokeRoleFromGroupParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + for _, role := range params.Body.Roles { + if 
strings.TrimSpace(role) == "" { + return authz.NewRevokeRoleFromGroupBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("one or more of the roles you want to revoke is empty"))) + } + + if err := validateEnvVarRoles(role); err != nil { + return authz.NewRevokeRoleFromGroupForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("revoking: %w", err))) + } + } + + if len(params.Body.Roles) == 0 { + return authz.NewRevokeRoleFromGroupBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("roles can not be empty"))) + } + + groupType, err := validateUserTypeInput(string(params.Body.GroupType)) + if err != nil || groupType != authentication.AuthTypeOIDC { + return authz.NewRevokeRoleFromGroupBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("unknown groupType: %v", params.Body.GroupType))) + } + + if err := h.authorizer.Authorize(ctx, principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Groups(groupType, params.ID)...); err != nil { + return authz.NewRevokeRoleFromGroupForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if err := h.validateRootGroup(params.ID); err != nil { + return authz.NewRevokeRoleFromGroupForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("revoking: %w", err))) + } + + existedRoles, err := h.controller.GetRoles(params.Body.Roles...) 
+ if err != nil { + return authz.NewRevokeRoleFromGroupInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetRoles: %w", err))) + } + + if len(existedRoles) != len(params.Body.Roles) { + return authz.NewRevokeRoleFromGroupNotFound() + } + + if err := h.controller.RevokeRolesForUser(conv.PrefixGroupName(params.ID), params.Body.Roles...); err != nil { + return authz.NewRevokeRoleFromGroupInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("RevokeRolesForGroup: %w", err))) + } + + h.logger.WithFields(logrus.Fields{ + "action": "revoke_roles", + "component": authorization.ComponentName, + "user": principal.Username, + "group_to_assign_roles_to": params.ID, + "roles": params.Body.Roles, + }).Info("roles revoked from group") + + return authz.NewRevokeRoleFromGroupOK() +} + +func (h *authZHandlers) getGroups(params authz.GetGroupsParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + groupType, err := validateUserTypeInput(params.GroupType) + if err != nil || groupType != authentication.AuthTypeOIDC { + return authz.NewGetGroupsBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("unknown groupType: %v", params.GroupType))) + } + + groups, err := h.controller.GetUsersOrGroupsWithRoles(true, groupType) + if err != nil { + return nil + } + + // Filter roles based on authorization + resourceFilter := filter.New[string](h.authorizer, h.rbacconfig) + filteredGroups := resourceFilter.Filter( + ctx, + h.logger, + principal, + groups, + authorization.READ, + func(group string) string { + return authorization.Groups(authentication.AuthTypeOIDC, group)[0] + }, + ) + + h.logger.WithFields(logrus.Fields{ + "action": "get_groups", + "component": authorization.ComponentName, + "user": principal.Username, + }).Info("groups requested") + + return authz.NewGetGroupsOK().WithPayload(filteredGroups) +} + +func (h *authZHandlers) getRolesForGroup(params 
authz.GetRolesForGroupParams, principal *models.Principal) middleware.Responder { + ownGroup := slices.Contains(principal.Groups, params.ID) && params.GroupType == string(principal.UserType) + ctx := params.HTTPRequest.Context() + + groupType, err := validateUserTypeInput(params.GroupType) + if err != nil || groupType != authentication.AuthTypeOIDC { + return authz.NewGetRolesForGroupBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("unknown groupType: %v", params.GroupType))) + } + + if !ownGroup { + if err := h.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Groups(groupType, params.ID)...); err != nil { + return authz.NewGetRolesForGroupForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + } + + includeFullRoles := params.IncludeFullRoles != nil && *params.IncludeFullRoles + + existingRoles, err := h.controller.GetRolesForUserOrGroup(params.ID, groupType, true) + if err != nil { + return authz.NewGetRolesForGroupInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("GetRolesForUserOrGroup: %w", err))) + } + + var roles []*models.Role + var authErrs []error + for roleName, policies := range existingRoles { + perms, err := conv.PoliciesToPermission(policies...) 
+ if err != nil { + return authz.NewGetRolesForGroupInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("PoliciesToPermission: %w", err))) + } + + role := &models.Role{Name: &roleName} + if includeFullRoles { + if !ownGroup { + if err := h.authorizeRoleScopes(ctx, principal, authorization.READ, nil, roleName); err != nil { + authErrs = append(authErrs, err) + continue + } + } + role.Permissions = perms + } + roles = append(roles, role) + } + + if len(authErrs) > 0 { + return authz.NewGetRolesForGroupForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.Join(authErrs...))) + } + + sortByName(roles) + + h.logger.WithFields(logrus.Fields{ + "action": "get_roles_for_group", + "component": authorization.ComponentName, + "user": principal.Username, + "group_to_get_roles_for": params.ID, + }).Info("roles for group requested") + + return authz.NewGetRolesForGroupOK().WithPayload(roles) +} + +func (h *authZHandlers) userExists(user string, userType authentication.AuthType) (bool, error) { + switch userType { + case authentication.AuthTypeOIDC: + if !h.oidcConfigs.Enabled { + return false, fmt.Errorf("oidc is not enabled") + } + return true, nil + case authentication.AuthTypeDb: + if h.apiKeysConfigs.Enabled { + for _, apiKey := range h.apiKeysConfigs.Users { + if apiKey == user { + return true, nil + } + } + } + + users, err := h.controller.GetUsers(user) + if err != nil { + return false, err + } + if len(users) == 1 { + return true, nil + } else { + return false, nil + } + default: + return false, fmt.Errorf("unknown user type") + } +} + +func (h *authZHandlers) userExistsDeprecated(user string) (bool, error) { + // We are only able to check if a user is present on the system if APIKeys are the only auth method. For OIDC + // users are managed in an external service and there is no general way to check if a user we have not seen yet is + // valid. 
+ if h.oidcConfigs.Enabled { + return true, nil + } + + if h.apiKeysConfigs.Enabled { + for _, apiKey := range h.apiKeysConfigs.Users { + if apiKey == user { + return true, nil + } + } + } + + users, err := h.controller.GetUsers(user) + if err != nil { + return false, err + } + if len(users) == 1 { + return true, nil + } else { + return false, nil + } +} + +// validateRootGroup validates that enduser do not touch the internal root group +func (h *authZHandlers) validateRootGroup(name string) error { + if slices.Contains(h.rbacconfig.RootGroups, name) || slices.Contains(h.rbacconfig.ReadOnlyGroups, name) { + return fmt.Errorf("cannot assign or revoke from root group %s", name) + } + return nil +} + +func (h *authZHandlers) getUserTypesAndValidateExistence(id string, userTypeParam authentication.AuthType) ([]authentication.AuthType, error) { + if userTypeParam == "" { + exists, err := h.userExistsDeprecated(id) + if err != nil { + return nil, err + } + if !exists { + return nil, nil + } + + return []authentication.AuthType{authentication.AuthTypeOIDC, authentication.AuthTypeDb}, nil + } else { + exists, err := h.userExists(id, userTypeParam) + if err != nil { + return nil, err + } + if !exists { + return nil, nil + } + + return []authentication.AuthType{userTypeParam}, nil + } +} + +// validateEnvVarRoles validates that enduser do not touch the internal root role +func validateEnvVarRoles(name string) error { + if slices.Contains(authorization.EnvVarRoles, name) { + return fmt.Errorf("modifying '%s' role or changing its assignments is not allowed", name) + } + return nil +} + +// validateRoleName validates that this string is a valid role name (format wise) +func validateRoleName(name string) error { + if len(name) > roleNameMaxLength { + return fmt.Errorf("'%s' is not a valid role name. 
Name should not be longer than %d characters", name, roleNameMaxLength) + } + if !validateRoleNameRegex.MatchString(name) { + return fmt.Errorf("'%s' is not a valid role name", name) + } + return nil +} + +func sortByName(roles []*models.Role) { + sort.Slice(roles, func(i, j int) bool { + return *roles[i].Name < *roles[j].Name + }) +} + +func validateUserTypeInput(userTypeInput string) (authentication.AuthType, error) { + var userType authentication.AuthType + if userTypeInput == string(authentication.AuthTypeOIDC) { + userType = authentication.AuthTypeOIDC + } else if userTypeInput == string(authentication.AuthTypeDb) { + userType = authentication.AuthTypeDb + } else { + return userType, fmt.Errorf("unknown userType: %v", userTypeInput) + } + return userType, nil +} + +// TODO-RBAC: we could expose endpoint to validate permissions as dry-run +// func (h *authZHandlers) validatePermissions(permissions []*models.Permission) error { +// for _, perm := range permissions { +// if perm == nil { +// continue +// } + +// // collection filtration +// if perm.Collection != nil && *perm.Collection != "" && *perm.Collection != "*" { +// if class := h.schemaReader.ReadOnlyClass(*perm.Collection); class == nil { +// return fmt.Errorf("collection %s doesn't exists", *perm.Collection) +// } +// } + +// // tenants filtration specific collection, specific tenant +// if perm.Collection != nil && *perm.Collection != "" && *perm.Collection != "*" && perm.Tenant != nil && *perm.Tenant != "" && *perm.Tenant != "*" { +// shardsStatus, err := h.schemaReader.TenantsShards(context.Background(), *perm.Collection, *perm.Tenant) +// if err != nil { +// return fmt.Errorf("err while fetching collection '%s', tenant '%s', %s", *perm.Collection, *perm.Tenant, err) +// } + +// if _, ok := shardsStatus[*perm.Tenant]; !ok { +// return fmt.Errorf("tenant %s doesn't exists", *perm.Tenant) +// } +// } + +// // tenants filtration all collections, specific tenant +// if (perm.Collection == nil || 
*perm.Collection == "" || *perm.Collection == "*") && perm.Tenant != nil && *perm.Tenant != "" && *perm.Tenant != "*" { +// schema := h.schemaReader.GetSchemaSkipAuth() +// for _, class := range schema.Objects.Classes { +// //NOTE: CopyShardingState not available anymore +// state := h.schemaReader.CopyShardingState(class.Class) +// if state == nil { +// continue +// } +// if _, ok := state.Physical[*perm.Tenant]; ok { +// // exists +// return nil +// } +// } +// return fmt.Errorf("tenant %s doesn't exists", *perm.Tenant) +// } + +// // TODO validate mapping filter to weaviate permissions +// // TODO users checking +// // TODO roles checking +// // TODO object checking +// } + +// return nil +// } diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_add_permission_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_add_permission_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2828cb7956ae713dd5862970d04118f33769b676 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_add_permission_test.go @@ -0,0 +1,382 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "fmt" + "net/http" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/usecases/schema" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/authz" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/conv" +) + +var req, _ = http.NewRequest("POST", "/activate", nil) + +func TestAddPermissionsSuccess(t *testing.T) { + type testCase struct { + name string + principal *models.Principal + params authz.AddPermissionsParams + } + + tests := []testCase{ + { + name: "all are *", + principal: &models.Principal{Username: "user1"}, + params: authz.AddPermissionsParams{ + ID: "test", + HTTPRequest: req, + Body: authz.AddPermissionsBody{ + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{ + Collection: String("*"), + }, + }, + }, + }, + }, + }, + { + name: "collection checks", + principal: &models.Principal{Username: "user1"}, + params: authz.AddPermissionsParams{ + ID: "newRole", + HTTPRequest: req, + Body: authz.AddPermissionsBody{ + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{Collection: String("ABC")}, + }, + }, + }, + }, + }, + { + name: "collection and tenant checks", + principal: &models.Principal{Username: "user1"}, + params: authz.AddPermissionsParams{ + ID: "newRole", + HTTPRequest: req, + + Body: authz.AddPermissionsBody{ + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{ + Collection: String("ABC"), + }, + }, + }, + }, + }, + }, + { + name: "* collections and 
tenant checks", + principal: &models.Principal{Username: "user1"}, + params: authz.AddPermissionsParams{ + ID: "newRole", + HTTPRequest: req, + Body: authz.AddPermissionsBody{ + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{ + Collection: String("*"), + }, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + schemaReader := schema.NewMockSchemaGetter(t) + logger, _ := test.NewNullLogger() + + policies, err := conv.RolesToPolicies(&models.Role{ + Name: &tt.params.ID, + Permissions: tt.params.Body.Permissions, + }) + require.Nil(t, err) + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_ALL), authorization.Roles(tt.params.ID)[0]).Return(nil) + controller.On("GetRoles", tt.params.ID).Return(map[string][]authorization.Policy{ + "test": { + {Resource: "whatever", Verb: authorization.READ, Domain: "whatever"}, + }, + }, nil) + controller.On("UpdateRolesPermissions", policies).Return(nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + schemaReader: schemaReader, + logger: logger, + } + res := h.addPermissions(tt.params, tt.principal) + parsed, ok := res.(*authz.AddPermissionsOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + }) + } +} + +func TestAddPermissionsBadRequest(t *testing.T) { + type testCase struct { + name string + params authz.AddPermissionsParams + principal *models.Principal + expectedError string + } + + tests := []testCase{ + { + name: "role has to have at least 1 permission", + params: authz.AddPermissionsParams{ + ID: "someName", + HTTPRequest: req, + Body: authz.AddPermissionsBody{ + Permissions: []*models.Permission{}, + }, + }, + principal: &models.Principal{Username: "user1"}, + expectedError: "role 
has to have at least 1 permission", + }, + { + name: "update builtin role", + params: authz.AddPermissionsParams{ + ID: authorization.BuiltInRoles[0], + HTTPRequest: req, + Body: authz.AddPermissionsBody{ + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{}, + }, + }, + }, + }, + principal: &models.Principal{Username: "user1"}, + expectedError: "you can not update built-in role", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := NewMockControllerAndGetUsers(t) + authorizer := authorization.NewMockAuthorizer(t) + schemaReader := schema.NewMockSchemaGetter(t) + logger, _ := test.NewNullLogger() + h := &authZHandlers{ + controller: controller, + authorizer: authorizer, + schemaReader: schemaReader, + logger: logger, + } + res := h.addPermissions(tt.params, tt.principal) + parsed, ok := res.(*authz.AddPermissionsBadRequest) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestAddPermissionsForbidden(t *testing.T) { + type testCase struct { + name string + params authz.AddPermissionsParams + principal *models.Principal + authorizeErr error + expectedError string + } + + tests := []testCase{ + { + name: "update some role", + params: authz.AddPermissionsParams{ + ID: "someRole", + HTTPRequest: req, + Body: authz.AddPermissionsBody{ + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{ + Collection: String("*"), + }, + }, + }, + }, + }, + principal: &models.Principal{Username: "user1"}, + authorizeErr: fmt.Errorf("some error from authZ"), + expectedError: "some error from authZ", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ 
:= test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_ALL), authorization.Roles(tt.params.ID)[0]).Return(tt.authorizeErr) + if tt.authorizeErr != nil { + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_MATCH), authorization.Roles(tt.params.ID)[0]).Return(tt.authorizeErr) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.addPermissions(tt.params, tt.principal) + parsed, ok := res.(*authz.AddPermissionsForbidden) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestAddPermissionsRoleNotFound(t *testing.T) { + type testCase struct { + name string + params authz.AddPermissionsParams + principal *models.Principal + expectedError string + } + + tests := []testCase{ + { + name: "role not found", + params: authz.AddPermissionsParams{ + ID: "some role", + HTTPRequest: req, + Body: authz.AddPermissionsBody{ + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{ + Collection: String("*"), + }, + }, + }, + }, + }, + principal: &models.Principal{Username: "user1"}, + expectedError: "role not found", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_ALL), authorization.Roles(tt.params.ID)[0]).Return(nil) + controller.On("GetRoles", tt.params.ID).Return(map[string][]authorization.Policy{}, nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: 
controller, + logger: logger, + } + res := h.addPermissions(tt.params, tt.principal) + _, ok := res.(*authz.AddPermissionsNotFound) + assert.True(t, ok) + }) + } +} + +func TestAddPermissionsInternalServerError(t *testing.T) { + type testCase struct { + name string + params authz.AddPermissionsParams + principal *models.Principal + upsertErr error + expectedError string + } + + tests := []testCase{ + { + name: "update some role", + params: authz.AddPermissionsParams{ + ID: "someRole", + HTTPRequest: req, + Body: authz.AddPermissionsBody{ + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{ + Collection: String("*"), + }, + }, + }, + }, + }, + principal: &models.Principal{Username: "user1"}, + upsertErr: fmt.Errorf("some error from controller"), + expectedError: "some error from controller", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_ALL), authorization.Roles(tt.params.ID)[0]).Return(nil) + controller.On("GetRoles", tt.params.ID).Return(map[string][]authorization.Policy{ + "test": { + {Resource: "whatever", Verb: authorization.READ, Domain: "whatever"}, + }, + }, nil) + controller.On("UpdateRolesPermissions", mock.Anything).Return(tt.upsertErr) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.addPermissions(tt.params, tt.principal) + parsed, ok := res.(*authz.AddPermissionsInternalServerError) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_assign_roles_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_assign_roles_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3fe158aa6fb0ee832d723afa8146207d379074fc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_assign_roles_test.go @@ -0,0 +1,606 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "fmt" + "testing" + + "github.com/weaviate/weaviate/usecases/auth/authentication" + + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/authz" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/conv" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" + "github.com/weaviate/weaviate/usecases/config" +) + +func TestAssignRoleToUserSuccess(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + userType := models.UserTypeInputDb + principal := &models.Principal{Username: "user1"} + params := authz.AssignRoleToUserParams{ + ID: "user1", + HTTPRequest: req, + Body: authz.AssignRoleToUserBody{ + Roles: []string{"testRole"}, + UserType: userType, + }, + } + + authorizer.On("Authorize", mock.Anything, principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, 
authorization.Users(params.ID)[0]).Return(nil) + controller.On("GetRoles", params.Body.Roles[0]).Return(map[string][]authorization.Policy{params.Body.Roles[0]: {}}, nil) + controller.On("AddRolesForUser", conv.UserNameWithTypeFromId(params.ID, authentication.AuthType(params.Body.UserType)), params.Body.Roles).Return(nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + apiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{"user1"}}, + logger: logger, + } + res := h.assignRoleToUser(params, principal) + parsed, ok := res.(*authz.AssignRoleToUserOK) + assert.True(t, ok) + assert.NotNil(t, parsed) +} + +func TestAssignRoleToGroupSuccess(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + principal := &models.Principal{Username: "root-user"} + params := authz.AssignRoleToGroupParams{ + ID: "group1", + HTTPRequest: req, + Body: authz.AssignRoleToGroupBody{ + Roles: []string{"testRole"}, + GroupType: models.GroupTypeOidc, + }, + } + + authorizer.On("Authorize", mock.Anything, principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Groups(authentication.AuthType(params.Body.GroupType), params.ID)[0]).Return(nil) + controller.On("GetRoles", params.Body.Roles[0]).Return(map[string][]authorization.Policy{params.Body.Roles[0]: {}}, nil) + controller.On("AddRolesForUser", conv.PrefixGroupName(params.ID), params.Body.Roles).Return(nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + apiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{"user1"}}, + logger: logger, + rbacconfig: rbacconf.Config{ + RootUsers: []string{"root-user"}, + }, + } + res := h.assignRoleToGroup(params, principal) + parsed, ok := res.(*authz.AssignRoleToGroupOK) + assert.True(t, ok) + assert.NotNil(t, parsed) +} + +func TestAssignRoleToUserOrUserNotFound(t *testing.T) { + type testCase struct { + name 
string + params authz.AssignRoleToUserParams + principal *models.Principal + existedRoles map[string][]authorization.Policy + existedUsers []string + callToGetUser bool + } + + tests := []testCase{ + { + name: "user not found", + params: authz.AssignRoleToUserParams{ + ID: "user_not_exist", + HTTPRequest: req, + Body: authz.AssignRoleToUserBody{ + Roles: []string{"role1"}, + }, + }, + principal: &models.Principal{Username: "user1"}, + existedRoles: map[string][]authorization.Policy{"role1": {}}, + existedUsers: []string{"user1"}, + callToGetUser: true, + }, + { + name: "role not found", + params: authz.AssignRoleToUserParams{ + ID: "user1", + HTTPRequest: req, + Body: authz.AssignRoleToUserBody{ + Roles: []string{"role1"}, + }, + }, + principal: &models.Principal{Username: "user1"}, + existedRoles: map[string][]authorization.Policy{}, + existedUsers: []string{"user1"}, + callToGetUser: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, mock.Anything, mock.Anything).Return(nil) + + controller.On("GetRoles", tt.params.Body.Roles[0]).Return(tt.existedRoles, nil) + if tt.callToGetUser { + controller.On("GetUsers", tt.params.ID).Return(nil, nil) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + apiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: tt.existedUsers}, + logger: logger, + } + res := h.assignRoleToUser(tt.params, tt.principal) + parsed, ok := res.(*authz.AssignRoleToUserNotFound) + assert.True(t, ok) + assert.Contains(t, parsed.Payload.Error[0].Message, "doesn't exist") + }) + } +} + +func TestAssignRoleToGroupOrUserNotFound(t *testing.T) { + type testCase struct { + name string + params authz.AssignRoleToGroupParams + principal *models.Principal + 
existedRoles map[string][]authorization.Policy + callToGetRole bool + } + + tests := []testCase{ + { + name: "role not found", + params: authz.AssignRoleToGroupParams{ + ID: "group1", + HTTPRequest: req, + Body: authz.AssignRoleToGroupBody{ + Roles: []string{"role1"}, GroupType: models.GroupTypeOidc, + }, + }, + principal: &models.Principal{Username: "root-user"}, + existedRoles: map[string][]authorization.Policy{}, + callToGetRole: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Groups(authentication.AuthType(tt.params.Body.GroupType), tt.params.ID)[0]).Return(nil) + + if tt.callToGetRole { + controller.On("GetRoles", tt.params.Body.Roles[0]).Return(tt.existedRoles, nil) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + rbacconfig: rbacconf.Config{ + RootUsers: []string{"root-user"}, + }, + } + res := h.assignRoleToGroup(tt.params, tt.principal) + _, ok := res.(*authz.AssignRoleToGroupNotFound) + assert.True(t, ok) + }) + } +} + +func TestAssignRoleToUserBadRequest(t *testing.T) { + type testCase struct { + name string + params authz.AssignRoleToUserParams + principal *models.Principal + expectedError string + } + + tests := []testCase{ + { + name: "empty role", + params: authz.AssignRoleToUserParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.AssignRoleToUserBody{ + Roles: []string{""}, + }, + }, + principal: &models.Principal{Username: "user1"}, + expectedError: "one or more of the roles you want to assign is empty", + }, + { + name: "no roles", + params: authz.AssignRoleToUserParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.AssignRoleToUserBody{}, + }, + principal: &models.Principal{Username: "user1"}, + 
expectedError: "roles can not be empty", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.assignRoleToUser(tt.params, tt.principal) + parsed, ok := res.(*authz.AssignRoleToUserBadRequest) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestAssignRoleToGroupBadRequest(t *testing.T) { + type testCase struct { + name string + params authz.AssignRoleToGroupParams + principal *models.Principal + expectedError string + callAuthZ bool + } + + tests := []testCase{ + { + name: "empty role", + params: authz.AssignRoleToGroupParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.AssignRoleToGroupBody{ + Roles: []string{""}, + }, + }, + principal: &models.Principal{Username: "user1"}, + expectedError: "one or more of the roles you want to assign is empty", + }, + { + name: "no roles", + params: authz.AssignRoleToGroupParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.AssignRoleToGroupBody{}, + }, + principal: &models.Principal{Username: "user1"}, + expectedError: "roles can not be empty", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + if tt.callAuthZ { + authorizer.On("Authorize", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + } + + res := h.assignRoleToGroup(tt.params, tt.principal) + parsed, ok := res.(*authz.AssignRoleToGroupBadRequest) + assert.True(t, ok) + + if tt.expectedError != "" { 
+ assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestAssignRoleToUserForbidden(t *testing.T) { + type testCase struct { + name string + params authz.AssignRoleToUserParams + principal *models.Principal + authorizeErr error + expectedError string + skipAuthZ bool + } + + tests := []testCase{ + { + name: "authorization error", + params: authz.AssignRoleToUserParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.AssignRoleToUserBody{ + Roles: []string{"testRole"}, + }, + }, + principal: &models.Principal{Username: "user1"}, + authorizeErr: fmt.Errorf("authorization error"), + expectedError: "authorization error", + }, + { + name: "root role", + params: authz.AssignRoleToUserParams{ + ID: "someuser", + HTTPRequest: req, + Body: authz.AssignRoleToUserBody{Roles: []string{"root"}}, + }, + skipAuthZ: true, + principal: &models.Principal{Username: "user1"}, + expectedError: "assigning: modifying 'root' role or changing its assignments is not allowed", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + if !tt.skipAuthZ { + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Users(tt.params.ID)[0]).Return(tt.authorizeErr) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + rbacconfig: rbacconf.Config{ + RootUsers: []string{"root-user"}, + }, + } + res := h.assignRoleToUser(tt.params, tt.principal) + parsed, ok := res.(*authz.AssignRoleToUserForbidden) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestAssignRoleToGroupForbidden(t *testing.T) { + type testCase struct { + name string + params authz.AssignRoleToGroupParams + principal 
*models.Principal + authorizeErr error + expectedError string + } + + tests := []testCase{ + { + name: "authorization error", + params: authz.AssignRoleToGroupParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.AssignRoleToGroupBody{ + Roles: []string{"testRole"}, + GroupType: models.GroupTypeOidc, + }, + }, + principal: &models.Principal{Username: "user1"}, + authorizeErr: fmt.Errorf("authorization error"), + expectedError: "authorization error", + }, + { + name: "root group", + params: authz.AssignRoleToGroupParams{ + ID: "root-group", + HTTPRequest: req, + Body: authz.AssignRoleToGroupBody{Roles: []string{"some-role"}, GroupType: models.GroupTypeOidc}, + }, + principal: &models.Principal{Username: "root-user"}, + expectedError: "assigning: cannot assign or revoke from root group root-group", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Groups(authentication.AuthType(tt.params.Body.GroupType), tt.params.ID)[0]).Return(tt.authorizeErr) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + rbacconfig: rbacconf.Config{ + RootUsers: []string{"root-user"}, + RootGroups: []string{"root-group"}, + ReadOnlyGroups: []string{"viewer-root-group"}, + }, + } + res := h.assignRoleToGroup(tt.params, tt.principal) + parsed, ok := res.(*authz.AssignRoleToGroupForbidden) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestAssignRoleToUserInternalServerError(t *testing.T) { + type testCase struct { + name string + params authz.AssignRoleToUserParams + principal *models.Principal + getRolesErr error + assignErr error + expectedError string + } + + 
userType := models.UserTypeInputDb + + tests := []testCase{ + { + name: "internal server error from assigning", + params: authz.AssignRoleToUserParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.AssignRoleToUserBody{ + Roles: []string{"testRole"}, + UserType: userType, + }, + }, + principal: &models.Principal{Username: "user1"}, + assignErr: fmt.Errorf("internal server error"), + expectedError: "internal server error", + }, + { + name: "internal server error from getting role", + params: authz.AssignRoleToUserParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.AssignRoleToUserBody{ + Roles: []string{"testRole"}, + UserType: userType, + }, + }, + principal: &models.Principal{Username: "user1"}, + getRolesErr: fmt.Errorf("internal server error"), + expectedError: "internal server error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Users(tt.params.ID)[0]).Return(nil) + controller.On("GetRoles", tt.params.Body.Roles[0]).Return(map[string][]authorization.Policy{tt.params.Body.Roles[0]: {}}, tt.getRolesErr) + if tt.getRolesErr == nil { + controller.On("GetUsers", "testUser").Return(map[string]*apikey.User{"testUser": {}}, nil) + controller.On("AddRolesForUser", conv.UserNameWithTypeFromId(tt.params.ID, authentication.AuthType(tt.params.Body.UserType)), tt.params.Body.Roles).Return(tt.assignErr) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.assignRoleToUser(tt.params, tt.principal) + parsed, ok := res.(*authz.AssignRoleToUserInternalServerError) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func 
TestAssignRoleToGroupInternalServerError(t *testing.T) { + type testCase struct { + name string + params authz.AssignRoleToGroupParams + principal *models.Principal + getRolesErr error + assignErr error + expectedError string + } + + tests := []testCase{ + { + name: "internal server error from assigning", + params: authz.AssignRoleToGroupParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.AssignRoleToGroupBody{ + Roles: []string{"testRole"}, GroupType: models.GroupTypeOidc, + }, + }, + principal: &models.Principal{Username: "root-user"}, + assignErr: fmt.Errorf("internal server error"), + expectedError: "internal server error", + }, + { + name: "internal server error from getting role", + params: authz.AssignRoleToGroupParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.AssignRoleToGroupBody{ + Roles: []string{"testRole"}, GroupType: models.GroupTypeOidc, + }, + }, + principal: &models.Principal{Username: "root-user"}, + getRolesErr: fmt.Errorf("internal server error"), + expectedError: "internal server error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Groups(authentication.AuthType(tt.params.Body.GroupType), tt.params.ID)[0]).Return(nil) + controller.On("GetRoles", tt.params.Body.Roles[0]).Return(map[string][]authorization.Policy{tt.params.Body.Roles[0]: {}}, tt.getRolesErr) + if tt.getRolesErr == nil { + controller.On("AddRolesForUser", conv.PrefixGroupName(tt.params.ID), tt.params.Body.Roles).Return(tt.assignErr) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + rbacconfig: rbacconf.Config{ + RootUsers: []string{"root-user"}, + }, + } + res := h.assignRoleToGroup(tt.params, tt.principal) + parsed, ok := 
res.(*authz.AssignRoleToGroupInternalServerError) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_create_role_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_create_role_test.go new file mode 100644 index 0000000000000000000000000000000000000000..47c43fc6711557f00d17e2acf55045cb138a19f9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_create_role_test.go @@ -0,0 +1,420 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "fmt" + "testing" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/usecases/schema" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/authz" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/conv" +) + +func TestCreateRoleSuccess(t *testing.T) { + type testCase struct { + name string + principal *models.Principal + params authz.CreateRoleParams + } + + tests := []testCase{ + { + name: "all are *", + principal: &models.Principal{Username: "user1"}, + params: authz.CreateRoleParams{ + HTTPRequest: req, + Body: &models.Role{ + Name: String("newRole"), + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{}, + }, + }, + }, + }, + 
}, + { + name: "collection checks", + principal: &models.Principal{Username: "user1"}, + params: authz.CreateRoleParams{ + HTTPRequest: req, + Body: &models.Role{ + Name: String("newRole"), + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{ + Collection: String("ABC"), + }, + }, + }, + }, + }, + }, + { + name: "collection and tenant checks", + principal: &models.Principal{Username: "user1"}, + params: authz.CreateRoleParams{ + HTTPRequest: req, + Body: &models.Role{ + Name: String("newRole"), + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Tenants: &models.PermissionTenants{ + Collection: String("ABC"), + Tenant: String("Tenant1"), + }, + }, + }, + }, + }, + }, + { + name: "* collections and tenant checks", + principal: &models.Principal{Username: "user1"}, + params: authz.CreateRoleParams{ + HTTPRequest: req, + Body: &models.Role{ + Name: String("newRole"), + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Tenants: &models.PermissionTenants{ + Tenant: String("Tenant1"), + }, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + schemaReader := schema.NewMockSchemaGetter(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + controller.On("GetRoles", *tt.params.Body.Name).Return(map[string][]authorization.Policy{}, nil) + controller.On("CreateRolesPermissions", mock.Anything).Return(nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + schemaReader: schemaReader, + logger: logger, + } + res := h.createRole(tt.params, tt.principal) + parsed, ok := res.(*authz.CreateRoleCreated) + assert.True(t, ok) + assert.NotNil(t, parsed) + }) + } 
+} + +func TestCreateRoleConflict(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + principal := &models.Principal{Username: "user1"} + params := authz.CreateRoleParams{ + HTTPRequest: req, + Body: &models.Role{ + Name: String("newRole"), + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{}, + }, + }, + }, + } + authorizer.On("Authorize", mock.Anything, principal, authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_ALL), authorization.Roles("newRole")[0]).Return(nil) + controller.On("GetRoles", *params.Body.Name).Return(map[string][]authorization.Policy{"newRole": {}}, nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.createRole(params, principal) + parsed, ok := res.(*authz.CreateRoleConflict) + assert.True(t, ok) + assert.Contains(t, parsed.Payload.Error[0].Message, fmt.Sprintf("role with name %s already exists", *params.Body.Name)) +} + +func TestCreateRoleBadRequest(t *testing.T) { + type testCase struct { + name string + params authz.CreateRoleParams + upsertErr error + expectedError string + } + + tests := []testCase{ + { + name: "role name is required", + params: authz.CreateRoleParams{ + HTTPRequest: req, + Body: &models.Role{ + Name: String(""), + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{}, + }, + }, + }, + }, + expectedError: "role name is required", + }, + { + name: "invalid role name", + params: authz.CreateRoleParams{ + HTTPRequest: req, + Body: &models.Role{ + Name: String("something/wrong"), + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{}, + }, + }, + }, + }, + expectedError: "role name is 
invalid", + }, + { + name: "invalid permission", + params: authz.CreateRoleParams{ + HTTPRequest: req, + Body: &models.Role{ + Name: String("someRole"), + Permissions: []*models.Permission{ + { + Action: String("manage_something"), + }, + }, + }, + }, + expectedError: "invalid role", + }, + { + name: "cannot create role with the same name as builtin role", + params: authz.CreateRoleParams{ + HTTPRequest: req, + Body: &models.Role{ + Name: &authorization.BuiltInRoles[0], + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{}, + }, + }, + }, + }, + expectedError: "you cannot create role with the same name as built-in role", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + schemaReader := schema.NewMockSchemaGetter(t) + logger, _ := test.NewNullLogger() + + if tt.expectedError == "" { + authorizer.On("Authorize", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + controller.On("GetRoles", *tt.params.Body.Name).Return(map[string][]authorization.Policy{}, nil) + controller.On("upsertRolesPermissions", mock.Anything).Return(tt.upsertErr) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + schemaReader: schemaReader, + logger: logger, + } + res := h.createRole(tt.params, nil) + parsed, ok := res.(*authz.CreateRoleBadRequest) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestCreateRoleForbidden(t *testing.T) { + type testCase struct { + name string + params authz.CreateRoleParams + principal *models.Principal + authorizeErr error + expectedError string + } + + tests := []testCase{ + { + name: "authorization error", + params: authz.CreateRoleParams{ + HTTPRequest: req, + Body: &models.Role{ + Name: 
String("newRole"), + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{}, + }, + }, + }, + }, + principal: &models.Principal{Username: "user1"}, + authorizeErr: errors.New("authorization error"), + expectedError: "authorization error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_ALL), authorization.Roles(*tt.params.Body.Name)[0]).Return(tt.authorizeErr) + if tt.authorizeErr != nil { + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_MATCH), authorization.Roles(*tt.params.Body.Name)[0]).Return(tt.authorizeErr) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.createRole(tt.params, tt.principal) + parsed, ok := res.(*authz.CreateRoleForbidden) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestCreateRoleInternalServerError(t *testing.T) { + type testCase struct { + name string + params authz.CreateRoleParams + principal *models.Principal + upsertErr error + expectedError string + } + + tests := []testCase{ + { + name: "upsert roles permissions error", + params: authz.CreateRoleParams{ + HTTPRequest: req, + Body: &models.Role{ + Name: String("newRole"), + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{}, + }, + }, + }, + }, + principal: &models.Principal{Username: "user1"}, + upsertErr: errors.New("upsert error"), + expectedError: "upsert error", + }, + } + 
+ for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + policies, err := conv.RolesToPolicies(tt.params.Body) + require.Nil(t, err) + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_ALL), authorization.Roles(*tt.params.Body.Name)[0]).Return(nil) + controller.On("GetRoles", *tt.params.Body.Name).Return(map[string][]authorization.Policy{}, nil) + controller.On("CreateRolesPermissions", policies).Return(tt.upsertErr) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.createRole(tt.params, tt.principal) + parsed, ok := res.(*authz.CreateRoleInternalServerError) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestCreateRoleUnprocessableRegexp(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + params := authz.CreateRoleParams{ + HTTPRequest: req, + Body: &models.Role{ + Name: String("newRole"), + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{Collection: String("/[a-z+/")}, + }, + }, + }, + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.createRole(params, &models.Principal{Username: "user1"}) + _, ok := res.(*authz.CreateRoleUnprocessableEntity) + assert.True(t, ok) + assert.Contains(t, res.(*authz.CreateRoleUnprocessableEntity).Payload.Error[0].Message, "role permissions are invalid") +} + +func String(s string) *string { + return &s +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_delete_role_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_delete_role_test.go new file mode 100644 index 0000000000000000000000000000000000000000..70602fa7fd131837fb2816105841e062421cc1e4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_delete_role_test.go @@ -0,0 +1,187 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/authz" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +func TestDeleteRoleSuccess(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + principal := &models.Principal{Username: "user1"} + params := authz.DeleteRoleParams{ + ID: "roleToRemove", + HTTPRequest: req, + } + controller.On("GetRoles", mock.Anything).Return(map[string][]authorization.Policy{"roleToRemove": {}}, nil) + authorizer.On("Authorize", mock.Anything, principal, authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_ALL), authorization.Roles("roleToRemove")[0]).Return(nil) + controller.On("DeleteRoles", params.ID).Return(nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.deleteRole(params, principal) + parsed, ok := res.(*authz.DeleteRoleNoContent) + assert.True(t, ok) + 
assert.NotNil(t, parsed) +} + +func TestDeleteRoleBadRequest(t *testing.T) { + type testCase struct { + name string + params authz.DeleteRoleParams + principal *models.Principal + expectedError string + } + + tests := []testCase{ + { + name: "update builtin role", + params: authz.DeleteRoleParams{ + HTTPRequest: req, + ID: authorization.BuiltInRoles[0], + }, + principal: &models.Principal{Username: "user1"}, + expectedError: "you can not delete built-in role", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + h := &authZHandlers{ + controller: controller, + logger: logger, + } + res := h.deleteRole(tt.params, tt.principal) + parsed, ok := res.(*authz.DeleteRoleBadRequest) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestDeleteRoleForbidden(t *testing.T) { + type testCase struct { + name string + params authz.DeleteRoleParams + principal *models.Principal + authorizeErr error + expectedError string + } + + tests := []testCase{ + { + name: "authorization error", + params: authz.DeleteRoleParams{ + ID: "roleToRemove", + HTTPRequest: req, + }, + principal: &models.Principal{Username: "user1"}, + authorizeErr: errors.New("authorization error"), + expectedError: "authorization error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + controller.On("GetRoles", mock.Anything).Return(map[string][]authorization.Policy{tt.params.ID: {}}, nil) + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_ALL), authorization.Roles(tt.params.ID)[0]).Return(tt.authorizeErr) + if tt.authorizeErr != nil { + 
authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_MATCH), authorization.Roles(tt.params.ID)[0]).Return(tt.authorizeErr) + } + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.deleteRole(tt.params, tt.principal) + parsed, ok := res.(*authz.DeleteRoleForbidden) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestDeleteRoleInternalServerError(t *testing.T) { + type testCase struct { + name string + params authz.DeleteRoleParams + principal *models.Principal + upsertErr error + expectedError string + } + + tests := []testCase{ + { + name: "remove role error", + params: authz.DeleteRoleParams{ + ID: "roleToRemove", + HTTPRequest: req, + }, + principal: &models.Principal{Username: "user1"}, + upsertErr: errors.New("remove error"), + expectedError: "remove error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + controller.On("GetRoles", mock.Anything).Return(map[string][]authorization.Policy{tt.params.ID: {}}, nil) + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_ALL), authorization.Roles(tt.params.ID)[0]).Return(nil) + controller.On("DeleteRoles", tt.params.ID).Return(tt.upsertErr) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.deleteRole(tt.params, tt.principal) + parsed, ok := res.(*authz.DeleteRoleInternalServerError) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_role_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_role_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a3aeff18b86bfb3ceb8bdfef26bd8fb2d57d3bff --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_role_test.go @@ -0,0 +1,216 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "fmt" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/authz" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/conv" +) + +func TestGetRoleSuccess(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + principal := &models.Principal{Username: "user1"} + params := authz.GetRoleParams{ + ID: "testRole", + HTTPRequest: req, + } + + policies := []authorization.Policy{ + { + Resource: authorization.Collections("ABC")[0], + Verb: authorization.READ, + Domain: authorization.SchemaDomain, + }, + } + + expectedPermissions, err := conv.PoliciesToPermission(policies...) 
+ assert.Nil(t, err) + + returnedPolices := map[string][]authorization.Policy{ + "testRole": policies, + } + authorizer.On("Authorize", mock.Anything, principal, authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_ALL), authorization.Roles(params.ID)[0]).Return(nil) + controller.On("GetRoles", params.ID).Return(returnedPolices, nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.getRole(params, principal) + parsed, ok := res.(*authz.GetRoleOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + assert.Equal(t, params.ID, *parsed.Payload.Name) + assert.Equal(t, expectedPermissions, parsed.Payload.Permissions) +} + +func TestGetRoleForbidden(t *testing.T) { + type testCase struct { + name string + params authz.GetRoleParams + principal *models.Principal + authorizeErr error + expectedError string + } + + tests := []testCase{ + { + name: "authorization error", + params: authz.GetRoleParams{ + HTTPRequest: req, + ID: "testRole", + }, + principal: &models.Principal{Username: "user1"}, + authorizeErr: fmt.Errorf("authorization error"), + expectedError: "authorization error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_ALL), authorization.Roles(tt.params.ID)[0]).Return(tt.authorizeErr) + if tt.authorizeErr != nil { + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_MATCH), authorization.Roles(tt.params.ID)[0]).Return(tt.authorizeErr) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.getRole(tt.params, tt.principal) + parsed, ok := 
res.(*authz.GetRoleForbidden) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestGetRoleNotFound(t *testing.T) { + type testCase struct { + name string + params authz.GetRoleParams + principal *models.Principal + expectedError string + } + + tests := []testCase{ + { + name: "role not found", + params: authz.GetRoleParams{ + HTTPRequest: req, + ID: "nonExistentRole", + }, + principal: &models.Principal{Username: "user1"}, + expectedError: "role not found", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_ALL), authorization.Roles(tt.params.ID)[0]).Return(nil) + controller.On("GetRoles", tt.params.ID).Return(map[string][]authorization.Policy{}, nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.getRole(tt.params, tt.principal) + _, ok := res.(*authz.GetRoleNotFound) + assert.True(t, ok) + }) + } +} + +func TestGetRoleInternalServerError(t *testing.T) { + type testCase struct { + name string + params authz.GetRoleParams + principal *models.Principal + getRolesErr error + expectedError string + } + + tests := []testCase{ + { + name: "internal server error from getting role", + params: authz.GetRoleParams{ + HTTPRequest: req, + ID: "testRole", + }, + principal: &models.Principal{Username: "user1"}, + getRolesErr: fmt.Errorf("internal server error"), + expectedError: "internal server error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + policies := 
[]authorization.Policy{ + { + Resource: authorization.Collections("ABC")[0], + Verb: authorization.READ, + Domain: authorization.SchemaDomain, + }, + } + + returnedPolices := map[string][]authorization.Policy{ + "testRole": policies, + } + + authorizer.On("Authorize", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + controller.On("GetRoles", tt.params.ID).Return(returnedPolices, tt.getRolesErr) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.getRole(tt.params, tt.principal) + parsed, ok := res.(*authz.GetRoleInternalServerError) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_roles_for_group_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_roles_for_group_test.go new file mode 100644 index 0000000000000000000000000000000000000000..43e26b23aca47c7b0145d97bc313cd4600e6398b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_roles_for_group_test.go @@ -0,0 +1,243 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "fmt" + "testing" + + "github.com/weaviate/weaviate/usecases/auth/authentication" + + "github.com/stretchr/testify/mock" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/authz" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/conv" +) + +func TestGetRolesForGroupSuccess(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + policies := []authorization.Policy{ + { + Resource: authorization.Collections("ABC")[0], + Verb: authorization.READ, + Domain: authorization.SchemaDomain, + }, + } + groupType := models.GroupTypeOidc + returnedPolices := map[string][]authorization.Policy{ + "testRole": policies, + } + truep := true + falseP := false + tests := []struct { + name string + params authz.GetRolesForGroupParams + principal *models.Principal + expectAuthz bool + }{ + { + name: "success", + params: authz.GetRolesForGroupParams{ + ID: "group1", + GroupType: string(groupType), + IncludeFullRoles: &truep, + HTTPRequest: req, + }, + principal: &models.Principal{Username: "user1", UserType: models.UserTypeInputDb}, + expectAuthz: true, + }, + { + name: "success without roles", + params: authz.GetRolesForGroupParams{ + ID: "group1", + GroupType: string(groupType), + IncludeFullRoles: &falseP, + HTTPRequest: req, + }, + principal: &models.Principal{Username: "user1", UserType: models.UserTypeInputDb}, + expectAuthz: true, + }, + { + name: "success for own group", + params: authz.GetRolesForGroupParams{ + ID: "group1", + GroupType: string(groupType), + IncludeFullRoles: &truep, + HTTPRequest: req, + }, + principal: &models.Principal{Username: "user1", UserType: models.UserTypeInputOidc, 
Groups: []string{"group1", "group2"}}, + expectAuthz: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.expectAuthz { + if tt.expectAuthz { + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.READ, authorization.Groups(authentication.AuthType(tt.params.GroupType), tt.params.ID)[0]).Return(nil) + } + + if *tt.params.IncludeFullRoles { + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_ALL), authorization.Roles("testRole")[0]).Return(nil) + } + } + controller.On("GetRolesForUserOrGroup", tt.params.ID, authentication.AuthTypeOIDC, true).Return(returnedPolices, nil) + // controller.On("GetUsers", tt.params.ID).Return(map[string]*apikey.User{"testUser": {}}, nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.getRolesForGroup(tt.params, tt.principal) + parsed, ok := res.(*authz.GetRolesForGroupOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + + permissions, err := conv.PoliciesToPermission(policies...) 
+ assert.Nil(t, err) + + roles := []*models.Role{ + { + Name: String("testRole"), + Permissions: permissions, + }, + } + expectedRoles := models.RolesListResponse(roles) + if *tt.params.IncludeFullRoles { + assert.Equal(t, expectedRoles, parsed.Payload) + } else { + assert.Nil(t, parsed.Payload[0].Permissions) + } + assert.Equal(t, *roles[0].Name, *parsed.Payload[0].Name) + }) + } +} + +func TestGetRolesForGroupForbidden(t *testing.T) { + type testCase struct { + name string + params authz.GetRolesForGroupParams + principal *models.Principal + authorizeErr error + expectedError string + } + truep := true + userType := models.UserTypeInputOidc + tests := []testCase{ + { + name: "authorization error no access to role", + params: authz.GetRolesForGroupParams{ + ID: "testUser", + GroupType: string(userType), + IncludeFullRoles: &truep, + HTTPRequest: req, + }, + principal: &models.Principal{Username: "user1", UserType: userType}, + authorizeErr: fmt.Errorf("authorization error"), + expectedError: "authorization error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + returnedPolices := map[string][]authorization.Policy{ + "testRole": { + { + Resource: authorization.Collections("ABC")[0], + Verb: authorization.READ, + Domain: authorization.SchemaDomain, + }, + }, + } + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.READ, authorization.Groups(authentication.AuthType(models.GroupTypeOidc), tt.params.ID)[0]).Return(nil) + + if tt.authorizeErr != nil { + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_ALL), authorization.Roles("testRole")[0]).Return(tt.authorizeErr) + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_MATCH), 
authorization.Roles("testRole")[0]).Return(tt.authorizeErr) + } + controller.On("GetRolesForUserOrGroup", tt.params.ID, authentication.AuthTypeOIDC, true).Return(returnedPolices, nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.getRolesForGroup(tt.params, tt.principal) + parsed, ok := res.(*authz.GetRolesForGroupForbidden) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestGetRolesForGroupInternalServerError(t *testing.T) { + type testCase struct { + name string + params authz.GetRolesForGroupParams + principal *models.Principal + getRolesErr error + expectedError string + } + + tests := []testCase{ + { + name: "internal server error", + params: authz.GetRolesForGroupParams{ + ID: "testGroup", + GroupType: string(models.GroupTypeOidc), + HTTPRequest: req, + }, + principal: &models.Principal{Username: "user1", UserType: models.UserTypeInputOidc}, + getRolesErr: fmt.Errorf("internal server error"), + expectedError: "internal server error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.READ, authorization.Groups(authentication.AuthType(models.GroupTypeOidc), tt.params.ID)[0]).Return(nil) + controller.On("GetRolesForUserOrGroup", tt.params.ID, authentication.AuthTypeOIDC, true).Return(nil, tt.getRolesErr) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.getRolesForGroup(tt.params, tt.principal) + parsed, ok := res.(*authz.GetRolesForGroupInternalServerError) + assert.True(t, ok) + + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + }) + } +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_roles_for_user_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_roles_for_user_test.go new file mode 100644 index 0000000000000000000000000000000000000000..65b9f480764fbb090ff232ae6806853644ea988d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_roles_for_user_test.go @@ -0,0 +1,300 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "fmt" + "testing" + + "github.com/weaviate/weaviate/usecases/auth/authentication" + + "github.com/stretchr/testify/mock" + + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/authz" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/conv" +) + +func TestGetRolesForUserSuccess(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + policies := []authorization.Policy{ + { + Resource: authorization.Collections("ABC")[0], + Verb: authorization.READ, + Domain: authorization.SchemaDomain, + }, + } + userType := models.UserTypeInputDb + returnedPolices := map[string][]authorization.Policy{ + "testRole": policies, + } + truep := true + falseP := false + tests := []struct { + name string + params authz.GetRolesForUserParams + principal *models.Principal + expectAuthz bool + }{ + { + name: "success", + params: 
authz.GetRolesForUserParams{ + ID: "testUser", + UserType: string(userType), + HTTPRequest: req, + IncludeFullRoles: &truep, + }, + principal: &models.Principal{Username: "user1", UserType: models.UserTypeInputDb}, + expectAuthz: true, + }, + { + name: "success without roles", + params: authz.GetRolesForUserParams{ + ID: "testUser", + UserType: string(userType), + IncludeFullRoles: &falseP, + HTTPRequest: req, + }, + principal: &models.Principal{Username: "user1", UserType: models.UserTypeInputDb}, + expectAuthz: true, + }, + { + name: "success for own user", + params: authz.GetRolesForUserParams{ + ID: "user1", + UserType: string(userType), + IncludeFullRoles: &truep, + HTTPRequest: req, + }, + principal: &models.Principal{Username: "user1", UserType: models.UserTypeInputDb}, + expectAuthz: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.expectAuthz { + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.READ, authorization.Users(tt.params.ID)[0]).Return(nil) + if *tt.params.IncludeFullRoles { + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_ALL), authorization.Roles("testRole")[0]).Return(nil) + } + } + controller.On("GetRolesForUserOrGroup", tt.params.ID, authentication.AuthTypeDb, false).Return(returnedPolices, nil) + controller.On("GetUsers", tt.params.ID).Return(map[string]*apikey.User{"testUser": {}}, nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.getRolesForUser(tt.params, tt.principal) + parsed, ok := res.(*authz.GetRolesForUserOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + + permissions, err := conv.PoliciesToPermission(policies...) 
+ assert.Nil(t, err) + + roles := []*models.Role{ + { + Name: String("testRole"), + Permissions: permissions, + }, + } + expectedRoles := models.RolesListResponse(roles) + if *tt.params.IncludeFullRoles { + assert.Equal(t, expectedRoles, parsed.Payload) + } else { + assert.Nil(t, parsed.Payload[0].Permissions) + } + assert.Equal(t, *roles[0].Name, *parsed.Payload[0].Name) + }) + } +} + +func TestGetRolesForUserForbidden(t *testing.T) { + type testCase struct { + name string + params authz.GetRolesForUserParams + principal *models.Principal + authorizeErr error + expectedError string + } + truep := true + userType := models.UserTypeInputDb + tests := []testCase{ + { + name: "authorization error no access to role", + params: authz.GetRolesForUserParams{ + ID: "testUser", + UserType: string(userType), + IncludeFullRoles: &truep, + HTTPRequest: req, + }, + principal: &models.Principal{Username: "user1", UserType: userType}, + authorizeErr: fmt.Errorf("authorization error"), + expectedError: "authorization error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + returnedPolices := map[string][]authorization.Policy{ + "testRole": { + { + Resource: authorization.Collections("ABC")[0], + Verb: authorization.READ, + Domain: authorization.SchemaDomain, + }, + }, + } + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.READ, authorization.Users(tt.params.ID)[0]).Return(nil) + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_ALL), authorization.Roles("testRole")[0]).Return(tt.authorizeErr) + if tt.authorizeErr != nil { + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_MATCH), 
authorization.Roles("testRole")[0]).Return(tt.authorizeErr) + } + controller.On("GetRolesForUserOrGroup", tt.params.ID, authentication.AuthType(userType), false).Return(returnedPolices, nil) + controller.On("GetUsers", tt.params.ID).Return(map[string]*apikey.User{tt.params.ID: {}}, nil) + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.getRolesForUser(tt.params, tt.principal) + parsed, ok := res.(*authz.GetRolesForUserForbidden) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestGetRolesForUserInternalServerError(t *testing.T) { + type testCase struct { + name string + params authz.GetRolesForUserParams + principal *models.Principal + getRolesErr error + expectedError string + } + + userType := models.UserTypeInputDb + tests := []testCase{ + { + name: "internal server error", + params: authz.GetRolesForUserParams{ + ID: "testUser", + UserType: string(userType), + HTTPRequest: req, + }, + principal: &models.Principal{Username: "user1", UserType: userType}, + getRolesErr: fmt.Errorf("internal server error"), + expectedError: "internal server error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.READ, authorization.Users(tt.params.ID)[0]).Return(nil) + controller.On("GetRolesForUserOrGroup", tt.params.ID, authentication.AuthType(userType), false).Return(nil, tt.getRolesErr) + controller.On("GetUsers", tt.params.ID).Return(map[string]*apikey.User{tt.params.ID: {}}, nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.getRolesForUser(tt.params, tt.principal) + parsed, ok := 
res.(*authz.GetRolesForUserInternalServerError) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestSortRolesByName(t *testing.T) { + tests := []struct { + name string + input []*models.Role + expected []*models.Role + }{ + { + name: "already sorted", + input: []*models.Role{ + {Name: String("admin")}, + {Name: String("editor")}, + {Name: String("user")}, + }, + expected: []*models.Role{ + {Name: String("admin")}, + {Name: String("editor")}, + {Name: String("user")}, + }, + }, + { + name: "unsorted", + input: []*models.Role{ + {Name: String("user")}, + {Name: String("admin")}, + {Name: String("editor")}, + }, + expected: []*models.Role{ + {Name: String("admin")}, + {Name: String("editor")}, + {Name: String("user")}, + }, + }, + { + name: "same name", + input: []*models.Role{ + {Name: String("admin")}, + {Name: String("admin")}, + {Name: String("editor")}, + }, + expected: []*models.Role{ + {Name: String("admin")}, + {Name: String("admin")}, + {Name: String("editor")}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sortByName(tt.input) + assert.Equal(t, tt.expected, tt.input) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_roles_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_roles_test.go new file mode 100644 index 0000000000000000000000000000000000000000..31e2cb0353e8346dc51147ce4cab4a520a4cc776 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_roles_test.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/authz" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" +) + +func TestGetRolesSuccess(t *testing.T) { + type testCase struct { + name string + principal *models.Principal + authorizedRoles []string + expectedRoles map[string][]authorization.Policy + } + + tests := []testCase{ + { + name: "success non root user", + principal: &models.Principal{Username: "user1"}, + authorizedRoles: []string{"testRole"}, + expectedRoles: map[string][]authorization.Policy{ + "testRole": {}, + }, + }, + { + name: "success as root user", + principal: &models.Principal{Username: "root"}, + authorizedRoles: []string{"testRole", "root"}, + expectedRoles: map[string][]authorization.Policy{ + "testRole": {}, "root": {}, + }, + }, + { + name: "success without principal", + principal: nil, + authorizedRoles: []string{"testRole"}, + expectedRoles: map[string][]authorization.Policy{ + "testRole": {}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + authorizer.On("Authorize", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + controller.On("GetRoles").Return(tt.expectedRoles, nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + rbacconfig: rbacconf.Config{Enabled: true, RootUsers: []string{"root"}}, + } + res := h.getRoles(authz.GetRolesParams{HTTPRequest: req}, tt.principal) + parsed, ok := res.(*authz.GetRolesOK) + assert.True(t, ok) + 
assert.Len(t, parsed.Payload, len(tt.expectedRoles)) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_users_for_role_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_users_for_role_test.go new file mode 100644 index 0000000000000000000000000000000000000000..83f6457d8948dd9443babc385aade178202a3669 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_get_users_for_role_test.go @@ -0,0 +1,175 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "fmt" + "testing" + + "github.com/weaviate/weaviate/usecases/auth/authentication" + + "github.com/stretchr/testify/mock" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/authz" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +func TestGetUsersForRoleSuccess(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + principal := &models.Principal{Username: "user1"} + params := authz.GetUsersForRoleParams{ + ID: "testuser", + HTTPRequest: req, + } + + expectedUsers := []string{"user1", "user2"} + expectedResponse := []*authz.GetUsersForRoleOKBodyItems0{ + {UserID: expectedUsers[0], UserType: models.NewUserTypeOutput(models.UserTypeOutputOidc)}, + {UserID: expectedUsers[1], UserType: models.NewUserTypeOutput(models.UserTypeOutputOidc)}, + {UserID: expectedUsers[0], UserType: models.NewUserTypeOutput(models.UserTypeOutputDbEnvUser)}, + {UserID: 
expectedUsers[1], UserType: models.NewUserTypeOutput(models.UserTypeOutputDbEnvUser)}, + } + + authorizer.On("Authorize", mock.Anything, principal, authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_ALL), authorization.Roles(params.ID)[0]).Return(nil) + authorizer.On("AuthorizeSilent", mock.Anything, principal, authorization.READ, authorization.Users(expectedUsers...)[1]).Return(nil) + controller.On("GetUsersOrGroupForRole", params.ID, authentication.AuthTypeDb, false).Return(expectedUsers, nil) + controller.On("GetUsersOrGroupForRole", params.ID, authentication.AuthTypeOIDC, false).Return(expectedUsers, nil) + controller.On("GetUsers", expectedUsers[0], expectedUsers[1]).Return(nil, nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.getUsersForRole(params, principal) + parsed, ok := res.(*authz.GetUsersForRoleOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + assert.Equal(t, expectedResponse, parsed.Payload) +} + +func TestGetUsersForRoleForbidden(t *testing.T) { + type testCase struct { + name string + params authz.GetUsersForRoleParams + principal *models.Principal + authorizeErr error + skipAuthZ bool + expectedError string + } + + tests := []testCase{ + { + name: "authorization error", + params: authz.GetUsersForRoleParams{ + ID: "testRole", + HTTPRequest: req, + }, + principal: &models.Principal{Username: "user1"}, + authorizeErr: fmt.Errorf("authorization error"), + expectedError: "authorization error", + }, + { + name: "root", + params: authz.GetUsersForRoleParams{ + ID: "root", + HTTPRequest: req, + }, + skipAuthZ: true, + principal: &models.Principal{Username: "user1"}, + expectedError: "modifying 'root' role or changing its assignments is not allowed", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + 
if !tt.skipAuthZ { + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_ALL), authorization.Roles(tt.params.ID)[0]).Return(tt.authorizeErr) + if tt.authorizeErr != nil { + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_MATCH), authorization.Roles(tt.params.ID)[0]).Return(tt.authorizeErr) + } + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.getUsersForRole(tt.params, tt.principal) + parsed, ok := res.(*authz.GetUsersForRoleForbidden) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestGetUsersForRoleInternalServerError(t *testing.T) { + type testCase struct { + name string + params authz.GetUsersForRoleParams + principal *models.Principal + getUsersErr error + expectedError string + } + + tests := []testCase{ + { + name: "internal server error", + params: authz.GetUsersForRoleParams{ + ID: "testRole", + HTTPRequest: req, + }, + principal: &models.Principal{Username: "user1"}, + getUsersErr: fmt.Errorf("internal server error"), + expectedError: "internal server error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_ALL), authorization.Roles(tt.params.ID)[0]).Return(nil) + + controller.On("GetUsersOrGroupForRole", tt.params.ID, authentication.AuthTypeOIDC, false).Return(nil, tt.getUsersErr) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.getUsersForRole(tt.params, tt.principal) + parsed, ok := 
res.(*authz.GetUsersForRoleInternalServerError) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_remove_permission_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_remove_permission_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a71416c1a3a2452a25e3bd29cafdaa8bbd406257 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_remove_permission_test.go @@ -0,0 +1,308 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "fmt" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/authz" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/conv" +) + +func TestRemovePermissionsSuccessUpdate(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + principal := &models.Principal{Username: "user1"} + params := authz.RemovePermissionsParams{ + ID: "test", + HTTPRequest: req, + Body: authz.RemovePermissionsBody{ + Permissions: []*models.Permission{ + { + Action: String("create_roles"), + Roles: &models.PermissionRoles{}, + }, + }, + }, + } + policies, err := conv.PermissionToPolicies(params.Body.Permissions...) 
+ require.Nil(t, err) + + authorizer.On("Authorize", mock.Anything, principal, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_ALL), authorization.Roles(params.ID)[0]).Return(nil) + controller.On("GetRoles", params.ID).Return(map[string][]authorization.Policy{params.ID: { + {Resource: "whatever", Verb: authorization.READ, Domain: "whatever"}, + {Resource: "whatever", Verb: authorization.READ, Domain: "whatever"}, + }}, nil) + controller.On("RemovePermissions", params.ID, policies).Return(nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.removePermissions(params, principal) + parsed, ok := res.(*authz.RemovePermissionsOK) + assert.True(t, ok) + assert.NotNil(t, parsed) +} + +func TestRemovePermissionsBadRequest(t *testing.T) { + type testCase struct { + name string + params authz.RemovePermissionsParams + principal *models.Principal + expectedError string + } + + tests := []testCase{ + { + name: "role has to have at least 1 permission", + params: authz.RemovePermissionsParams{ + ID: "someRole", + HTTPRequest: req, + Body: authz.RemovePermissionsBody{ + Permissions: []*models.Permission{}, + }, + }, + principal: &models.Principal{Username: "user1"}, + expectedError: "role has to have at least 1 permission", + }, + // { + // name: "invalid permission", + // params: authz.RemovePermissionsParams{ + // Body: authz.RemovePermissionsBody{ + // Name: String("someName"), + // Permissions: []*models.Permission{ + // { + // Action: String("create_roles"), + // }, + // }, + // }, + // }, + // principal: &models.Principal{Username: "user1"}, + // expectedError: "invalid permission", + // }, + { + name: "update builtin role", + params: authz.RemovePermissionsParams{ + ID: authorization.BuiltInRoles[0], + HTTPRequest: req, + Body: authz.RemovePermissionsBody{ + Permissions: []*models.Permission{ + { + Action: String("create_roles"), + }, + }, + }, + }, + principal: 
&models.Principal{Username: "user1"}, + expectedError: "you cannot update built-in role", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + h := &authZHandlers{ + controller: controller, + logger: logger, + } + res := h.removePermissions(tt.params, tt.principal) + parsed, ok := res.(*authz.RemovePermissionsBadRequest) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestRemovePermissionsForbidden(t *testing.T) { + type testCase struct { + name string + params authz.RemovePermissionsParams + principal *models.Principal + authorizeErr error + expectedError string + } + + tests := []testCase{ + { + name: "remove permissions", + params: authz.RemovePermissionsParams{ + ID: "someRole", + HTTPRequest: req, + Body: authz.RemovePermissionsBody{ + Permissions: []*models.Permission{ + { + Action: String("read_roles"), + Roles: &models.PermissionRoles{}, + }, + }, + }, + }, + principal: &models.Principal{Username: "user1"}, + authorizeErr: fmt.Errorf("some error from authZ"), + expectedError: "some error from authZ", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_ALL), authorization.Roles(tt.params.ID)[0]).Return(tt.authorizeErr) + if tt.authorizeErr != nil { + // 2nd Call if update failed + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_MATCH), authorization.Roles(tt.params.ID)[0]).Return(tt.authorizeErr) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, 
+ logger: logger, + } + res := h.removePermissions(tt.params, tt.principal) + parsed, ok := res.(*authz.RemovePermissionsForbidden) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestRemovePermissionsRoleNotFound(t *testing.T) { + type testCase struct { + name string + params authz.RemovePermissionsParams + principal *models.Principal + expectedError string + } + + tests := []testCase{ + { + name: "role not found", + params: authz.RemovePermissionsParams{ + ID: "some role", + HTTPRequest: req, + Body: authz.RemovePermissionsBody{ + Permissions: []*models.Permission{ + { + Action: String(authorization.CreateCollections), + Collections: &models.PermissionCollections{ + Collection: String("*"), + }, + }, + }, + }, + }, + principal: &models.Principal{Username: "user1"}, + expectedError: "role not found", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_ALL), authorization.Roles(tt.params.ID)[0]).Return(nil) + controller.On("GetRoles", tt.params.ID).Return(map[string][]authorization.Policy{}, nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.removePermissions(tt.params, tt.principal) + _, ok := res.(*authz.RemovePermissionsNotFound) + assert.True(t, ok) + }) + } +} + +func TestRemovePermissionsInternalServerError(t *testing.T) { + type testCase struct { + name string + params authz.RemovePermissionsParams + principal *models.Principal + upsertErr error + expectedError string + } + + tests := []testCase{ + { + name: "update some role", + params: authz.RemovePermissionsParams{ + ID: "someRole", + HTTPRequest: req, 
+ Body: authz.RemovePermissionsBody{ + Permissions: []*models.Permission{ + { + Action: String("update_roles"), + Roles: &models.PermissionRoles{}, + }, + }, + }, + }, + principal: &models.Principal{Username: "user1"}, + upsertErr: fmt.Errorf("some error from controller"), + expectedError: "some error from controller", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_ALL), authorization.Roles(tt.params.ID)[0]).Return(nil) + controller.On("GetRoles", tt.params.ID).Return(map[string][]authorization.Policy{tt.params.ID: { + {Resource: "whatever", Verb: authorization.READ, Domain: "whatever"}, + {Resource: "whatever", Verb: authorization.READ, Domain: "whatever"}, + }}, nil) + controller.On("RemovePermissions", mock.Anything, mock.Anything).Return(tt.upsertErr) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.removePermissions(tt.params, tt.principal) + parsed, ok := res.(*authz.RemovePermissionsInternalServerError) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_revoke_roles_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_revoke_roles_test.go new file mode 100644 index 0000000000000000000000000000000000000000..34bf20a2cc699bf6c0ba578c54a4f8ce42bb19b0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_revoke_roles_test.go @@ -0,0 +1,704 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / 
__/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "fmt" + "testing" + + "github.com/weaviate/weaviate/usecases/auth/authentication" + + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/authz" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/conv" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" + "github.com/weaviate/weaviate/usecases/config" +) + +func TestRevokeRoleFromUserSuccess(t *testing.T) { + userType := models.UserTypeInputDb + tests := []struct { + name string + principal *models.Principal + params authz.RevokeRoleFromUserParams + configuredAdmins []string + configuredViewers []string + }{ + { + name: "successful revocation", + principal: &models.Principal{Username: "user1"}, + params: authz.RevokeRoleFromUserParams{ + ID: "user1", + HTTPRequest: req, + Body: authz.RevokeRoleFromUserBody{ + Roles: []string{"testRole"}, + UserType: userType, + }, + }, + }, + { + name: "revoke another user not configured admin role", + params: authz.RevokeRoleFromUserParams{ + ID: "user1", + HTTPRequest: req, + Body: authz.RevokeRoleFromUserBody{ + Roles: []string{"admin"}, + UserType: userType, + }, + }, + configuredAdmins: []string{"testUser"}, + principal: &models.Principal{Username: "user1"}, + }, + { + name: "revoke another user user not configured viewer role", + params: authz.RevokeRoleFromUserParams{ + ID: "user1", + HTTPRequest: req, + Body: authz.RevokeRoleFromUserBody{ + Roles: []string{"viewer"}, + UserType: userType, + }, + }, + configuredViewers: 
[]string{"testUser"}, + principal: &models.Principal{Username: "user1"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Users(tt.params.ID)[0]).Return(nil) + controller.On("GetRoles", tt.params.Body.Roles[0]).Return(map[string][]authorization.Policy{tt.params.Body.Roles[0]: {}}, nil) + controller.On("RevokeRolesForUser", conv.UserNameWithTypeFromId(tt.params.ID, authentication.AuthType(tt.params.Body.UserType)), tt.params.Body.Roles[0]).Return(nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + apiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{"user1"}}, + logger: logger, + } + res := h.revokeRoleFromUser(tt.params, tt.principal) + parsed, ok := res.(*authz.RevokeRoleFromUserOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + }) + } +} + +func TestRevokeRoleFromGroupSuccess(t *testing.T) { + tests := []struct { + name string + principal *models.Principal + params authz.RevokeRoleFromGroupParams + configuredAdmins []string + configuredViewers []string + }{ + { + name: "successful revocation", + principal: &models.Principal{Username: "root-user"}, + params: authz.RevokeRoleFromGroupParams{ + ID: "user1", + HTTPRequest: req, + Body: authz.RevokeRoleFromGroupBody{ + Roles: []string{"testRole"}, GroupType: models.GroupTypeOidc, + }, + }, + }, + { + name: "successful revocation via root group", + principal: &models.Principal{Username: "not-root-user", Groups: []string{"root-group"}}, + params: authz.RevokeRoleFromGroupParams{ + ID: "user1", + HTTPRequest: req, + Body: authz.RevokeRoleFromGroupBody{ + Roles: []string{"testRole"}, GroupType: models.GroupTypeOidc, + }, + }, + }, + { + name: "revoke another user not configured admin role", + 
params: authz.RevokeRoleFromGroupParams{ + ID: "user1", + HTTPRequest: req, + Body: authz.RevokeRoleFromGroupBody{ + Roles: []string{"admin"}, GroupType: models.GroupTypeOidc, + }, + }, + configuredAdmins: []string{"testUser"}, + principal: &models.Principal{Username: "root-user"}, + }, + { + name: "revoke another user user not configured viewer role", + params: authz.RevokeRoleFromGroupParams{ + ID: "user1", + HTTPRequest: req, + Body: authz.RevokeRoleFromGroupBody{ + Roles: []string{"viewer"}, GroupType: models.GroupTypeOidc, + }, + }, + configuredViewers: []string{"testUser"}, + principal: &models.Principal{Username: "root-user"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Groups(authentication.AuthType(tt.params.Body.GroupType), tt.params.ID)[0]).Return(nil) + controller.On("GetRoles", tt.params.Body.Roles[0]).Return(map[string][]authorization.Policy{tt.params.Body.Roles[0]: {}}, nil) + controller.On("RevokeRolesForUser", conv.PrefixGroupName(tt.params.ID), tt.params.Body.Roles[0]).Return(nil) + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + apiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{"user1"}}, + logger: logger, + rbacconfig: rbacconf.Config{ + RootUsers: []string{"root-user"}, RootGroups: []string{"root-group"}, + }, + } + res := h.revokeRoleFromGroup(tt.params, tt.principal) + parsed, ok := res.(*authz.RevokeRoleFromGroupOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + }) + } +} + +func TestRevokeRoleFromUserBadRequest(t *testing.T) { + type testCase struct { + name string + params authz.RevokeRoleFromUserParams + principal *models.Principal + expectedError string + existedRoles map[string][]authorization.Policy + 
callAuthZ bool + } + + tests := []testCase{ + { + name: "empty role", + params: authz.RevokeRoleFromUserParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.RevokeRoleFromUserBody{ + Roles: []string{""}, + }, + }, + principal: &models.Principal{Username: "user1"}, + expectedError: "one or more of the roles you want to revoke is empty", + existedRoles: map[string][]authorization.Policy{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + if tt.callAuthZ { + authorizer.On("Authorize", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.revokeRoleFromUser(tt.params, tt.principal) + parsed, ok := res.(*authz.RevokeRoleFromUserBadRequest) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestRevokeRoleFromGroupBadRequest(t *testing.T) { + type testCase struct { + name string + params authz.RevokeRoleFromGroupParams + principal *models.Principal + expectedError string + existedRoles map[string][]authorization.Policy + callAuthZ bool + } + + tests := []testCase{ + { + name: "empty role", + params: authz.RevokeRoleFromGroupParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.RevokeRoleFromGroupBody{ + Roles: []string{""}, + }, + }, + principal: &models.Principal{Username: "user1"}, + expectedError: "one or more of the roles you want to revoke is empty", + existedRoles: map[string][]authorization.Policy{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + if tt.callAuthZ { + 
authorizer.On("Authorize", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.revokeRoleFromGroup(tt.params, tt.principal) + parsed, ok := res.(*authz.RevokeRoleFromGroupBadRequest) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestRevokeRoleFromUserOrUserNotFound(t *testing.T) { + type testCase struct { + name string + params authz.RevokeRoleFromUserParams + principal *models.Principal + existedRoles map[string][]authorization.Policy + existedUsers []string + callToGetUser bool + } + + tests := []testCase{ + { + name: "user not found", + params: authz.RevokeRoleFromUserParams{ + ID: "user_not_exist", + HTTPRequest: req, + Body: authz.RevokeRoleFromUserBody{ + Roles: []string{"role1"}, + }, + }, + principal: &models.Principal{Username: "user1"}, + existedRoles: map[string][]authorization.Policy{"role1": {}}, + existedUsers: []string{"user1"}, + callToGetUser: true, + }, + { + name: "role not found", + params: authz.RevokeRoleFromUserParams{ + ID: "user1", + HTTPRequest: req, + Body: authz.RevokeRoleFromUserBody{ + Roles: []string{"role1"}, + }, + }, + principal: &models.Principal{Username: "user1"}, + existedRoles: map[string][]authorization.Policy{}, + existedUsers: []string{"user1"}, + callToGetUser: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Users(tt.params.ID)[0]).Return(nil) + + controller.On("GetRoles", tt.params.Body.Roles[0]).Return(tt.existedRoles, nil) + if tt.callToGetUser { + controller.On("GetUsers", 
tt.params.ID).Return(nil, nil) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + apiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: tt.existedUsers}, + logger: logger, + } + res := h.revokeRoleFromUser(tt.params, tt.principal) + parsed, ok := res.(*authz.RevokeRoleFromUserNotFound) + assert.True(t, ok) + assert.Contains(t, parsed.Payload.Error[0].Message, "doesn't exist") + }) + } +} + +func TestRevokeRoleFromGroupOrUserNotFound(t *testing.T) { + type testCase struct { + name string + params authz.RevokeRoleFromGroupParams + principal *models.Principal + existedRoles map[string][]authorization.Policy + existedUsers []string + callToGetRole bool + } + + tests := []testCase{ + { + name: "role not found", + params: authz.RevokeRoleFromGroupParams{ + ID: "user1", + HTTPRequest: req, + Body: authz.RevokeRoleFromGroupBody{ + Roles: []string{"role1"}, GroupType: models.GroupTypeOidc, + }, + }, + principal: &models.Principal{Username: "root-user"}, + existedRoles: map[string][]authorization.Policy{}, + existedUsers: []string{"user1"}, + callToGetRole: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Groups(authentication.AuthType(tt.params.Body.GroupType), tt.params.ID)[0]).Return(nil) + if tt.callToGetRole { + controller.On("GetRoles", tt.params.Body.Roles[0]).Return(tt.existedRoles, nil) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + apiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: tt.existedUsers}, + logger: logger, + rbacconfig: rbacconf.Config{ + RootUsers: []string{"root-user"}, + }, + } + res := h.revokeRoleFromGroup(tt.params, tt.principal) + _, ok := res.(*authz.RevokeRoleFromGroupNotFound) + 
assert.True(t, ok) + }) + } +} + +func TestRevokeRoleFromUserForbidden(t *testing.T) { + type testCase struct { + name string + params authz.RevokeRoleFromUserParams + principal *models.Principal + authorizeErr error + expectedError string + skipAuthZ bool + } + + tests := []testCase{ + { + name: "authorization error", + params: authz.RevokeRoleFromUserParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.RevokeRoleFromUserBody{ + Roles: []string{"testRole"}, + }, + }, + principal: &models.Principal{Username: "user1"}, + authorizeErr: fmt.Errorf("authorization error"), + expectedError: "authorization error", + }, + { + name: "revoke configured root role", + params: authz.RevokeRoleFromUserParams{ + ID: "root-user", + HTTPRequest: req, + Body: authz.RevokeRoleFromUserBody{ + Roles: []string{"root"}, + }, + }, + skipAuthZ: true, + principal: &models.Principal{Username: "user1"}, + expectedError: "revoking: modifying 'root' role or changing its assignments is not allowed", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + if !tt.skipAuthZ { + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Users(tt.params.ID)[0]).Return(tt.authorizeErr) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + rbacconfig: rbacconf.Config{ + RootUsers: []string{"root-user"}, + }, + } + res := h.revokeRoleFromUser(tt.params, tt.principal) + parsed, ok := res.(*authz.RevokeRoleFromUserForbidden) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestRevokeRoleFromGroupForbidden(t *testing.T) { + type testCase struct { + name string + params authz.RevokeRoleFromGroupParams + principal *models.Principal + 
authorizeErr error + expectedError string + skipAuthZ bool + } + + tests := []testCase{ + { + name: "authorization error", + params: authz.RevokeRoleFromGroupParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.RevokeRoleFromGroupBody{ + Roles: []string{"testRole"}, GroupType: models.GroupTypeOidc, + }, + }, + principal: &models.Principal{Username: "user1", Groups: []string{"testGroup"}}, + authorizeErr: fmt.Errorf("authorization error"), + expectedError: "authorization error", + }, + { + name: "revoke role from root group as root user", + params: authz.RevokeRoleFromGroupParams{ + ID: "viewer-root-group", + HTTPRequest: req, + Body: authz.RevokeRoleFromGroupBody{ + Roles: []string{"something"}, GroupType: models.GroupTypeOidc, + }, + }, + principal: &models.Principal{Username: "root-user"}, + expectedError: "revoking: cannot assign or revoke from root group", + }, + { + name: "revoke configured root role", + params: authz.RevokeRoleFromGroupParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.RevokeRoleFromGroupBody{ + Roles: []string{"root"}, GroupType: models.GroupTypeOidc, + }, + }, + skipAuthZ: true, + principal: &models.Principal{Username: "user1"}, + expectedError: "revoking: modifying 'root' role or changing its assignments is not allowed", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + if !tt.skipAuthZ { + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Groups(authentication.AuthType(tt.params.Body.GroupType), tt.params.ID)[0]).Return(tt.authorizeErr) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + rbacconfig: rbacconf.Config{ + RootUsers: []string{"root-user"}, + RootGroups: []string{"root-group"}, + ReadOnlyGroups: []string{"viewer-root-group"}, + }, + 
} + res := h.revokeRoleFromGroup(tt.params, tt.principal) + parsed, ok := res.(*authz.RevokeRoleFromGroupForbidden) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestRevokeRoleFromUserInternalServerError(t *testing.T) { + type testCase struct { + name string + params authz.RevokeRoleFromUserParams + principal *models.Principal + getRolesErr error + revokeErr error + expectedError string + } + userType := models.UserTypeInputDb + tests := []testCase{ + { + name: "internal server error from revoking", + params: authz.RevokeRoleFromUserParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.RevokeRoleFromUserBody{ + Roles: []string{"testRole"}, + UserType: userType, + }, + }, + principal: &models.Principal{Username: "user1"}, + revokeErr: fmt.Errorf("internal server error"), + expectedError: "internal server error", + }, + { + name: "internal server error from getting role", + params: authz.RevokeRoleFromUserParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.RevokeRoleFromUserBody{ + Roles: []string{"testRole"}, + UserType: userType, + }, + }, + principal: &models.Principal{Username: "user1"}, + getRolesErr: fmt.Errorf("internal server error"), + expectedError: "internal server error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Users(tt.params.ID)[0]).Return(nil) + controller.On("GetRoles", tt.params.Body.Roles[0]).Return(map[string][]authorization.Policy{tt.params.Body.Roles[0]: {}}, tt.getRolesErr) + if tt.getRolesErr == nil { + controller.On("GetUsers", "testUser").Return(map[string]*apikey.User{"testUser": {}}, nil) + controller.On("RevokeRolesForUser", 
conv.UserNameWithTypeFromId(tt.params.ID, authentication.AuthType(tt.params.Body.UserType)), tt.params.Body.Roles[0]).Return(tt.revokeErr) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + } + res := h.revokeRoleFromUser(tt.params, tt.principal) + parsed, ok := res.(*authz.RevokeRoleFromUserInternalServerError) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} + +func TestRevokeRoleFromGroupInternalServerError(t *testing.T) { + type testCase struct { + name string + params authz.RevokeRoleFromGroupParams + principal *models.Principal + getRolesErr error + revokeErr error + expectedError string + } + + tests := []testCase{ + { + name: "internal server error from revoking", + params: authz.RevokeRoleFromGroupParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.RevokeRoleFromGroupBody{ + Roles: []string{"testRole"}, GroupType: models.GroupTypeOidc, + }, + }, + principal: &models.Principal{Username: "root-user"}, + revokeErr: fmt.Errorf("internal server error"), + expectedError: "internal server error", + }, + { + name: "internal server error from getting role", + params: authz.RevokeRoleFromGroupParams{ + ID: "testUser", + HTTPRequest: req, + Body: authz.RevokeRoleFromGroupBody{ + Roles: []string{"testRole"}, GroupType: models.GroupTypeOidc, + }, + }, + principal: &models.Principal{Username: "root-user"}, + getRolesErr: fmt.Errorf("internal server error"), + expectedError: "internal server error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + controller := NewMockControllerAndGetUsers(t) + logger, _ := test.NewNullLogger() + + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE, authorization.Groups(authentication.AuthType(tt.params.Body.GroupType), tt.params.ID)[0]).Return(nil) + 
controller.On("GetRoles", tt.params.Body.Roles[0]).Return(map[string][]authorization.Policy{tt.params.Body.Roles[0]: {}}, tt.getRolesErr) + if tt.getRolesErr == nil { + controller.On("RevokeRolesForUser", conv.PrefixGroupName(tt.params.ID), tt.params.Body.Roles[0]).Return(tt.revokeErr) + } + + h := &authZHandlers{ + authorizer: authorizer, + controller: controller, + logger: logger, + rbacconfig: rbacconf.Config{ + RootUsers: []string{"root-user"}, + }, + } + res := h.revokeRoleFromGroup(tt.params, tt.principal) + parsed, ok := res.(*authz.RevokeRoleFromGroupInternalServerError) + assert.True(t, ok) + + if tt.expectedError != "" { + assert.Contains(t, parsed.Payload.Error[0].Message, tt.expectedError) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6ceb4e1576f65cb61c87b7a81af6706b53f64619 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/handlers_authz_test.go @@ -0,0 +1,294 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/mock" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +func TestAuthorizeRoleScopes(t *testing.T) { + type testCase struct { + name string + principal *models.Principal + originalVerb string + policies []authorization.Policy + roleName string + authorizeSetup func(*authorization.MockAuthorizer) + expectedError string + } + tests := []testCase{ + { + name: "has full role management permissions", + principal: &models.Principal{Username: "admin"}, + originalVerb: authorization.CREATE, + policies: []authorization.Policy{ + {Resource: "collections/ABC", Verb: authorization.READ}, + }, + roleName: "newRole", + authorizeSetup: func(a *authorization.MockAuthorizer) { + // First call succeeds - has full permissions + a.On("Authorize", mock.Anything, &models.Principal{Username: "admin"}, authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_ALL), authorization.Roles("newRole")[0]). + Return(nil).Once() + }, + expectedError: "", + }, + { + name: "has role scope match and all required permissions", + principal: &models.Principal{Username: "user"}, + originalVerb: authorization.CREATE, + policies: []authorization.Policy{ + {Resource: "collections/ABC", Verb: authorization.READ}, + }, + roleName: "newRole", + authorizeSetup: func(a *authorization.MockAuthorizer) { + // First call fails - no full permissions + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_ALL), authorization.Roles("newRole")[0]). 
+ Return(errors.New("no full permissions")).Once() + // Second call succeeds - has role scope match + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_MATCH), authorization.Roles("newRole")[0]). + Return(nil).Once() + // Third call succeeds - has required permission + a.On("AuthorizeSilent", mock.Anything, &models.Principal{Username: "user"}, authorization.READ, "collections/ABC"). + Return(nil).Once() + }, + expectedError: "", + }, + { + name: "has role scope match but missing required permissions", + principal: &models.Principal{Username: "user"}, + originalVerb: authorization.CREATE, + policies: []authorization.Policy{ + {Resource: "collections/ABC", Verb: authorization.READ}, + {Resource: "collections/XYZ", Verb: authorization.UPDATE}, + }, + roleName: "newRole", + authorizeSetup: func(a *authorization.MockAuthorizer) { + // First call fails - no full permissions + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_ALL), authorization.Roles("newRole")[0]). + Return(errors.New("no full permissions")).Once() + // Second call succeeds - has role scope match + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_MATCH), authorization.Roles("newRole")[0]). + Return(nil).Once() + // Third call succeeds - has first permission + a.On("AuthorizeSilent", mock.Anything, &models.Principal{Username: "user"}, authorization.READ, "collections/ABC"). + Return(nil).Once() + // Fourth call fails - missing second permission + a.On("AuthorizeSilent", mock.Anything, &models.Principal{Username: "user"}, authorization.UPDATE, "collections/XYZ"). 
+ Return(errors.New("missing write permission")).Once() + }, + expectedError: "missing write permission", + }, + { + name: "has neither full management nor role scope match", + principal: &models.Principal{Username: "user"}, + originalVerb: authorization.CREATE, + policies: []authorization.Policy{ + {Resource: "collections/ABC", Verb: authorization.READ}, + }, + roleName: "newRole", + authorizeSetup: func(a *authorization.MockAuthorizer) { + // First call fails - no full permissions + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_ALL), authorization.Roles("newRole")[0]). + Return(errors.New("no full permissions")).Once() + // Second call fails - no role scope match + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.CREATE, authorization.ROLE_SCOPE_MATCH), authorization.Roles("newRole")[0]). + Return(errors.New("no role scope match")).Once() + }, + expectedError: "can only create roles with less or equal permissions as the current user: no role scope match", + }, + { + name: "has full role management permissions for update", + principal: &models.Principal{Username: "admin"}, + originalVerb: authorization.UPDATE, + policies: []authorization.Policy{ + {Resource: "collections/ABC", Verb: authorization.READ}, + }, + roleName: "existingRole", + authorizeSetup: func(a *authorization.MockAuthorizer) { + // First call succeeds - has full permissions + a.On("Authorize", mock.Anything, &models.Principal{Username: "admin"}, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_ALL), authorization.Roles("existingRole")[0]). 
+ Return(nil).Once() + }, + expectedError: "", + }, + { + name: "has role scope match and all required permissions for update", + principal: &models.Principal{Username: "user"}, + originalVerb: authorization.UPDATE, + policies: []authorization.Policy{ + {Resource: "collections/ABC", Verb: authorization.READ}, + }, + roleName: "existingRole", + authorizeSetup: func(a *authorization.MockAuthorizer) { + // First call fails - no full permissions + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_ALL), authorization.Roles("existingRole")[0]). + Return(errors.New("no full permissions")).Once() + // Second call succeeds - has role scope match + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_MATCH), authorization.Roles("existingRole")[0]). + Return(nil).Once() + // Third call succeeds - has required permission + a.On("AuthorizeSilent", mock.Anything, &models.Principal{Username: "user"}, authorization.READ, "collections/ABC"). + Return(nil).Once() + }, + expectedError: "", + }, + { + name: "has role scope match but missing some required permissions for update", + principal: &models.Principal{Username: "user"}, + originalVerb: authorization.UPDATE, + policies: []authorization.Policy{ + {Resource: "collections/ABC", Verb: authorization.READ}, + {Resource: "collections/XYZ", Verb: authorization.DELETE}, + }, + roleName: "existingRole", + authorizeSetup: func(a *authorization.MockAuthorizer) { + // First call fails - no full permissions + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_ALL), authorization.Roles("existingRole")[0]). 
+ Return(errors.New("no full permissions")).Once() + // Second call succeeds - has role scope match + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_MATCH), authorization.Roles("existingRole")[0]). + Return(nil).Once() + // Third call succeeds - has first permission + a.On("AuthorizeSilent", mock.Anything, &models.Principal{Username: "user"}, authorization.READ, "collections/ABC"). + Return(nil).Once() + // Fourth call fails - missing delete permission + a.On("AuthorizeSilent", mock.Anything, &models.Principal{Username: "user"}, authorization.DELETE, "collections/XYZ"). + Return(errors.New("missing delete permission")).Once() + }, + expectedError: "missing delete permission", + }, + { + name: "has neither full management nor role scope match for update", + principal: &models.Principal{Username: "user"}, + originalVerb: authorization.UPDATE, + policies: []authorization.Policy{ + {Resource: "collections/ABC", Verb: authorization.READ}, + }, + roleName: "existingRole", + authorizeSetup: func(a *authorization.MockAuthorizer) { + // First call fails - no full permissions + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_ALL), authorization.Roles("existingRole")[0]). + Return(errors.New("no full permissions")).Once() + // Second call fails - no role scope match + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.UPDATE, authorization.ROLE_SCOPE_MATCH), authorization.Roles("existingRole")[0]). 
+ Return(errors.New("no role scope match")).Once() + }, + expectedError: "can only create roles with less or equal permissions as the current user: no role scope match", + }, + { + name: "has full role management permissions for delete", + principal: &models.Principal{Username: "admin"}, + originalVerb: authorization.DELETE, + policies: []authorization.Policy{}, + roleName: "existingRole", + authorizeSetup: func(a *authorization.MockAuthorizer) { + // First call succeeds - has full permissions + a.On("Authorize", mock.Anything, &models.Principal{Username: "admin"}, authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_ALL), authorization.Roles("existingRole")[0]). + Return(nil).Once() + }, + expectedError: "", + }, + { + name: "has role scope match for delete", + principal: &models.Principal{Username: "user"}, + originalVerb: authorization.DELETE, + policies: []authorization.Policy{}, + roleName: "existingRole", + authorizeSetup: func(a *authorization.MockAuthorizer) { + // First call fails - no full permissions + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_ALL), authorization.Roles("existingRole")[0]). + Return(errors.New("no full permissions")).Once() + // Second call succeeds - has role scope match + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_MATCH), authorization.Roles("existingRole")[0]). 
+ Return(nil).Once() + }, + expectedError: "", + }, + { + name: "has role scope match but missing permissions for delete", + principal: &models.Principal{Username: "user"}, + originalVerb: authorization.DELETE, + policies: []authorization.Policy{ + {Resource: "collections/ABC", Verb: authorization.READ}, + {Resource: "collections/XYZ", Verb: authorization.DELETE}, + }, + roleName: "existingRole", + authorizeSetup: func(a *authorization.MockAuthorizer) { + // First call fails - no full permissions + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_ALL), authorization.Roles("existingRole")[0]). + Return(errors.New("no full permissions")).Once() + // Second call succeeds - has role scope match + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_MATCH), authorization.Roles("existingRole")[0]). + Return(nil).Once() + // Third call succeeds - has first permission + a.On("AuthorizeSilent", mock.Anything, &models.Principal{Username: "user"}, authorization.READ, "collections/ABC"). + Return(nil).Once() + // Fourth call fails - missing delete permission + a.On("AuthorizeSilent", mock.Anything, &models.Principal{Username: "user"}, authorization.DELETE, "collections/XYZ"). + Return(errors.New("missing delete permission")).Once() + }, + expectedError: "missing delete permission", + }, + { + name: "get role fails during delete", + principal: &models.Principal{Username: "user"}, + originalVerb: authorization.DELETE, + policies: nil, + roleName: "existingRole", + authorizeSetup: func(a *authorization.MockAuthorizer) { + // First call fails - no full permissions + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_ALL), authorization.Roles("existingRole")[0]). 
+ Return(errors.New("no full permissions")).Once() + // Second call succeeds - has role scope match + a.On("Authorize", mock.Anything, &models.Principal{Username: "user"}, authorization.VerbWithScope(authorization.DELETE, authorization.ROLE_SCOPE_MATCH), authorization.Roles("existingRole")[0]). + Return(nil).Once() + }, + expectedError: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + logger, _ := test.NewNullLogger() + + if tt.authorizeSetup != nil { + tt.authorizeSetup(authorizer) + } + + h := &authZHandlers{ + authorizer: authorizer, + logger: logger, + } + + err := h.authorizeRoleScopes(context.Background(), tt.principal, tt.originalVerb, tt.policies, tt.roleName) + + if tt.expectedError == "" { + assert.NoError(t, err) + } else { + assert.ErrorContains(t, err, tt.expectedError) + } + + authorizer.AssertExpectations(t) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/mock_controller_and_get_users.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/mock_controller_and_get_users.go new file mode 100644 index 0000000000000000000000000000000000000000..bb752b0091a47c503f86ed84ee1454e16138eddd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/mock_controller_and_get_users.go @@ -0,0 +1,734 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package authz + +import ( + authentication "github.com/weaviate/weaviate/usecases/auth/authentication" + apikey "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + + authorization "github.com/weaviate/weaviate/usecases/auth/authorization" + + mock "github.com/stretchr/testify/mock" +) + +// MockControllerAndGetUsers is an autogenerated mock type for the ControllerAndGetUsers type +type MockControllerAndGetUsers struct { + mock.Mock +} + +type MockControllerAndGetUsers_Expecter struct { + mock *mock.Mock +} + +func (_m *MockControllerAndGetUsers) EXPECT() *MockControllerAndGetUsers_Expecter { + return &MockControllerAndGetUsers_Expecter{mock: &_m.Mock} +} + +// AddRolesForUser provides a mock function with given fields: user, roles +func (_m *MockControllerAndGetUsers) AddRolesForUser(user string, roles []string) error { + ret := _m.Called(user, roles) + + if len(ret) == 0 { + panic("no return value specified for AddRolesForUser") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, []string) error); ok { + r0 = rf(user, roles) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockControllerAndGetUsers_AddRolesForUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddRolesForUser' +type MockControllerAndGetUsers_AddRolesForUser_Call struct { + *mock.Call +} + +// AddRolesForUser is a helper method to define mock.On call +// - user string +// - roles []string +func (_e *MockControllerAndGetUsers_Expecter) AddRolesForUser(user interface{}, roles interface{}) *MockControllerAndGetUsers_AddRolesForUser_Call { + return &MockControllerAndGetUsers_AddRolesForUser_Call{Call: _e.mock.On("AddRolesForUser", user, roles)} +} + +func (_c *MockControllerAndGetUsers_AddRolesForUser_Call) Run(run func(user string, roles []string)) *MockControllerAndGetUsers_AddRolesForUser_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].([]string)) + }) + return _c +} + +func (_c 
*MockControllerAndGetUsers_AddRolesForUser_Call) Return(_a0 error) *MockControllerAndGetUsers_AddRolesForUser_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockControllerAndGetUsers_AddRolesForUser_Call) RunAndReturn(run func(string, []string) error) *MockControllerAndGetUsers_AddRolesForUser_Call { + _c.Call.Return(run) + return _c +} + +// CreateRolesPermissions provides a mock function with given fields: roles +func (_m *MockControllerAndGetUsers) CreateRolesPermissions(roles map[string][]authorization.Policy) error { + ret := _m.Called(roles) + + if len(ret) == 0 { + panic("no return value specified for CreateRolesPermissions") + } + + var r0 error + if rf, ok := ret.Get(0).(func(map[string][]authorization.Policy) error); ok { + r0 = rf(roles) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockControllerAndGetUsers_CreateRolesPermissions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateRolesPermissions' +type MockControllerAndGetUsers_CreateRolesPermissions_Call struct { + *mock.Call +} + +// CreateRolesPermissions is a helper method to define mock.On call +// - roles map[string][]authorization.Policy +func (_e *MockControllerAndGetUsers_Expecter) CreateRolesPermissions(roles interface{}) *MockControllerAndGetUsers_CreateRolesPermissions_Call { + return &MockControllerAndGetUsers_CreateRolesPermissions_Call{Call: _e.mock.On("CreateRolesPermissions", roles)} +} + +func (_c *MockControllerAndGetUsers_CreateRolesPermissions_Call) Run(run func(roles map[string][]authorization.Policy)) *MockControllerAndGetUsers_CreateRolesPermissions_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(map[string][]authorization.Policy)) + }) + return _c +} + +func (_c *MockControllerAndGetUsers_CreateRolesPermissions_Call) Return(_a0 error) *MockControllerAndGetUsers_CreateRolesPermissions_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockControllerAndGetUsers_CreateRolesPermissions_Call) 
RunAndReturn(run func(map[string][]authorization.Policy) error) *MockControllerAndGetUsers_CreateRolesPermissions_Call { + _c.Call.Return(run) + return _c +} + +// DeleteRoles provides a mock function with given fields: roles +func (_m *MockControllerAndGetUsers) DeleteRoles(roles ...string) error { + _va := make([]interface{}, len(roles)) + for _i := range roles { + _va[_i] = roles[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for DeleteRoles") + } + + var r0 error + if rf, ok := ret.Get(0).(func(...string) error); ok { + r0 = rf(roles...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockControllerAndGetUsers_DeleteRoles_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteRoles' +type MockControllerAndGetUsers_DeleteRoles_Call struct { + *mock.Call +} + +// DeleteRoles is a helper method to define mock.On call +// - roles ...string +func (_e *MockControllerAndGetUsers_Expecter) DeleteRoles(roles ...interface{}) *MockControllerAndGetUsers_DeleteRoles_Call { + return &MockControllerAndGetUsers_DeleteRoles_Call{Call: _e.mock.On("DeleteRoles", + append([]interface{}{}, roles...)...)} +} + +func (_c *MockControllerAndGetUsers_DeleteRoles_Call) Run(run func(roles ...string)) *MockControllerAndGetUsers_DeleteRoles_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]string, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(string) + } + } + run(variadicArgs...) 
+ }) + return _c +} + +func (_c *MockControllerAndGetUsers_DeleteRoles_Call) Return(_a0 error) *MockControllerAndGetUsers_DeleteRoles_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockControllerAndGetUsers_DeleteRoles_Call) RunAndReturn(run func(...string) error) *MockControllerAndGetUsers_DeleteRoles_Call { + _c.Call.Return(run) + return _c +} + +// GetRoles provides a mock function with given fields: names +func (_m *MockControllerAndGetUsers) GetRoles(names ...string) (map[string][]authorization.Policy, error) { + _va := make([]interface{}, len(names)) + for _i := range names { + _va[_i] = names[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetRoles") + } + + var r0 map[string][]authorization.Policy + var r1 error + if rf, ok := ret.Get(0).(func(...string) (map[string][]authorization.Policy, error)); ok { + return rf(names...) + } + if rf, ok := ret.Get(0).(func(...string) map[string][]authorization.Policy); ok { + r0 = rf(names...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string][]authorization.Policy) + } + } + + if rf, ok := ret.Get(1).(func(...string) error); ok { + r1 = rf(names...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockControllerAndGetUsers_GetRoles_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRoles' +type MockControllerAndGetUsers_GetRoles_Call struct { + *mock.Call +} + +// GetRoles is a helper method to define mock.On call +// - names ...string +func (_e *MockControllerAndGetUsers_Expecter) GetRoles(names ...interface{}) *MockControllerAndGetUsers_GetRoles_Call { + return &MockControllerAndGetUsers_GetRoles_Call{Call: _e.mock.On("GetRoles", + append([]interface{}{}, names...)...)} +} + +func (_c *MockControllerAndGetUsers_GetRoles_Call) Run(run func(names ...string)) *MockControllerAndGetUsers_GetRoles_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]string, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(string) + } + } + run(variadicArgs...) + }) + return _c +} + +func (_c *MockControllerAndGetUsers_GetRoles_Call) Return(_a0 map[string][]authorization.Policy, _a1 error) *MockControllerAndGetUsers_GetRoles_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockControllerAndGetUsers_GetRoles_Call) RunAndReturn(run func(...string) (map[string][]authorization.Policy, error)) *MockControllerAndGetUsers_GetRoles_Call { + _c.Call.Return(run) + return _c +} + +// GetRolesForUserOrGroup provides a mock function with given fields: user, authMethod, isGroup +func (_m *MockControllerAndGetUsers) GetRolesForUserOrGroup(user string, authMethod authentication.AuthType, isGroup bool) (map[string][]authorization.Policy, error) { + ret := _m.Called(user, authMethod, isGroup) + + if len(ret) == 0 { + panic("no return value specified for GetRolesForUserOrGroup") + } + + var r0 map[string][]authorization.Policy + var r1 error + if rf, ok := ret.Get(0).(func(string, authentication.AuthType, bool) (map[string][]authorization.Policy, error)); ok { + return rf(user, authMethod, isGroup) + } + if rf, ok := 
ret.Get(0).(func(string, authentication.AuthType, bool) map[string][]authorization.Policy); ok { + r0 = rf(user, authMethod, isGroup) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string][]authorization.Policy) + } + } + + if rf, ok := ret.Get(1).(func(string, authentication.AuthType, bool) error); ok { + r1 = rf(user, authMethod, isGroup) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockControllerAndGetUsers_GetRolesForUserOrGroup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRolesForUserOrGroup' +type MockControllerAndGetUsers_GetRolesForUserOrGroup_Call struct { + *mock.Call +} + +// GetRolesForUserOrGroup is a helper method to define mock.On call +// - user string +// - authMethod authentication.AuthType +// - isGroup bool +func (_e *MockControllerAndGetUsers_Expecter) GetRolesForUserOrGroup(user interface{}, authMethod interface{}, isGroup interface{}) *MockControllerAndGetUsers_GetRolesForUserOrGroup_Call { + return &MockControllerAndGetUsers_GetRolesForUserOrGroup_Call{Call: _e.mock.On("GetRolesForUserOrGroup", user, authMethod, isGroup)} +} + +func (_c *MockControllerAndGetUsers_GetRolesForUserOrGroup_Call) Run(run func(user string, authMethod authentication.AuthType, isGroup bool)) *MockControllerAndGetUsers_GetRolesForUserOrGroup_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(authentication.AuthType), args[2].(bool)) + }) + return _c +} + +func (_c *MockControllerAndGetUsers_GetRolesForUserOrGroup_Call) Return(_a0 map[string][]authorization.Policy, _a1 error) *MockControllerAndGetUsers_GetRolesForUserOrGroup_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockControllerAndGetUsers_GetRolesForUserOrGroup_Call) RunAndReturn(run func(string, authentication.AuthType, bool) (map[string][]authorization.Policy, error)) *MockControllerAndGetUsers_GetRolesForUserOrGroup_Call { + _c.Call.Return(run) + return _c +} + +// GetUsers provides a 
mock function with given fields: userIds +func (_m *MockControllerAndGetUsers) GetUsers(userIds ...string) (map[string]*apikey.User, error) { + _va := make([]interface{}, len(userIds)) + for _i := range userIds { + _va[_i] = userIds[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetUsers") + } + + var r0 map[string]*apikey.User + var r1 error + if rf, ok := ret.Get(0).(func(...string) (map[string]*apikey.User, error)); ok { + return rf(userIds...) + } + if rf, ok := ret.Get(0).(func(...string) map[string]*apikey.User); ok { + r0 = rf(userIds...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]*apikey.User) + } + } + + if rf, ok := ret.Get(1).(func(...string) error); ok { + r1 = rf(userIds...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockControllerAndGetUsers_GetUsers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUsers' +type MockControllerAndGetUsers_GetUsers_Call struct { + *mock.Call +} + +// GetUsers is a helper method to define mock.On call +// - userIds ...string +func (_e *MockControllerAndGetUsers_Expecter) GetUsers(userIds ...interface{}) *MockControllerAndGetUsers_GetUsers_Call { + return &MockControllerAndGetUsers_GetUsers_Call{Call: _e.mock.On("GetUsers", + append([]interface{}{}, userIds...)...)} +} + +func (_c *MockControllerAndGetUsers_GetUsers_Call) Run(run func(userIds ...string)) *MockControllerAndGetUsers_GetUsers_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]string, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(string) + } + } + run(variadicArgs...) 
+ }) + return _c +} + +func (_c *MockControllerAndGetUsers_GetUsers_Call) Return(_a0 map[string]*apikey.User, _a1 error) *MockControllerAndGetUsers_GetUsers_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockControllerAndGetUsers_GetUsers_Call) RunAndReturn(run func(...string) (map[string]*apikey.User, error)) *MockControllerAndGetUsers_GetUsers_Call { + _c.Call.Return(run) + return _c +} + +// GetUsersOrGroupForRole provides a mock function with given fields: role, authMethod, IsGroup +func (_m *MockControllerAndGetUsers) GetUsersOrGroupForRole(role string, authMethod authentication.AuthType, IsGroup bool) ([]string, error) { + ret := _m.Called(role, authMethod, IsGroup) + + if len(ret) == 0 { + panic("no return value specified for GetUsersOrGroupForRole") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(string, authentication.AuthType, bool) ([]string, error)); ok { + return rf(role, authMethod, IsGroup) + } + if rf, ok := ret.Get(0).(func(string, authentication.AuthType, bool) []string); ok { + r0 = rf(role, authMethod, IsGroup) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(string, authentication.AuthType, bool) error); ok { + r1 = rf(role, authMethod, IsGroup) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockControllerAndGetUsers_GetUsersOrGroupForRole_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUsersOrGroupForRole' +type MockControllerAndGetUsers_GetUsersOrGroupForRole_Call struct { + *mock.Call +} + +// GetUsersOrGroupForRole is a helper method to define mock.On call +// - role string +// - authMethod authentication.AuthType +// - IsGroup bool +func (_e *MockControllerAndGetUsers_Expecter) GetUsersOrGroupForRole(role interface{}, authMethod interface{}, IsGroup interface{}) *MockControllerAndGetUsers_GetUsersOrGroupForRole_Call { + return 
&MockControllerAndGetUsers_GetUsersOrGroupForRole_Call{Call: _e.mock.On("GetUsersOrGroupForRole", role, authMethod, IsGroup)} +} + +func (_c *MockControllerAndGetUsers_GetUsersOrGroupForRole_Call) Run(run func(role string, authMethod authentication.AuthType, IsGroup bool)) *MockControllerAndGetUsers_GetUsersOrGroupForRole_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(authentication.AuthType), args[2].(bool)) + }) + return _c +} + +func (_c *MockControllerAndGetUsers_GetUsersOrGroupForRole_Call) Return(_a0 []string, _a1 error) *MockControllerAndGetUsers_GetUsersOrGroupForRole_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockControllerAndGetUsers_GetUsersOrGroupForRole_Call) RunAndReturn(run func(string, authentication.AuthType, bool) ([]string, error)) *MockControllerAndGetUsers_GetUsersOrGroupForRole_Call { + _c.Call.Return(run) + return _c +} + +// GetUsersOrGroupsWithRoles provides a mock function with given fields: isGroup, authMethod +func (_m *MockControllerAndGetUsers) GetUsersOrGroupsWithRoles(isGroup bool, authMethod authentication.AuthType) ([]string, error) { + ret := _m.Called(isGroup, authMethod) + + if len(ret) == 0 { + panic("no return value specified for GetUsersOrGroupsWithRoles") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(bool, authentication.AuthType) ([]string, error)); ok { + return rf(isGroup, authMethod) + } + if rf, ok := ret.Get(0).(func(bool, authentication.AuthType) []string); ok { + r0 = rf(isGroup, authMethod) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(bool, authentication.AuthType) error); ok { + r1 = rf(isGroup, authMethod) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockControllerAndGetUsers_GetUsersOrGroupsWithRoles_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUsersOrGroupsWithRoles' +type 
MockControllerAndGetUsers_GetUsersOrGroupsWithRoles_Call struct { + *mock.Call +} + +// GetUsersOrGroupsWithRoles is a helper method to define mock.On call +// - isGroup bool +// - authMethod authentication.AuthType +func (_e *MockControllerAndGetUsers_Expecter) GetUsersOrGroupsWithRoles(isGroup interface{}, authMethod interface{}) *MockControllerAndGetUsers_GetUsersOrGroupsWithRoles_Call { + return &MockControllerAndGetUsers_GetUsersOrGroupsWithRoles_Call{Call: _e.mock.On("GetUsersOrGroupsWithRoles", isGroup, authMethod)} +} + +func (_c *MockControllerAndGetUsers_GetUsersOrGroupsWithRoles_Call) Run(run func(isGroup bool, authMethod authentication.AuthType)) *MockControllerAndGetUsers_GetUsersOrGroupsWithRoles_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(bool), args[1].(authentication.AuthType)) + }) + return _c +} + +func (_c *MockControllerAndGetUsers_GetUsersOrGroupsWithRoles_Call) Return(_a0 []string, _a1 error) *MockControllerAndGetUsers_GetUsersOrGroupsWithRoles_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockControllerAndGetUsers_GetUsersOrGroupsWithRoles_Call) RunAndReturn(run func(bool, authentication.AuthType) ([]string, error)) *MockControllerAndGetUsers_GetUsersOrGroupsWithRoles_Call { + _c.Call.Return(run) + return _c +} + +// HasPermission provides a mock function with given fields: role, permission +func (_m *MockControllerAndGetUsers) HasPermission(role string, permission *authorization.Policy) (bool, error) { + ret := _m.Called(role, permission) + + if len(ret) == 0 { + panic("no return value specified for HasPermission") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(string, *authorization.Policy) (bool, error)); ok { + return rf(role, permission) + } + if rf, ok := ret.Get(0).(func(string, *authorization.Policy) bool); ok { + r0 = rf(role, permission) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(string, *authorization.Policy) error); ok { + r1 = rf(role, 
permission) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockControllerAndGetUsers_HasPermission_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HasPermission' +type MockControllerAndGetUsers_HasPermission_Call struct { + *mock.Call +} + +// HasPermission is a helper method to define mock.On call +// - role string +// - permission *authorization.Policy +func (_e *MockControllerAndGetUsers_Expecter) HasPermission(role interface{}, permission interface{}) *MockControllerAndGetUsers_HasPermission_Call { + return &MockControllerAndGetUsers_HasPermission_Call{Call: _e.mock.On("HasPermission", role, permission)} +} + +func (_c *MockControllerAndGetUsers_HasPermission_Call) Run(run func(role string, permission *authorization.Policy)) *MockControllerAndGetUsers_HasPermission_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(*authorization.Policy)) + }) + return _c +} + +func (_c *MockControllerAndGetUsers_HasPermission_Call) Return(_a0 bool, _a1 error) *MockControllerAndGetUsers_HasPermission_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockControllerAndGetUsers_HasPermission_Call) RunAndReturn(run func(string, *authorization.Policy) (bool, error)) *MockControllerAndGetUsers_HasPermission_Call { + _c.Call.Return(run) + return _c +} + +// RemovePermissions provides a mock function with given fields: role, permissions +func (_m *MockControllerAndGetUsers) RemovePermissions(role string, permissions []*authorization.Policy) error { + ret := _m.Called(role, permissions) + + if len(ret) == 0 { + panic("no return value specified for RemovePermissions") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, []*authorization.Policy) error); ok { + r0 = rf(role, permissions) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockControllerAndGetUsers_RemovePermissions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'RemovePermissions' +type MockControllerAndGetUsers_RemovePermissions_Call struct { + *mock.Call +} + +// RemovePermissions is a helper method to define mock.On call +// - role string +// - permissions []*authorization.Policy +func (_e *MockControllerAndGetUsers_Expecter) RemovePermissions(role interface{}, permissions interface{}) *MockControllerAndGetUsers_RemovePermissions_Call { + return &MockControllerAndGetUsers_RemovePermissions_Call{Call: _e.mock.On("RemovePermissions", role, permissions)} +} + +func (_c *MockControllerAndGetUsers_RemovePermissions_Call) Run(run func(role string, permissions []*authorization.Policy)) *MockControllerAndGetUsers_RemovePermissions_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].([]*authorization.Policy)) + }) + return _c +} + +func (_c *MockControllerAndGetUsers_RemovePermissions_Call) Return(_a0 error) *MockControllerAndGetUsers_RemovePermissions_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockControllerAndGetUsers_RemovePermissions_Call) RunAndReturn(run func(string, []*authorization.Policy) error) *MockControllerAndGetUsers_RemovePermissions_Call { + _c.Call.Return(run) + return _c +} + +// RevokeRolesForUser provides a mock function with given fields: user, roles +func (_m *MockControllerAndGetUsers) RevokeRolesForUser(user string, roles ...string) error { + _va := make([]interface{}, len(roles)) + for _i := range roles { + _va[_i] = roles[_i] + } + var _ca []interface{} + _ca = append(_ca, user) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for RevokeRolesForUser") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, ...string) error); ok { + r0 = rf(user, roles...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockControllerAndGetUsers_RevokeRolesForUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RevokeRolesForUser' +type MockControllerAndGetUsers_RevokeRolesForUser_Call struct { + *mock.Call +} + +// RevokeRolesForUser is a helper method to define mock.On call +// - user string +// - roles ...string +func (_e *MockControllerAndGetUsers_Expecter) RevokeRolesForUser(user interface{}, roles ...interface{}) *MockControllerAndGetUsers_RevokeRolesForUser_Call { + return &MockControllerAndGetUsers_RevokeRolesForUser_Call{Call: _e.mock.On("RevokeRolesForUser", + append([]interface{}{user}, roles...)...)} +} + +func (_c *MockControllerAndGetUsers_RevokeRolesForUser_Call) Run(run func(user string, roles ...string)) *MockControllerAndGetUsers_RevokeRolesForUser_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]string, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(string) + } + } + run(args[0].(string), variadicArgs...) 
+ }) + return _c +} + +func (_c *MockControllerAndGetUsers_RevokeRolesForUser_Call) Return(_a0 error) *MockControllerAndGetUsers_RevokeRolesForUser_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockControllerAndGetUsers_RevokeRolesForUser_Call) RunAndReturn(run func(string, ...string) error) *MockControllerAndGetUsers_RevokeRolesForUser_Call { + _c.Call.Return(run) + return _c +} + +// UpdateRolesPermissions provides a mock function with given fields: roles +func (_m *MockControllerAndGetUsers) UpdateRolesPermissions(roles map[string][]authorization.Policy) error { + ret := _m.Called(roles) + + if len(ret) == 0 { + panic("no return value specified for UpdateRolesPermissions") + } + + var r0 error + if rf, ok := ret.Get(0).(func(map[string][]authorization.Policy) error); ok { + r0 = rf(roles) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockControllerAndGetUsers_UpdateRolesPermissions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateRolesPermissions' +type MockControllerAndGetUsers_UpdateRolesPermissions_Call struct { + *mock.Call +} + +// UpdateRolesPermissions is a helper method to define mock.On call +// - roles map[string][]authorization.Policy +func (_e *MockControllerAndGetUsers_Expecter) UpdateRolesPermissions(roles interface{}) *MockControllerAndGetUsers_UpdateRolesPermissions_Call { + return &MockControllerAndGetUsers_UpdateRolesPermissions_Call{Call: _e.mock.On("UpdateRolesPermissions", roles)} +} + +func (_c *MockControllerAndGetUsers_UpdateRolesPermissions_Call) Run(run func(roles map[string][]authorization.Policy)) *MockControllerAndGetUsers_UpdateRolesPermissions_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(map[string][]authorization.Policy)) + }) + return _c +} + +func (_c *MockControllerAndGetUsers_UpdateRolesPermissions_Call) Return(_a0 error) *MockControllerAndGetUsers_UpdateRolesPermissions_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c 
*MockControllerAndGetUsers_UpdateRolesPermissions_Call) RunAndReturn(run func(map[string][]authorization.Policy) error) *MockControllerAndGetUsers_UpdateRolesPermissions_Call { + _c.Call.Return(run) + return _c +} + +// NewMockControllerAndGetUsers creates a new instance of MockControllerAndGetUsers. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockControllerAndGetUsers(t interface { + mock.TestingT + Cleanup(func()) +}) *MockControllerAndGetUsers { + mock := &MockControllerAndGetUsers{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/validation.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/validation.go new file mode 100644 index 0000000000000000000000000000000000000000..f3f311bcd5422240fa9bfb5a5647e19cb7828e33 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/validation.go @@ -0,0 +1,87 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "errors" + "fmt" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +func validatePermissions(allowEmpty bool, permissions ...*models.Permission) error { + if !allowEmpty && len(permissions) == 0 { + return fmt.Errorf("role has to have at least 1 permission") + } + + for _, perm := range permissions { + + var ( + multiErr error + collectionsInput = perm.Collections + tenantsInput = perm.Tenants + dataInput = perm.Data + backupsInput = perm.Backups + nodesInput = perm.Nodes + replicateInput = perm.Replicate + ) + if collectionsInput != nil { + if collectionsInput.Collection != nil { + _, err := schema.ValidateClassNameIncludesRegex(*collectionsInput.Collection) + multiErr = errors.Join(err) + } + } + + if tenantsInput != nil { + if tenantsInput.Collection != nil { + _, classErr := schema.ValidateClassNameIncludesRegex(*tenantsInput.Collection) + multiErr = errors.Join(classErr) + } + if tenantsInput.Tenant != nil { + multiErr = errors.Join(schema.ValidateTenantNameIncludesRegex(*tenantsInput.Tenant)) + } + } + + if dataInput != nil { + if dataInput.Collection != nil { + _, err := schema.ValidateClassNameIncludesRegex(*dataInput.Collection) + multiErr = errors.Join(err) + } + + if dataInput.Tenant != nil { + multiErr = errors.Join(schema.ValidateTenantNameIncludesRegex(*dataInput.Tenant)) + } + } + + if backupsInput != nil && backupsInput.Collection != nil { + _, err := schema.ValidateClassNameIncludesRegex(*backupsInput.Collection) + multiErr = errors.Join(err) + } + + if nodesInput != nil && nodesInput.Collection != nil { + _, err := schema.ValidateClassNameIncludesRegex(*nodesInput.Collection) + multiErr = errors.Join(err) + } + + if replicateInput != nil && replicateInput.Collection != nil { + _, err := schema.ValidateClassNameIncludesRegex(*replicateInput.Collection) + multiErr = errors.Join(err) + } + + if multiErr != nil { + return 
multiErr + } + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/validation_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/validation_test.go new file mode 100644 index 0000000000000000000000000000000000000000..13df66c73590473d9406d855e31ac1e07922ed19 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/authz/validation_test.go @@ -0,0 +1,169 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package authz + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/models" +) + +func TestValidatePermissions(t *testing.T) { + tests := []struct { + name string + permissions []*models.Permission + allowEmpty bool + expectedErr string + }{ + { + name: "no permissions - not allowed", + permissions: []*models.Permission{}, + expectedErr: "role has to have at least 1 permission", + }, + { + name: "no permissions - allowed", + permissions: []*models.Permission{}, + allowEmpty: true, + }, + { + name: "invalid collection name with space", + permissions: []*models.Permission{ + { + Collections: &models.PermissionCollections{ + Collection: String("Invalid class name"), + }, + }, + }, + expectedErr: "not a valid class name", + }, + { + name: "invalid collection name with special character", + permissions: []*models.Permission{ + { + Collections: &models.PermissionCollections{ + Collection: String("InvalidClassName!"), + }, + }, + }, + expectedErr: "not a valid class name", + }, + { + name: "invalid tenant name with space", + permissions: []*models.Permission{ + { + Tenants: &models.PermissionTenants{ + Collection: String("*"), + Tenant: String("Invalid Tenant Name"), + }, + }, + }, + 
expectedErr: "is not a valid tenant name.", + }, + { + name: "invalid tenant name with special character", + permissions: []*models.Permission{ + { + Tenants: &models.PermissionTenants{ + Collection: String("*"), + Tenant: String("InvalidTenantName!"), + }, + }, + }, + expectedErr: "is not a valid tenant name.", + }, + { + name: "invalid tenant name with one character", + permissions: []*models.Permission{ + { + Tenants: &models.PermissionTenants{ + Collection: String("*"), + Tenant: String("#"), + }, + }, + }, + expectedErr: "is not a valid tenant name.", + }, + { + name: "valid collection regex name", + permissions: []*models.Permission{ + { + Collections: &models.PermissionCollections{ + Collection: String("ValidTenantName*"), + }, + }, + }, + }, + { + name: "valid collection *", + permissions: []*models.Permission{ + { + Collections: &models.PermissionCollections{ + Collection: String("*"), + }, + }, + }, + }, + { + name: "valid tenant regex name", + permissions: []*models.Permission{ + { + Tenants: &models.PermissionTenants{ + Collection: String("*"), + Tenant: String("Tenant*"), + }, + }, + }, + }, + { + name: "valid tenant *", + permissions: []*models.Permission{ + { + Tenants: &models.PermissionTenants{ + Collection: String("*"), + Tenant: String("*"), + }, + }, + }, + }, + { + name: "valid permissions", + permissions: []*models.Permission{ + { + Collections: &models.PermissionCollections{ + Collection: String("ValidCollectionName"), + }, + Tenants: &models.PermissionTenants{ + Collection: String("*"), + Tenant: String("ValidTenantName"), + }, + Data: &models.PermissionData{ + Collection: String("ValidCollectionName"), + Tenant: String("ValidTenantName"), + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := validatePermissions(tt.allowEmpty, tt.permissions...) 
+ if tt.expectedErr != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.expectedErr) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/auth.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/auth.go new file mode 100644 index 0000000000000000000000000000000000000000..9f11129a300feec79af7ab064a7a2fb9b49b6299 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/auth.go @@ -0,0 +1,55 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package clusterapi + +import ( + "net/http" + + "github.com/weaviate/weaviate/usecases/cluster" +) + +type auth interface { + handleFunc(handler http.HandlerFunc) http.HandlerFunc +} + +type basicAuthHandler struct { + basicAuth cluster.BasicAuth +} + +func NewBasicAuthHandler(authConfig cluster.AuthConfig) auth { + return &basicAuthHandler{authConfig.BasicAuth} +} + +func (h *basicAuthHandler) handleFunc(handler http.HandlerFunc) http.HandlerFunc { + if !h.basicAuth.Enabled() { + return handler + } + return func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + if ok && u == h.basicAuth.Username && p == h.basicAuth.Password { + handler(w, r) + return + } + // unauthorized request, send 401 + w.WriteHeader(401) + } +} + +type noopAuthHandler struct{} + +func NewNoopAuthHandler() auth { + return &noopAuthHandler{} +} + +func (h *noopAuthHandler) handleFunc(handler http.HandlerFunc) http.HandlerFunc { + return handler +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/backups.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/backups.go new file mode 100644 
index 0000000000000000000000000000000000000000..601139600f6a3095bebbd723c434bba290b419fd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/backups.go @@ -0,0 +1,168 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package clusterapi + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/weaviate/weaviate/usecases/backup" +) + +type backupManager interface { + OnCanCommit(ctx context.Context, req *backup.Request) *backup.CanCommitResponse + OnCommit(ctx context.Context, req *backup.StatusRequest) error + OnAbort(ctx context.Context, req *backup.AbortRequest) error + OnStatus(ctx context.Context, req *backup.StatusRequest) *backup.StatusResponse +} + +type backups struct { + manager backupManager + auth auth +} + +func NewBackups(manager backupManager, auth auth) *backups { + return &backups{manager: manager, auth: auth} +} + +func (b *backups) CanCommit() http.Handler { + return b.auth.handleFunc(b.canCommitHandler()) +} + +func (b *backups) canCommitHandler() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + status := http.StatusInternalServerError + http.Error(w, fmt.Errorf("read request body: %w", err).Error(), status) + return + } + defer r.Body.Close() + + var req backup.Request + if err := json.Unmarshal(body, &req); err != nil { + status := http.StatusInternalServerError + http.Error(w, fmt.Errorf("unmarshal request: %w", err).Error(), status) + return + } + + resp := b.manager.OnCanCommit(r.Context(), &req) + b, err := json.Marshal(&resp) + if err != nil { + status := http.StatusInternalServerError + http.Error(w, fmt.Errorf("marshal response: %w", err).Error(), status) + 
return + } + + w.WriteHeader(http.StatusOK) + w.Write(b) + } +} + +func (b *backups) Commit() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + status := http.StatusInternalServerError + http.Error(w, fmt.Errorf("read request body: %w", err).Error(), status) + return + } + defer r.Body.Close() + + var req backup.StatusRequest + if err := json.Unmarshal(body, &req); err != nil { + status := http.StatusInternalServerError + http.Error(w, fmt.Errorf("unmarshal request: %w", err).Error(), status) + return + } + + if err := b.manager.OnCommit(r.Context(), &req); err != nil { + status := http.StatusInternalServerError + http.Error(w, fmt.Errorf("commit: %w", err).Error(), status) + return + } + + w.WriteHeader(http.StatusCreated) + }) +} + +func (b *backups) Abort() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + status := http.StatusInternalServerError + http.Error(w, fmt.Errorf("read request body: %w", err).Error(), status) + return + } + defer r.Body.Close() + + var req backup.AbortRequest + if err := json.Unmarshal(body, &req); err != nil { + status := http.StatusInternalServerError + http.Error(w, fmt.Errorf("unmarshal request: %w", err).Error(), status) + return + } + + params := r.URL.Query() + req.Bucket = params.Get("bucket") + req.Path = params.Get("path") + + if err := b.manager.OnAbort(r.Context(), &req); err != nil { + status := http.StatusInternalServerError + http.Error(w, fmt.Errorf("abort: %w", err).Error(), status) + return + } + + w.WriteHeader(http.StatusNoContent) + }) +} + +func (b *backups) Status() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + status := http.StatusInternalServerError + http.Error(w, fmt.Errorf("read request body: %w", err).Error(), status) + return + } + 
defer r.Body.Close() + + var req backup.StatusRequest + if err := json.Unmarshal(body, &req); err != nil { + status := http.StatusInternalServerError + http.Error(w, fmt.Errorf("unmarshal request: %w", err).Error(), status) + return + } + + params := r.URL.Query() + if params.Get("bucket") != "" { + req.Bucket = params.Get("bucket") + } + if params.Get("path") != "" { + req.Path = params.Get("path") + } + + resp := b.manager.OnStatus(r.Context(), &req) + b, err := json.Marshal(&resp) + if err != nil { + status := http.StatusInternalServerError + http.Error(w, fmt.Errorf("marshal response: %w", err).Error(), status) + return + } + + w.WriteHeader(http.StatusOK) + w.Write(b) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/backups_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/backups_test.go new file mode 100644 index 0000000000000000000000000000000000000000..51ca6bfe3102bd6d43672ea36be106ed0f350ea7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/backups_test.go @@ -0,0 +1,184 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package clusterapi_test + +import ( + "context" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/clients" + "github.com/weaviate/weaviate/adapters/handlers/rest/clusterapi" + "github.com/weaviate/weaviate/usecases/backup" +) + +func TestInternalBackupsAPI(t *testing.T) { + nodes := []*backupNode{ + { + name: "node1", + backupManager: &fakeBackupManager{}, + }, + { + name: "node2", + backupManager: &fakeBackupManager{}, + }, + } + hosts := setupClusterAPI(t, nodes) + + for _, node := range nodes { + node.backupManager.On("OnCanCommit", &backup.Request{Method: backup.OpCreate}). + Return(&backup.CanCommitResponse{}) + node.backupManager.On("OnCommit", &backup.StatusRequest{}).Return(nil) + node.backupManager.On("OnAbort", &backup.AbortRequest{}).Return(nil) + } + + coord := newFakeCoordinator(newFakeNodeResolver(hosts)) + + t.Run("can commit, commit", func(t *testing.T) { + err := coord.Backup(context.Background(), &backup.Request{Method: backup.OpCreate}, false) + require.Nil(t, err) + }) + + t.Run("abort", func(t *testing.T) { + err := coord.Backup(context.Background(), &backup.Request{Method: backup.OpCreate}, true) + require.Nil(t, err) + }) +} + +func setupClusterAPI(t *testing.T, nodes []*backupNode) map[string]string { + hosts := make(map[string]string) + + for _, node := range nodes { + backupsHandler := clusterapi.NewBackups(node.backupManager, clusterapi.NewNoopAuthHandler()) + + mux := http.NewServeMux() + mux.Handle("/backups/can-commit", backupsHandler.CanCommit()) + mux.Handle("/backups/commit", backupsHandler.Commit()) + mux.Handle("/backups/abort", backupsHandler.Abort()) + mux.Handle("/backups/status", backupsHandler.Status()) + server := httptest.NewServer(mux) + + parsedURL, err := url.Parse(server.URL) + require.Nil(t, err) + + hosts[node.name] = parsedURL.Host + } + + return 
hosts +} + +type backupNode struct { + name string + backupManager *fakeBackupManager +} + +func newFakeNodeResolver(hosts map[string]string) *fakeNodeResolver { + return &fakeNodeResolver{hosts: hosts} +} + +type fakeNodeResolver struct { + hosts map[string]string +} + +func (r *fakeNodeResolver) AllHostnames() []string { + return r.HostNames() +} + +func (r *fakeNodeResolver) NodeHostName(nodeName string) (string, bool) { + if host, ok := r.hosts[nodeName]; ok { + return host, true + } + return "", false +} + +func (r *fakeNodeResolver) HostNames() []string { + hosts := make([]string, len(r.hosts)) + count := 0 + for _, host := range r.hosts { + hosts[count] = host + count++ + } + return hosts[:count] +} + +func newFakeCoordinator(resolver *fakeNodeResolver) *fakeCoordinator { + return &fakeCoordinator{ + client: clients.NewClusterBackups(&http.Client{}), + nodeResolver: resolver, + } +} + +type fakeCoordinator struct { + client *clients.ClusterBackups + nodeResolver *fakeNodeResolver +} + +func (c *fakeCoordinator) Backup(ctx context.Context, req *backup.Request, abort bool) error { + if abort { + return c.abort(ctx) + } + + for _, host := range c.nodeResolver.HostNames() { + _, err := c.client.CanCommit(ctx, host, req) + if err != nil { + return err + } + } + + for _, host := range c.nodeResolver.HostNames() { + err := c.client.Commit(ctx, host, &backup.StatusRequest{}) + if err != nil { + return err + } + } + + return nil +} + +func (c *fakeCoordinator) abort(ctx context.Context) error { + for _, host := range c.nodeResolver.HostNames() { + err := c.client.Abort(ctx, host, &backup.AbortRequest{}) + if err != nil { + return err + } + } + + return nil +} + +type fakeBackupManager struct { + mock.Mock +} + +func (m *fakeBackupManager) OnCanCommit(ctx context.Context, req *backup.Request) *backup.CanCommitResponse { + args := m.Called(req) + return args.Get(0).(*backup.CanCommitResponse) +} + +func (m *fakeBackupManager) OnCommit(ctx context.Context, req 
*backup.StatusRequest) error { + args := m.Called(req) + return args.Error(0) +} + +func (m *fakeBackupManager) OnAbort(ctx context.Context, req *backup.AbortRequest) error { + args := m.Called(req) + return args.Error(0) +} + +func (m *fakeBackupManager) OnStatus(ctx context.Context, req *backup.StatusRequest) *backup.StatusResponse { + args := m.Called(req) + return args.Get(0).(*backup.StatusResponse) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/classifications.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/classifications.go new file mode 100644 index 0000000000000000000000000000000000000000..36f55059cb77e1f55ea8398e425611b77383ba8e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/classifications.go @@ -0,0 +1,22 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package clusterapi + +type classifications struct { + txHandler +} + +func NewClassifications(manager txManager, auth auth) *classifications { + return &classifications{ + newTxHandler(manager, auth, classifyTX), + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/db_users.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/db_users.go new file mode 100644 index 0000000000000000000000000000000000000000..7ff623638da2b79f37a69929e651931a9e87de41 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/db_users.go @@ -0,0 +1,80 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package clusterapi + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" +) + +type DbUsers struct { + userManager *apikey.RemoteApiKey + auth auth +} + +func NewDbUsers(manager *apikey.RemoteApiKey, auth auth) *DbUsers { + return &DbUsers{userManager: manager, auth: auth} +} + +func (d *DbUsers) Users() http.Handler { + return d.auth.handleFunc(d.userHandler()) +} + +func (d *DbUsers) userHandler() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + switch path { + case "/cluster/users/db/lastUsedTime": + if r.Method != http.MethodPost { + msg := fmt.Sprintf("/user api path %q with method %v not found", path, r.Method) + http.Error(w, msg, http.StatusMethodNotAllowed) + return + } + + d.incomingUserStatus().ServeHTTP(w, r) + return + default: + http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) + return + } + } +} + +func (d *DbUsers) incomingUserStatus() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + var body apikey.UserStatusRequest + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + http.Error(w, "Error parsing JSON body", http.StatusBadRequest) + return + } + userStatus, err := d.userManager.GetUserStatus(r.Context(), body) + if err != nil { + http.Error(w, "/user fulfill request: "+err.Error(), http.StatusBadRequest) + return + } + + if userStatus == nil { + w.WriteHeader(http.StatusNotFound) + return + } + + if err := json.NewEncoder(w).Encode(userStatus); err != nil { + http.Error(w, "/user marshal response: "+err.Error(), + http.StatusInternalServerError) + } + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices.go new file mode 100644 index 
0000000000000000000000000000000000000000..0bb7ae3f1e8125a246ef505391a07ed4f8be5d42 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices.go @@ -0,0 +1,1684 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package clusterapi + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/entities/models" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + reposdb "github.com/weaviate/weaviate/adapters/repos/db" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/dto" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/filters" + entschema "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/file" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/replica" + "github.com/weaviate/weaviate/usecases/replica/hashtree" +) + +type indices struct { + shards shards + db db + auth auth + // maintenanceModeEnabled is an experimental feature to allow the system to be + // put into a maintenance mode where all indices requests just return a 418 + maintenanceModeEnabled func() bool + regexpObjects *regexp.Regexp + regexpObjectsOverwrite *regexp.Regexp + regexObjectsDigest *regexp.Regexp + regexObjectsDigestsInRange *regexp.Regexp + 
regexObjectsHashTreeLevel *regexp.Regexp + regexpObjectsSearch *regexp.Regexp + regexpObjectsFind *regexp.Regexp + + regexpObjectsAggregations *regexp.Regexp + regexpObject *regexp.Regexp + regexpReferences *regexp.Regexp + regexpShardsQueueSize *regexp.Regexp + regexpShardsStatus *regexp.Regexp + regexpShardFiles *regexp.Regexp + regexpShardFileMetadata *regexp.Regexp + regexpShard *regexp.Regexp + regexpShardReinit *regexp.Regexp + + regexpPauseFileActivity *regexp.Regexp + regexpResumeFileActivity *regexp.Regexp + regexpListFiles *regexp.Regexp + + regexpAsyncReplicationTargetNode *regexp.Regexp + + logger logrus.FieldLogger +} + +const ( + cl = entschema.ClassNameRegexCore + sh = entschema.ShardNameRegexCore + ob = `[A-Za-z0-9_+-]+` + l = "[0-9]+" + + urlPatternObjects = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects` + urlPatternObjectsOverwrite = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects:overwrite` + urlPatternObjectsDigest = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects:digest` + urlPatternObjectsDigestsInRange = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects:digestsInRange` + urlPatternHashTreeLevel = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects\/hashtree\/(` + l + `)` + urlPatternObjectsSearch = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects\/_search` + urlPatternObjectsFind = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects\/_find` + urlPatternObjectsAggregations = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects\/_aggregations` + urlPatternObject = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects\/(` + ob + `)` + urlPatternReferences = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/references` + urlPatternShardsQueueSize = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/queuesize` + urlPatternShardsStatus = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/status` + urlPatternShardFiles = 
`\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/files/(.*)` + urlPatternShardFileMetadata = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/files:metadata/(.*)` + urlPatternShard = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)$` + urlPatternShardReinit = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `):reinit` + urlPatternPauseFileActivity = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/background:pause` + urlPatternResumeFileActivity = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/background:resume` + urlPatternListFiles = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/background:list` + urlPatternAsyncReplicationTargetNode = `\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/async-replication-target-node` +) + +type shards interface { + PutObject(ctx context.Context, indexName, shardName string, + obj *storobj.Object, schemaVersion uint64) error + BatchPutObjects(ctx context.Context, indexName, shardName string, + objs []*storobj.Object, schemaVersion uint64) []error + BatchAddReferences(ctx context.Context, indexName, shardName string, + refs objects.BatchReferences, schemaVersion uint64) []error + GetObject(ctx context.Context, indexName, shardName string, + id strfmt.UUID, selectProperties search.SelectProperties, + additional additional.Properties) (*storobj.Object, error) + Exists(ctx context.Context, indexName, shardName string, + id strfmt.UUID) (bool, error) + DeleteObject(ctx context.Context, indexName, shardName string, + id strfmt.UUID, deletionTime time.Time, schemaVersion uint64) error + MergeObject(ctx context.Context, indexName, shardName string, + mergeDoc objects.MergeDocument, schemaVersion uint64) error + MultiGetObjects(ctx context.Context, indexName, shardName string, + id []strfmt.UUID) ([]*storobj.Object, error) + Search(ctx context.Context, indexName, shardName string, + vectors []models.Vector, targetVectors []string, distance float32, limit int, + filters *filters.LocalFilter, 
keywordRanking *searchparams.KeywordRanking, + sort []filters.Sort, cursor *filters.Cursor, groupBy *searchparams.GroupBy, + additional additional.Properties, targetCombination *dto.TargetCombination, properties []string, + ) ([]*storobj.Object, []float32, error) + Aggregate(ctx context.Context, indexName, shardName string, + params aggregation.Params) (*aggregation.Result, error) + FindUUIDs(ctx context.Context, indexName, shardName string, + filters *filters.LocalFilter) ([]strfmt.UUID, error) + DeleteObjectBatch(ctx context.Context, indexName, shardName string, + uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64) objects.BatchSimpleObjects + GetShardQueueSize(ctx context.Context, indexName, shardName string) (int64, error) + GetShardStatus(ctx context.Context, indexName, shardName string) (string, error) + UpdateShardStatus(ctx context.Context, indexName, shardName, + targetStatus string, schemaVersion uint64) error + + // Replication-specific + OverwriteObjects(ctx context.Context, indexName, shardName string, + vobjects []*objects.VObject) ([]types.RepairResponse, error) + DigestObjects(ctx context.Context, indexName, shardName string, + ids []strfmt.UUID) (result []types.RepairResponse, err error) + DigestObjectsInRange(ctx context.Context, indexName, shardName string, + initialUUID, finalUUID strfmt.UUID, limit int) (result []types.RepairResponse, err error) + HashTreeLevel(ctx context.Context, indexName, shardName string, + level int, discriminant *hashtree.Bitset) (digests []hashtree.Digest, err error) + + // Scale-out Replication POC + FilePutter(ctx context.Context, indexName, shardName, + filePath string) (io.WriteCloser, error) + CreateShard(ctx context.Context, indexName, shardName string) error + ReInitShard(ctx context.Context, indexName, shardName string) error + // PauseFileActivity See adapters/clients.RemoteIndex.PauseFileActivity + PauseFileActivity(ctx context.Context, indexName, shardName string, schemaVersion 
uint64) error + // ResumeFileActivity See adapters/clients.RemoteIndex.ResumeFileActivity + ResumeFileActivity(ctx context.Context, indexName, shardName string) error + // ListFiles See adapters/clients.RemoteIndex.ListFiles + ListFiles(ctx context.Context, indexName, shardName string) ([]string, error) + // GetFileMetadata See adapters/clients.RemoteIndex.GetFileMetadata + GetFileMetadata(ctx context.Context, indexName, shardName, + relativeFilePath string) (file.FileMetadata, error) + // GetFile See adapters/clients.RemoteIndex.GetFile + GetFile(ctx context.Context, indexName, shardName, + relativeFilePath string) (io.ReadCloser, error) + // AddAsyncReplicationTargetNode See adapters/clients.RemoteIndex.AddAsyncReplicationTargetNode + AddAsyncReplicationTargetNode(ctx context.Context, indexName, shardName string, + targetNodeOverride additional.AsyncReplicationTargetNodeOverride, schemaVersion uint64) error + // RemoveAsyncReplicationTargetNode See adapters/clients.RemoteIndex.RemoveAsyncReplicationTargetNode + RemoveAsyncReplicationTargetNode(ctx context.Context, indexName, shardName string, + targetNodeOverride additional.AsyncReplicationTargetNodeOverride) error +} + +type db interface { + StartupComplete() bool +} + +func NewIndices(shards shards, db db, auth auth, maintenanceModeEnabled func() bool, logger logrus.FieldLogger) *indices { + return &indices{ + regexpObjects: regexp.MustCompile(urlPatternObjects), + regexpObjectsOverwrite: regexp.MustCompile(urlPatternObjectsOverwrite), + regexObjectsDigest: regexp.MustCompile(urlPatternObjectsDigest), + regexObjectsDigestsInRange: regexp.MustCompile(urlPatternObjectsDigestsInRange), + regexObjectsHashTreeLevel: regexp.MustCompile(urlPatternHashTreeLevel), + regexpObjectsSearch: regexp.MustCompile(urlPatternObjectsSearch), + regexpObjectsFind: regexp.MustCompile(urlPatternObjectsFind), + + regexpObjectsAggregations: regexp.MustCompile(urlPatternObjectsAggregations), + regexpObject: 
regexp.MustCompile(urlPatternObject), + regexpReferences: regexp.MustCompile(urlPatternReferences), + regexpShardsQueueSize: regexp.MustCompile(urlPatternShardsQueueSize), + regexpShardsStatus: regexp.MustCompile(urlPatternShardsStatus), + regexpShardFiles: regexp.MustCompile(urlPatternShardFiles), + regexpShardFileMetadata: regexp.MustCompile(urlPatternShardFileMetadata), + regexpShard: regexp.MustCompile(urlPatternShard), + regexpShardReinit: regexp.MustCompile(urlPatternShardReinit), + regexpPauseFileActivity: regexp.MustCompile(urlPatternPauseFileActivity), + regexpResumeFileActivity: regexp.MustCompile(urlPatternResumeFileActivity), + regexpListFiles: regexp.MustCompile(urlPatternListFiles), + regexpAsyncReplicationTargetNode: regexp.MustCompile(urlPatternAsyncReplicationTargetNode), + shards: shards, + db: db, + auth: auth, + maintenanceModeEnabled: maintenanceModeEnabled, + logger: logger, + } +} + +func (i *indices) Indices() http.Handler { + return i.auth.handleFunc(i.indicesHandler()) +} + +func (i *indices) indicesHandler() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + if i.maintenanceModeEnabled() { + http.Error(w, "418 Maintenance mode", http.StatusTeapot) + return + } + // NOTE if you update any of these handler methods/paths, also update the indices_test.go + // TestMaintenanceModeIndices test to include the new methods/paths. 
+ switch { + case i.regexpObjectsSearch.MatchString(path): + if r.Method != http.MethodPost { + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + } + + i.postSearchObjects().ServeHTTP(w, r) + return + case i.regexpObjectsFind.MatchString(path): + if r.Method != http.MethodPost { + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + } + + i.postFindUUIDs().ServeHTTP(w, r) + return + case i.regexpObjectsAggregations.MatchString(path): + if r.Method != http.MethodPost { + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + } + + i.postAggregateObjects().ServeHTTP(w, r) + return + case i.regexpObjectsOverwrite.MatchString(path): + if r.Method != http.MethodPut { + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + } + + i.putOverwriteObjects().ServeHTTP(w, r) + case i.regexObjectsDigest.MatchString(path): + if r.Method != http.MethodGet { + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + } + + i.getObjectsDigest().ServeHTTP(w, r) + case i.regexObjectsDigestsInRange.MatchString(path): + if r.Method != http.MethodPost { + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + } + + i.getObjectsDigestsInRange().ServeHTTP(w, r) + case i.regexObjectsHashTreeLevel.MatchString(path): + if r.Method != http.MethodPost { + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + } + + i.getHashTreeLevel().ServeHTTP(w, r) + case i.regexpObject.MatchString(path): + if r.Method == http.MethodGet { + i.getObject().ServeHTTP(w, r) + return + } + if r.Method == http.MethodDelete { + i.deleteObject().ServeHTTP(w, r) + return + } + if r.Method == http.MethodPatch { + i.mergeObject().ServeHTTP(w, r) + return + } + + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + + case i.regexpObjects.MatchString(path): + if r.Method == http.MethodGet { + i.getObjectsMulti().ServeHTTP(w, r) + 
return + } + if r.Method == http.MethodPost { + i.postObject().ServeHTTP(w, r) + return + } + if r.Method == http.MethodDelete { + i.deleteObjects().ServeHTTP(w, r) + return + } + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + + case i.regexpReferences.MatchString(path): + if r.Method != http.MethodPost { + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + } + + i.postReferences().ServeHTTP(w, r) + return + case i.regexpShardsQueueSize.MatchString(path): + if r.Method == http.MethodGet { + i.getGetShardQueueSize().ServeHTTP(w, r) + return + } + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + case i.regexpShardsStatus.MatchString(path): + if r.Method == http.MethodGet { + i.getGetShardStatus().ServeHTTP(w, r) + return + } + if r.Method == http.MethodPost { + i.postUpdateShardStatus().ServeHTTP(w, r) + return + } + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + + case i.regexpShardFiles.MatchString(path): + if r.Method == http.MethodPost { + i.postShardFile().ServeHTTP(w, r) + return + } + if r.Method == http.MethodGet { + i.getShardFile().ServeHTTP(w, r) + return + } + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + + case i.regexpShardFileMetadata.MatchString(path): + if r.Method == http.MethodGet { + i.getShardFileMetadata().ServeHTTP(w, r) + return + } + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + + case i.regexpShard.MatchString(path): + if r.Method == http.MethodPost { + i.postShard().ServeHTTP(w, r) + return + } + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + case i.regexpShardReinit.MatchString(path): + if r.Method == http.MethodPut { + i.putShardReinit().ServeHTTP(w, r) + return + } + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + case i.regexpPauseFileActivity.MatchString(path): + if r.Method == 
http.MethodPost { + i.postPauseFileActivity().ServeHTTP(w, r) + return + } + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + case i.regexpResumeFileActivity.MatchString(path): + if r.Method == http.MethodPost { + i.postResumeFileActivity().ServeHTTP(w, r) + return + } + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + case i.regexpListFiles.MatchString(path): + if r.Method == http.MethodPost { + i.postListFiles().ServeHTTP(w, r) + return + } + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + case i.regexpAsyncReplicationTargetNode.MatchString(path): + if r.Method == http.MethodPost { + i.postAddAsyncReplicationTargetNode().ServeHTTP(w, r) + return + } + if r.Method == http.MethodDelete { + i.deleteAsyncReplicationTargetNode().ServeHTTP(w, r) + return + } + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + default: + http.NotFound(w, r) + return + } + } +} + +func (i *indices) postObject() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpObjects.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + + ct := r.Header.Get("content-type") + + switch ct { + case IndicesPayloads.ObjectList.MIME(): + i.postObjectBatch(w, r, index, shard) + return + + case IndicesPayloads.SingleObject.MIME(): + i.postObjectSingle(w, r, index, shard) + return + + default: + http.Error(w, "415 Unsupported Media Type", http.StatusUnsupportedMediaType) + return + } + }) +} + +func (i *indices) postObjectSingle(w http.ResponseWriter, r *http.Request, + index, shard string, +) { + bodyBytes, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + obj, err := IndicesPayloads.SingleObject.Unmarshal(bodyBytes) + if err != nil { + 
http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + schemaVersion, err := extractSchemaVersionFromUrlQuery(r.URL.Query()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + if err := i.shards.PutObject(r.Context(), index, shard, obj, schemaVersion); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusNoContent) +} + +func (i *indices) postObjectBatch(w http.ResponseWriter, r *http.Request, + index, shard string, +) { + bodyBytes, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + objs, err := IndicesPayloads.ObjectList.Unmarshal(bodyBytes) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + schemaVersion, err := extractSchemaVersionFromUrlQuery(r.URL.Query()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + errs := i.shards.BatchPutObjects(r.Context(), index, shard, objs, schemaVersion) + if len(errs) > 0 && errors.Is(errs[0], reposdb.ErrShardNotFound) { + http.Error(w, errs[0].Error(), http.StatusInternalServerError) + return + } + errsJSON, err := IndicesPayloads.ErrorList.Marshal(errs) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + IndicesPayloads.ErrorList.SetContentTypeHeader(w) + w.Write(errsJSON) +} + +func (i *indices) getObject() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpObject.FindStringSubmatch(r.URL.Path) + if len(args) != 4 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard, id := args[1], args[2], args[3] + + defer r.Body.Close() + + if r.URL.Query().Get("check_exists") != "" { + i.checkExists(w, r, index, shard, id) + return + } + + additionalEncoded := r.URL.Query().Get("additional") + if additionalEncoded == "" { + http.Error(w, 
"missing required url param 'additional'", + http.StatusBadRequest) + return + } + + additionalBytes, err := base64.StdEncoding.DecodeString(additionalEncoded) + if err != nil { + http.Error(w, "base64 decode 'additional' param: "+err.Error(), + http.StatusBadRequest) + return + } + + selectPropertiesEncoded := r.URL.Query().Get("selectProperties") + if selectPropertiesEncoded == "" { + http.Error(w, "missing required url param 'selectProperties'", + http.StatusBadRequest) + return + } + + selectPropertiesBytes, err := base64.StdEncoding. + DecodeString(selectPropertiesEncoded) + if err != nil { + http.Error(w, "base64 decode 'selectProperties' param: "+err.Error(), + http.StatusBadRequest) + return + } + + var additional additional.Properties + if err := json.Unmarshal(additionalBytes, &additional); err != nil { + http.Error(w, "unmarshal 'additional' param from json: "+err.Error(), + http.StatusBadRequest) + return + } + + var selectProperties search.SelectProperties + if err := json.Unmarshal(selectPropertiesBytes, &selectProperties); err != nil { + http.Error(w, "unmarshal 'selectProperties' param from json: "+err.Error(), + http.StatusBadRequest) + return + } + if !i.db.StartupComplete() { + http.Error(w, "startup is not complete", http.StatusServiceUnavailable) + return + } + + i.logger.WithFields(logrus.Fields{ + "shard": shard, + "action": "GetObject", + }).Debug("getting object ...") + + obj, err := i.shards.GetObject(r.Context(), index, shard, strfmt.UUID(id), + selectProperties, additional) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if obj == nil { + // this is a legitimate case - the requested ID doesn't exist, don't try + // to marshal anything + w.WriteHeader(http.StatusNotFound) + return + } + + objBytes, err := IndicesPayloads.SingleObject.Marshal(obj) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + 
IndicesPayloads.SingleObject.SetContentTypeHeader(w) + w.Write(objBytes) + }) +} + +func (i *indices) checkExists(w http.ResponseWriter, r *http.Request, + index, shard, id string, +) { + i.logger.WithFields(logrus.Fields{ + "shard": shard, + "action": "checkExists", + }).Debug("checking if shard exists ...") + ok, err := i.shards.Exists(r.Context(), index, shard, strfmt.UUID(id)) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if ok { + w.WriteHeader(http.StatusNoContent) + } else { + w.WriteHeader(http.StatusNotFound) + } +} + +func (i *indices) deleteObject() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpObject.FindStringSubmatch(r.URL.Path) + if len(args) < 4 || len(args) > 5 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard, id := args[1], args[2], args[3] + + var deletionTime time.Time + + if len(args) == 5 { + deletionTimeUnixMilli, err := strconv.ParseInt(args[4], 10, 64) + if err != nil { + http.Error(w, "invalid URI", http.StatusBadRequest) + } + deletionTime = time.UnixMilli(deletionTimeUnixMilli) + } + + defer r.Body.Close() + + schemaVersion, err := extractSchemaVersionFromUrlQuery(r.URL.Query()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + err = i.shards.DeleteObject(r.Context(), index, shard, strfmt.UUID(id), deletionTime, schemaVersion) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusNoContent) + }) +} + +func (i *indices) mergeObject() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpObject.FindStringSubmatch(r.URL.Path) + if len(args) != 4 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard, _ := args[1], args[2], args[3] + + defer r.Body.Close() + ct, ok := 
IndicesPayloads.MergeDoc.CheckContentTypeHeaderReq(r) + if !ok { + http.Error(w, errors.Errorf("unexpected content type: %s", ct).Error(), + http.StatusUnsupportedMediaType) + return + } + + bodyBytes, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + mergeDoc, err := IndicesPayloads.MergeDoc.Unmarshal(bodyBytes) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + schemaVersion, err := extractSchemaVersionFromUrlQuery(r.URL.Query()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + if err = i.shards.MergeObject(r.Context(), index, shard, mergeDoc, schemaVersion); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusNoContent) + }) +} + +func (i *indices) getObjectsMulti() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpObjects.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, fmt.Sprintf("invalid URI: %s", r.URL.Path), + http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + + idsEncoded := r.URL.Query().Get("ids") + if idsEncoded == "" { + http.Error(w, "missing required url param 'ids'", + http.StatusBadRequest) + return + } + + idsBytes, err := base64.StdEncoding.DecodeString(idsEncoded) + if err != nil { + http.Error(w, "base64 decode 'ids' param: "+err.Error(), + http.StatusBadRequest) + return + } + + var ids []strfmt.UUID + if err := json.Unmarshal(idsBytes, &ids); err != nil { + http.Error(w, "unmarshal 'ids' param from json: "+err.Error(), + http.StatusBadRequest) + return + } + + i.logger.WithFields(logrus.Fields{ + "shard": shard, + "action": "MultiGetObjects", + }).Debug("get multiple objects ...") + + objs, err := i.shards.MultiGetObjects(r.Context(), index, shard, ids) + if err != nil { + http.Error(w, err.Error(), 
http.StatusInternalServerError) + return + } + + objsBytes, err := IndicesPayloads.ObjectList.Marshal(objs) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + IndicesPayloads.ObjectList.SetContentTypeHeader(w) + w.Write(objsBytes) + }) +} + +func (i *indices) postSearchObjects() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpObjectsSearch.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + reqPayload, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read request body: "+err.Error(), http.StatusInternalServerError) + return + } + + ct, ok := IndicesPayloads.SearchParams.CheckContentTypeHeaderReq(r) + if !ok { + http.Error(w, errors.Errorf("unexpected content type: %s", ct).Error(), + http.StatusUnsupportedMediaType) + return + } + + vector, targetVector, certainty, limit, filters, keywordRanking, sort, cursor, groupBy, additional, targetCombination, props, err := IndicesPayloads.SearchParams. 
+ Unmarshal(reqPayload) + if err != nil { + http.Error(w, "unmarshal search params from json: "+err.Error(), + http.StatusBadRequest) + return + } + + i.logger.WithFields(logrus.Fields{ + "shard": shard, + "action": "Search", + }).Debug("searching ...") + + results, dists, err := i.shards.Search(r.Context(), index, shard, + vector, targetVector, certainty, limit, filters, keywordRanking, sort, cursor, groupBy, additional, targetCombination, props) + if err != nil && errors.As(err, &enterrors.ErrUnprocessable{}) { + http.Error(w, err.Error(), http.StatusUnprocessableEntity) + return + } + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + resBytes, err := IndicesPayloads.SearchResults.Marshal(results, dists) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + IndicesPayloads.SearchResults.SetContentTypeHeader(w) + w.Write(resBytes) + }) +} + +func (i *indices) postReferences() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpReferences.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + reqPayload, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read request body: "+err.Error(), + http.StatusInternalServerError) + return + } + + ct, ok := IndicesPayloads.ReferenceList.CheckContentTypeHeaderReq(r) + if !ok { + http.Error(w, errors.Errorf("unexpected content type: %s", ct).Error(), + http.StatusUnsupportedMediaType) + return + } + + refs, err := IndicesPayloads.ReferenceList.Unmarshal(reqPayload) + if err != nil { + http.Error(w, "read request body: "+err.Error(), + http.StatusInternalServerError) + return + } + + schemaVersion, err := extractSchemaVersionFromUrlQuery(r.URL.Query()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + errs := 
i.shards.BatchAddReferences(r.Context(), index, shard, refs, schemaVersion) + errsJSON, err := IndicesPayloads.ErrorList.Marshal(errs) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + IndicesPayloads.ErrorList.SetContentTypeHeader(w) + w.Write(errsJSON) + }) +} + +func (i *indices) postAggregateObjects() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpObjectsAggregations.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + reqPayload, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read request body: "+err.Error(), + http.StatusInternalServerError) + return + } + + ct, ok := IndicesPayloads.AggregationParams.CheckContentTypeHeaderReq(r) + if !ok { + http.Error(w, errors.Errorf("unexpected content type: %s", ct).Error(), + http.StatusUnsupportedMediaType) + return + } + + params, err := IndicesPayloads.AggregationParams.Unmarshal(reqPayload) + if err != nil { + http.Error(w, "read request body: "+err.Error(), + http.StatusInternalServerError) + return + } + + i.logger.WithFields(logrus.Fields{ + "shard": shard, + "action": "Aggregate", + }).Debug("aggregate ...") + + aggRes, err := i.shards.Aggregate(r.Context(), index, shard, params) + + if err != nil && errors.As(err, &enterrors.ErrUnprocessable{}) { + http.Error(w, err.Error(), http.StatusUnprocessableEntity) + return + } + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + aggResBytes, err := IndicesPayloads.AggregationResult.Marshal(aggRes) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + IndicesPayloads.AggregationResult.SetContentTypeHeader(w) + w.Write(aggResBytes) + }) +} + +func (i *indices) postFindUUIDs() http.Handler { + return http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + args := i.regexpObjectsFind.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + reqPayload, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read request body: "+err.Error(), http.StatusInternalServerError) + return + } + + ct, ok := IndicesPayloads.FindUUIDsParams.CheckContentTypeHeaderReq(r) + if !ok { + http.Error(w, errors.Errorf("unexpected content type: %s", ct).Error(), + http.StatusUnsupportedMediaType) + return + } + + filters, err := IndicesPayloads.FindUUIDsParams. + Unmarshal(reqPayload) + if err != nil { + http.Error(w, "unmarshal find doc ids params from json: "+err.Error(), + http.StatusBadRequest) + return + } + + i.logger.WithFields(logrus.Fields{ + "shard": shard, + "action": "FindUUIDs", + }).Debug("find UUIDs ...") + + results, err := i.shards.FindUUIDs(r.Context(), index, shard, filters) + + if err != nil && errors.As(err, &enterrors.ErrUnprocessable{}) { + http.Error(w, err.Error(), http.StatusUnprocessableEntity) + return + } + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + resBytes, err := IndicesPayloads.FindUUIDsResults.Marshal(results) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + IndicesPayloads.FindUUIDsResults.SetContentTypeHeader(w) + w.Write(resBytes) + }) +} + +func (i *indices) putOverwriteObjects() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpObjectsOverwrite.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + reqPayload, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read request body: "+err.Error(), http.StatusInternalServerError) + return + } + + 
ct, ok := IndicesPayloads.VersionedObjectList.CheckContentTypeHeaderReq(r) + if !ok { + http.Error(w, errors.Errorf("unexpected content type: %s", ct).Error(), + http.StatusUnsupportedMediaType) + return + } + + vobjs, err := IndicesPayloads.VersionedObjectList.Unmarshal(reqPayload) + if err != nil { + http.Error(w, "unmarshal overwrite objects params from json: "+err.Error(), + http.StatusBadRequest) + return + } + + results, err := i.shards.OverwriteObjects(r.Context(), index, shard, vobjs) + if err != nil { + http.Error(w, "overwrite objects: "+err.Error(), + http.StatusInternalServerError) + return + } + + resBytes, err := json.Marshal(results) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Write(resBytes) + }) +} + +func (i *indices) getObjectsDigest() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexObjectsDigest.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + reqPayload, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read request body: "+err.Error(), http.StatusInternalServerError) + return + } + + var ids []strfmt.UUID + if err := json.Unmarshal(reqPayload, &ids); err != nil { + http.Error(w, "unmarshal digest objects params from json: "+err.Error(), + http.StatusBadRequest) + return + } + + i.logger.WithFields(logrus.Fields{ + "shard": shard, + "action": "DigestObjects", + }).Debug("digest objects ...") + + results, err := i.shards.DigestObjects(r.Context(), index, shard, ids) + if err != nil && errors.As(err, &enterrors.ErrUnprocessable{}) { + http.Error(w, err.Error(), http.StatusUnprocessableEntity) + return + } + if err != nil { + http.Error(w, "digest objects: "+err.Error(), + http.StatusInternalServerError) + return + } + + resBytes, err := json.Marshal(results) + if err != nil { + http.Error(w, 
err.Error(), http.StatusInternalServerError) + return + } + + w.Write(resBytes) + }) +} + +func (i *indices) getObjectsDigestsInRange() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexObjectsDigestsInRange.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + reqPayload, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read request body: "+err.Error(), http.StatusInternalServerError) + return + } + + var rangeReq replica.DigestObjectsInRangeReq + if err := json.Unmarshal(reqPayload, &rangeReq); err != nil { + http.Error(w, "unmarshal digest objects in token range params from json: "+err.Error(), + http.StatusBadRequest) + return + } + + digests, err := i.shards.DigestObjectsInRange(r.Context(), + index, shard, rangeReq.InitialUUID, rangeReq.FinalUUID, rangeReq.Limit) + if err != nil { + http.Error(w, "digest objects in range: "+err.Error(), + http.StatusInternalServerError) + return + } + + resBytes, err := json.Marshal(replica.DigestObjectsInRangeResp{ + Digests: digests, + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Write(resBytes) + }) +} + +func (i *indices) getHashTreeLevel() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexObjectsHashTreeLevel.FindStringSubmatch(r.URL.Path) + if len(args) != 4 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard, level := args[1], args[2], args[3] + + l, err := strconv.Atoi(level) + if err != nil { + http.Error(w, "unmarshal hashtree level params: "+err.Error(), http.StatusInternalServerError) + return + } + + defer r.Body.Close() + reqPayload, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read request body: "+err.Error(), http.StatusInternalServerError) + return + } + + 
var discriminant hashtree.Bitset + if err := discriminant.Unmarshal(reqPayload); err != nil { + http.Error(w, "unmarshal hashtree level params from json: "+err.Error(), + http.StatusBadRequest) + return + } + + results, err := i.shards.HashTreeLevel(r.Context(), index, shard, l, &discriminant) + if err != nil { + http.Error(w, "hashtree level: "+err.Error(), + http.StatusInternalServerError) + return + } + + resBytes, err := json.Marshal(results) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Write(resBytes) + }) +} + +func (i *indices) deleteObjects() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpObjects.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + reqPayload, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read request body: "+err.Error(), http.StatusInternalServerError) + return + } + + ct, ok := IndicesPayloads.BatchDeleteParams.CheckContentTypeHeaderReq(r) + if !ok { + http.Error(w, errors.Errorf("unexpected content type: %s", ct).Error(), + http.StatusUnsupportedMediaType) + return + } + + uuids, deletionTimeUnix, dryRun, err := IndicesPayloads.BatchDeleteParams. 
+ Unmarshal(reqPayload) + if err != nil { + http.Error(w, "unmarshal find doc ids params from json: "+err.Error(), + http.StatusBadRequest) + return + } + + schemaVersion, err := extractSchemaVersionFromUrlQuery(r.URL.Query()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + results := i.shards.DeleteObjectBatch(r.Context(), index, shard, uuids, deletionTimeUnix, dryRun, schemaVersion) + + resBytes, err := IndicesPayloads.BatchDeleteResults.Marshal(results) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + IndicesPayloads.BatchDeleteResults.SetContentTypeHeader(w) + w.Write(resBytes) + }) +} + +func (i *indices) getGetShardQueueSize() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpShardsQueueSize.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + + i.logger.WithFields(logrus.Fields{ + "shard": shard, + "action": "GetShardQueueSize", + }).Debug("getting shard queue size ...") + + size, err := i.shards.GetShardQueueSize(r.Context(), index, shard) + if err != nil && errors.As(err, &enterrors.ErrUnprocessable{}) { + http.Error(w, err.Error(), http.StatusUnprocessableEntity) + return + } + + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + sizeBytes, err := IndicesPayloads.GetShardQueueSizeResults.Marshal(size) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + IndicesPayloads.GetShardQueueSizeResults.SetContentTypeHeader(w) + w.Write(sizeBytes) + }) +} + +func (i *indices) getGetShardStatus() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpShardsStatus.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", 
http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + + i.logger.WithFields(logrus.Fields{ + "shard": shard, + "action": "GetShardStatus", + }).Debug("getting shard status ...") + + status, err := i.shards.GetShardStatus(r.Context(), index, shard) + if err != nil && errors.As(err, &enterrors.ErrUnprocessable{}) { + http.Error(w, err.Error(), http.StatusUnprocessableEntity) + return + } + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + statusBytes, err := IndicesPayloads.GetShardStatusResults.Marshal(status) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + IndicesPayloads.GetShardStatusResults.SetContentTypeHeader(w) + w.Write(statusBytes) + }) +} + +func (i *indices) postUpdateShardStatus() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpShardsStatus.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + reqPayload, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read request body: "+err.Error(), http.StatusInternalServerError) + return + } + + ct, ok := IndicesPayloads.UpdateShardStatusParams.CheckContentTypeHeaderReq(r) + if !ok { + http.Error(w, errors.Errorf("unexpected content type: %s", ct).Error(), + http.StatusUnsupportedMediaType) + return + } + + targetStatus, err := IndicesPayloads.UpdateShardStatusParams. 
+ Unmarshal(reqPayload) + if err != nil { + http.Error(w, "unmarshal find doc ids params from json: "+err.Error(), + http.StatusBadRequest) + return + } + + schemaVersion, err := extractSchemaVersionFromUrlQuery(r.URL.Query()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + err = i.shards.UpdateShardStatus(r.Context(), index, shard, targetStatus, schemaVersion) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + }) +} + +func (i *indices) postShardFile() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpShardFiles.FindStringSubmatch(r.URL.Path) + if len(args) != 4 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard, filename := args[1], args[2], args[3] + + ct, ok := IndicesPayloads.ShardFiles.CheckContentTypeHeaderReq(r) + if !ok { + http.Error(w, errors.Errorf("unexpected content type: %s", ct).Error(), + http.StatusUnsupportedMediaType) + return + } + + fp, err := i.shards.FilePutter(r.Context(), index, shard, filename) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + defer fp.Close() + n, err := io.Copy(fp, r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + i.logger.WithFields(logrus.Fields{ + "index": index, + "shard": shard, + "fileName": filename, + "n": n, + }).Debug() + + w.WriteHeader(http.StatusNoContent) + }) +} + +func (i *indices) postShard() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpShard.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + err := i.shards.CreateShard(r.Context(), index, shard) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + 
w.WriteHeader(http.StatusCreated) + }) +} + +func (i *indices) putShardReinit() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpShardReinit.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + err := i.shards.ReInitShard(r.Context(), index, shard) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusNoContent) + }) +} + +func (i *indices) getShardFileMetadata() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpShardFileMetadata.FindStringSubmatch(r.URL.Path) + if len(args) != 4 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + indexName, shardName, relativeFilePath := args[1], args[2], args[3] + + ct, ok := IndicesPayloads.ShardFiles.CheckContentTypeHeaderReq(r) + if !ok { + http.Error(w, errors.Errorf("unexpected content type: %s", ct).Error(), + http.StatusUnsupportedMediaType) + return + } + + md, err := i.shards.GetFileMetadata(r.Context(), indexName, shardName, relativeFilePath) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + resBytes, err := json.Marshal(md) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Write(resBytes) + w.WriteHeader(http.StatusOK) + }) +} + +func (i *indices) getShardFile() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpShardFiles.FindStringSubmatch(r.URL.Path) + if len(args) != 4 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + indexName, shardName, relativeFilePath := args[1], args[2], args[3] + + ct, ok := IndicesPayloads.ShardFiles.CheckContentTypeHeaderReq(r) + if !ok { + http.Error(w, errors.Errorf("unexpected content type: %s", ct).Error(), + 
http.StatusUnsupportedMediaType) + return + } + + reader, err := i.shards.GetFile(r.Context(), indexName, shardName, relativeFilePath) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + defer reader.Close() + + n, err := io.Copy(w, reader) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + i.logger.WithFields(logrus.Fields{ + "action": "replica_movement", + "index": indexName, + "shard": shardName, + "fileName": relativeFilePath, + "fileSizeBytes": n, + }).Debug("Copied replica file") + + w.WriteHeader(http.StatusOK) + }) +} + +func (i *indices) postPauseFileActivity() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpPauseFileActivity.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + indexName, shardName := args[1], args[2] + + schemaVersion, err := extractSchemaVersionFromUrlQuery(r.URL.Query()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + err = i.shards.PauseFileActivity(r.Context(), indexName, shardName, schemaVersion) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + i.logger.WithFields(logrus.Fields{ + "action": "replica_movement", + "index": indexName, + "shard": shardName, + }).Debug("Paused replica file activity") + + w.WriteHeader(http.StatusOK) + }) +} + +func (i *indices) postResumeFileActivity() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpPauseFileActivity.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + indexName, shardName := args[1], args[2] + + err := i.shards.ResumeFileActivity(r.Context(), indexName, shardName) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + 
i.logger.WithFields(logrus.Fields{ + "action": "replica_movement", + "index": indexName, + "shard": shardName, + }).Debug("Resumed replica file activity") + + w.WriteHeader(http.StatusOK) + }) +} + +func (i *indices) postListFiles() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpListFiles.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + indexName, shardName := args[1], args[2] + + relativeFilePaths, err := i.shards.ListFiles(r.Context(), indexName, shardName) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + resBytes, err := json.Marshal(relativeFilePaths) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + i.logger.WithFields(logrus.Fields{ + "action": "replica_movement", + "index": indexName, + "shard": shardName, + "numFiles": len(relativeFilePaths), + }).Debug("Listed replica files") + + w.Write(resBytes) + w.WriteHeader(http.StatusOK) + }) +} + +func (i *indices) postAddAsyncReplicationTargetNode() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpAsyncReplicationTargetNode.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + indexName, shardName := args[1], args[2] + schemaVersion, err := extractSchemaVersionFromUrlQuery(r.URL.Query()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + var targetNodeOverride additional.AsyncReplicationTargetNodeOverride + if err := json.NewDecoder(r.Body).Decode(&targetNodeOverride); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + err = i.shards.AddAsyncReplicationTargetNode(r.Context(), indexName, shardName, targetNodeOverride, schemaVersion) + if err != nil { + http.Error(w, err.Error(), 
http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) + }) +} + +func (i *indices) deleteAsyncReplicationTargetNode() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := i.regexpAsyncReplicationTargetNode.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + indexName, shardName := args[1], args[2] + + var targetNodeOverride additional.AsyncReplicationTargetNodeOverride + if err := json.NewDecoder(r.Body).Decode(&targetNodeOverride); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + err := i.shards.RemoveAsyncReplicationTargetNode(r.Context(), indexName, shardName, targetNodeOverride) + if err != nil { + // There's no easy to have a re-usable error type via all our interfaces to reach the shard/index + if strings.Contains(err.Error(), "shard not found") { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + if strings.Contains(err.Error(), fmt.Sprintf("local index %q not found", indexName)) { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_payloads.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_payloads.go new file mode 100644 index 0000000000000000000000000000000000000000..dbe89930bea40942dbc395e131817a17c16e8544 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_payloads.go @@ -0,0 +1,989 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package clusterapi + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "math" + "net/http" + "time" + + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/byteops" + "github.com/weaviate/weaviate/usecases/file" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/objects" +) + +var IndicesPayloads = indicesPayloads{} + +type indicesPayloads struct { + ErrorList errorListPayload + SingleObject singleObjectPayload + MergeDoc mergeDocPayload + ObjectList objectListPayload + VersionedObjectList versionedObjectListPayload + SearchResults searchResultsPayload + SearchParams searchParamsPayload + VectorDistanceParams vectorDistanceParamsPayload + VectorDistanceResults vectorDistanceResultsPayload + ReferenceList referenceListPayload + AggregationParams aggregationParamsPayload + AggregationResult aggregationResultPayload + FindUUIDsParams findUUIDsParamsPayload + FindUUIDsResults findUUIDsResultsPayload + BatchDeleteParams batchDeleteParamsPayload + BatchDeleteResults batchDeleteResultsPayload + GetShardQueueSizeParams getShardQueueSizeParamsPayload + GetShardQueueSizeResults getShardQueueSizeResultsPayload + GetShardStatusParams getShardStatusParamsPayload + GetShardStatusResults getShardStatusResultsPayload + UpdateShardStatusParams updateShardStatusParamsPayload + UpdateShardsStatusResults updateShardsStatusResultsPayload + ShardFiles shardFilesPayload + ShardFileMetadataResults shardFileMetadataResultsPayload + ShardFilesResults shardFilesResultsPayload + AsyncReplicationTargetNode asyncReplicationTargetNode +} + 
+type shardFileMetadataResultsPayload struct{} + +func (p shardFileMetadataResultsPayload) MIME() string { + return "application/vnd.weaviate.shardfilemetadataresults+json" +} + +func (p shardFileMetadataResultsPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +func (p shardFileMetadataResultsPayload) Unmarshal(in []byte) (file.FileMetadata, error) { + var md file.FileMetadata + if err := json.Unmarshal(in, &md); err != nil { + return file.FileMetadata{}, fmt.Errorf("unmarshal shard file metadata: %w", err) + } + return md, nil +} + +type shardFilesResultsPayload struct{} + +func (p shardFilesResultsPayload) MIME() string { + return "application/vnd.weaviate.shardfilesresults+json" +} + +func (p shardFilesResultsPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +func (p shardFilesResultsPayload) Unmarshal(in []byte) ([]string, error) { + var shardFiles []string + if err := json.Unmarshal(in, &shardFiles); err != nil { + return nil, fmt.Errorf("unmarshal shard files: %w", err) + } + return shardFiles, nil +} + +type asyncReplicationTargetNode struct{} + +func (p asyncReplicationTargetNode) MIME() string { + return "application/vnd.weaviate.asyncreplicationtargetnode+json" +} + +func (p asyncReplicationTargetNode) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +func (p asyncReplicationTargetNode) Marshal(in additional.AsyncReplicationTargetNodeOverride) ([]byte, error) { + return json.Marshal(in) +} + +type errorListPayload struct{} + +func (e errorListPayload) MIME() string { + return "application/vnd.weaviate.error.list+json" +} + +func (e errorListPayload) SetContentTypeHeader(w http.ResponseWriter) { + w.Header().Set("content-type", e.MIME()) +} + +func (e errorListPayload) CheckContentTypeHeader(r *http.Response) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == e.MIME() +} + +func (e errorListPayload) 
Marshal(in []error) ([]byte, error) { + converted := make([]interface{}, len(in)) + for i, err := range in { + if err == nil { + continue + } + + converted[i] = err.Error() + } + + return json.Marshal(converted) +} + +func (e errorListPayload) Unmarshal(in []byte) []error { + var msgs []interface{} + json.Unmarshal(in, &msgs) + + converted := make([]error, len(msgs)) + + for i, msg := range msgs { + if msg == nil { + continue + } + + converted[i] = errors.New(msg.(string)) + } + + return converted +} + +type singleObjectPayload struct{} + +func (p singleObjectPayload) MIME() string { + return "application/vnd.weaviate.storobj+octet-stream" +} + +func (p singleObjectPayload) SetContentTypeHeader(w http.ResponseWriter) { + w.Header().Set("content-type", p.MIME()) +} + +func (p singleObjectPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +func (p singleObjectPayload) CheckContentTypeHeader(r *http.Response) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +func (p singleObjectPayload) Marshal(in *storobj.Object) ([]byte, error) { + return in.MarshalBinary() +} + +func (p singleObjectPayload) Unmarshal(in []byte) (*storobj.Object, error) { + return storobj.FromBinary(in) +} + +type objectListPayload struct{} + +func (p objectListPayload) MIME() string { + return "application/vnd.weaviate.storobj.list+octet-stream" +} + +func (p objectListPayload) CheckContentTypeHeader(r *http.Response) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +func (p objectListPayload) SetContentTypeHeader(w http.ResponseWriter) { + w.Header().Set("content-type", p.MIME()) +} + +func (p objectListPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +func (p objectListPayload) Marshal(in []*storobj.Object) ([]byte, error) { + // NOTE: This implementation is not optimized for allocation efficiency, + // reserve 1024 byte per 
object which is rather arbitrary + out := make([]byte, 0, 1024*len(in)) + + reusableLengthBuf := make([]byte, 8) + for _, ind := range in { + if ind != nil { + bytes, err := ind.MarshalBinary() + if err != nil { + return nil, err + } + + length := uint64(len(bytes)) + binary.LittleEndian.PutUint64(reusableLengthBuf, length) + + out = append(out, reusableLengthBuf...) + out = append(out, bytes...) + } + } + + return out, nil +} + +func (p objectListPayload) Unmarshal(in []byte) ([]*storobj.Object, error) { + var out []*storobj.Object + + reusableLengthBuf := make([]byte, 8) + r := bytes.NewReader(in) + + for { + _, err := r.Read(reusableLengthBuf) + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, err + } + + payloadBytes := make([]byte, binary.LittleEndian.Uint64(reusableLengthBuf)) + _, err = r.Read(payloadBytes) + if err != nil { + return nil, err + } + + obj, err := storobj.FromBinary(payloadBytes) + if err != nil { + return nil, err + } + + out = append(out, obj) + } + + return out, nil +} + +type versionedObjectListPayload struct{} + +func (p versionedObjectListPayload) MIME() string { + return "application/vnd.weaviate.vobject.list+octet-stream" +} + +func (p versionedObjectListPayload) CheckContentTypeHeader(r *http.Response) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +func (p versionedObjectListPayload) SetContentTypeHeader(w http.ResponseWriter) { + w.Header().Set("content-type", p.MIME()) +} + +func (p versionedObjectListPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +func (p versionedObjectListPayload) CheckContentTypeHeaderReq(r *http.Request) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +func (p versionedObjectListPayload) Marshal(in []*objects.VObject) ([]byte, error) { + // NOTE: This implementation is not optimized for allocation efficiency, + // reserve 1024 byte per object which is rather 
arbitrary + out := make([]byte, 0, 1024*len(in)) + + reusableLengthBuf := make([]byte, 8) + for _, ind := range in { + objBytes, err := ind.MarshalBinary() + if err != nil { + return nil, err + } + + length := uint64(len(objBytes)) + binary.LittleEndian.PutUint64(reusableLengthBuf, length) + + out = append(out, reusableLengthBuf...) + out = append(out, objBytes...) + } + + return out, nil +} + +func (p versionedObjectListPayload) Unmarshal(in []byte) ([]*objects.VObject, error) { + var out []*objects.VObject + + reusableLengthBuf := make([]byte, 8) + r := bytes.NewReader(in) + + for { + _, err := r.Read(reusableLengthBuf) + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, err + } + + ln := binary.LittleEndian.Uint64(reusableLengthBuf) + payloadBytes := make([]byte, ln) + _, err = r.Read(payloadBytes) + if err != nil { + return nil, err + } + + var vobj objects.VObject + err = vobj.UnmarshalBinary(payloadBytes) + if err != nil { + return nil, err + } + + out = append(out, &vobj) + } + + return out, nil +} + +type mergeDocPayload struct{} + +func (p mergeDocPayload) MIME() string { + return "application/vnd.weaviate.mergedoc+json" +} + +func (p mergeDocPayload) SetContentTypeHeader(w http.ResponseWriter) { + w.Header().Set("content-type", p.MIME()) +} + +func (p mergeDocPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +func (p mergeDocPayload) CheckContentTypeHeaderReq(r *http.Request) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +func (p mergeDocPayload) Marshal(in objects.MergeDocument) ([]byte, error) { + // assumes that this type is fully json-marshable. Not the most + // bandwidth-efficient way, but this is unlikely to become a bottleneck. 
If it + // does, a custom binary marshaller might be more appropriate + return json.Marshal(in) +} + +func (p mergeDocPayload) Unmarshal(in []byte) (objects.MergeDocument, error) { + var mergeDoc objects.MergeDocument + err := json.Unmarshal(in, &mergeDoc) + return mergeDoc, err +} + +type vectorDistanceParamsPayload struct{} + +func (p vectorDistanceParamsPayload) Marshal(id strfmt.UUID, targets []string, searchVectors [][]float32, +) ([]byte, error) { + type params struct { + Id strfmt.UUID `json:"id"` + Targets []string `json:"targets"` + SearchVectors [][]float32 `json:"searchVectors"` + } + + par := params{id, targets, searchVectors} + return json.Marshal(par) +} + +func (p vectorDistanceParamsPayload) Unmarshal(in []byte) (strfmt.UUID, []string, [][]float32, error, +) { + type params struct { + Id strfmt.UUID `json:"id"` + Targets []string `json:"targets"` + SearchVectors [][]float32 `json:"searchVectors"` + } + var par params + err := json.Unmarshal(in, &par) + return par.Id, par.Targets, par.SearchVectors, err +} + +func (p vectorDistanceParamsPayload) MIME() string { + return "vnd.weaviate.vectordistanceparams+json" +} + +func (p vectorDistanceParamsPayload) CheckContentTypeHeaderReq(r *http.Request) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +func (p vectorDistanceParamsPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +type vectorDistanceResultsPayload struct{} + +func (p vectorDistanceResultsPayload) Unmarshal(in []byte) ([]float32, error) { + read := uint64(0) + + distsLength := binary.LittleEndian.Uint64(in[read : read+8]) + read += 8 + + dists := make([]float32, distsLength) + for i := range dists { + dists[i] = math.Float32frombits(binary.LittleEndian.Uint32(in[read : read+4])) + read += 4 + } + + return dists, nil +} + +func (p vectorDistanceResultsPayload) Marshal(dists []float32) ([]byte, error) { + buf := byteops.NewReadWriter(make([]byte, 
8+len(dists)*4)) + buf.WriteUint64(uint64(len(dists))) + + for _, dist := range dists { + buf.WriteUint32(math.Float32bits(dist)) + } + + return buf.Buffer, nil +} + +func (p vectorDistanceResultsPayload) MIME() string { + return "application/vnd.weaviate.vectordistanceresults+octet-stream" +} + +func (p vectorDistanceResultsPayload) SetContentTypeHeader(w http.ResponseWriter) { + w.Header().Set("content-type", p.MIME()) +} + +func (p vectorDistanceResultsPayload) CheckContentTypeHeader(r *http.Response) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +type searchParametersPayload struct { + SearchVector []float32 `json:"searchVector"` + TargetVector string `json:"targetVector"` + Distance float32 `json:"distance"` + Limit int `json:"limit"` + Filters *filters.LocalFilter `json:"filters"` + KeywordRanking *searchparams.KeywordRanking `json:"keywordRanking"` + Sort []filters.Sort `json:"sort"` + Cursor *filters.Cursor `json:"cursor"` + GroupBy *searchparams.GroupBy `json:"groupBy"` + Additional additional.Properties `json:"additional"` + SearchVectors []models.Vector `json:"searchVectors"` + TargetVectors []string `json:"TargetVectors"` + TargetCombination *dto.TargetCombination `json:"targetCombination"` + Properties []string `json:"properties"` +} + +func (p *searchParametersPayload) UnmarshalJSON(data []byte) error { + type alias searchParametersPayload + aux := &struct { + SearchVectors json.RawMessage `json:"searchVectors"` + *alias + }{ + alias: (*alias)(p), + } + + if err := json.Unmarshal(data, aux); err != nil { + return err + } + + // SearchVectors are nil + if aux.SearchVectors == nil { + return nil + } + + // Try unmarshaling as []float32 + var vectors [][]float32 + if err := json.Unmarshal(aux.SearchVectors, &vectors); err == nil { + if len(vectors) > 0 { + asVectors := make([]models.Vector, len(vectors)) + for i := range vectors { + asVectors[i] = vectors[i] + } + p.SearchVectors = asVectors + } + return nil + } + 
+ // Try unmarshaling as [][]float32 + var multiVectors [][][]float32 + if err := json.Unmarshal(aux.SearchVectors, &multiVectors); err == nil { + if len(multiVectors) > 0 { + asVectors := make([]models.Vector, len(multiVectors)) + for i := range multiVectors { + asVectors[i] = multiVectors[i] + } + p.SearchVectors = asVectors + } + return nil + } + + return fmt.Errorf("searchVectors: cannot unmarshal into either [][]float32 or [][][]float32: %v", aux.SearchVectors) +} + +type searchParamsPayload struct{} + +func (p searchParamsPayload) Marshal(vectors []models.Vector, targetVectors []string, distance float32, limit int, + filter *filters.LocalFilter, keywordRanking *searchparams.KeywordRanking, + sort []filters.Sort, cursor *filters.Cursor, groupBy *searchparams.GroupBy, + addP additional.Properties, targetCombination *dto.TargetCombination, properties []string, +) ([]byte, error) { + var vector []float32 + var targetVector string + // BC with pre 1.26 + if len(vectors) == 1 { + // we only add a vector here only if it's []float32 vector to be backward compatible with pre v1.26 versions + if v, ok := vectors[0].([]float32); ok { + vector = v + targetVector = targetVectors[0] + } + } + + par := searchParametersPayload{vector, targetVector, distance, limit, filter, keywordRanking, sort, cursor, groupBy, addP, vectors, targetVectors, targetCombination, properties} + return json.Marshal(par) +} + +func (p searchParamsPayload) Unmarshal(in []byte) ([]models.Vector, []string, float32, int, + *filters.LocalFilter, *searchparams.KeywordRanking, []filters.Sort, + *filters.Cursor, *searchparams.GroupBy, additional.Properties, *dto.TargetCombination, []string, error, +) { + var par searchParametersPayload + err := json.Unmarshal(in, &par) + + if len(par.SearchVector) > 0 { + par.SearchVectors = []models.Vector{par.SearchVector} + par.TargetVectors = []string{par.TargetVector} + } + + return par.SearchVectors, par.TargetVectors, par.Distance, par.Limit, + par.Filters, 
par.KeywordRanking, par.Sort, par.Cursor, par.GroupBy, par.Additional, par.TargetCombination, par.Properties, err +} + +func (p searchParamsPayload) MIME() string { + return "vnd.weaviate.searchparams+json" +} + +func (p searchParamsPayload) CheckContentTypeHeaderReq(r *http.Request) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +func (p searchParamsPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +type searchResultsPayload struct{} + +func (p searchResultsPayload) Unmarshal(in []byte) ([]*storobj.Object, []float32, error) { + read := uint64(0) + + objsLength := binary.LittleEndian.Uint64(in[read : read+8]) + read += 8 + + objs, err := IndicesPayloads.ObjectList.Unmarshal(in[read : read+objsLength]) + if err != nil { + return nil, nil, err + } + read += objsLength + + distsLength := binary.LittleEndian.Uint64(in[read : read+8]) + read += 8 + + dists := make([]float32, distsLength) + for i := range dists { + dists[i] = math.Float32frombits(binary.LittleEndian.Uint32(in[read : read+4])) + read += 4 + } + + return objs, dists, nil +} + +func (p searchResultsPayload) Marshal(objs []*storobj.Object, + dists []float32, +) ([]byte, error) { + reusableLengthBuf := make([]byte, 8) + var out []byte + objsBytes, err := IndicesPayloads.ObjectList.Marshal(objs) + if err != nil { + return nil, err + } + + objsLength := uint64(len(objsBytes)) + binary.LittleEndian.PutUint64(reusableLengthBuf, objsLength) + + out = append(out, reusableLengthBuf...) + out = append(out, objsBytes...) + + distsLength := uint64(len(dists)) + binary.LittleEndian.PutUint64(reusableLengthBuf, distsLength) + out = append(out, reusableLengthBuf...) + + distsBuf := make([]byte, distsLength*4) + for i, dist := range dists { + distUint32 := math.Float32bits(dist) + binary.LittleEndian.PutUint32(distsBuf[(i*4):((i+1)*4)], distUint32) + } + out = append(out, distsBuf...) 
+ + return out, nil +} + +func (p searchResultsPayload) MIME() string { + return "application/vnd.weaviate.shardsearchresults+octet-stream" +} + +func (p searchResultsPayload) SetContentTypeHeader(w http.ResponseWriter) { + w.Header().Set("content-type", p.MIME()) +} + +func (p searchResultsPayload) CheckContentTypeHeader(r *http.Response) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +type referenceListPayload struct{} + +func (p referenceListPayload) MIME() string { + return "application/vnd.weaviate.references.list+json" +} + +func (p referenceListPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +func (p referenceListPayload) CheckContentTypeHeaderReq(r *http.Request) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +func (p referenceListPayload) Marshal(in objects.BatchReferences) ([]byte, error) { + // assumes that this type is fully json-marshable. Not the most + // bandwidth-efficient way, but this is unlikely to become a bottleneck. If it + // does, a custom binary marshaller might be more appropriate + return json.Marshal(in) +} + +func (p referenceListPayload) Unmarshal(in []byte) (objects.BatchReferences, error) { + var out objects.BatchReferences + err := json.Unmarshal(in, &out) + return out, err +} + +type aggregationParamsPayload struct{} + +func (p aggregationParamsPayload) Marshal(params aggregation.Params) ([]byte, error) { + // assumes that this type is fully json-marshable. Not the most + // bandwidth-efficient way, but this is unlikely to become a bottleneck. 
If it + // does, a custom binary marshaller might be more appropriate + return json.Marshal(params) +} + +func (p aggregationParamsPayload) Unmarshal(in []byte) (aggregation.Params, error) { + var out aggregation.Params + err := json.Unmarshal(in, &out) + return out, err +} + +func (p aggregationParamsPayload) MIME() string { + return "application/vnd.weaviate.aggregations.params+json" +} + +func (p aggregationParamsPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +func (p aggregationParamsPayload) CheckContentTypeHeaderReq(r *http.Request) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +type aggregationResultPayload struct{} + +func (p aggregationResultPayload) MIME() string { + return "application/vnd.weaviate.aggregations.result+json" +} + +func (p aggregationResultPayload) CheckContentTypeHeader(res *http.Response) (string, bool) { + ct := res.Header.Get("content-type") + return ct, ct == p.MIME() +} + +func (p aggregationResultPayload) SetContentTypeHeader(w http.ResponseWriter) { + w.Header().Set("content-type", p.MIME()) +} + +func (p aggregationResultPayload) Marshal(in *aggregation.Result) ([]byte, error) { + // assumes that this type is fully json-marshable. Not the most + // bandwidth-efficient way, but this is unlikely to become a bottleneck. 
If it + // does, a custom binary marshaller might be more appropriate + return json.Marshal(in) +} + +func (p aggregationResultPayload) Unmarshal(in []byte) (*aggregation.Result, error) { + var out aggregation.Result + err := json.Unmarshal(in, &out) + return &out, err +} + +type findUUIDsParamsPayload struct{} + +func (p findUUIDsParamsPayload) Marshal(filter *filters.LocalFilter) ([]byte, error) { + type params struct { + Filters *filters.LocalFilter `json:"filters"` + } + + par := params{filter} + return json.Marshal(par) +} + +func (p findUUIDsParamsPayload) Unmarshal(in []byte) (*filters.LocalFilter, error) { + type findUUIDsParametersPayload struct { + Filters *filters.LocalFilter `json:"filters"` + } + var par findUUIDsParametersPayload + err := json.Unmarshal(in, &par) + return par.Filters, err +} + +func (p findUUIDsParamsPayload) MIME() string { + return "vnd.weaviate.finduuidsparams+json" +} + +func (p findUUIDsParamsPayload) CheckContentTypeHeaderReq(r *http.Request) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +func (p findUUIDsParamsPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +type findUUIDsResultsPayload struct{} + +func (p findUUIDsResultsPayload) Unmarshal(in []byte) ([]strfmt.UUID, error) { + var out []strfmt.UUID + err := json.Unmarshal(in, &out) + return out, err +} + +func (p findUUIDsResultsPayload) Marshal(in []strfmt.UUID) ([]byte, error) { + return json.Marshal(in) +} + +func (p findUUIDsResultsPayload) MIME() string { + return "application/vnd.weaviate.findUUIDsresults+octet-stream" +} + +func (p findUUIDsResultsPayload) SetContentTypeHeader(w http.ResponseWriter) { + w.Header().Set("content-type", p.MIME()) +} + +func (p findUUIDsResultsPayload) CheckContentTypeHeader(r *http.Response) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +type batchDeleteParamsPayload struct{} + +func (p 
batchDeleteParamsPayload) Marshal(uuids []strfmt.UUID, deletionTime time.Time, dryRun bool) ([]byte, error) { + type params struct { + UUIDs []strfmt.UUID `json:"uuids"` + DeletionTimeUnixMilli int64 `json:"deletionTimeUnixMilli"` + DryRun bool `json:"dryRun"` + } + + par := params{uuids, deletionTime.UnixMilli(), dryRun} + return json.Marshal(par) +} + +func (p batchDeleteParamsPayload) Unmarshal(in []byte) ([]strfmt.UUID, time.Time, bool, error) { + type batchDeleteParametersPayload struct { + UUIDs []strfmt.UUID `json:"uuids"` + DeletionTimeUnixMilli int64 `json:"deletionTimeUnixMilli"` + DryRun bool `json:"dryRun"` + } + var par batchDeleteParametersPayload + err := json.Unmarshal(in, &par) + return par.UUIDs, time.UnixMilli(par.DeletionTimeUnixMilli), par.DryRun, err +} + +func (p batchDeleteParamsPayload) MIME() string { + return "vnd.weaviate.batchdeleteparams+json" +} + +func (p batchDeleteParamsPayload) CheckContentTypeHeaderReq(r *http.Request) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +func (p batchDeleteParamsPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +type batchDeleteResultsPayload struct{} + +func (p batchDeleteResultsPayload) Unmarshal(in []byte) (objects.BatchSimpleObjects, error) { + var out objects.BatchSimpleObjects + err := json.Unmarshal(in, &out) + return out, err +} + +func (p batchDeleteResultsPayload) Marshal(in objects.BatchSimpleObjects) ([]byte, error) { + return json.Marshal(in) +} + +func (p batchDeleteResultsPayload) MIME() string { + return "application/vnd.weaviate.batchdeleteresults+octet-stream" +} + +func (p batchDeleteResultsPayload) SetContentTypeHeader(w http.ResponseWriter) { + w.Header().Set("content-type", p.MIME()) +} + +func (p batchDeleteResultsPayload) CheckContentTypeHeader(r *http.Response) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +type getShardQueueSizeParamsPayload 
struct{} + +func (p getShardQueueSizeParamsPayload) MIME() string { + return "vnd.weaviate.getshardqueuesizeparams+json" +} + +func (p getShardQueueSizeParamsPayload) CheckContentTypeHeaderReq(r *http.Request) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +func (p getShardQueueSizeParamsPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +type getShardQueueSizeResultsPayload struct{} + +func (p getShardQueueSizeResultsPayload) Unmarshal(in []byte) (int64, error) { + var out int64 + err := json.Unmarshal(in, &out) + return out, err +} + +func (p getShardQueueSizeResultsPayload) Marshal(in int64) ([]byte, error) { + return json.Marshal(in) +} + +func (p getShardQueueSizeResultsPayload) MIME() string { + return "application/vnd.weaviate.getshardqueuesizeresults+octet-stream" +} + +func (p getShardQueueSizeResultsPayload) SetContentTypeHeader(w http.ResponseWriter) { + w.Header().Set("content-type", p.MIME()) +} + +func (p getShardQueueSizeResultsPayload) CheckContentTypeHeader(r *http.Response) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +type getShardStatusParamsPayload struct{} + +func (p getShardStatusParamsPayload) MIME() string { + return "vnd.weaviate.getshardstatusparams+json" +} + +func (p getShardStatusParamsPayload) CheckContentTypeHeaderReq(r *http.Request) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +func (p getShardStatusParamsPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +type getShardStatusResultsPayload struct{} + +func (p getShardStatusResultsPayload) Unmarshal(in []byte) (string, error) { + var out string + err := json.Unmarshal(in, &out) + return out, err +} + +func (p getShardStatusResultsPayload) Marshal(in string) ([]byte, error) { + return json.Marshal(in) +} + +func (p getShardStatusResultsPayload) MIME() string { + return 
"application/vnd.weaviate.getshardstatusresults+octet-stream" +} + +func (p getShardStatusResultsPayload) SetContentTypeHeader(w http.ResponseWriter) { + w.Header().Set("content-type", p.MIME()) +} + +func (p getShardStatusResultsPayload) CheckContentTypeHeader(r *http.Response) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +type updateShardStatusParamsPayload struct{} + +func (p updateShardStatusParamsPayload) Marshal(targetStatus string) ([]byte, error) { + type params struct { + TargetStatus string `json:"targetStatus"` + } + + par := params{targetStatus} + return json.Marshal(par) +} + +func (p updateShardStatusParamsPayload) Unmarshal(in []byte) (string, error) { + type updateShardStatusParametersPayload struct { + TargetStatus string `json:"targetStatus"` + } + var par updateShardStatusParametersPayload + err := json.Unmarshal(in, &par) + return par.TargetStatus, err +} + +func (p updateShardStatusParamsPayload) MIME() string { + return "vnd.weaviate.updateshardstatusparams+json" +} + +func (p updateShardStatusParamsPayload) CheckContentTypeHeaderReq(r *http.Request) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +func (p updateShardStatusParamsPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +type updateShardsStatusResultsPayload struct{} + +func (p updateShardsStatusResultsPayload) MIME() string { + return "application/vnd.weaviate.updateshardstatusresults+octet-stream" +} + +func (p updateShardsStatusResultsPayload) SetContentTypeHeader(w http.ResponseWriter) { + w.Header().Set("content-type", p.MIME()) +} + +func (p updateShardsStatusResultsPayload) CheckContentTypeHeader(r *http.Response) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} + +type shardFilesPayload struct{} + +func (p shardFilesPayload) MIME() string { + return "application/vnd.weaviate.indexfiles+octet-stream" +} + +func (p 
shardFilesPayload) SetContentTypeHeaderReq(r *http.Request) { + r.Header.Set("content-type", p.MIME()) +} + +func (p shardFilesPayload) CheckContentTypeHeaderReq(r *http.Request) (string, bool) { + ct := r.Header.Get("content-type") + return ct, ct == p.MIME() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_payloads_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_payloads_test.go new file mode 100644 index 0000000000000000000000000000000000000000..70935778bbc3b626f4aa139e0fe18d33fcc2598d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_payloads_test.go @@ -0,0 +1,279 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package clusterapi + +import ( + "encoding/json" + "testing" + "time" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/searchparams" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/storobj" +) + +func Test_objectListPayload_Marshal(t *testing.T) { + now := time.Now() + vec1 := []float32{1, 2, 3, 4, 5} + vec2 := []float32{10, 20, 30, 40, 50} + id1 := strfmt.UUID("c6f85bf5-c3b7-4c1d-bd51-e899f9605336") + id2 := strfmt.UUID("88750a99-a72d-46c2-a582-89f02654391d") + + objs := []*storobj.Object{ + { + MarshallerVersion: 1, + Object: models.Object{ + ID: id1, + Class: "SomeClass", + CreationTimeUnix: now.UnixMilli(), + LastUpdateTimeUnix: now.Add(time.Hour).UnixMilli(), // 
time-traveling ;) + Properties: map[string]interface{}{ + "propA": "this is prop A", + "propB": "this is prop B", + "someDate": now.Format(time.RFC3339Nano), + "aNumber": 1e+06, + "crossRef": models.MultipleRef{ + crossref.NewLocalhost("OtherClass", id1). + SingleRef(), + }, + }, + Additional: map[string]interface{}{ + "score": 0.055465422484, + }, + }, + Vector: vec1, + VectorLen: 5, + }, + nil, + { + MarshallerVersion: 1, + Object: models.Object{ + ID: id2, + Class: "SomeClass", + CreationTimeUnix: now.UnixMilli(), + LastUpdateTimeUnix: now.Add(time.Hour).UnixMilli(), // time-traveling ;) + Properties: map[string]interface{}{ + "propA": "this is prop A", + "propB": "this is prop B", + "someDate": now.Format(time.RFC3339Nano), + "aNumber": 1e+06, + "crossRef": models.MultipleRef{ + crossref.NewLocalhost("OtherClass", id2). + SingleRef(), + }, + }, + Additional: map[string]interface{}{ + "score": 0.055465422484, + }, + }, + Vector: vec2, + VectorLen: 5, + }, + } + + payload := objectListPayload{} + b, err := payload.Marshal(objs) + require.Nil(t, err) + + received, err := payload.Unmarshal(b) + require.Nil(t, err) + assert.Len(t, received, 2) + assert.EqualValues(t, objs[0].Object, received[0].Object) + assert.EqualValues(t, objs[0].ID(), received[0].ID()) + assert.EqualValues(t, objs[2].Object, received[1].Object) + assert.EqualValues(t, objs[2].ID(), received[1].ID()) +} + +type searchParamsPayloadOld struct{} + +func (p searchParamsPayloadOld) Marshal(vector []float32, targetVector string, limit int, + filter *filters.LocalFilter, keywordRanking *searchparams.KeywordRanking, + sort []filters.Sort, cursor *filters.Cursor, groupBy *searchparams.GroupBy, + addP additional.Properties, +) ([]byte, error) { + type params struct { + SearchVector []float32 `json:"searchVector"` + TargetVector string `json:"targetVector"` + Limit int `json:"limit"` + Filters *filters.LocalFilter `json:"filters"` + KeywordRanking *searchparams.KeywordRanking `json:"keywordRanking"` + Sort 
[]filters.Sort `json:"sort"` + Cursor *filters.Cursor `json:"cursor"` + GroupBy *searchparams.GroupBy `json:"groupBy"` + Additional additional.Properties `json:"additional"` + } + + par := params{vector, targetVector, limit, filter, keywordRanking, sort, cursor, groupBy, addP} + return json.Marshal(par) +} + +func (p searchParamsPayloadOld) Unmarshal(in []byte) ([]float32, string, float32, int, + *filters.LocalFilter, *searchparams.KeywordRanking, []filters.Sort, + *filters.Cursor, *searchparams.GroupBy, additional.Properties, error, +) { + type searchParametersPayload struct { + SearchVector []float32 `json:"searchVector"` + TargetVector string `json:"targetVector"` + Distance float32 `json:"distance"` + Limit int `json:"limit"` + Filters *filters.LocalFilter `json:"filters"` + KeywordRanking *searchparams.KeywordRanking `json:"keywordRanking"` + Sort []filters.Sort `json:"sort"` + Cursor *filters.Cursor `json:"cursor"` + GroupBy *searchparams.GroupBy `json:"groupBy"` + Additional additional.Properties `json:"additional"` + } + var par searchParametersPayload + err := json.Unmarshal(in, &par) + return par.SearchVector, par.TargetVector, par.Distance, par.Limit, + par.Filters, par.KeywordRanking, par.Sort, par.Cursor, par.GroupBy, par.Additional, err +} + +// This tests the backward compatibility of the searchParamsPayload with the old version in 1.25 and before (copied from the old code above) +func TestBackwardCompatibilitySearch(t *testing.T) { + payload := searchParamsPayload{} + tests := []struct { + SearchVectors []models.Vector + Targets []string + compatible bool + }{ + { + SearchVectors: []models.Vector{[]float32{1, 2, 3}, []float32{4, 5, 6}}, + Targets: []string{"target1", "target2"}, + compatible: false, + }, + { + SearchVectors: []models.Vector{[]float32{1, 2, 3}}, + Targets: []string{"target1"}, + compatible: true, + }, + } + + for _, tt := range tests { + t.Run("test", func(t *testing.T) { + b126, err := payload.Marshal(tt.SearchVectors, tt.Targets, 
0.7, 10, nil, nil, nil, nil, nil, additional.Properties{}, nil, nil) + require.Nil(t, err) + + vecs, targets, _, _, _, _, _, _, _, _, _, _, err := payload.Unmarshal(b126) + require.Nil(t, err) + assert.Equal(t, tt.SearchVectors, vecs) + assert.Equal(t, tt.Targets, targets) + + if tt.compatible { + payloadOld := searchParamsPayloadOld{} + b125, err := payloadOld.Marshal(tt.SearchVectors[0].([]float32), tt.Targets[0], 10, nil, nil, nil, nil, nil, additional.Properties{}) + require.Nil(t, err) + vecsOld, targetsOld, _, _, _, _, _, _, _, _, err := payloadOld.Unmarshal(b126) + require.Nil(t, err) + assert.Equal(t, tt.SearchVectors[0], vecsOld) + assert.Equal(t, tt.Targets[0], targetsOld) + + vecs, targets, _, _, _, _, _, _, _, _, _, _, err := payload.Unmarshal(b125) + require.Nil(t, err) + assert.Equal(t, tt.SearchVectors, vecs) + assert.Equal(t, tt.Targets, targets) + + } + }) + } +} + +func Test_searchParametersPayload_Unmarshal(t *testing.T) { + tests := []struct { + name string + payload string + isMultiVector bool + }{ + { + name: "regular vectors", + payload: `{ + "limit": 10, + "TargetVectors": ["vector1", "vector2"], + "searchVectors": [[1.0, 2.0], [3.0, 4.0]] + }`, + isMultiVector: false, + }, + { + name: "multi vectors", + payload: `{ + "limit": 10, + "TargetVectors": ["vector1", "vector2"], + "searchVectors": [[[1.0, 2.0], [3.0, 4.0]], [[11.0], [33.0]]] + }`, + isMultiVector: true, + }, + { + name: "empty search vectors", + payload: `{ + "searchVector": [1,2,3], + "targetVector": "target1", + "limit": 10, + "filters": null, + "keywordRanking": null, + "sort": null, + "cursor": null, + "groupBy": null, + "additional": { + "classification": false, + "refMeta": false, + "vector": false, + "vectors": null, + "certainty": false, + "id": false, + "creationTimeUnix": false, + "lastUpdateTimeUnix": false, + "moduleParams": null, + "distance": false, + "score": false, + "explainScore": false, + "isConsistent": false, + "group": false, + "noProps": true + } + }`, + 
isMultiVector: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var par searchParametersPayload + err := json.Unmarshal([]byte(tt.payload), &par) + require.NoError(t, err) + if par.SearchVectors != nil { + require.Len(t, par.SearchVectors, 2) + if tt.isMultiVector { + for _, vec := range par.SearchVectors { + vector, ok := vec.([][]float32) + assert.True(t, ok) + assert.True(t, len(vector) > 0) + } + } else { + for _, vec := range par.SearchVectors { + vector, ok := vec.([]float32) + assert.True(t, ok) + assert.True(t, len(vector) > 0) + } + } + } else { + require.NotNil(t, par.Additional) + assert.True(t, par.Additional.NoProps) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_replicas.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_replicas.go new file mode 100644 index 0000000000000000000000000000000000000000..8a0f26a76df3c6451100423990834e092f3739df --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_replicas.go @@ -0,0 +1,836 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package clusterapi + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "regexp" + "strconv" + "time" + + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/cluster/router/types" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/replica" + "github.com/weaviate/weaviate/usecases/replica/hashtree" +) + +type replicator interface { + // Write endpoints + ReplicateObject(ctx context.Context, indexName, shardName, + requestID string, object *storobj.Object, schemaVersion uint64) replica.SimpleResponse + ReplicateObjects(ctx context.Context, indexName, shardName, + requestID string, objects []*storobj.Object, schemaVersion uint64) replica.SimpleResponse + ReplicateUpdate(ctx context.Context, indexName, shardName, + requestID string, mergeDoc *objects.MergeDocument, schemaVersion uint64) replica.SimpleResponse + ReplicateDeletion(ctx context.Context, indexName, shardName, + requestID string, uuid strfmt.UUID, deletionTime time.Time, schemaVersion uint64) replica.SimpleResponse + ReplicateDeletions(ctx context.Context, indexName, shardName, + requestID string, uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64) replica.SimpleResponse + ReplicateReferences(ctx context.Context, indexName, shardName, + requestID string, refs []objects.BatchReference, schemaVersion uint64) replica.SimpleResponse + CommitReplication(indexName, shardName, requestID string) interface{} + AbortReplication(indexName, shardName, requestID string) interface{} + OverwriteObjects(ctx context.Context, index, shard string, + vobjects []*objects.VObject) ([]types.RepairResponse, error) + // Read endpoints + FetchObject(ctx context.Context, indexName, + shardName string, id strfmt.UUID) (replica.Replica, error) + 
FetchObjects(ctx context.Context, class, + shardName string, ids []strfmt.UUID) ([]replica.Replica, error) + DigestObjects(ctx context.Context, class, shardName string, + ids []strfmt.UUID) (result []types.RepairResponse, err error) + DigestObjectsInRange(ctx context.Context, class, shardName string, + initialUUID, finalUUID strfmt.UUID, limit int) (result []types.RepairResponse, err error) + HashTreeLevel(ctx context.Context, index, shard string, + level int, discriminant *hashtree.Bitset) (digests []hashtree.Digest, err error) +} + +type replicatedIndices struct { + shards replicator + auth auth + // maintenanceModeEnabled is an experimental feature to allow the system to be + // put into a maintenance mode where all replicatedIndices requests just return a 418 + maintenanceModeEnabled func() bool +} + +var ( + regxObject = regexp.MustCompile(`\/replicas\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects\/(` + ob + `)(\/[0-9]{1,64})?`) + regxOverwriteObjects = regexp.MustCompile(`\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects/_overwrite`) + regxObjectsDigest = regexp.MustCompile(`\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects/_digest`) + regexObjectsDigestsInRange = regexp.MustCompile(`\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects/digestsInRange`) + regxHashTreeLevel = regexp.MustCompile(`\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects\/hashtree\/(` + l + `)`) + regxObjects = regexp.MustCompile(`\/replicas\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects`) + regxReferences = regexp.MustCompile(`\/replicas\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `)\/objects/references`) + regxCommitPhase = regexp.MustCompile(`\/replicas\/indices\/(` + cl + `)` + + `\/shards\/(` + sh + `):(commit|abort)`) +) + +func NewReplicatedIndices(shards replicator, auth auth, maintenanceModeEnabled func() bool) *replicatedIndices { + return &replicatedIndices{ + shards: shards, + auth: auth, + 
maintenanceModeEnabled: maintenanceModeEnabled, + } +} + +func (i *replicatedIndices) Indices() http.Handler { + return i.auth.handleFunc(i.indicesHandler()) +} + +func (i *replicatedIndices) indicesHandler() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + if i.maintenanceModeEnabled() { + http.Error(w, "418 Maintenance mode", http.StatusTeapot) + return + } + // NOTE if you update any of these handler methods/paths, also update the indices_replicas_test.go + // TestMaintenanceModeReplicatedIndices test to include the new methods/paths. + switch { + case regxObjectsDigest.MatchString(path): + if r.Method == http.MethodGet { + i.getObjectsDigest().ServeHTTP(w, r) + return + } + + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + case regexObjectsDigestsInRange.MatchString(path): + if r.Method == http.MethodPost { + i.getObjectsDigestsInRange().ServeHTTP(w, r) + return + } + + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + case regxHashTreeLevel.MatchString(path): + if r.Method == http.MethodPost { + i.getHashTreeLevel().ServeHTTP(w, r) + return + } + + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + case regxOverwriteObjects.MatchString(path): + if r.Method == http.MethodPut { + i.putOverwriteObjects().ServeHTTP(w, r) + return + } + + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + case regxObject.MatchString(path): + if r.Method == http.MethodDelete { + i.deleteObject().ServeHTTP(w, r) + return + } + + if r.Method == http.MethodPatch { + i.patchObject().ServeHTTP(w, r) + return + } + + if r.Method == http.MethodGet { + i.getObject().ServeHTTP(w, r) + return + } + + if regxReferences.MatchString(path) { + if r.Method == http.MethodPost { + i.postRefs().ServeHTTP(w, r) + return + } + } + + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + + case 
regxObjects.MatchString(path): + if r.Method == http.MethodGet { + i.getObjectsMulti().ServeHTTP(w, r) + return + } + + if r.Method == http.MethodPost { + i.postObject().ServeHTTP(w, r) + return + } + + if r.Method == http.MethodDelete { + i.deleteObjects().ServeHTTP(w, r) + return + } + + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + + case regxCommitPhase.MatchString(path): + if r.Method == http.MethodPost { + i.executeCommitPhase().ServeHTTP(w, r) + return + } + + http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed) + return + + default: + http.NotFound(w, r) + return + } + } +} + +func (i *replicatedIndices) executeCommitPhase() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := regxCommitPhase.FindStringSubmatch(r.URL.Path) + if len(args) != 4 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + requestID := r.URL.Query().Get(replica.RequestKey) + if requestID == "" { + http.Error(w, "request_id not provided", http.StatusBadRequest) + return + } + + index, shard, cmd := args[1], args[2], args[3] + + var resp interface{} + + switch cmd { + case "commit": + resp = i.shards.CommitReplication(index, shard, requestID) + case "abort": + resp = i.shards.AbortReplication(index, shard, requestID) + default: + http.Error(w, fmt.Sprintf("unrecognized command: %s", cmd), http.StatusNotImplemented) + return + } + if resp == nil { // could not find request with specified id + http.Error(w, "request not found", http.StatusNotFound) + return + } + b, err := json.Marshal(resp) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal response: %+v, error: %v", resp, err), + http.StatusInternalServerError) + return + } + w.Write(b) + }) +} + +func (i *replicatedIndices) postObject() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := regxObjects.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + 
http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + requestID := r.URL.Query().Get(replica.RequestKey) + if requestID == "" { + http.Error(w, "request_id not provided", http.StatusBadRequest) + return + } + + schemaVersion, err := extractSchemaVersionFromUrlQuery(r.URL.Query()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + + ct := r.Header.Get("content-type") + + switch ct { + + case IndicesPayloads.SingleObject.MIME(): + i.postObjectSingle(w, r, index, shard, requestID, schemaVersion) + return + case IndicesPayloads.ObjectList.MIME(): + i.postObjectBatch(w, r, index, shard, requestID, schemaVersion) + return + default: + http.Error(w, "415 Unsupported Media Type", http.StatusUnsupportedMediaType) + return + } + }) +} + +func (i *replicatedIndices) patchObject() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := regxObjects.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + requestID := r.URL.Query().Get(replica.RequestKey) + if requestID == "" { + http.Error(w, "request_id not provided", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + bodyBytes, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + mergeDoc, err := IndicesPayloads.MergeDoc.Unmarshal(bodyBytes) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + schemaVersion, err := extractSchemaVersionFromUrlQuery(r.URL.Query()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + resp := i.shards.ReplicateUpdate(r.Context(), index, shard, requestID, &mergeDoc, schemaVersion) + if localIndexNotReady(resp) { + http.Error(w, resp.FirstError().Error(), http.StatusServiceUnavailable) + return + } + + b, err := 
json.Marshal(resp) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal response: %+v, error: %v", resp, err), + http.StatusInternalServerError) + return + } + + w.Write(b) + }) +} + +func (i *replicatedIndices) getObjectsDigest() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := regxObjectsDigest.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + reqPayload, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read request body: "+err.Error(), http.StatusInternalServerError) + return + } + + var ids []strfmt.UUID + if err := json.Unmarshal(reqPayload, &ids); err != nil { + http.Error(w, "unmarshal digest objects params from json: "+err.Error(), + http.StatusBadRequest) + return + } + + results, err := i.shards.DigestObjects(r.Context(), index, shard, ids) + if err != nil && errors.As(err, &enterrors.ErrUnprocessable{}) { + http.Error(w, "digest objects: "+err.Error(), + http.StatusUnprocessableEntity) + return + } + + if err != nil { + http.Error(w, "digest objects: "+err.Error(), + http.StatusInternalServerError) + return + } + + resBytes, err := json.Marshal(results) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Write(resBytes) + }) +} + +func (i *replicatedIndices) getObjectsDigestsInRange() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := regexObjectsDigestsInRange.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + reqPayload, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read request body: "+err.Error(), http.StatusInternalServerError) + return + } + + var rangeReq replica.DigestObjectsInRangeReq + if err := 
json.Unmarshal(reqPayload, &rangeReq); err != nil { + http.Error(w, "unmarshal digest objects in token range params from json: "+err.Error(), + http.StatusBadRequest) + return + } + + digests, err := i.shards.DigestObjectsInRange(r.Context(), + index, shard, rangeReq.InitialUUID, rangeReq.FinalUUID, rangeReq.Limit) + if err != nil { + http.Error(w, "digest objects in range: "+err.Error(), + http.StatusInternalServerError) + return + } + + resBytes, err := json.Marshal(replica.DigestObjectsInRangeResp{ + Digests: digests, + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Write(resBytes) + }) +} + +func (i *replicatedIndices) getHashTreeLevel() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := regxHashTreeLevel.FindStringSubmatch(r.URL.Path) + if len(args) != 4 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard, level := args[1], args[2], args[3] + + l, err := strconv.Atoi(level) + if err != nil { + http.Error(w, "unmarshal hashtree level params: "+err.Error(), http.StatusInternalServerError) + return + } + + defer r.Body.Close() + reqPayload, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read request body: "+err.Error(), http.StatusInternalServerError) + return + } + + var discriminant hashtree.Bitset + if err := discriminant.Unmarshal(reqPayload); err != nil { + http.Error(w, "unmarshal hashtree level params from json: "+err.Error(), + http.StatusBadRequest) + return + } + + results, err := i.shards.HashTreeLevel(r.Context(), index, shard, l, &discriminant) + if err != nil { + http.Error(w, "hashtree level: "+err.Error(), + http.StatusInternalServerError) + return + } + + resBytes, err := json.Marshal(results) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Write(resBytes) + }) +} + +func (i *replicatedIndices) putOverwriteObjects() http.Handler { + return 
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := regxOverwriteObjects.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + reqPayload, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read request body: "+err.Error(), http.StatusInternalServerError) + return + } + + vobjs, err := IndicesPayloads.VersionedObjectList.Unmarshal(reqPayload) + if err != nil { + http.Error(w, "unmarshal overwrite objects params from json: "+err.Error(), + http.StatusBadRequest) + return + } + + results, err := i.shards.OverwriteObjects(r.Context(), index, shard, vobjs) + if err != nil { + http.Error(w, "overwrite objects: "+err.Error(), + http.StatusInternalServerError) + return + } + + resBytes, err := json.Marshal(results) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Write(resBytes) + }) +} + +func (i *replicatedIndices) deleteObject() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := regxObject.FindStringSubmatch(r.URL.Path) + if len(args) != 5 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + requestID := r.URL.Query().Get(replica.RequestKey) + if requestID == "" { + http.Error(w, "request_id not provided", http.StatusBadRequest) + return + } + + index, shard, id := args[1], args[2], args[3] + + var deletionTime time.Time + + if args[4] != "" { + deletionTimeUnixMilli, err := strconv.ParseInt(args[4][1:], 10, 64) + if err != nil { + http.Error(w, "invalid URI", http.StatusBadRequest) + } + deletionTime = time.UnixMilli(deletionTimeUnixMilli) + } + + defer r.Body.Close() + + schemaVersion, err := extractSchemaVersionFromUrlQuery(r.URL.Query()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + resp := i.shards.ReplicateDeletion(r.Context(), index, shard, requestID, 
strfmt.UUID(id), deletionTime, schemaVersion) + if localIndexNotReady(resp) { + http.Error(w, resp.FirstError().Error(), http.StatusServiceUnavailable) + return + } + + b, err := json.Marshal(resp) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal response: %+v, error: %v", resp, err), + http.StatusInternalServerError) + return + } + w.Write(b) + }) +} + +func (i *replicatedIndices) deleteObjects() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := regxObjects.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + requestID := r.URL.Query().Get(replica.RequestKey) + if requestID == "" { + http.Error(w, "request_id not provided", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + bodyBytes, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + defer r.Body.Close() + + uuids, deletionTimeUnix, dryRun, err := IndicesPayloads.BatchDeleteParams.Unmarshal(bodyBytes) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + schemaVersion, err := extractSchemaVersionFromUrlQuery(r.URL.Query()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + resp := i.shards.ReplicateDeletions(r.Context(), index, shard, requestID, uuids, deletionTimeUnix, dryRun, schemaVersion) + if localIndexNotReady(resp) { + http.Error(w, resp.FirstError().Error(), http.StatusServiceUnavailable) + return + } + + b, err := json.Marshal(resp) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal response: %+v, error: %v", resp, err), + http.StatusInternalServerError) + return + } + w.Write(b) + }) +} + +func (i *replicatedIndices) postObjectSingle(w http.ResponseWriter, r *http.Request, + index, shard, requestID string, schemaVersion uint64, +) { + bodyBytes, err := io.ReadAll(r.Body) + if err != nil 
{ + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + obj, err := IndicesPayloads.SingleObject.Unmarshal(bodyBytes) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + resp := i.shards.ReplicateObject(r.Context(), index, shard, requestID, obj, schemaVersion) + if localIndexNotReady(resp) { + http.Error(w, resp.FirstError().Error(), http.StatusServiceUnavailable) + return + } + + b, err := json.Marshal(resp) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal response: %+v, error: %v", resp, err), + http.StatusInternalServerError) + return + } + + w.Write(b) +} + +func (i *replicatedIndices) postObjectBatch(w http.ResponseWriter, r *http.Request, + index, shard, requestID string, schemaVersion uint64, +) { + bodyBytes, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + objs, err := IndicesPayloads.ObjectList.Unmarshal(bodyBytes) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + resp := i.shards.ReplicateObjects(r.Context(), index, shard, requestID, objs, schemaVersion) + if localIndexNotReady(resp) { + http.Error(w, resp.FirstError().Error(), http.StatusServiceUnavailable) + return + } + + b, err := json.Marshal(resp) + if err != nil { + http.Error(w, fmt.Sprintf("unmarshal resp: %+v, error: %v", resp, err), + http.StatusInternalServerError) + return + } + + w.Write(b) +} + +func (i *replicatedIndices) getObject() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := regxObject.FindStringSubmatch(r.URL.Path) + if len(args) != 5 || args[4] != "" { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + index, shard, id := args[1], args[2], args[3] + + defer r.Body.Close() + + var ( + resp replica.Replica + err error + ) + + resp, err = i.shards.FetchObject(r.Context(), index, shard, strfmt.UUID(id)) + if err != 
nil && errors.As(err, &enterrors.ErrUnprocessable{}) { + http.Error(w, "digest objects: "+err.Error(), + http.StatusUnprocessableEntity) + return + } + + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + b, err := resp.MarshalBinary() + if err != nil { + http.Error(w, fmt.Sprintf("unmarshal resp: %+v, error: %v", resp, err), + http.StatusInternalServerError) + return + } + + w.Write(b) + }) +} + +func (i *replicatedIndices) getObjectsMulti() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + args := regxObjects.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, fmt.Sprintf("invalid URI: %s", r.URL.Path), + http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + + defer r.Body.Close() + + idsEncoded := r.URL.Query().Get("ids") + if idsEncoded == "" { + http.Error(w, "missing required url param 'ids'", + http.StatusBadRequest) + return + } + + idsBytes, err := base64.StdEncoding.DecodeString(idsEncoded) + if err != nil { + http.Error(w, "base64 decode 'ids' param: "+err.Error(), + http.StatusBadRequest) + return + } + + var ids []strfmt.UUID + if err := json.Unmarshal(idsBytes, &ids); err != nil { + http.Error(w, "unmarshal 'ids' param from json: "+err.Error(), + http.StatusBadRequest) + return + } + + resp, err := i.shards.FetchObjects(r.Context(), index, shard, ids) + if err != nil && errors.As(err, &enterrors.ErrUnprocessable{}) { + http.Error(w, "digest objects: "+err.Error(), + http.StatusUnprocessableEntity) + return + } + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + b, err := replica.Replicas(resp).MarshalBinary() + if err != nil { + http.Error(w, fmt.Sprintf("unmarshal resp: %+v, error: %v", resp, err), + http.StatusInternalServerError) + return + } + + w.Write(b) + }) +} + +func (i *replicatedIndices) postRefs() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { + args := regxObjects.FindStringSubmatch(r.URL.Path) + if len(args) != 3 { + http.Error(w, "invalid URI", http.StatusBadRequest) + return + } + + requestID := r.URL.Query().Get(replica.RequestKey) + if requestID == "" { + http.Error(w, "request_id not provided", http.StatusBadRequest) + return + } + + index, shard := args[1], args[2] + bodyBytes, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + refs, err := IndicesPayloads.ReferenceList.Unmarshal(bodyBytes) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + schemaVersion, err := extractSchemaVersionFromUrlQuery(r.URL.Query()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + resp := i.shards.ReplicateReferences(r.Context(), index, shard, requestID, refs, schemaVersion) + if localIndexNotReady(resp) { + http.Error(w, resp.FirstError().Error(), http.StatusServiceUnavailable) + return + } + + b, err := json.Marshal(resp) + if err != nil { + http.Error(w, fmt.Sprintf("unmarshal resp: %+v, error: %v", resp, err), + http.StatusInternalServerError) + return + } + + w.Write(b) + }) +} + +func localIndexNotReady(resp replica.SimpleResponse) bool { + if err := resp.FirstError(); err != nil { + var replicaErr *replica.Error + if errors.As(err, &replicaErr) && replicaErr.IsStatusCode(replica.StatusNotReady) { + return true + } + } + return false +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_replicas_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_replicas_test.go new file mode 100644 index 0000000000000000000000000000000000000000..119ee7dfe2e6c51f4b2f262ae256e8aaa680ffcd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_replicas_test.go @@ -0,0 +1,62 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ 
\ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package clusterapi_test + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/handlers/rest/clusterapi" +) + +func TestMaintenanceModeReplicatedIndices(t *testing.T) { + noopAuth := clusterapi.NewNoopAuthHandler() + // NOTE leaving shards and scaler nil for now, fill in when needed + indices := clusterapi.NewReplicatedIndices(nil, noopAuth, func() bool { return true }) + mux := http.NewServeMux() + mux.Handle("/replicas/indices/", indices.Indices()) + server := httptest.NewServer(mux) + + defer server.Close() + + maintenanceModeExpectedHTTPStatus := http.StatusTeapot + requestURL := func(suffix string) string { + return fmt.Sprintf("%s/replicas/indices/MyClass/shards/myshard%s", server.URL, suffix) + } + indicesTestRequests := []indicesTestRequest{ + {"GET", "/objects/_digest"}, + {"PUT", "/objects/_overwrite"}, + {"DELETE", "/objects/deadbeef"}, + {"PATCH", "/objects/deadbeef"}, + {"GET", "/objects/deadbeef"}, + {"POST", "/objects/references"}, + {"GET", "/objects"}, + {"POST", "/objects"}, + {"DELETE", "/objects"}, + {"PUT", "/replication-factor:increase"}, + {"POST", ":commit"}, + {"POST", ":abort"}, + } + for _, testRequest := range indicesTestRequests { + t.Run(fmt.Sprintf("%s on %s returns maintenance mode status", testRequest.method, testRequest.suffix), func(t *testing.T) { + req, err := http.NewRequest(testRequest.method, requestURL(testRequest.suffix), nil) + assert.Nil(t, err) + res, err := http.DefaultClient.Do(req) + assert.Nil(t, err) + defer res.Body.Close() + assert.True(t, res.StatusCode == maintenanceModeExpectedHTTPStatus, "expected %d, got %d", maintenanceModeExpectedHTTPStatus, res.StatusCode) + }) + } +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_test.go new file mode 100644 index 0000000000000000000000000000000000000000..35f2e84e48a8b6dbf27a55e61265e334d39de8ad --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/indices_test.go @@ -0,0 +1,73 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package clusterapi_test + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/handlers/rest/clusterapi" +) + +func TestMaintenanceModeIndices(t *testing.T) { + noopAuth := clusterapi.NewNoopAuthHandler() + // NOTE leaving shards, db, and logger nil for now, fill in when needed + indices := clusterapi.NewIndices(nil, nil, noopAuth, func() bool { return true }, nil) + mux := http.NewServeMux() + mux.Handle("/indices/", indices.Indices()) + server := httptest.NewServer(mux) + + defer server.Close() + + maintenanceModeExpectedHTTPStatus := http.StatusTeapot + requestURL := func(suffix string) string { + return fmt.Sprintf("%s/indices/MyClass/shards/myshard%s", server.URL, suffix) + } + indicesTestRequests := []indicesTestRequest{ + {"POST", "/objects/_search"}, + {"POST", "/objects/_find"}, + {"POST", "/objects/_aggregations"}, + {"PUT", "/objects:overwrite"}, + {"GET", "/objects:digest"}, + {"GET", "/objects/deadbeef"}, + {"DELETE", "/objects/deadbeef"}, + {"PATCH", "/objects/deadbeef"}, + {"GET", "/objects"}, + {"POST", "/objects"}, + {"DELETE", "/objects"}, + {"POST", "/references"}, + {"GET", "/queuesize"}, + {"GET", "/status"}, + {"POST", "/status"}, + {"POST", "/files/myfile"}, + 
{"POST", ""}, + {"PUT", ":reinit"}, + } + for _, testRequest := range indicesTestRequests { + t.Run(fmt.Sprintf("%s on %s returns maintenance mode status", testRequest.method, testRequest.suffix), func(t *testing.T) { + req, err := http.NewRequest(testRequest.method, requestURL(testRequest.suffix), nil) + assert.Nil(t, err) + res, err := http.DefaultClient.Do(req) + assert.Nil(t, err) + defer res.Body.Close() + assert.True(t, res.StatusCode == maintenanceModeExpectedHTTPStatus, "expected %d, got %d", maintenanceModeExpectedHTTPStatus, res.StatusCode) + }) + } +} + +type indicesTestRequest struct { + method string + suffix string +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/nodes.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/nodes.go new file mode 100644 index 0000000000000000000000000000000000000000..0150758cb9a504e0263d385ab73f0ed261ab2cde --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/nodes.go @@ -0,0 +1,146 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package clusterapi + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/weaviate/weaviate/entities/models" + entschema "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/verbosity" +) + +type nodesManager interface { + GetNodeStatus(ctx context.Context, className, shardName, output string) (*models.NodeStatus, error) + GetStatistics(ctx context.Context) (*models.Statistics, error) +} + +type nodes struct { + nodesManager nodesManager + auth auth +} + +func NewNodes(manager nodesManager, auth auth) *nodes { + return &nodes{nodesManager: manager, auth: auth} +} + +var ( + regxNodes = regexp.MustCompile(`/status`) + regxNodesClass = regexp.MustCompile(`/status/(` + entschema.ClassNameRegexCore + `)`) + regxStatistics = regexp.MustCompile(`/statistics`) +) + +func (s *nodes) Nodes() http.Handler { + return s.auth.handleFunc(s.nodesHandler()) +} + +func (s *nodes) nodesHandler() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + switch { + case regxNodes.MatchString(path) || regxNodesClass.MatchString(path): + if r.Method != http.MethodGet { + msg := fmt.Sprintf("/nodes api path %q not found", path) + http.Error(w, msg, http.StatusMethodNotAllowed) + return + } + + s.incomingNodeStatus().ServeHTTP(w, r) + return + case regxStatistics.MatchString(path): + if r.Method != http.MethodGet { + msg := fmt.Sprintf("/nodes api path %q not found", path) + http.Error(w, msg, http.StatusMethodNotAllowed) + return + } + + s.incomingStatistics().ServeHTTP(w, r) + return + default: + http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) + return + } + } +} + +func (s *nodes) incomingNodeStatus() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + var className string + + // url is /nodes/status[/className], where /className is 
optional + args := strings.Split(r.URL.Path, "/") + if len(args) == 4 { + className = args[3] + } + + // shard is an optional query parameter + shardName := r.URL.Query().Get("shard") + + output := verbosity.OutputMinimal + out, found := r.URL.Query()["output"] + if found && len(out) > 0 { + output = out[0] + } + nodeStatus, err := s.nodesManager.GetNodeStatus(r.Context(), className, shardName, output) + if err != nil { + http.Error(w, "/nodes fulfill request: "+err.Error(), + http.StatusBadRequest) + return + } + + if nodeStatus == nil { + w.WriteHeader(http.StatusNotFound) + return + } + + nodeStatusBytes, err := json.Marshal(nodeStatus) + if err != nil { + http.Error(w, "/nodes marshal response: "+err.Error(), + http.StatusInternalServerError) + } + + w.Write(nodeStatusBytes) + }) +} + +func (s *nodes) incomingStatistics() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + statistics, err := s.nodesManager.GetStatistics(r.Context()) + if err != nil { + http.Error(w, "/nodes fulfill request: "+err.Error(), + http.StatusBadRequest) + return + } + + if statistics == nil { + w.WriteHeader(http.StatusNotFound) + return + } + + statisticsBytes, err := json.Marshal(statistics) + if err != nil { + http.Error(w, "/nodes marshal response: "+err.Error(), + http.StatusInternalServerError) + } + + w.Write(statisticsBytes) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/schema_version.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/schema_version.go new file mode 100644 index 0000000000000000000000000000000000000000..0ba30c59f83b68a9dd42598f0615156cb77fe46f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/schema_version.go @@ -0,0 +1,32 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package clusterapi + +import ( + "fmt" + "net/url" + "strconv" + + "github.com/weaviate/weaviate/usecases/replica" +) + +func extractSchemaVersionFromUrlQuery(values url.Values) (uint64, error) { + if v := values.Get(replica.SchemaVersionKey); v != "" { + schemaVersion, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return 0, fmt.Errorf("%w: %q is an invalid value for %s", err, v, replica.SchemaVersionKey) + } + + return schemaVersion, nil + } + return 0, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/serve.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/serve.go new file mode 100644 index 0000000000000000000000000000000000000000..b6a1cee39e3c3f54e2e51cc98eba87a468a7cf13 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/serve.go @@ -0,0 +1,163 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package clusterapi + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + + sentryhttp "github.com/getsentry/sentry-go/http" + + "github.com/weaviate/weaviate/adapters/handlers/rest/state" + "github.com/weaviate/weaviate/adapters/handlers/rest/types" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +// Server represents the cluster API server +type Server struct { + server *http.Server + appState *state.State +} + +// Ensure Server implements interfaces.ClusterServer +var _ types.ClusterServer = (*Server)(nil) + +// NewServer creates a new cluster API server instance +func NewServer(appState *state.State) *Server { + port := appState.ServerConfig.Config.Cluster.DataBindPort + auth := NewBasicAuthHandler(appState.ServerConfig.Config.Cluster.AuthConfig) + + appState.Logger.WithField("port", port). + WithField("action", "cluster_api_startup"). + Debugf("serving cluster api on port %d", port) + + indices := NewIndices(appState.RemoteIndexIncoming, appState.DB, auth, appState.Cluster.MaintenanceModeEnabledForLocalhost, appState.Logger) + replicatedIndices := NewReplicatedIndices(appState.RemoteReplicaIncoming, auth, appState.Cluster.MaintenanceModeEnabledForLocalhost) + classifications := NewClassifications(appState.ClassificationRepo.TxManager(), auth) + nodes := NewNodes(appState.RemoteNodeIncoming, auth) + backups := NewBackups(appState.BackupManager, auth) + dbUsers := NewDbUsers(appState.APIKeyRemote, auth) + + mux := http.NewServeMux() + mux.Handle("/classifications/transactions/", + http.StripPrefix("/classifications/transactions/", + classifications.Transactions())) + + mux.Handle("/cluster/users/db/", dbUsers.Users()) + mux.Handle("/nodes/", nodes.Nodes()) + mux.Handle("/indices/", indices.Indices()) + mux.Handle("/replicas/indices/", replicatedIndices.Indices()) + + mux.Handle("/backups/can-commit", backups.CanCommit()) + mux.Handle("/backups/commit", backups.Commit()) + 
mux.Handle("/backups/abort", backups.Abort()) + mux.Handle("/backups/status", backups.Status()) + + mux.Handle("/", index()) + + var handler http.Handler + handler = mux + if appState.ServerConfig.Config.Sentry.Enabled { + // Wrap the default mux with Sentry to capture panics, report errors and + // measure performance. + // + // Alternatively, you can also wrap individual handlers if you need to + // use different options for different parts of your app. + handler = sentryhttp.New(sentryhttp.Options{}).Handle(mux) + } + + if appState.ServerConfig.Config.Monitoring.Enabled { + handler = monitoring.InstrumentHTTP( + handler, + staticRoute(mux), + appState.HTTPServerMetrics.InflightRequests, + appState.HTTPServerMetrics.RequestDuration, + appState.HTTPServerMetrics.RequestBodySize, + appState.HTTPServerMetrics.ResponseBodySize, + ) + } + + return &Server{ + server: &http.Server{ + Addr: fmt.Sprintf(":%d", port), + Handler: handler, + }, + appState: appState, + } +} + +// Serve starts the server and blocks until an error occurs +func (s *Server) Serve() error { + s.appState.Logger.WithField("action", "cluster_api_startup"). + Infof("cluster api server is ready to handle requests on %s", s.server.Addr) + return s.server.ListenAndServe() +} + +// Close gracefully shuts down the server +func (s *Server) Close(ctx context.Context) error { + s.appState.Logger.WithField("action", "cluster_api_shutdown"). + Info("server is shutting down") + + if err := s.server.Shutdown(ctx); err != nil { + s.appState.Logger.WithField("action", "cluster_api_shutdown"). + WithError(err). + Error("could not stop server gracefully") + return s.server.Close() + } + return nil +} + +// Serve is kept for backward compatibility +func Serve(appState *state.State) (*Server, error) { + server := NewServer(appState) + if err := server.Serve(); err != nil && !errors.Is(err, http.ErrServerClosed) { + appState.Logger.WithField("action", "cluster_api_shutdown"). + WithError(err). 
+ Error("server error") + } + return server, nil +} + +func index() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "" && r.URL.String() != "/" { + http.NotFound(w, r) + return + } + + payload := map[string]string{ + "description": "Weaviate's cluster-internal API for cross-node communication", + } + + json.NewEncoder(w).Encode(payload) + }) +} + +// staticRoute is used to convert routes in our internal http server into static routes +// by removing all the dynamic variables in the route. Useful for instrumentation +// where "route cardinality" matters. + +// Example: `/replicas/indices/Movies/shards/hello0/objects` -> `/replicas/indices` +func staticRoute(mux *http.ServeMux) monitoring.StaticRouteLabel { + return func(r *http.Request) (*http.Request, string) { + route := r.URL.String() + + _, pattern := mux.Handler(r) + if pattern != "" { + route = pattern + } + return r, route + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/serve_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/serve_test.go new file mode 100644 index 0000000000000000000000000000000000000000..93dc4c3844865542eab4f54c086f3554aa8d4938 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/serve_test.go @@ -0,0 +1,79 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package clusterapi + +import ( + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_staticRoute(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/indices", okHandler) + mux.HandleFunc("/replicas/", okHandler) + + cases := []struct { + name string + req *http.Request + expected string + }{ + { + name: "unmatched route", + req: newRequest(t, "/foo"), // un-matched route + expected: "/foo", + }, + { + name: "matched route", + req: newRequest(t, "/indices"), // matched route + expected: "/indices", + }, + { + name: "un-matched route with dynamic path", + req: newRequest(t, "/indices/objects/Movies"), // un-matched route. Note original handler is `/indices` (without `/` suffix) + expected: "/indices/objects/Movies", + }, + { + name: "matched route with dynamic path", + req: newRequest(t, "/replicas/objects/Movies"), // matched route. + expected: "/replicas/", // yay! + }, + { + name: "matched route with dynamic path 2", + req: newRequest(t, "/replicas/objects/Movies2"), // matched route. + expected: "/replicas/", // yay! 
+	},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			_, got := staticRoute(mux)(tc.req)
+			assert.Equal(t, tc.expected, got)
+		})
+	}
+}
+
+func newRequest(t *testing.T, path string) *http.Request {
+	t.Helper()
+
+	r, err := http.NewRequest("GET", path, nil)
+	require.NoError(t, err)
+	return r
+}
+
+func okHandler(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprintf(w, "ok")
+}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/transactions.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/transactions.go
new file mode 100644
index 0000000000000000000000000000000000000000..828540f38e72434574ccc2ce8e95d34fd0bb491e
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/clusterapi/transactions.go
@@ -0,0 +1,217 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+//  CONTACT: hello@weaviate.io
+//
+
+package clusterapi
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/weaviate/weaviate/usecases/classification"
+	"github.com/weaviate/weaviate/usecases/cluster"
+)
+
+// TODO-RAFT START
+// Get rid of TxManager
+// TODO-RAFT END
+
+type txManager interface {
+	IncomingBeginTransaction(ctx context.Context, tx *cluster.Transaction) ([]byte, error)
+	IncomingCommitTransaction(ctx context.Context, tx *cluster.Transaction) error
+	IncomingAbortTransaction(ctx context.Context, tx *cluster.Transaction)
+}
+
+type txPayload struct {
+	ID            string                  `json:"id"`
+	Type          cluster.TransactionType `json:"type"`
+	Payload       json.RawMessage         `json:"payload"`
+	DeadlineMilli int64                   `json:"deadlineMilli"`
+}
+
+type handlerType int
+
+const (
+	// schemaTX left for backward compatibility
+	schemaTX handlerType = iota
+	classifyTX
+)
+
+type txHandler struct {
+	manager     txManager
+	auth        auth
+	handlerType handlerType
+}
+
+func newTxHandler(manager txManager, auth auth, handlerType handlerType) txHandler {
+	if handlerType != schemaTX && handlerType != classifyTX {
+		panic(fmt.Sprintf("unknown handler type: %d", handlerType))
+	}
+	return txHandler{
+		manager:     manager,
+		auth:        auth,
+		handlerType: handlerType,
+	}
+}
+
+func (h *txHandler) Transactions() http.Handler {
+	return h.auth.handleFunc(h.transactionsHandler())
+}
+
+func (h *txHandler) transactionsHandler() http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		path := r.URL.Path
+		switch {
+		case path == "":
+			if r.Method != http.MethodPost {
+				http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed)
+				return
+			}
+
+			h.incomingTransaction().ServeHTTP(w, r)
+			return
+
+		case strings.HasSuffix(path, "/commit"):
+			if r.Method != http.MethodPut {
+				http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed)
+				return
+			}
+
+			h.incomingCommitTransaction().ServeHTTP(w, r)
+			return
+		default:
+			if r.Method != http.MethodDelete {
+				http.Error(w, "405 Method not Allowed", http.StatusMethodNotAllowed)
+				return
+			}
+
+			h.incomingAbortTransaction().ServeHTTP(w, r)
+			return
+		}
+	}
+}
+
+func (h *txHandler) incomingTransaction() http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+
+		if r.Header.Get("content-type") != "application/json" {
+			http.Error(w, "415 Unsupported Media Type", http.StatusUnsupportedMediaType)
+			return
+		}
+
+		var payload txPayload
+		if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
+			http.Error(w, errors.Wrap(err, "decode body").Error(),
+				http.StatusInternalServerError)
+			return
+		}
+
+		if len(payload.ID) == 0 {
+			http.Error(w, "id must be set", http.StatusBadRequest)
+			return
+		}
+
+		if len(payload.Type) == 0 {
+			http.Error(w, "type must be set", http.StatusBadRequest)
+			return
+		}
+
+		var (
+			txPayload interface{}
+			err       error
+		)
+
+		switch h.handlerType {
+		case classifyTX:
+			txPayload, err = classification.UnmarshalTransaction(payload.Type, payload.Payload)
+			if err != nil {
+				http.Error(w, errors.Wrap(err, "decode tx payload").Error(),
+					http.StatusInternalServerError)
+				return
+			}
+		default:
+			http.Error(w, "not implemented", http.StatusInternalServerError)
+			return
+		}
+
+		txType := payload.Type
+		tx := &cluster.Transaction{
+			ID:       payload.ID,
+			Type:     txType,
+			Payload:  txPayload,
+			Deadline: time.UnixMilli(payload.DeadlineMilli),
+		}
+
+		data, err := h.manager.IncomingBeginTransaction(r.Context(), tx)
+		if err != nil {
+			status := http.StatusInternalServerError
+			if errors.Is(err, cluster.ErrConcurrentTransaction) {
+				status = http.StatusConflict
+			}
+
+			http.Error(w, errors.Wrap(err, "open transaction").Error(), status)
+			return
+		}
+
+		// NOTE(review): the begin error is fully handled above (http.Error +
+		// early return), so only the success path remains here. The previous
+		// dead "if err != nil" re-check could never execute and, had it run,
+		// would have triggered a superfluous second WriteHeader call.
+		w.WriteHeader(http.StatusCreated)
+		w.Write(data)
+	})
+}
+
+func (h *txHandler) incomingAbortTransaction() http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+
+		path := r.URL.String()
+		tx := &cluster.Transaction{
+			ID: path,
+		}
+
+		h.manager.IncomingAbortTransaction(r.Context(), tx)
+		w.WriteHeader(http.StatusNoContent)
+	})
+}
+
+func (h *txHandler) incomingCommitTransaction() http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+
+		parts := strings.Split(r.URL.Path, "/")
+		if len(parts) != 2 {
+			http.NotFound(w, r)
+			return
+		}
+
+		tx := &cluster.Transaction{
+			ID: parts[0],
+		}
+
+		if err := h.manager.IncomingCommitTransaction(r.Context(), tx); err != nil {
+			status := http.StatusInternalServerError
+			if errors.Is(err, cluster.ErrConcurrentTransaction) {
+				status = http.StatusConflict
+			}
+
+			http.Error(w, errors.Wrap(err, "commit transaction").Error(), status)
+			return
+		}
+		w.WriteHeader(http.StatusNoContent)
+	})
+}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_api.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_api.go
new file mode 100644
index 0000000000000000000000000000000000000000..71dcec02492d778f738cd5598e90a277b27e0b80
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_api.go
@@ -0,0 +1,1967 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "context" + "encoding/json" + "fmt" + "net" + "net/http" + _ "net/http/pprof" + "os" + "path/filepath" + "regexp" + goruntime "runtime" + "runtime/debug" + "strconv" + "strings" + "time" + + "github.com/KimMachineGun/automemlimit/memlimit" + armonmetrics "github.com/armon/go-metrics" + armonprometheus "github.com/armon/go-metrics/prometheus" + "github.com/getsentry/sentry-go" + openapierrors "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/swag" + "github.com/pbnjay/memory" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/prometheus/client_golang/prometheus/collectors/version" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + + "github.com/weaviate/fgprof" + "github.com/weaviate/weaviate/adapters/clients" + "github.com/weaviate/weaviate/adapters/handlers/grpc/v1/batch" + "github.com/weaviate/weaviate/adapters/handlers/rest/authz" + "github.com/weaviate/weaviate/adapters/handlers/rest/clusterapi" + "github.com/weaviate/weaviate/adapters/handlers/rest/db_users" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" + replicationHandlers "github.com/weaviate/weaviate/adapters/handlers/rest/replication" + "github.com/weaviate/weaviate/adapters/handlers/rest/state" + "github.com/weaviate/weaviate/adapters/handlers/rest/tenantactivity" + "github.com/weaviate/weaviate/adapters/repos/classifications" + "github.com/weaviate/weaviate/adapters/repos/db" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + modulestorage "github.com/weaviate/weaviate/adapters/repos/modules" + schemarepo 
"github.com/weaviate/weaviate/adapters/repos/schema" + rCluster "github.com/weaviate/weaviate/cluster" + "github.com/weaviate/weaviate/cluster/distributedtask" + "github.com/weaviate/weaviate/cluster/replication/copier" + "github.com/weaviate/weaviate/cluster/usage" + "github.com/weaviate/weaviate/entities/concurrency" + entconfig "github.com/weaviate/weaviate/entities/config" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/moduletools" + "github.com/weaviate/weaviate/entities/replication" + vectorIndex "github.com/weaviate/weaviate/entities/vectorindex" + modstgazure "github.com/weaviate/weaviate/modules/backup-azure" + modstgfs "github.com/weaviate/weaviate/modules/backup-filesystem" + modstggcs "github.com/weaviate/weaviate/modules/backup-gcs" + modstgs3 "github.com/weaviate/weaviate/modules/backup-s3" + modgenerativeanthropic "github.com/weaviate/weaviate/modules/generative-anthropic" + modgenerativeanyscale "github.com/weaviate/weaviate/modules/generative-anyscale" + modgenerativeaws "github.com/weaviate/weaviate/modules/generative-aws" + modgenerativecohere "github.com/weaviate/weaviate/modules/generative-cohere" + modgenerativedatabricks "github.com/weaviate/weaviate/modules/generative-databricks" + modgenerativedummy "github.com/weaviate/weaviate/modules/generative-dummy" + modgenerativefriendliai "github.com/weaviate/weaviate/modules/generative-friendliai" + modgenerativegoogle "github.com/weaviate/weaviate/modules/generative-google" + modgenerativemistral "github.com/weaviate/weaviate/modules/generative-mistral" + modgenerativenvidia "github.com/weaviate/weaviate/modules/generative-nvidia" + modgenerativeoctoai "github.com/weaviate/weaviate/modules/generative-octoai" + modgenerativeollama "github.com/weaviate/weaviate/modules/generative-ollama" + modgenerativeopenai "github.com/weaviate/weaviate/modules/generative-openai" + modgenerativexai 
"github.com/weaviate/weaviate/modules/generative-xai" + modimage "github.com/weaviate/weaviate/modules/img2vec-neural" + modmulti2multivecjinaai "github.com/weaviate/weaviate/modules/multi2multivec-jinaai" + modbind "github.com/weaviate/weaviate/modules/multi2vec-bind" + modclip "github.com/weaviate/weaviate/modules/multi2vec-clip" + modmulti2veccohere "github.com/weaviate/weaviate/modules/multi2vec-cohere" + modmulti2vecgoogle "github.com/weaviate/weaviate/modules/multi2vec-google" + modmulti2vecjinaai "github.com/weaviate/weaviate/modules/multi2vec-jinaai" + modmulti2vecnvidia "github.com/weaviate/weaviate/modules/multi2vec-nvidia" + modmulti2vecvoyageai "github.com/weaviate/weaviate/modules/multi2vec-voyageai" + modner "github.com/weaviate/weaviate/modules/ner-transformers" + modsloads3 "github.com/weaviate/weaviate/modules/offload-s3" + modqnaopenai "github.com/weaviate/weaviate/modules/qna-openai" + modqna "github.com/weaviate/weaviate/modules/qna-transformers" + modcentroid "github.com/weaviate/weaviate/modules/ref2vec-centroid" + modrerankercohere "github.com/weaviate/weaviate/modules/reranker-cohere" + modrerankerdummy "github.com/weaviate/weaviate/modules/reranker-dummy" + modrerankerjinaai "github.com/weaviate/weaviate/modules/reranker-jinaai" + modrerankernvidia "github.com/weaviate/weaviate/modules/reranker-nvidia" + modrerankertransformers "github.com/weaviate/weaviate/modules/reranker-transformers" + modrerankervoyageai "github.com/weaviate/weaviate/modules/reranker-voyageai" + modsum "github.com/weaviate/weaviate/modules/sum-transformers" + modspellcheck "github.com/weaviate/weaviate/modules/text-spellcheck" + modtext2multivecjinaai "github.com/weaviate/weaviate/modules/text2multivec-jinaai" + modtext2vecaws "github.com/weaviate/weaviate/modules/text2vec-aws" + modt2vbigram "github.com/weaviate/weaviate/modules/text2vec-bigram" + modcohere "github.com/weaviate/weaviate/modules/text2vec-cohere" + modcontextionary 
"github.com/weaviate/weaviate/modules/text2vec-contextionary" + moddatabricks "github.com/weaviate/weaviate/modules/text2vec-databricks" + modtext2vecgoogle "github.com/weaviate/weaviate/modules/text2vec-google" + modgpt4all "github.com/weaviate/weaviate/modules/text2vec-gpt4all" + modhuggingface "github.com/weaviate/weaviate/modules/text2vec-huggingface" + modjinaai "github.com/weaviate/weaviate/modules/text2vec-jinaai" + modmistral "github.com/weaviate/weaviate/modules/text2vec-mistral" + modt2vmodel2vec "github.com/weaviate/weaviate/modules/text2vec-model2vec" + modnvidia "github.com/weaviate/weaviate/modules/text2vec-nvidia" + modtext2vecoctoai "github.com/weaviate/weaviate/modules/text2vec-octoai" + modollama "github.com/weaviate/weaviate/modules/text2vec-ollama" + modopenai "github.com/weaviate/weaviate/modules/text2vec-openai" + modtransformers "github.com/weaviate/weaviate/modules/text2vec-transformers" + modvoyageai "github.com/weaviate/weaviate/modules/text2vec-voyageai" + modweaviateembed "github.com/weaviate/weaviate/modules/text2vec-weaviate" + modusagegcs "github.com/weaviate/weaviate/modules/usage-gcs" + modusages3 "github.com/weaviate/weaviate/modules/usage-s3" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + "github.com/weaviate/weaviate/usecases/auth/authentication/composer" + "github.com/weaviate/weaviate/usecases/backup" + "github.com/weaviate/weaviate/usecases/build" + "github.com/weaviate/weaviate/usecases/classification" + "github.com/weaviate/weaviate/usecases/cluster" + "github.com/weaviate/weaviate/usecases/config" + configRuntime "github.com/weaviate/weaviate/usecases/config/runtime" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/modules" + "github.com/weaviate/weaviate/usecases/monitoring" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/replica" + "github.com/weaviate/weaviate/usecases/schema" + 
"github.com/weaviate/weaviate/usecases/sharding" + "github.com/weaviate/weaviate/usecases/telemetry" + "github.com/weaviate/weaviate/usecases/traverser" +) + +const MinimumRequiredContextionaryVersion = "1.0.2" + +func makeConfigureServer(appState *state.State) func(*http.Server, string, string) { + return func(s *http.Server, scheme, addr string) { + // Add properties to the config + appState.ServerConfig.Hostname = addr + appState.ServerConfig.Scheme = scheme + } +} + +type vectorRepo interface { + objects.BatchVectorRepo + traverser.VectorSearcher + classification.VectorRepo + SetSchemaGetter(schema.SchemaGetter) + WaitForStartup(ctx context.Context) error + Shutdown(ctx context.Context) error +} + +func getCores() (int, error) { + cpuset, err := os.ReadFile("/sys/fs/cgroup/cpuset/cpuset.cpus") + if err != nil { + return 0, errors.Wrap(err, "read cpuset") + } + return calcCPUs(strings.TrimSpace(string(cpuset))) +} + +func calcCPUs(cpuString string) (int, error) { + cores := 0 + if cpuString == "" { + return 0, nil + } + + // Split by comma to handle multiple ranges + ranges := strings.Split(cpuString, ",") + for _, r := range ranges { + // Check if it's a range (contains a hyphen) + if strings.Contains(r, "-") { + parts := strings.Split(r, "-") + if len(parts) != 2 { + return 0, fmt.Errorf("invalid CPU range format: %s", r) + } + start, err := strconv.Atoi(parts[0]) + if err != nil { + return 0, fmt.Errorf("invalid start of CPU range: %s", parts[0]) + } + end, err := strconv.Atoi(parts[1]) + if err != nil { + return 0, fmt.Errorf("invalid end of CPU range: %s", parts[1]) + } + cores += end - start + 1 + } else { + // Single CPU + cores++ + } + } + + return cores, nil +} + +func MakeAppState(ctx context.Context, options *swag.CommandLineOptionsGroup) *state.State { + build.Version = ParseVersionFromSwaggerSpec() // Version is always static and loaded from swagger spec. 
+ + // config.ServerVersion is deprecated: It's there to be backward compatible + // use build.Version instead. + config.ServerVersion = build.Version + + appState := startupRoutine(ctx, options) + + // this is before initRuntimeOverrides to be able to init module configs + // as runtime overrides are applied after initModules + if err := registerModules(appState); err != nil { + appState.Logger. + WithField("action", "startup").WithError(err). + Fatal("modules didn't load") + } + + // while we accept an overall longer startup, e.g. due to a recovery, we + // still want to limit the module startup context, as that's mostly service + // discovery / dependency checking + moduleCtx, cancel := context.WithTimeout(ctx, 120*time.Second) + defer cancel() + + if err := initModules(moduleCtx, appState); err != nil { + appState.Logger. + WithField("action", "startup").WithError(err). + Fatal("modules didn't initialize") + } + // now that modules are loaded we can run the remaining config validation + // which is module dependent + if err := appState.ServerConfig.Config.ValidateModules(appState.Modules); err != nil { + appState.Logger. + WithField("action", "startup").WithError(err). + Fatal("invalid config") + } + + // initializing at the top to reflect the config changes before we pass on to different components. + initRuntimeOverrides(appState) + + if appState.ServerConfig.Config.Monitoring.Enabled { + appState.HTTPServerMetrics = monitoring.NewHTTPServerMetrics(monitoring.DefaultMetricsNamespace, prometheus.DefaultRegisterer) + appState.GRPCServerMetrics = monitoring.NewGRPCServerMetrics(monitoring.DefaultMetricsNamespace, prometheus.DefaultRegisterer) + + appState.TenantActivity = tenantactivity.NewHandler() + + // Since we are scraping prometheus.DefaultRegisterer, it already has + // a go collector configured by default in internal module init(). 
+ // However, the go collector configured by default is missing some interesting metrics, + // therefore, we have to first unregister it so there are no duplicate metric declarations + // and then register extended collector once again. + prometheus.Unregister(collectors.NewGoCollector()) + prometheus.MustRegister(collectors.NewGoCollector( + collectors.WithGoCollectorRuntimeMetrics(collectors.GoRuntimeMetricsRule{ + Matcher: regexp.MustCompile(`/sched/latencies:seconds`), + }), + )) + + // export build tags to prometheus metric + build.SetPrometheusBuildInfo() + prometheus.MustRegister(version.NewCollector(build.AppName)) + + opts := armonprometheus.PrometheusOpts{ + Expiration: 0, // never expire any metrics, + Registerer: prometheus.DefaultRegisterer, + } + + sink, err := armonprometheus.NewPrometheusSinkFrom(opts) + if err != nil { + appState.Logger.WithField("action", "startup").WithError(err).Fatal("failed to create prometheus sink for raft metrics") + } + + cfg := armonmetrics.DefaultConfig("weaviate_internal") // to differentiate it's coming from internal/dependency packages. + cfg.EnableHostname = false // no `host` label + cfg.EnableHostnameLabel = false // no `hostname` label + cfg.EnableServiceLabel = false // no `service` label + cfg.EnableRuntimeMetrics = false // runtime metrics already provided by prometheus + cfg.EnableTypePrefix = true // to have some meaningful suffix to identify type of metrics. 
+ cfg.TimerGranularity = time.Second // time should always in seconds + + _, err = armonmetrics.NewGlobal(cfg, sink) + if err != nil { + appState.Logger.WithField("action", "startup").WithError(err).Fatal("failed to create metric registry raft metrics") + } + + // only monitoring tool supported at the moment is prometheus + enterrors.GoWrapper(func() { + mux := http.NewServeMux() + mux.Handle("/metrics", promhttp.Handler()) + mux.Handle("/tenant-activity", appState.TenantActivity) + http.ListenAndServe(fmt.Sprintf(":%d", appState.ServerConfig.Config.Monitoring.Port), mux) + }, appState.Logger) + } + + if appState.ServerConfig.Config.Sentry.Enabled { + err := sentry.Init(sentry.ClientOptions{ + // Setup related config + Dsn: appState.ServerConfig.Config.Sentry.DSN, + Debug: appState.ServerConfig.Config.Sentry.Debug, + Release: "weaviate-core@" + build.Version, + Environment: appState.ServerConfig.Config.Sentry.Environment, + // Enable tracing if requested + EnableTracing: !appState.ServerConfig.Config.Sentry.TracingDisabled, + AttachStacktrace: true, + // Sample rates based on the config + SampleRate: appState.ServerConfig.Config.Sentry.ErrorSampleRate, + ProfilesSampleRate: appState.ServerConfig.Config.Sentry.ProfileSampleRate, + TracesSampler: sentry.TracesSampler(func(ctx sentry.SamplingContext) float64 { + // Inherit decision from parent transaction (if any) if it is sampled or not + if ctx.Parent != nil && ctx.Parent.Sampled != sentry.SampledUndefined { + return 1.0 + } + + // Filter out uneeded traces + switch ctx.Span.Name { + // We are not interested in traces related to metrics endpoint + case "GET /metrics": + // These are some usual internet bot that will spam the server. 
Won't catch them all but we can reduce + // the number a bit + case "GET /favicon.ico": + case "GET /t4": + case "GET /ab2g": + case "PRI *": + case "GET /api/sonicos/tfa": + case "GET /RDWeb/Pages/en-US/login.aspx": + case "GET /_profiler/phpinfo": + case "POST /wsman": + case "POST /dns-query": + case "GET /dns-query": + return 0.0 + } + + // Filter out graphql queries, currently we have no context intrumentation around it and it's therefore + // just a blank line with 0 info except graphql resolve -> do -> return. + if ctx.Span.Name == "POST /v1/graphql" { + return 0.0 + } + + // Return the configured sample rate otherwise + return appState.ServerConfig.Config.Sentry.TracesSampleRate + }), + }) + if err != nil { + appState.Logger. + WithField("action", "startup").WithError(err). + Fatal("sentry initialization failed") + } + + sentry.ConfigureScope(func(scope *sentry.Scope) { + // Set cluster ID and cluster owner using sentry user feature to distinguish multiple clusters in the UI + scope.SetUser(sentry.User{ + ID: appState.ServerConfig.Config.Sentry.ClusterId, + Username: appState.ServerConfig.Config.Sentry.ClusterOwner, + }) + // Set any tags defined + for key, value := range appState.ServerConfig.Config.Sentry.Tags { + scope.SetTag(key, value) + } + }) + } + + limitResources(appState) + + appState.ClusterHttpClient = reasonableHttpClient(appState.ServerConfig.Config.Cluster.AuthConfig, appState.ServerConfig.Config.MinimumInternalTimeout) + appState.MemWatch = memwatch.NewMonitor(memwatch.LiveHeapReader, debug.SetMemoryLimit, 0.97) + + var vectorRepo vectorRepo + // var vectorMigrator schema.Migrator + // var migrator schema.Migrator + + metricsRegisterer := monitoring.NoopRegisterer + if appState.ServerConfig.Config.Monitoring.Enabled { + promMetrics := monitoring.GetMetrics() + metricsRegisterer = promMetrics.Registerer + appState.Metrics = promMetrics + } + + // TODO: configure http transport for efficient intra-cluster comm + remoteIndexClient := 
clients.NewRemoteIndex(appState.ClusterHttpClient) + remoteNodesClient := clients.NewRemoteNode(appState.ClusterHttpClient) + replicationClient := clients.NewReplicationClient(appState.ClusterHttpClient) + repo, err := db.New(appState.Logger, appState.Cluster.LocalName(), db.Config{ + ServerVersion: config.ServerVersion, + GitHash: build.Revision, + MemtablesFlushDirtyAfter: appState.ServerConfig.Config.Persistence.MemtablesFlushDirtyAfter, + MemtablesInitialSizeMB: 10, + MemtablesMaxSizeMB: appState.ServerConfig.Config.Persistence.MemtablesMaxSizeMB, + MemtablesMinActiveSeconds: appState.ServerConfig.Config.Persistence.MemtablesMinActiveDurationSeconds, + MemtablesMaxActiveSeconds: appState.ServerConfig.Config.Persistence.MemtablesMaxActiveDurationSeconds, + MinMMapSize: appState.ServerConfig.Config.Persistence.MinMMapSize, + LazySegmentsDisabled: appState.ServerConfig.Config.Persistence.LazySegmentsDisabled, + SegmentInfoIntoFileNameEnabled: appState.ServerConfig.Config.Persistence.SegmentInfoIntoFileNameEnabled, + WriteMetadataFilesEnabled: appState.ServerConfig.Config.Persistence.WriteMetadataFilesEnabled, + MaxReuseWalSize: appState.ServerConfig.Config.Persistence.MaxReuseWalSize, + SegmentsCleanupIntervalSeconds: appState.ServerConfig.Config.Persistence.LSMSegmentsCleanupIntervalSeconds, + SeparateObjectsCompactions: appState.ServerConfig.Config.Persistence.LSMSeparateObjectsCompactions, + MaxSegmentSize: appState.ServerConfig.Config.Persistence.LSMMaxSegmentSize, + CycleManagerRoutinesFactor: appState.ServerConfig.Config.Persistence.LSMCycleManagerRoutinesFactor, + IndexRangeableInMemory: appState.ServerConfig.Config.Persistence.IndexRangeableInMemory, + RootPath: appState.ServerConfig.Config.Persistence.DataPath, + QueryLimit: appState.ServerConfig.Config.QueryDefaults.Limit, + QueryMaximumResults: appState.ServerConfig.Config.QueryMaximumResults, + QueryHybridMaximumResults: appState.ServerConfig.Config.QueryHybridMaximumResults, + QueryNestedRefLimit: 
appState.ServerConfig.Config.QueryNestedCrossReferenceLimit, + MaxImportGoroutinesFactor: appState.ServerConfig.Config.MaxImportGoroutinesFactor, + TrackVectorDimensions: appState.ServerConfig.Config.TrackVectorDimensions || appState.Modules.UsageEnabled(), + TrackVectorDimensionsInterval: appState.ServerConfig.Config.TrackVectorDimensionsInterval, + UsageEnabled: appState.Modules.UsageEnabled(), + ResourceUsage: appState.ServerConfig.Config.ResourceUsage, + AvoidMMap: appState.ServerConfig.Config.AvoidMmap, + DisableLazyLoadShards: appState.ServerConfig.Config.DisableLazyLoadShards, + ForceFullReplicasSearch: appState.ServerConfig.Config.ForceFullReplicasSearch, + TransferInactivityTimeout: appState.ServerConfig.Config.TransferInactivityTimeout, + LSMEnableSegmentsChecksumValidation: appState.ServerConfig.Config.Persistence.LSMEnableSegmentsChecksumValidation, + // Pass dummy replication config with minimum factor 1. Otherwise the + // setting is not backward-compatible. The user may have created a class + // with factor=1 before the change was introduced. Now their setup would no + // longer start up if the required minimum is now higher than 1. We want + // the required minimum to only apply to newly created classes - not block + // loading existing ones. 
+ Replication: replication.GlobalConfig{ + MinimumFactor: 1, + AsyncReplicationDisabled: appState.ServerConfig.Config.Replication.AsyncReplicationDisabled, + }, + MaximumConcurrentShardLoads: appState.ServerConfig.Config.MaximumConcurrentShardLoads, + HNSWMaxLogSize: appState.ServerConfig.Config.Persistence.HNSWMaxLogSize, + HNSWDisableSnapshots: appState.ServerConfig.Config.Persistence.HNSWDisableSnapshots, + HNSWSnapshotIntervalSeconds: appState.ServerConfig.Config.Persistence.HNSWSnapshotIntervalSeconds, + HNSWSnapshotOnStartup: appState.ServerConfig.Config.Persistence.HNSWSnapshotOnStartup, + HNSWSnapshotMinDeltaCommitlogsNumber: appState.ServerConfig.Config.Persistence.HNSWSnapshotMinDeltaCommitlogsNumber, + HNSWSnapshotMinDeltaCommitlogsSizePercentage: appState.ServerConfig.Config.Persistence.HNSWSnapshotMinDeltaCommitlogsSizePercentage, + HNSWWaitForCachePrefill: appState.ServerConfig.Config.HNSWStartupWaitForVectorCache, + HNSWFlatSearchConcurrency: appState.ServerConfig.Config.HNSWFlatSearchConcurrency, + HNSWAcornFilterRatio: appState.ServerConfig.Config.HNSWAcornFilterRatio, + VisitedListPoolMaxSize: appState.ServerConfig.Config.HNSWVisitedListPoolMaxSize, + TenantActivityReadLogLevel: appState.ServerConfig.Config.TenantActivityReadLogLevel, + TenantActivityWriteLogLevel: appState.ServerConfig.Config.TenantActivityWriteLogLevel, + QuerySlowLogEnabled: appState.ServerConfig.Config.QuerySlowLogEnabled, + QuerySlowLogThreshold: appState.ServerConfig.Config.QuerySlowLogThreshold, + InvertedSorterDisabled: appState.ServerConfig.Config.InvertedSorterDisabled, + MaintenanceModeEnabled: appState.Cluster.MaintenanceModeEnabledForLocalhost, + }, remoteIndexClient, appState.Cluster, remoteNodesClient, replicationClient, appState.Metrics, appState.MemWatch, nil, nil, nil) // TODO client + if err != nil { + appState.Logger. + WithField("action", "startup").WithError(err). 
+ Fatal("invalid new DB") + } + + appState.DB = repo + if appState.ServerConfig.Config.Monitoring.Enabled { + appState.TenantActivity.SetSource(appState.DB) + } + + setupDebugHandlers(appState) + setupGoProfiling(appState.ServerConfig.Config, appState.Logger) + + migrator := db.NewMigrator(repo, appState.Logger, appState.Cluster.LocalName()) + migrator.SetNode(appState.Cluster.LocalName()) + // TODO-offload: "offload-s3" has to come from config when enable modules more than S3 + migrator.SetOffloadProvider(appState.Modules, "offload-s3") + appState.Migrator = migrator + + vectorRepo = repo + // migrator = vectorMigrator + explorer := traverser.NewExplorer(repo, appState.Logger, appState.Modules, traverser.NewMetrics(appState.Metrics), appState.ServerConfig.Config) + schemaRepo := schemarepo.NewStore(appState.ServerConfig.Config.Persistence.DataPath, appState.Logger) + if err = schemaRepo.Open(); err != nil { + appState.Logger. + WithField("action", "startup").WithError(err). + Fatal("could not initialize schema repo") + os.Exit(1) + } + + localClassifierRepo, err := classifications.NewRepo( + appState.ServerConfig.Config.Persistence.DataPath, appState.Logger) + if err != nil { + appState.Logger. + WithField("action", "startup").WithError(err). + Fatal("could not initialize classifications repo") + os.Exit(1) + } + + // TODO: configure http transport for efficient intra-cluster comm + classificationsTxClient := clients.NewClusterClassifications(appState.ClusterHttpClient) + classifierRepo := classifications.NewDistributeRepo(classificationsTxClient, + appState.Cluster, localClassifierRepo, appState.Logger) + appState.ClassificationRepo = classifierRepo + + server2port, err := parseNode2Port(appState) + if len(server2port) == 0 || err != nil { + appState.Logger. + WithField("action", "startup"). + WithField("raft-join", appState.ServerConfig.Config.Raft.Join). + WithError(err). 
+ Fatal("parsing raft-join") + os.Exit(1) + } + + nodeName := appState.Cluster.LocalName() + nodeAddr, _ := appState.Cluster.NodeHostname(nodeName) + addrs := strings.Split(nodeAddr, ":") + dataPath := appState.ServerConfig.Config.Persistence.DataPath + + schemaParser := schema.NewParser(appState.Cluster, vectorIndex.ParseAndValidateConfig, migrator, appState.Modules, appState.ServerConfig.Config.DefaultQuantization) + + remoteClientFactory := func(ctx context.Context, address string) (copier.FileReplicationServiceClient, error) { + grpcConfig := appState.ServerConfig.Config.GRPC + authConfig := appState.ServerConfig.Config.Cluster.AuthConfig + + var creds credentials.TransportCredentials + + useTLS := len(grpcConfig.CertFile) > 0 + + if useTLS { + creds = credentials.NewClientTLSFromCert(nil, "") + } else { + creds = insecure.NewCredentials() // use insecure credentials for testing + } + + clientConn, err := grpc.NewClient( + address, + grpc.WithTransportCredentials(creds), + ) + if err != nil { + return nil, fmt.Errorf("failed to create gRPC client connection: %w", err) + } + + return copier.NewFileReplicationServiceClient(clientConn, authConfig), nil + } + + replicaCopier := copier.New(remoteClientFactory, remoteIndexClient, appState.Cluster, + appState.ServerConfig.Config.ReplicationEngineFileCopyWorkers, dataPath, appState.DB, nodeName, appState.Logger) + + rConfig := rCluster.Config{ + WorkDir: filepath.Join(dataPath, config.DefaultRaftDir), + NodeID: nodeName, + Host: addrs[0], + RaftPort: appState.ServerConfig.Config.Raft.Port, + RPCPort: appState.ServerConfig.Config.Raft.InternalRPCPort, + RaftRPCMessageMaxSize: appState.ServerConfig.Config.Raft.RPCMessageMaxSize, + BootstrapTimeout: appState.ServerConfig.Config.Raft.BootstrapTimeout, + BootstrapExpect: appState.ServerConfig.Config.Raft.BootstrapExpect, + HeartbeatTimeout: appState.ServerConfig.Config.Raft.HeartbeatTimeout, + ElectionTimeout: appState.ServerConfig.Config.Raft.ElectionTimeout, + 
LeaderLeaseTimeout: appState.ServerConfig.Config.Raft.LeaderLeaseTimeout, + TimeoutsMultiplier: appState.ServerConfig.Config.Raft.TimeoutsMultiplier, + SnapshotInterval: appState.ServerConfig.Config.Raft.SnapshotInterval, + SnapshotThreshold: appState.ServerConfig.Config.Raft.SnapshotThreshold, + TrailingLogs: appState.ServerConfig.Config.Raft.TrailingLogs, + ConsistencyWaitTimeout: appState.ServerConfig.Config.Raft.ConsistencyWaitTimeout, + MetadataOnlyVoters: appState.ServerConfig.Config.Raft.MetadataOnlyVoters, + EnableOneNodeRecovery: appState.ServerConfig.Config.Raft.EnableOneNodeRecovery, + ForceOneNodeRecovery: appState.ServerConfig.Config.Raft.ForceOneNodeRecovery, + DB: nil, + Parser: schemaParser, + NodeNameToPortMap: server2port, + NodeSelector: appState.Cluster, + Logger: appState.Logger, + IsLocalHost: appState.ServerConfig.Config.Cluster.Localhost, + LoadLegacySchema: schemaRepo.LoadLegacySchema, + SaveLegacySchema: schemaRepo.SaveLegacySchema, + SentryEnabled: appState.ServerConfig.Config.Sentry.Enabled, + AuthzController: appState.AuthzController, + RBAC: appState.RBAC, + DynamicUserController: appState.APIKey.Dynamic, + ReplicaCopier: replicaCopier, + AuthNConfig: appState.ServerConfig.Config.Authentication, + ReplicationEngineMaxWorkers: appState.ServerConfig.Config.ReplicationEngineMaxWorkers, + DistributedTasks: appState.ServerConfig.Config.DistributedTasks, + ReplicaMovementDisabled: appState.ServerConfig.Config.ReplicaMovementDisabled, + ReplicaMovementMinimumAsyncWait: appState.ServerConfig.Config.ReplicaMovementMinimumAsyncWait, + } + for _, name := range appState.ServerConfig.Config.Raft.Join[:rConfig.BootstrapExpect] { + if strings.Contains(name, rConfig.NodeID) { + rConfig.Voter = true + break + } + } + + appState.ClusterService = rCluster.New(rConfig, appState.AuthzController, appState.AuthzSnapshotter, appState.GRPCServerMetrics) + migrator.SetCluster(appState.ClusterService.Raft) + + executor := schema.NewExecutor(migrator, + 
appState.ClusterService.SchemaReader(), + appState.Logger, backup.RestoreClassDir(dataPath), + ) + + offloadmod, _ := appState.Modules.OffloadBackend("offload-s3") + + collectionRetrievalStrategyConfigFlag := configRuntime.NewFeatureFlag( + configRuntime.CollectionRetrievalStrategyLDKey, + string(configRuntime.LeaderOnly), + appState.LDIntegration, + configRuntime.CollectionRetrievalStrategyEnvVariable, + appState.Logger, + ) + + schemaManager, err := schema.NewManager(migrator, + appState.ClusterService.Raft, + appState.ClusterService.SchemaReader(), + schemaRepo, + appState.Logger, appState.Authorizer, &appState.ServerConfig.Config.SchemaHandlerConfig, appState.ServerConfig.Config, + vectorIndex.ParseAndValidateConfig, appState.Modules, inverted.ValidateConfig, + appState.Modules, appState.Cluster, + offloadmod, *schemaParser, + collectionRetrievalStrategyConfigFlag, + ) + if err != nil { + appState.Logger. + WithField("action", "startup").WithError(err). + Fatal("could not initialize schema manager") + os.Exit(1) + } + + appState.SchemaManager = schemaManager + repo.SetNodeSelector(appState.ClusterService.NodeSelector()) + repo.SetSchemaReader(appState.ClusterService.SchemaReader()) + repo.SetReplicationFSM(appState.ClusterService.ReplicationFsm()) + repo.SetSchemaGetter(appState.SchemaManager) + + // initialize needed services after all components are ready + postInitModules(appState) + + appState.RemoteIndexIncoming = sharding.NewRemoteIndexIncoming(repo, appState.ClusterService.SchemaReader(), appState.Modules) + appState.RemoteNodeIncoming = sharding.NewRemoteNodeIncoming(repo) + appState.RemoteReplicaIncoming = replica.NewRemoteReplicaIncoming(repo, appState.ClusterService.SchemaReader()) + + backupManager := backup.NewHandler(appState.Logger, appState.Authorizer, + schemaManager, repo, appState.Modules, appState.RBAC, appState.APIKey.Dynamic) + appState.BackupManager = backupManager + + internalServer := clusterapi.NewServer(appState) + 
appState.InternalServer = internalServer + enterrors.GoWrapper(func() { appState.InternalServer.Serve() }, appState.Logger) + + vectorRepo.SetSchemaGetter(schemaManager) + explorer.SetSchemaGetter(schemaManager) + appState.Modules.SetSchemaGetter(schemaManager) + + appState.Traverser = traverser.NewTraverser(appState.ServerConfig, + appState.Logger, appState.Authorizer, vectorRepo, explorer, schemaManager, + appState.Modules, traverser.NewMetrics(appState.Metrics), + appState.ServerConfig.Config.MaximumConcurrentGetRequests) + + updateSchemaCallback := makeUpdateSchemaCall(appState) + executor.RegisterSchemaUpdateCallback(updateSchemaCallback) + + bitmapBufPool, bitmapBufPoolClose := configureBitmapBufPool(appState) + repo.WithBitmapBufPool(bitmapBufPool, bitmapBufPoolClose) + + var reindexCtx context.Context + reindexCtx, appState.ReindexCtxCancel = context.WithCancelCause(context.Background()) + reindexer := configureReindexer(appState, reindexCtx) + repo.WithReindexer(reindexer) + + metaStoreReadyErr := fmt.Errorf("meta store ready") + metaStoreFailedErr := fmt.Errorf("meta store failed") + storeReadyCtx, storeReadyCancel := context.WithCancelCause(context.Background()) + enterrors.GoWrapper(func() { + if err := appState.ClusterService.Open(context.Background(), executor); err != nil { + appState.Logger. + WithField("action", "startup"). + WithError(err). + Fatal("could not open cloud meta store") + storeReadyCancel(metaStoreFailedErr) + } else { + storeReadyCancel(metaStoreReadyErr) + } + }, appState.Logger) + + // TODO-RAFT: refactor remove this sleep + // this sleep was used to block GraphQL and give time to RAFT to start. 
+ time.Sleep(2 * time.Second) + + appState.AutoSchemaManager = objects.NewAutoSchemaManager(schemaManager, vectorRepo, appState.ServerConfig, appState.Authorizer, + appState.Logger, prometheus.DefaultRegisterer) + batchManager := objects.NewBatchManager(vectorRepo, appState.Modules, + schemaManager, appState.ServerConfig, appState.Logger, + appState.Authorizer, appState.Metrics, appState.AutoSchemaManager) + appState.BatchManager = batchManager + + err = migrator.AdjustFilterablePropSettings(ctx) + if err != nil { + appState.Logger. + WithError(err). + WithField("action", "adjustFilterablePropSettings"). + Fatal("migration failed") + os.Exit(1) + } + + // FIXME to avoid import cycles, tasks are passed as strings + reindexTaskNamesWithArgs := map[string]any{} + reindexFinished := make(chan error, 1) + + if appState.ServerConfig.Config.ReindexSetToRoaringsetAtStartup { + reindexTaskNamesWithArgs["ShardInvertedReindexTaskSetToRoaringSet"] = nil + } + if appState.ServerConfig.Config.IndexMissingTextFilterableAtStartup { + reindexTaskNamesWithArgs["ShardInvertedReindexTaskMissingTextFilterable"] = nil + } + if len(appState.ServerConfig.Config.ReindexIndexesAtStartup) > 0 { + reindexTaskNamesWithArgs["ShardInvertedReindexTask_SpecifiedIndex"] = appState.ServerConfig.Config.ReindexIndexesAtStartup + } + + if len(reindexTaskNamesWithArgs) > 0 { + // start reindexing inverted indexes (if requested by user) in the background + // allowing db to complete api configuration and start handling requests + enterrors.GoWrapper(func() { + // wait until meta store is ready, as reindex tasks needs schema + <-storeReadyCtx.Done() + if errors.Is(context.Cause(storeReadyCtx), metaStoreReadyErr) { + appState.Logger. + WithField("action", "startup"). 
+ Info("Reindexing inverted indexes") + reindexFinished <- migrator.InvertedReindex(reindexCtx, reindexTaskNamesWithArgs) + } + }, appState.Logger) + } + + configureServer = makeConfigureServer(appState) + + // Add dimensions to all the objects in the database, if requested by the user + if appState.ServerConfig.Config.ReindexVectorDimensionsAtStartup && repo.GetConfig().TrackVectorDimensions { + appState.Logger. + WithField("action", "startup"). + Info("Reindexing dimensions") + migrator.RecalculateVectorDimensions(ctx) + } + + // Add recount properties of all the objects in the database, if requested by the user + if appState.ServerConfig.Config.RecountPropertiesAtStartup { + migrator.RecountProperties(ctx) + } + + if appState.ServerConfig.Config.DistributedTasks.Enabled { + appState.DistributedTaskScheduler = distributedtask.NewScheduler(distributedtask.SchedulerParams{ + CompletionRecorder: appState.ClusterService.Raft, + TasksLister: appState.ClusterService.Raft, + Providers: map[string]distributedtask.Provider{}, + Logger: appState.Logger, + MetricsRegisterer: metricsRegisterer, + LocalNode: appState.Cluster.LocalName(), + TickInterval: appState.ServerConfig.Config.DistributedTasks.SchedulerTickInterval, + + // Using a single global value for now to keep it simple. If there is a need + // this can be changed to provide a value per provider. + CompletedTaskTTL: appState.ServerConfig.Config.DistributedTasks.CompletedTaskTTL, + }) + enterrors.GoWrapper(func() { + // Do not launch scheduler until the full RAFT state is restored to avoid needlessly starting + // and stopping tasks. + // Additionally, not-ready RAFT state could lead to lose of local task metadata. + <-storeReadyCtx.Done() + if !errors.Is(context.Cause(storeReadyCtx), metaStoreReadyErr) { + return + } + if err = appState.DistributedTaskScheduler.Start(ctx); err != nil { + appState.Logger.WithError(err).WithField("action", "startup"). 
+ Error("failed to start distributed task scheduler") + } + }, appState.Logger) + } + + return appState +} + +func configureBitmapBufPool(appState *state.State) (pool roaringset.BitmapBufPool, close func()) { + return roaringset.NewBitmapBufPoolDefault(appState.Logger, appState.Metrics, + appState.ServerConfig.Config.QueryBitmapBufsMaxBufSize, + appState.ServerConfig.Config.QueryBitmapBufsMaxMemory) +} + +func configureReindexer(appState *state.State, reindexCtx context.Context) db.ShardReindexerV3 { + tasks := []db.ShardReindexTaskV3{} + logger := appState.Logger.WithField("action", "reindexV3") + cfg := appState.ServerConfig.Config + concurrency := concurrency.TimesFloatNUMCPU(cfg.ReindexerGoroutinesFactor) + + if cfg.ReindexMapToBlockmaxAtStartup { + tasks = append(tasks, db.NewShardInvertedReindexTaskMapToBlockmax( + logger, + cfg.ReindexMapToBlockmaxConfig.SwapBuckets, + cfg.ReindexMapToBlockmaxConfig.UnswapBuckets, + cfg.ReindexMapToBlockmaxConfig.TidyBuckets, + cfg.ReindexMapToBlockmaxConfig.ReloadShards, + cfg.ReindexMapToBlockmaxConfig.Rollback, + cfg.ReindexMapToBlockmaxConfig.ConditionalStart, + time.Second*time.Duration(cfg.ReindexMapToBlockmaxConfig.ProcessingDurationSeconds), + time.Second*time.Duration(cfg.ReindexMapToBlockmaxConfig.PauseDurationSeconds), + time.Millisecond*time.Duration(cfg.ReindexMapToBlockmaxConfig.PerObjectDelayMilliseconds), + concurrency, cfg.ReindexMapToBlockmaxConfig.Selected, appState.SchemaManager, + )) + } + + if len(tasks) == 0 { + return db.NewShardReindexerV3Noop() + } + + reindexer := db.NewShardReindexerV3(reindexCtx, logger, appState.DB.GetIndex, concurrency) + for i := range tasks { + reindexer.RegisterTask(tasks[i]) + } + reindexer.Init() + return reindexer +} + +func parseNode2Port(appState *state.State) (m map[string]int, err error) { + m = make(map[string]int, len(appState.ServerConfig.Config.Raft.Join)) + for _, raftNamePort := range appState.ServerConfig.Config.Raft.Join { + np := strings.Split(raftNamePort, 
":") + if np[0] == appState.Cluster.LocalName() { + m[np[0]] = appState.ServerConfig.Config.Raft.Port + continue + } + if m[np[0]], err = strconv.Atoi(np[1]); err != nil { + return m, fmt.Errorf("expect integer as raft port: got %s:: %w", raftNamePort, err) + } + } + + return m, nil +} + +// parseVotersNames parses names of all voters. +// If we reach this point, we assume that the configuration is valid +func parseVotersNames(cfg config.Raft) (m map[string]struct{}) { + m = make(map[string]struct{}, cfg.BootstrapExpect) + for _, raftNamePort := range cfg.Join[:cfg.BootstrapExpect] { + m[strings.Split(raftNamePort, ":")[0]] = struct{}{} + } + return m +} + +func configureAPI(api *operations.WeaviateAPI) http.Handler { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, 60*time.Minute) + defer cancel() + + appState := MakeAppState(ctx, connectorOptionGroup) + + appState.Logger.WithFields(logrus.Fields{ + "server_version": config.ServerVersion, + "version": build.Version, + }).Infof("configured versions") + + api.ServeError = openapierrors.ServeError + + api.JSONConsumer = runtime.JSONConsumer() + + api.OidcAuth = composer.New( + appState.ServerConfig.Config.Authentication, + appState.APIKey, appState.OIDC) + + api.Logger = func(msg string, args ...interface{}) { + appState.Logger.WithFields(logrus.Fields{"action": "restapi_management", "version": build.Version}).Infof(msg, args...) 
+ } + + classifier := classification.New(appState.SchemaManager, appState.ClassificationRepo, appState.DB, // the DB is the vectorrepo + appState.Authorizer, + appState.Logger, appState.Modules) + + setupAuthnHandlers(api, + appState.ClusterService.Raft, + appState.ServerConfig.Config.Authorization.Rbac, + appState.Logger) + authz.SetupHandlers(api, + appState.ClusterService.Raft, + appState.SchemaManager, + appState.ServerConfig.Config.Authentication.APIKey, + appState.ServerConfig.Config.Authentication.OIDC, + appState.ServerConfig.Config.Authorization.Rbac, + appState.Metrics, + appState.Authorizer, + appState.Logger) + + replicationHandlers.SetupHandlers(!appState.ServerConfig.Config.ReplicaMovementDisabled, api, appState.ClusterService.Raft, appState.Metrics, appState.Authorizer, appState.Logger) + + remoteDbUsers := clients.NewRemoteUser(appState.ClusterHttpClient, appState.Cluster) + db_users.SetupHandlers(api, appState.ClusterService.Raft, appState.Authorizer, appState.ServerConfig.Config.Authentication, appState.ServerConfig.Config.Authorization, remoteDbUsers, appState.SchemaManager, appState.Logger) + + setupSchemaHandlers(api, appState.SchemaManager, appState.Metrics, appState.Logger) + setupAliasesHandlers(api, appState.SchemaManager, appState.Metrics, appState.Logger) + objectsManager := objects.NewManager(appState.SchemaManager, appState.ServerConfig, appState.Logger, + appState.Authorizer, appState.DB, appState.Modules, + objects.NewMetrics(appState.Metrics), appState.MemWatch, appState.AutoSchemaManager) + setupObjectHandlers(api, objectsManager, appState.ServerConfig.Config, appState.Logger, + appState.Modules, appState.Metrics) + setupObjectBatchHandlers(api, appState.BatchManager, appState.Metrics, appState.Logger) + setupGraphQLHandlers(api, appState, appState.SchemaManager, appState.ServerConfig.Config.DisableGraphQL, + appState.Metrics, appState.Logger) + setupMiscHandlers(api, appState.ServerConfig, appState.Modules, + appState.Metrics, 
appState.Logger) + setupClassificationHandlers(api, classifier, appState.Metrics, appState.Logger) + backupScheduler := startBackupScheduler(appState) + setupBackupHandlers(api, backupScheduler, appState.Metrics, appState.Logger) + setupNodesHandlers(api, appState.SchemaManager, appState.DB, appState) + if appState.ServerConfig.Config.DistributedTasks.Enabled { + setupDistributedTasksHandlers(api, appState.Authorizer, appState.ClusterService.Raft) + } + + var grpcInstrument []grpc.ServerOption + if appState.ServerConfig.Config.Monitoring.Enabled { + grpcInstrument = monitoring.InstrumentGrpc(appState.GRPCServerMetrics) + } + + grpcShutdown := batch.NewShutdown(context.Background()) + grpcServer := createGrpcServer(appState, grpcShutdown, grpcInstrument...) + + setupMiddlewares := makeSetupMiddlewares(appState) + setupGlobalMiddleware := makeSetupGlobalMiddleware(appState, api.Context()) + + telemeter := telemetry.New(appState.DB, appState.SchemaManager, appState.Logger) + if telemetryEnabled(appState) { + enterrors.GoWrapper(func() { + if err := telemeter.Start(context.Background()); err != nil { + appState.Logger. + WithField("action", "startup"). 
+ Errorf("telemetry failed to start: %s", err.Error()) + } + }, appState.Logger) + } + if entconfig.Enabled(os.Getenv("ENABLE_CLEANUP_UNFINISHED_BACKUPS")) { + enterrors.GoWrapper( + func() { + // cleanup unfinished backups on startup + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + backupScheduler.CleanupUnfinishedBackups(ctx) + }, appState.Logger) + } + + api.PreServerShutdown = func() { + grpcShutdown.Drain(appState.Logger) + } + + api.ServerShutdown = func() { + if telemetryEnabled(appState) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + // must be shutdown before the db, to ensure the + // termination payload contains the correct + // object count + if err := telemeter.Stop(ctx); err != nil { + appState.Logger.WithField("action", "stop_telemetry"). + Errorf("failed to stop telemetry: %s", err.Error()) + } + } + + // stop reindexing on server shutdown + appState.ReindexCtxCancel(fmt.Errorf("server shutdown")) + + if appState.DistributedTaskScheduler != nil { + appState.DistributedTaskScheduler.Close() + } + + // gracefully stop gRPC server + grpcServer.GracefulStop() + + if appState.ServerConfig.Config.Sentry.Enabled { + sentry.Flush(2 * time.Second) + } + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + if err := appState.InternalServer.Close(ctx); err != nil { + appState.Logger. + WithError(err). + WithField("action", "shutdown"). + Errorf("failed to gracefully shutdown") + } + + if err := appState.ClusterService.Close(ctx); err != nil { + appState.Logger. + WithError(err). + WithField("action", "shutdown"). + Errorf("failed to gracefully shutdown") + } + + if err := appState.APIKey.Dynamic.Close(); err != nil { + appState.Logger. + WithError(err). + WithField("action", "shutdown db users"). + Errorf("failed to gracefully shutdown") + } + + if err := appState.Modules.Close(); err != nil { + appState.Logger. 
+ WithError(err). + WithField("action", "shutdown modules"). + Errorf("failed to gracefully shutdown") + } + } + + startGrpcServer(grpcServer, appState) + + return setupGlobalMiddleware(api.Serve(setupMiddlewares)) +} + +func startBackupScheduler(appState *state.State) *backup.Scheduler { + backupScheduler := backup.NewScheduler( + appState.Authorizer, + clients.NewClusterBackups(appState.ClusterHttpClient), + appState.DB, appState.Modules, + membership{appState.Cluster, appState.ClusterService}, + appState.SchemaManager, + appState.Logger) + return backupScheduler +} + +// TODO: Split up and don't write into global variables. Instead return an appState +func startupRoutine(ctx context.Context, options *swag.CommandLineOptionsGroup) *state.State { + appState := &state.State{} + + logger := logger() + appState.Logger = logger + + logger.WithField("action", "startup").WithField("startup_time_left", timeTillDeadline(ctx)). + Debug("created startup context, nothing done so far") + + ldInteg, err := configRuntime.ConfigureLDIntegration() + if err != nil { + logger.WithField("action", "startup").Infof("Feature flag LD integration disabled: %s", err) + } + appState.LDIntegration = ldInteg + // Load the config using the flags + serverConfig := &config.WeaviateConfig{} + appState.ServerConfig = serverConfig + err = serverConfig.LoadConfig(options, logger) + if err != nil { + logger.WithField("action", "startup").WithError(err).Error("could not load config") + logger.Exit(1) + } + dataPath := serverConfig.Config.Persistence.DataPath + if err := os.MkdirAll(dataPath, 0o777); err != nil { + logger.WithField("action", "startup"). + WithField("path", dataPath).Error("cannot create data directory") + logger.Exit(1) + } + + monitoring.InitConfig(serverConfig.Config.Monitoring) + + if serverConfig.Config.DisableGraphQL { + logger.WithFields(logrus.Fields{ + "action": "startup", + "disable_graphql": true, + }).Warnf("GraphQL API disabled, relying only on gRPC API for querying. 
" + + "This is considered experimental and will likely experience breaking changes " + + "before reaching general availability") + } + + logger.WithFields(logrus.Fields{ + "action": "startup", + "default_vectorizer_module": serverConfig.Config.DefaultVectorizerModule, + }).Infof("the default vectorizer modules is set to %q, as a result all new "+ + "schema classes without an explicit vectorizer setting, will use this "+ + "vectorizer", serverConfig.Config.DefaultVectorizerModule) + + logger.WithFields(logrus.Fields{ + "action": "startup", + "auto_schema_enabled": serverConfig.Config.AutoSchema.Enabled, + }).Infof("auto schema enabled setting is set to \"%v\"", serverConfig.Config.AutoSchema.Enabled) + + logger.WithField("action", "startup").WithField("startup_time_left", timeTillDeadline(ctx)). + Debug("config loaded") + + appState.OIDC = configureOIDC(appState) + appState.APIKey = configureAPIKey(appState) + appState.APIKeyRemote = apikey.NewRemoteApiKey(appState.APIKey) + appState.AnonymousAccess = configureAnonymousAccess(appState) + if err = configureAuthorizer(appState); err != nil { + logger.WithField("action", "startup").WithField("error", err).Error("cannot configure authorizer") + logger.Exit(1) + } + + logger.WithField("action", "startup").WithField("startup_time_left", timeTillDeadline(ctx)). + Debug("configured OIDC and anonymous access client") + + logger.WithField("action", "startup").WithField("startup_time_left", timeTillDeadline(ctx)). + Debug("initialized schema") + + var nonStorageNodes map[string]struct{} + if cfg := serverConfig.Config.Raft; cfg.MetadataOnlyVoters { + nonStorageNodes = parseVotersNames(cfg) + } + + clusterState, err := cluster.Init(serverConfig.Config.Cluster, serverConfig.Config.GRPC.Port, serverConfig.Config.Raft.BootstrapExpect, dataPath, nonStorageNodes, logger) + if err != nil { + logger.WithField("action", "startup").WithError(err). 
+ Error("could not init cluster state") + logger.Exit(1) + } + + appState.Cluster = clusterState + appState.Logger. + WithField("action", "startup"). + Debug("startup routine complete") + + return appState +} + +// logger does not parse the regular config object, as logging needs to be +// configured before the configuration is even loaded/parsed. We are thus +// "manually" reading the desired env vars and set reasonable defaults if they +// are not set. +// +// Defaults to log level info and json format +func logger() *logrus.Logger { + logger := logrus.New() + logger.SetFormatter(NewWeaviateTextFormatter()) + + if os.Getenv("LOG_FORMAT") != "text" { + logger.SetFormatter(NewWeaviateJSONFormatter()) + } + logLevelStr := os.Getenv("LOG_LEVEL") + level, err := logLevelFromString(logLevelStr) + if errors.Is(err, errlogLevelNotRecognized) { + logger.WithField("log_level_env", logLevelStr).Warn("log level not recognized, defaulting to info") + level = logrus.InfoLevel + } + logger.SetLevel(level) + return logger +} + +// everything hard-coded right now, to be made dynamic (from go plugins later) +func registerModules(appState *state.State) error { + appState.Logger. + WithField("action", "startup"). 
+ Debug("start registering modules") + + appState.Modules = modules.NewProvider(appState.Logger, appState.ServerConfig.Config) + + // Default modules + defaultVectorizers := []string{ + modtext2vecaws.Name, + modmulti2veccohere.Name, + modcohere.Name, + moddatabricks.Name, + modtext2vecgoogle.Name, + modmulti2vecgoogle.Name, + modhuggingface.Name, + modjinaai.Name, + modmulti2vecjinaai.Name, + modmistral.Name, + modtext2vecoctoai.Name, + modopenai.Name, + modvoyageai.Name, + modmulti2vecvoyageai.Name, + modweaviateembed.Name, + modtext2multivecjinaai.Name, + modnvidia.Name, + modmulti2vecnvidia.Name, + modmulti2multivecjinaai.Name, + } + defaultGenerative := []string{ + modgenerativeanthropic.Name, + modgenerativeanyscale.Name, + modgenerativeaws.Name, + modgenerativecohere.Name, + modgenerativedatabricks.Name, + modgenerativefriendliai.Name, + modgenerativegoogle.Name, + modgenerativemistral.Name, + modgenerativenvidia.Name, + modgenerativeoctoai.Name, + modgenerativeopenai.Name, + modgenerativexai.Name, + } + defaultOthers := []string{ + modrerankercohere.Name, + modrerankervoyageai.Name, + modrerankerjinaai.Name, + modrerankernvidia.Name, + } + + defaultModules := append(defaultVectorizers, defaultGenerative...) + defaultModules = append(defaultModules, defaultOthers...) + + var modules []string + + if len(appState.ServerConfig.Config.EnableModules) > 0 { + modules = strings.Split(appState.ServerConfig.Config.EnableModules, ",") + } + + if appState.ServerConfig.Config.EnableApiBasedModules { + // Concatenate modules with default modules + modules = append(modules, defaultModules...) + } + + enabledModules := map[string]bool{} + for _, module := range modules { + enabledModules[strings.TrimSpace(module)] = true + } + + if _, ok := enabledModules[modt2vbigram.Name]; ok { + appState.Modules.Register(modt2vbigram.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modt2vbigram.Name). 
+ Debug("enabled module") + } + + if _, ok := enabledModules[modcontextionary.Name]; ok { + appState.Modules.Register(modcontextionary.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modcontextionary.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modt2vmodel2vec.Name]; ok { + appState.Modules.Register(modt2vmodel2vec.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modt2vmodel2vec.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modtransformers.Name]; ok { + appState.Modules.Register(modtransformers.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modtransformers.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modgpt4all.Name]; ok { + appState.Modules.Register(modgpt4all.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgpt4all.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modrerankervoyageai.Name]; ok { + appState.Modules.Register(modrerankervoyageai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modrerankervoyageai.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modrerankertransformers.Name]; ok { + appState.Modules.Register(modrerankertransformers.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modrerankertransformers.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modrerankercohere.Name]; ok { + appState.Modules.Register(modrerankercohere.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modrerankercohere.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modrerankerdummy.Name]; ok { + appState.Modules.Register(modrerankerdummy.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modrerankerdummy.Name). 
+ Debug("enabled module") + } + + if _, ok := enabledModules[modrerankerjinaai.Name]; ok { + appState.Modules.Register(modrerankerjinaai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modrerankerjinaai.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modrerankernvidia.Name]; ok { + appState.Modules.Register(modrerankernvidia.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modrerankernvidia.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modqna.Name]; ok { + appState.Modules.Register(modqna.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modqna.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modsum.Name]; ok { + appState.Modules.Register(modsum.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modsum.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modimage.Name]; ok { + appState.Modules.Register(modimage.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modimage.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modner.Name]; ok { + appState.Modules.Register(modner.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modner.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modspellcheck.Name]; ok { + appState.Modules.Register(modspellcheck.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modspellcheck.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modclip.Name]; ok { + appState.Modules.Register(modclip.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modclip.Name). 
+ Debug("enabled module") + } + + _, enabledMulti2VecGoogle := enabledModules[modmulti2vecgoogle.Name] + _, enabledMulti2VecPaLM := enabledModules[modmulti2vecgoogle.LegacyName] + if enabledMulti2VecGoogle || enabledMulti2VecPaLM { + appState.Modules.Register(modmulti2vecgoogle.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modmulti2vecgoogle.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modmulti2veccohere.Name]; ok { + appState.Modules.Register(modmulti2veccohere.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modmulti2veccohere.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modmulti2vecjinaai.Name]; ok { + appState.Modules.Register(modmulti2vecjinaai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modmulti2vecjinaai.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modmulti2multivecjinaai.Name]; ok { + appState.Modules.Register(modmulti2multivecjinaai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modmulti2multivecjinaai.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modmulti2vecnvidia.Name]; ok { + appState.Modules.Register(modmulti2vecnvidia.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modmulti2vecnvidia.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modnvidia.Name]; ok { + appState.Modules.Register(modnvidia.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modnvidia.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modmulti2vecvoyageai.Name]; ok { + appState.Modules.Register(modmulti2vecvoyageai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modmulti2vecvoyageai.Name). 
+ Debug("enabled module") + } + + if _, ok := enabledModules[modopenai.Name]; ok { + appState.Modules.Register(modopenai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modopenai.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[moddatabricks.Name]; ok { + appState.Modules.Register(moddatabricks.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", moddatabricks.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modqnaopenai.Name]; ok { + appState.Modules.Register(modqnaopenai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modqnaopenai.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modgenerativecohere.Name]; ok { + appState.Modules.Register(modgenerativecohere.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgenerativecohere.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modgenerativefriendliai.Name]; ok { + appState.Modules.Register(modgenerativefriendliai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgenerativefriendliai.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modgenerativenvidia.Name]; ok { + appState.Modules.Register(modgenerativenvidia.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgenerativenvidia.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modgenerativemistral.Name]; ok { + appState.Modules.Register(modgenerativemistral.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgenerativemistral.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modgenerativeopenai.Name]; ok { + appState.Modules.Register(modgenerativeopenai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgenerativeopenai.Name). 
+ Debug("enabled module") + } + + if _, ok := enabledModules[modgenerativexai.Name]; ok { + appState.Modules.Register(modgenerativexai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgenerativexai.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modgenerativedatabricks.Name]; ok { + appState.Modules.Register(modgenerativedatabricks.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgenerativedatabricks.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modgenerativeollama.Name]; ok { + appState.Modules.Register(modgenerativeollama.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgenerativeollama.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modgenerativedummy.Name]; ok { + appState.Modules.Register(modgenerativedummy.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgenerativedummy.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modgenerativeaws.Name]; ok { + appState.Modules.Register(modgenerativeaws.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgenerativeaws.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modhuggingface.Name]; ok { + appState.Modules.Register(modhuggingface.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modhuggingface.Name). + Debug("enabled module") + } + + _, enabledGenerativeGoogle := enabledModules[modgenerativegoogle.Name] + _, enabledGenerativePaLM := enabledModules[modgenerativegoogle.LegacyName] + if enabledGenerativeGoogle || enabledGenerativePaLM { + appState.Modules.Register(modgenerativegoogle.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgenerativegoogle.Name). 
+ Debug("enabled module") + } + + if _, ok := enabledModules[modgenerativeanyscale.Name]; ok { + appState.Modules.Register(modgenerativeanyscale.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgenerativeanyscale.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modgenerativeanthropic.Name]; ok { + appState.Modules.Register(modgenerativeanthropic.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgenerativeanthropic.Name). + Debug("enabled module") + } + + _, enabledText2vecGoogle := enabledModules[modtext2vecgoogle.Name] + _, enabledText2vecPaLM := enabledModules[modtext2vecgoogle.LegacyName] + if enabledText2vecGoogle || enabledText2vecPaLM { + appState.Modules.Register(modtext2vecgoogle.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modtext2vecgoogle.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modtext2vecaws.Name]; ok { + appState.Modules.Register(modtext2vecaws.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modtext2vecaws.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modstgfs.Name]; ok { + appState.Modules.Register(modstgfs.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modstgfs.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modstgs3.Name]; ok { + appState.Modules.Register(modstgs3.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modstgs3.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modsloads3.Name]; ok { + appState.Modules.Register(modsloads3.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modsloads3.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modstggcs.Name]; ok { + appState.Modules.Register(modstggcs.New()) + appState.Logger. + WithField("action", "startup"). 
+ WithField("module", modstggcs.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modstgazure.Name]; ok { + appState.Modules.Register(modstgazure.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modstgazure.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modcentroid.Name]; ok { + appState.Modules.Register(modcentroid.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modcentroid.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modcohere.Name]; ok { + appState.Modules.Register(modcohere.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modcohere.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modvoyageai.Name]; ok { + appState.Modules.Register(modvoyageai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modvoyageai.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modmistral.Name]; ok { + appState.Modules.Register(modmistral.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modmistral.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modbind.Name]; ok { + appState.Modules.Register(modbind.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modbind.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modjinaai.Name]; ok { + appState.Modules.Register(modjinaai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modjinaai.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modollama.Name]; ok { + appState.Modules.Register(modollama.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modollama.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modweaviateembed.Name]; ok { + appState.Modules.Register(modweaviateembed.New()) + appState.Logger. 
+ WithField("action", "startup"). + WithField("module", modweaviateembed.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modgenerativeoctoai.Name]; ok { + appState.Modules.Register(modgenerativeoctoai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modgenerativeoctoai.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modtext2vecoctoai.Name]; ok { + appState.Modules.Register(modtext2vecoctoai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modtext2vecoctoai.Name). + Debug("enabled module") + } + + _, enabledText2MultivecJinaAI := enabledModules[modtext2multivecjinaai.Name] + _, enabledText2ColBERTJinaAI := enabledModules[modtext2multivecjinaai.LegacyName] + if enabledText2MultivecJinaAI || enabledText2ColBERTJinaAI { + appState.Modules.Register(modtext2multivecjinaai.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modtext2multivecjinaai.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modusagegcs.Name]; ok { + appState.Modules.Register(modusagegcs.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modusagegcs.Name). + Debug("enabled module") + } + + if _, ok := enabledModules[modusages3.Name]; ok { + appState.Modules.Register(modusages3.New()) + appState.Logger. + WithField("action", "startup"). + WithField("module", modusages3.Name). + Debug("enabled module") + } + + appState.Logger. + WithField("action", "startup"). 
+ Debug("completed registering modules") + + return nil +} + +func postInitModules(appState *state.State) { + // Initialize usage service after all components are ready + if appState.Modules.UsageEnabled() { + appState.Logger.WithField("action", "startup").Debug("initializing usage service") + + // Initialize usage service for GCS + if usageGCSModule := appState.Modules.GetByName(modusagegcs.Name); usageGCSModule != nil { + if usageModuleWithService, ok := usageGCSModule.(modulecapabilities.ModuleWithUsageService); ok { + usageService := usage.NewService(appState.ClusterService.SchemaReader(), appState.DB, appState.Modules, appState.Cluster.LocalName(), usageModuleWithService.Logger()) + usageModuleWithService.SetUsageService(usageService) + } + } + // Initialize usage service for S3 + if usageS3Module := appState.Modules.GetByName(modusages3.Name); usageS3Module != nil { + if usageModuleWithService, ok := usageS3Module.(modulecapabilities.ModuleWithUsageService); ok { + usageService := usage.NewService(appState.SchemaManager, appState.DB, appState.Modules, appState.Cluster.LocalName(), usageModuleWithService.Logger()) + usageModuleWithService.SetUsageService(usageService) + } + } + } +} + +func initModules(ctx context.Context, appState *state.State) error { + storageProvider, err := modulestorage.NewRepo( + appState.ServerConfig.Config.Persistence.DataPath, appState.Logger) + if err != nil { + return errors.Wrap(err, "init storage provider") + } + + // TODO: gh-1481 don't pass entire appState in, but only what's needed. Probably only + // config? + moduleParams := moduletools.NewInitParams(storageProvider, appState, + &appState.ServerConfig.Config, appState.Logger, prometheus.DefaultRegisterer) + + appState.Logger. + WithField("action", "startup"). + Debug("start initializing modules") + if err := appState.Modules.Init(ctx, moduleParams, appState.Logger); err != nil { + return errors.Wrap(err, "init modules") + } + + appState.Logger. 
+ WithField("action", "startup"). + Debug("finished initializing modules") + + return nil +} + +type clientWithAuth struct { + r http.RoundTripper + basicAuth cluster.BasicAuth +} + +func (c clientWithAuth) RoundTrip(r *http.Request) (*http.Response, error) { + r.SetBasicAuth(c.basicAuth.Username, c.basicAuth.Password) + return c.r.RoundTrip(r) +} + +func reasonableHttpClient(authConfig cluster.AuthConfig, minimumInternalTimeout time.Duration) *http.Client { + t := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: minimumInternalTimeout, + KeepAlive: 120 * time.Second, + }).DialContext, + MaxIdleConnsPerHost: 100, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } + + if authConfig.BasicAuth.Enabled() { + return &http.Client{Transport: clientWithAuth{r: t, basicAuth: authConfig.BasicAuth}} + } + return &http.Client{Transport: t} +} + +func setupGoProfiling(config config.Config, logger logrus.FieldLogger) { + if config.Profiling.Disabled { + return + } + + functionsToIgnoreInProfiling := []string{ + "raft", + "http2", + "memberlist", + "selectgo", // various tickers + "cluster", + "rest", + "signal_recv", + "backgroundRead", + "SetupGoProfiling", + "serve", + "Serve", + "batchWorker", + } + http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler(functionsToIgnoreInProfiling...)) + enterrors.GoWrapper(func() { + portNumber := config.Profiling.Port + if portNumber == 0 { + if err := http.ListenAndServe(":6060", nil); err != nil { + logger.Error("error listinening and serve :6060 : %w", err) + } + } else { + http.ListenAndServe(fmt.Sprintf(":%d", portNumber), nil) + } + }, logger) + + if config.Profiling.BlockProfileRate > 0 { + goruntime.SetBlockProfileRate(config.Profiling.BlockProfileRate) + } + + if config.Profiling.MutexProfileFraction > 0 { + goruntime.SetMutexProfileFraction(config.Profiling.MutexProfileFraction) + } +} + 
+func ParseVersionFromSwaggerSpec() string { + spec := struct { + Info struct { + Version string `json:"version"` + } `json:"info"` + }{} + + err := json.Unmarshal(SwaggerJSON, &spec) + if err != nil { + panic(err) + } + + return spec.Info.Version +} + +func limitResources(appState *state.State) { + if os.Getenv("LIMIT_RESOURCES") == "true" { + appState.Logger.Info("Limiting resources: memory: 80%, cores: all but one") + if os.Getenv("GOMAXPROCS") == "" { + // Fetch the number of cores from the cgroups cpuset + // and parse it into an int + cores, err := getCores() + if err == nil { + appState.Logger.WithField("cores", cores). + Warn("GOMAXPROCS not set, and unable to read from cgroups, setting to number of cores") + goruntime.GOMAXPROCS(cores) + } else { + cores = goruntime.NumCPU() - 1 + if cores > 0 { + appState.Logger.WithField("cores", cores). + Warnf("Unable to read from cgroups: %v, setting to max cores to: %v", err, cores) + goruntime.GOMAXPROCS(cores) + } + } + } + + limit, err := memlimit.SetGoMemLimit(0.8) + if err != nil { + appState.Logger.WithError(err).Warnf("Unable to set memory limit from cgroups: %v", err) + // Set memory limit to 90% of the available memory + limit := int64(float64(memory.TotalMemory()) * 0.8) + debug.SetMemoryLimit(limit) + appState.Logger.WithField("limit", limit).Info("Set memory limit based on available memory") + } else { + appState.Logger.WithField("limit", limit).Info("Set memory limit") + } + } else { + appState.Logger.Info("No resource limits set, weaviate will use all available memory and CPU. 
" + + "To limit resources, set LIMIT_RESOURCES=true") + } +} + +func telemetryEnabled(state *state.State) bool { + return !state.ServerConfig.Config.DisableTelemetry +} + +type membership struct { + *cluster.State + raft *rCluster.Service +} + +func (m membership) LeaderID() string { + _, id := m.raft.LeaderWithID() + return id +} + +// initRuntimeOverrides assumes, Configs from envs are loaded before +// initializing runtime overrides. +func initRuntimeOverrides(appState *state.State) { + // Enable runtime config manager + if appState.ServerConfig.Config.RuntimeOverrides.Enabled { + + // Runtimeconfig manager takes of keeping the `registered` config values upto date + registered := &config.WeaviateRuntimeConfig{} + registered.MaximumAllowedCollectionsCount = appState.ServerConfig.Config.SchemaHandlerConfig.MaximumAllowedCollectionsCount + registered.AsyncReplicationDisabled = appState.ServerConfig.Config.Replication.AsyncReplicationDisabled + registered.AutoschemaEnabled = appState.ServerConfig.Config.AutoSchema.Enabled + registered.ReplicaMovementMinimumAsyncWait = appState.ServerConfig.Config.ReplicaMovementMinimumAsyncWait + registered.TenantActivityReadLogLevel = appState.ServerConfig.Config.TenantActivityReadLogLevel + registered.TenantActivityWriteLogLevel = appState.ServerConfig.Config.TenantActivityWriteLogLevel + registered.RevectorizeCheckDisabled = appState.ServerConfig.Config.RevectorizeCheckDisabled + registered.QuerySlowLogEnabled = appState.ServerConfig.Config.QuerySlowLogEnabled + registered.QuerySlowLogThreshold = appState.ServerConfig.Config.QuerySlowLogThreshold + registered.InvertedSorterDisabled = appState.ServerConfig.Config.InvertedSorterDisabled + registered.DefaultQuantization = appState.ServerConfig.Config.DefaultQuantization + + if appState.Modules.UsageEnabled() { + // gcs config + registered.UsageGCSBucket = appState.ServerConfig.Config.Usage.GCSBucket + registered.UsageGCSPrefix = appState.ServerConfig.Config.Usage.GCSPrefix + // s3 
config + registered.UsageS3Bucket = appState.ServerConfig.Config.Usage.S3Bucket + registered.UsageS3Prefix = appState.ServerConfig.Config.Usage.S3Prefix + // common config + registered.UsageScrapeInterval = appState.ServerConfig.Config.Usage.ScrapeInterval + registered.UsageShardJitterInterval = appState.ServerConfig.Config.Usage.ShardJitterInterval + registered.UsagePolicyVersion = appState.ServerConfig.Config.Usage.PolicyVersion + registered.UsageVerifyPermissions = appState.ServerConfig.Config.Usage.VerifyPermissions + } + + hooks := make(map[string]func() error) + if appState.OIDC.Config.Enabled { + registered.OIDCIssuer = appState.OIDC.Config.Issuer + registered.OIDCClientID = appState.OIDC.Config.ClientID + registered.OIDCSkipClientIDCheck = appState.OIDC.Config.SkipClientIDCheck + registered.OIDCUsernameClaim = appState.OIDC.Config.UsernameClaim + registered.OIDCGroupsClaim = appState.OIDC.Config.GroupsClaim + registered.OIDCScopes = appState.OIDC.Config.Scopes + registered.OIDCCertificate = appState.OIDC.Config.Certificate + + hooks["OIDC"] = appState.OIDC.Init + appState.Logger.Log(logrus.InfoLevel, "registereing OIDC runtime overrides hooks") + } + + cm, err := configRuntime.NewConfigManager( + appState.ServerConfig.Config.RuntimeOverrides.Path, + config.ParseRuntimeConfig, + config.UpdateRuntimeConfig, + registered, + appState.ServerConfig.Config.RuntimeOverrides.LoadInterval, + appState.Logger, + hooks, + prometheus.DefaultRegisterer) + if err != nil { + appState.Logger.WithField("action", "startup").WithError(err).Fatal("could not create runtime config manager") + os.Exit(1) + } + + enterrors.GoWrapper(func() { + // NOTE: Not using parent `ctx` because that is getting cancelled in the caller even during startup. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if err := cm.Run(ctx); err != nil { + appState.Logger.WithField("action", "runtime config manager startup ").WithError(err). 
+ Fatal("runtime config manager stopped") + } + }, appState.Logger) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_api_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_api_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a726c5b83012683ba622ea1f6112b61fcfa49d76 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_api_test.go @@ -0,0 +1,51 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build linux +// +build linux + +package rest + +import ( + "testing" +) + +func TestGetCores(t *testing.T) { + tests := []struct { + name string + cpuset string + expected int + wantErr bool + }{ + {"Single core", "0", 1, false}, + {"Multiple cores", "0,1,2,3", 4, false}, + {"Range of cores", "0-3", 4, false}, + {"Multiple ranges", "0-3,5-7", 7, false}, + {"Mixed format", "0-2,4,6-7", 6, false}, + {"Mixed format 2", "0,2-4,7", 5, false}, + {"Empty cpuset", "", 0, false}, + {"Invalid format", "0-2-4", 0, true}, + {"Non-numeric", "a-b", 0, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := calcCPUs(tt.cpuset) + if (err != nil) != tt.wantErr { + t.Errorf("getCores() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.expected { + t.Errorf("getCores() = %v, want %v", got, tt.expected) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_server.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_server.go new file mode 100644 index 0000000000000000000000000000000000000000..6daed724bbe248ee18e98d335aa9d581ff309ec3 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_server.go @@ -0,0 +1,149 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "context" + "fmt" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/adapters/handlers/graphql" + "github.com/weaviate/weaviate/adapters/handlers/graphql/utils" + "github.com/weaviate/weaviate/adapters/handlers/rest/state" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/auth/authentication/anonymous" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + "github.com/weaviate/weaviate/usecases/auth/authentication/oidc" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/adminlist" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/modules" + "github.com/weaviate/weaviate/usecases/traverser" +) + +// As soon as server is initialized but not run yet, this function will be called. +// If you need to modify a config, store server instance to stop it individually later, this is the place. +// This function can be called multiple times, depending on the number of serving schemes. 
+// scheme value will be set accordingly: "http", "https" or "unix" +// +// we will set it through configureAPI() as it needs access to resources that +// are only available within there +var configureServer func(*http.Server, string, string) + +func makeUpdateSchemaCall(appState *state.State) func(aliases schema.SchemaWithAliases) { + return func(updatedSchema schema.SchemaWithAliases) { + if appState.ServerConfig.Config.DisableGraphQL { + return + } + + // Note that this is thread safe; we're running in a single go-routine, because the event + // handlers are called when the SchemaLock is still held. + + gql, err := rebuildGraphQL( + updatedSchema, + appState.Logger, + appState.ServerConfig.Config, + appState.Traverser, + appState.Modules, + appState.Authorizer, + ) + if err != nil && !errors.Is(err, utils.ErrEmptySchema) { + appState.Logger.WithField("action", "graphql_rebuild"). + WithError(err).Error("could not (re)build graphql provider") + } + appState.SetGraphQL(gql) + } +} + +func rebuildGraphQL(updatedSchema schema.SchemaWithAliases, logger logrus.FieldLogger, + config config.Config, traverser *traverser.Traverser, modulesProvider *modules.Provider, authorizer authorization.Authorizer, +) (graphql.GraphQL, error) { + updatedGraphQL, err := graphql.Build(&updatedSchema, traverser, logger, config, modulesProvider, authorizer) + if err != nil { + return nil, err + } + + logger.WithField("action", "graphql_rebuild").Debug("successfully rebuild graphql schema") + return updatedGraphQL, nil +} + +// configureOIDC will always be called, even if OIDC is disabled, this way the +// middleware will still be able to provide the user with a valuable error +// message, even when OIDC is globally disabled. 
+func configureOIDC(appState *state.State) *oidc.Client { + c, err := oidc.New(appState.ServerConfig.Config, appState.Logger) + if err != nil { + appState.Logger.WithField("action", "oidc_init").WithError(err).Fatal("oidc client could not start up") + os.Exit(1) + } + + return c +} + +func configureAPIKey(appState *state.State) *apikey.ApiKey { + c, err := apikey.New(appState.ServerConfig.Config, appState.Logger) + if err != nil { + appState.Logger.WithField("action", "api_keys_init").WithError(err).Fatal("apikey client could not start up") + os.Exit(1) + } + + return c +} + +// configureAnonymousAccess will always be called, even if anonymous access is +// disabled. In this case the middleware provided by this client will block +// anonymous requests +func configureAnonymousAccess(appState *state.State) *anonymous.Client { + return anonymous.New(appState.ServerConfig.Config) +} + +func configureAuthorizer(appState *state.State) error { + if appState.ServerConfig.Config.Authorization.Rbac.Enabled { + // if rbac enforcer enabled, start forcing all requests using the casbin enforcer + rbacController, err := rbac.New( + filepath.Join(appState.ServerConfig.Config.Persistence.DataPath, config.DefaultRaftDir), + appState.ServerConfig.Config.Authorization.Rbac, appState.ServerConfig.Config.Authentication, + appState.Logger) + if err != nil { + return fmt.Errorf("can't init casbin %w", err) + } + + appState.AuthzController = rbacController + appState.AuthzSnapshotter = rbacController + appState.RBAC = rbacController + appState.Authorizer = rbacController + } else if appState.ServerConfig.Config.Authorization.AdminList.Enabled { + appState.Authorizer = adminlist.New(appState.ServerConfig.Config.Authorization.AdminList) + } else { + appState.Authorizer = &authorization.DummyAuthorizer{} + } + + if appState.ServerConfig.Config.Authorization.Rbac.Enabled && appState.RBAC == nil { + // this in general shall not happen, it's to catch cases were RBAC expected but we weren't able 
+ // to assign it. + return fmt.Errorf("RBAC is expected to be enabled, but the controller wasn't initialized") + } + + return nil +} + +func timeTillDeadline(ctx context.Context) string { + dl, _ := ctx.Deadline() + return time.Until(dl).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_server_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_server_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1017d4b2e4d863a9c023ff377bee5a190971f720 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_server_test.go @@ -0,0 +1,58 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/adminlist" + "github.com/weaviate/weaviate/usecases/config" +) + +func Test_DummyAuthorizer(t *testing.T) { + t.Run("when no authz is configured", func(t *testing.T) { + authorizer := authorization.DummyAuthorizer{} + + t.Run("any request is allowed", func(t *testing.T) { + err := authorizer.Authorize(context.Background(), nil, "delete", "the/world") + assert.Nil(t, err) + }) + }) +} + +func Test_AdminListAuthorizer(t *testing.T) { + t.Run("when adminlist is configured", func(t *testing.T) { + cfg := config.Config{ + Authorization: config.Authorization{ + AdminList: adminlist.Config{ + Enabled: true, + Users: []string{"user1"}, + }, + }, + } + + authorizer := adminlist.New(cfg.Authorization.AdminList) + t.Run("admin requests are allowed", func(t *testing.T) { + err := 
authorizer.Authorize(context.Background(), &models.Principal{Username: "user1"}, "delete", "the/world") + assert.Nil(t, err) + }) + + t.Run("non admin requests are allowed", func(t *testing.T) { + err := authorizer.Authorize(context.Background(), &models.Principal{Username: "user2"}, "delete", "the/world") + assert.NotNil(t, err) + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_weaviate.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_weaviate.go new file mode 100644 index 0000000000000000000000000000000000000000..e024c48b88426be0afafa4bf468fa899599393f6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_weaviate.go @@ -0,0 +1,41 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Package rest with all rest API functions. +package rest + +import ( + "crypto/tls" + + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" + "github.com/weaviate/weaviate/usecases/config" +) + +var connectorOptionGroup *swag.CommandLineOptionsGroup + +// configureAPI -> see configure_api.go + +// configureServer -> see configure_server.go + +func configureFlags(api *operations.WeaviateAPI) { + connectorOptionGroup = config.GetConfigOptionGroup() + + api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ + *connectorOptionGroup, + } +} + +// The TLS configuration before HTTPS server starts. +func configureTLS(tlsConfig *tls.Config) { + // Make all necessary changes to the TLS configuration here. 
+} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_weaviate_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_weaviate_test.go new file mode 100644 index 0000000000000000000000000000000000000000..90437cb3f48e94d9bb71aefdedf22281f2608cdb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/configure_weaviate_test.go @@ -0,0 +1,30 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "reflect" + "testing" +) + +func TestCreateErrorResponseObject(t *testing.T) { + testResults := createErrorResponseObject("error message 1", "error message 2") + + // check which type is used + if typeName := reflect.TypeOf(testResults); typeName.Kind() == reflect.Ptr { + if typeName.Elem().Name() != "ErrorResponse" { + t.Error("Wrong struct used, should be ErrorResponse but is: ", typeName.Elem().Name()) + } + } else { + t.Error("Wrong struct used, should be ErrorResponse but is: ", typeName.Name()) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/context/context.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/context/context.go new file mode 100644 index 0000000000000000000000000000000000000000..a1dab5c3223feb2ddfa4e2009413682afdfc862d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/context/context.go @@ -0,0 +1,39 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package context + +import ( + "context" + + "github.com/weaviate/weaviate/entities/models" +) + +type contextKey string + +func (c contextKey) String() string { + return string(c) +} + +const ctxPrincipalKey = contextKey("principal") + +func GetPrincipalFromContext(ctx context.Context) *models.Principal { + principal := ctx.Value(ctxPrincipalKey) + if principal == nil { + return nil + } + + return principal.(*models.Principal) +} + +func AddPrincipalToContext(ctx context.Context, principal *models.Principal) context.Context { + return context.WithValue(ctx, ctxPrincipalKey, principal) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/activate_user_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/activate_user_test.go new file mode 100644 index 0000000000000000000000000000000000000000..55269b042ef70eca366a0b542923489d84dac7cf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/activate_user_test.go @@ -0,0 +1,133 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db_users + +import ( + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/mock" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/users" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" + "github.com/weaviate/weaviate/usecases/config" +) + +var req, _ = http.NewRequest("POST", "/activate", nil) + +func TestSuccessActivate(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users("user")[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", "user").Return(map[string]*apikey.User{"user": {Id: "user", Active: false}}, nil) + dynUser.On("ActivateUser", "user").Return(nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + } + res := h.activateUser(users.ActivateUserParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.ActivateUserOK) + assert.True(t, ok) +} + +func TestActivateNotFound(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users("user")[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", "user").Return(map[string]*apikey.User{}, nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + } + + res := h.activateUser(users.ActivateUserParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.ActivateUserNotFound) + assert.True(t, ok) +} + +func TestActivateBadParameters(t 
*testing.T) { + tests := []struct { + name string + user string + }{ + {name: "static user", user: "static-user"}, + {name: "root user", user: "root-user"}, + } + + for _, test := range tests { + t.Run(fmt.Sprint(test.name), func(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users(test.user)[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + if test.user == "static-user" { + dynUser.On("GetUsers", test.user).Return(nil, nil) + } + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + staticApiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{"static-user"}}, + rbacConfig: rbacconf.Config{Enabled: true, RootUsers: []string{"root-user"}}, dbUserEnabled: true, + } + + res := h.activateUser(users.ActivateUserParams{UserID: test.user, HTTPRequest: req}, principal) + _, ok := res.(*users.ActivateUserUnprocessableEntity) + assert.True(t, ok) + }) + } +} + +func TestDoubleActivate(t *testing.T) { + user := "active-user" + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users(user)[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", user).Return(map[string]*apikey.User{user: {Id: user, Active: true}}, nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + staticApiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{"static-user"}}, + rbacConfig: rbacconf.Config{Enabled: true, RootUsers: []string{"root-user"}}, dbUserEnabled: true, + } + + res := h.activateUser(users.ActivateUserParams{UserID: user, HTTPRequest: req}, principal) + _, ok := res.(*users.ActivateUserConflict) + assert.True(t, ok) +} + +func TestActivateNoDynamic(t *testing.T) { + principal := &models.Principal{} + authorizer := 
authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users("user")[0]).Return(nil) + + h := dynUserHandler{ + dbUsers: NewMockDbUserAndRolesGetter(t), + authorizer: authorizer, + dbUserEnabled: false, + } + + res := h.activateUser(users.ActivateUserParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.ActivateUserUnprocessableEntity) + assert.True(t, ok) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/create_user_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/create_user_test.go new file mode 100644 index 0000000000000000000000000000000000000000..549b1c83fde290f6442807a0a50dab87da4eba96 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/create_user_test.go @@ -0,0 +1,286 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db_users + +import ( + "errors" + "strings" + "testing" + + "github.com/weaviate/weaviate/usecases/auth/authorization/adminlist" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" + + "github.com/weaviate/weaviate/usecases/config" + + "github.com/weaviate/weaviate/usecases/auth/authorization" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/users" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" +) + +func TestCreateUnprocessableEntity(t *testing.T) { + principal := &models.Principal{} + tests := []struct { + name string + userId string + }{ + {name: "too long", userId: strings.Repeat("A", 129)}, + {name: "invalid characters", userId: "#a"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + dynUser := NewMockDbUserAndRolesGetter(t) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + } + + res := h.createUser(users.CreateUserParams{UserID: tt.userId, HTTPRequest: req}, principal) + parsed, ok := res.(*users.CreateUserUnprocessableEntity) + assert.True(t, ok) + assert.NotNil(t, parsed) + }) + } +} + +func TestCreateInternalServerError(t *testing.T) { + principal := &models.Principal{} + tests := []struct { + name string + GetUserReturn error + CheckUserIdentifierExistsErrorReturn error + CheckUserIdentifierExistsValueReturn bool + CreateUserReturn error + }{ + {name: "get user error", GetUserReturn: errors.New("some error")}, + {name: "check identifier exists, error", GetUserReturn: nil, CheckUserIdentifierExistsErrorReturn: errors.New("some error")}, + {name: "check identifier exists, repeated collision", GetUserReturn: nil, CheckUserIdentifierExistsErrorReturn: nil, CheckUserIdentifierExistsValueReturn: true}, + 
{name: "create user error", GetUserReturn: nil, CheckUserIdentifierExistsErrorReturn: nil, CreateUserReturn: errors.New("some error")}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.CREATE, authorization.Users("user")[0]).Return(nil) + + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", "user").Return(nil, tt.GetUserReturn) + if tt.GetUserReturn == nil { + dynUser.On("CheckUserIdentifierExists", mock.Anything).Return(tt.CheckUserIdentifierExistsValueReturn, tt.CheckUserIdentifierExistsErrorReturn) + } + if tt.CheckUserIdentifierExistsErrorReturn == nil && !tt.CheckUserIdentifierExistsValueReturn && tt.GetUserReturn == nil { + dynUser.On("CreateUser", "user", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tt.CreateUserReturn) + } + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + } + + res := h.createUser(users.CreateUserParams{UserID: "user", HTTPRequest: req}, principal) + parsed, ok := res.(*users.CreateUserInternalServerError) + assert.True(t, ok) + assert.NotNil(t, parsed) + }) + } +} + +func TestCreateConflict(t *testing.T) { + tests := []struct { + name string + rbacConf config.StaticAPIKey + }{ + {name: "no rbac conf", rbacConf: config.StaticAPIKey{}}, + {name: "enabled rbac conf", rbacConf: config.StaticAPIKey{Enabled: true, Users: []string{"user"}, AllowedKeys: []string{"key"}}}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + principal := &models.Principal{} + + authorizer := authorization.NewMockAuthorizer(t) + dynUser := NewMockDbUserAndRolesGetter(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.CREATE, authorization.Users("user")[0]).Return(nil) + if !tt.rbacConf.Enabled { + dynUser.On("GetUsers", "user").Return(map[string]*apikey.User{"user": {}}, nil) + } + + h := 
dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + staticApiKeysConfigs: tt.rbacConf, + dbUserEnabled: true, + } + + res := h.createUser(users.CreateUserParams{UserID: "user", HTTPRequest: req}, principal) + parsed, ok := res.(*users.CreateUserConflict) + assert.True(t, ok) + assert.NotNil(t, parsed) + }) + } +} + +func TestCreateSuccess(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + user := "user@weaviate.io" + authorizer.On("Authorize", mock.Anything, principal, authorization.CREATE, authorization.Users(user)[0]).Return(nil) + + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", user).Return(map[string]*apikey.User{}, nil) + dynUser.On("CheckUserIdentifierExists", mock.Anything).Return(false, nil) + dynUser.On("CreateUser", user, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + } + + res := h.createUser(users.CreateUserParams{UserID: user, HTTPRequest: req}, principal) + parsed, ok := res.(*users.CreateUserCreated) + assert.True(t, ok) + assert.NotNil(t, parsed) +} + +func TestCreateSuccessWithKey(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + user := "user@weaviate.io" + authorizer.On("Authorize", mock.Anything, principal, authorization.CREATE, authorization.Users(user)[0]).Return(nil) + + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("CreateUserWithKey", user, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + dbUserEnabled: true, + staticApiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{user}, AllowedKeys: []string{"key"}}, + } + tp := true + + res := h.createUser(users.CreateUserParams{UserID: user, HTTPRequest: req, Body: users.CreateUserBody{Import: &tp}}, principal) + parsed, ok := 
res.(*users.CreateUserCreated) + assert.True(t, ok) + assert.NotNil(t, parsed) + assert.Equal(t, *parsed.Payload.Apikey, "key") +} + +func TestCreateNotFoundWithKey(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + user := "user@weaviate.io" + authorizer.On("Authorize", mock.Anything, principal, authorization.CREATE, authorization.Users(user)[0]).Return(nil) + + dynUser := NewMockDbUserAndRolesGetter(t) + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + dbUserEnabled: true, + staticApiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{user + "false"}, AllowedKeys: []string{"key"}}, + } + tp := true + + res := h.createUser(users.CreateUserParams{UserID: user, HTTPRequest: req, Body: users.CreateUserBody{Import: &tp}}, principal) + parsed, ok := res.(*users.CreateUserNotFound) + assert.True(t, ok) + assert.NotNil(t, parsed) +} + +func TestCreateForbidden(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.CREATE, authorization.Users("user")[0]).Return(errors.New("some error")) + + dynUser := NewMockDbUserAndRolesGetter(t) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + } + + res := h.createUser(users.CreateUserParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.CreateUserForbidden) + assert.True(t, ok) +} + +func TestCreateUnprocessableEntityCreatingRootUser(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.CREATE, authorization.Users("user-root")[0]).Return(nil) + + dynUser := NewMockDbUserAndRolesGetter(t) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + rbacConfig: rbacconf.Config{RootUsers: []string{"user-root"}}, dbUserEnabled: true, + } + + res := 
h.createUser(users.CreateUserParams{UserID: "user-root", HTTPRequest: req}, principal) + _, ok := res.(*users.CreateUserUnprocessableEntity) + assert.True(t, ok) +} + +func TestCreateUnprocessableEntityCreatingAdminlistUser(t *testing.T) { + tests := []struct { + name string + adminlistConf adminlist.Config + }{ + {name: "adminlist - read-only user", adminlistConf: adminlist.Config{Enabled: true, ReadOnlyUsers: []string{"user"}}}, + {name: "adminlist - admin user", adminlistConf: adminlist.Config{Enabled: true, Users: []string{"user"}}}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + principal := &models.Principal{} + + authorizer := authorization.NewMockAuthorizer(t) + dynUser := NewMockDbUserAndRolesGetter(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.CREATE, authorization.Users("user")[0]).Return(nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + adminListConfig: tt.adminlistConf, + dbUserEnabled: true, + } + + res := h.createUser(users.CreateUserParams{UserID: "user", HTTPRequest: req}, principal) + parsed, ok := res.(*users.CreateUserUnprocessableEntity) + assert.True(t, ok) + assert.NotNil(t, parsed) + }) + } +} + +func TestCreateNoDynamic(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.CREATE, authorization.Users("user")[0]).Return(nil) + + h := dynUserHandler{ + dbUsers: NewMockDbUserAndRolesGetter(t), + authorizer: authorizer, + dbUserEnabled: false, + } + + res := h.createUser(users.CreateUserParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.CreateUserUnprocessableEntity) + assert.True(t, ok) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/deactivate_user_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/deactivate_user_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..c7689e6bbeda17d3a0afe549c915b4ac1652be71 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/deactivate_user_test.go @@ -0,0 +1,143 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db_users + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/mock" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/users" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" + "github.com/weaviate/weaviate/usecases/config" +) + +func TestSuccessDeactivate(t *testing.T) { + tests := []struct { + revokeKey bool + }{ + {false}, {true}, + } + + for _, test := range tests { + t.Run(fmt.Sprint(test.revokeKey), func(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users("user")[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", "user").Return(map[string]*apikey.User{"user": {Id: "user", Active: true}}, nil) + dynUser.On("DeactivateUser", "user", test.revokeKey).Return(nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + } + + res := h.deactivateUser(users.DeactivateUserParams{UserID: "user", HTTPRequest: req, Body: users.DeactivateUserBody{RevokeKey: &test.revokeKey}}, principal) + _, ok := res.(*users.DeactivateUserOK) + assert.True(t, ok) + }) + } +} + +func 
TestDeactivateNotFound(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users("user")[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", "user").Return(map[string]*apikey.User{}, nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + } + + res := h.deactivateUser(users.DeactivateUserParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.DeactivateUserNotFound) + assert.True(t, ok) +} + +func TestDeactivateBadParameters(t *testing.T) { + tests := []struct { + name string + user string + principal string + }{ + {name: "static user", user: "static-user", principal: "admin"}, + {name: "root user", user: "root-user", principal: "admin"}, + {name: "own user", user: "myself", principal: "myself"}, + } + + for _, test := range tests { + t.Run(fmt.Sprint(test.name), func(t *testing.T) { + principal := &models.Principal{Username: test.principal} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users(test.user)[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + if test.user == "static-user" { + dynUser.On("GetUsers", test.user).Return(nil, nil) + } + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + staticApiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{"static-user"}}, + rbacConfig: rbacconf.Config{Enabled: true, RootUsers: []string{"root-user"}}, dbUserEnabled: true, + } + + res := h.deactivateUser(users.DeactivateUserParams{UserID: test.user, HTTPRequest: req}, principal) + _, ok := res.(*users.DeactivateUserUnprocessableEntity) + assert.True(t, ok) + }) + } +} + +func TestDoubleDeactivate(t *testing.T) { + user := "deactivated-user" + principal := &models.Principal{} + authorizer := 
authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users(user)[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", user).Return(map[string]*apikey.User{user: {Id: user, Active: false}}, nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + staticApiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{"static-user"}}, + rbacConfig: rbacconf.Config{Enabled: true, RootUsers: []string{"root-user"}}, dbUserEnabled: true, + } + + res := h.deactivateUser(users.DeactivateUserParams{UserID: user, HTTPRequest: req}, principal) + _, ok := res.(*users.DeactivateUserConflict) + assert.True(t, ok) +} + +func TestSuspendNoDynamic(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users("user")[0]).Return(nil) + + h := dynUserHandler{ + dbUsers: NewMockDbUserAndRolesGetter(t), + authorizer: authorizer, + dbUserEnabled: false, + } + + res := h.deactivateUser(users.DeactivateUserParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.DeactivateUserUnprocessableEntity) + assert.True(t, ok) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/delete_user_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/delete_user_test.go new file mode 100644 index 0000000000000000000000000000000000000000..209cd3c7208c097ddb4c223c956cc5a2fa0f0426 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/delete_user_test.go @@ -0,0 +1,124 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db_users + +import ( + "errors" + "testing" + + "github.com/weaviate/weaviate/usecases/auth/authentication" + + "github.com/stretchr/testify/mock" + + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/users" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/conv" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" + "github.com/weaviate/weaviate/usecases/config" +) + +func TestDeleteSuccess(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.DELETE, authorization.Users("user")[0]).Return(nil) + + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetRolesForUserOrGroup", "user", authentication.AuthTypeDb, false).Return(map[string][]authorization.Policy{"role": {}}, nil) + dynUser.On("RevokeRolesForUser", conv.UserNameWithTypeFromId("user", authentication.AuthType(models.UserTypeInputDb)), "role").Return(nil) + dynUser.On("DeleteUser", "user").Return(nil) + dynUser.On("GetUsers", "user").Return(map[string]*apikey.User{"user": {}}, nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + } + + res := h.deleteUser(users.DeleteUserParams{UserID: "user", HTTPRequest: req}, principal) + parsed, ok := res.(*users.DeleteUserNoContent) + assert.True(t, ok) + assert.NotNil(t, parsed) +} + +func TestDeleteForbidden(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.DELETE, authorization.Users("user")[0]).Return(errors.New("some error")) + + dynUser := NewMockDbUserAndRolesGetter(t) + 
+ h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + } + + res := h.deleteUser(users.DeleteUserParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.DeleteUserForbidden) + assert.True(t, ok) +} + +func TestDeleteUnprocessableEntityStaticUser(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.DELETE, authorization.Users("user")[0]).Return(nil) + + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", "user").Return(map[string]*apikey.User{}, nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + + staticApiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{"user"}, AllowedKeys: []string{"key"}}, + } + + res := h.deleteUser(users.DeleteUserParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.DeleteUserUnprocessableEntity) + assert.True(t, ok) +} + +func TestDeleteUnprocessableEntityDeletingRootUser(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.DELETE, authorization.Users("user-root")[0]).Return(nil) + + dynUser := NewMockDbUserAndRolesGetter(t) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + rbacConfig: rbacconf.Config{RootUsers: []string{"user-root"}}, dbUserEnabled: true, + } + + res := h.deleteUser(users.DeleteUserParams{UserID: "user-root", HTTPRequest: req}, principal) + _, ok := res.(*users.DeleteUserUnprocessableEntity) + assert.True(t, ok) +} + +func TestDeleteNoDynamic(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.DELETE, authorization.Users("user")[0]).Return(nil) + + h := dynUserHandler{ + dbUsers: 
NewMockDbUserAndRolesGetter(t), + authorizer: authorizer, + dbUserEnabled: false, + } + + res := h.deleteUser(users.DeleteUserParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.DeleteUserUnprocessableEntity) + assert.True(t, ok) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/get_user_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/get_user_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d1014f9534d2e8a963c4c13aae9e7fb63064596a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/get_user_test.go @@ -0,0 +1,284 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db_users + +import ( + "errors" + "net/http" + "net/http/httptest" + "sync/atomic" + "testing" + "time" + + "github.com/weaviate/weaviate/usecases/auth/authentication" + + "github.com/stretchr/testify/mock" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/adapters/clients" + "github.com/weaviate/weaviate/usecases/schema" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" + "github.com/weaviate/weaviate/usecases/config" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/users" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +func TestSuccessGetUser(t *testing.T) { + tests := []struct { + name string + userId string + isRoot bool + addLastUsed bool + importStatic bool + userType models.UserTypeOutput + }{ + {name: "dynamic user - non-root", 
userId: "dynamic", userType: models.UserTypeOutputDbUser, isRoot: false}, + {name: "dynamic user - root", userId: "dynamic", userType: models.UserTypeOutputDbUser, isRoot: true}, + {name: "dynamic user with last used - root", userId: "dynamic", userType: models.UserTypeOutputDbUser, isRoot: true, addLastUsed: true}, + {name: "static user", userId: "static", userType: models.UserTypeOutputDbEnvUser, isRoot: true}, + {name: "dynamic user after import - root", userId: "static", userType: models.UserTypeOutputDbUser, isRoot: true, importStatic: true}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + username := "non-root" + if test.isRoot { + username = "root" + } + principal := &models.Principal{Username: username} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.READ, authorization.Users(test.userId)[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + schemaGetter := schema.NewMockSchemaGetter(t) + if test.userType == models.UserTypeOutputDbUser { + dynUser.On("GetUsers", test.userId).Return(map[string]*apikey.User{test.userId: {Id: test.userId, ApiKeyFirstLetters: "abc"}}, nil) + } else { + dynUser.On("GetUsers", test.userId).Return(map[string]*apikey.User{}, nil) + } + dynUser.On("GetRolesForUserOrGroup", test.userId, authentication.AuthTypeDb, false).Return( + map[string][]authorization.Policy{"role": {}}, nil) + + if test.addLastUsed { + schemaGetter.On("Nodes").Return([]string{"node1"}) + } + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + staticApiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{"static"}, AllowedKeys: []string{"static"}}, + rbacConfig: rbacconf.Config{Enabled: true, RootUsers: []string{"root"}}, dbUserEnabled: true, + nodesGetter: schemaGetter, + } + + res := h.getUser(users.GetUserInfoParams{UserID: test.userId, IncludeLastUsedTime: &test.addLastUsed, HTTPRequest: req}, principal) + parsed, ok := 
res.(*users.GetUserInfoOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + + require.Equal(t, *parsed.Payload.UserID, test.userId) + require.Equal(t, parsed.Payload.Roles, []string{"role"}) + require.Equal(t, *parsed.Payload.DbUserType, string(test.userType)) + + if test.isRoot && test.userType == models.UserTypeOutputDbUser { + require.Equal(t, parsed.Payload.APIKeyFirstLetters, "abc") + } else { + require.Equal(t, parsed.Payload.APIKeyFirstLetters, "") + } + }) + } +} + +func TestSuccessGetUserMultiNode(t *testing.T) { + returnedTime := time.Now() + + userId := "user" + + truep := true + tests := []struct { + name string + nodeResponses []map[string]time.Time + expectedTime time.Time + }{ + {name: "single node", nodeResponses: []map[string]time.Time{{}}, expectedTime: returnedTime}, + {name: "multi node with latest time on local node", expectedTime: returnedTime, nodeResponses: []map[string]time.Time{{userId: returnedTime.Add(-time.Second)}, {userId: returnedTime.Add(-time.Second)}}}, + {name: "multi node with latest time on other node", expectedTime: returnedTime.Add(time.Hour), nodeResponses: []map[string]time.Time{{userId: returnedTime.Add(time.Hour)}, {userId: returnedTime.Add(time.Minute)}}}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + principal := &models.Principal{Username: "non-root"} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.READ, authorization.Users(userId)[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + schemaGetter := schema.NewMockSchemaGetter(t) + + dynUser.On("GetUsers", userId).Return(map[string]*apikey.User{userId: {Id: userId, LastUsedAt: returnedTime}}, nil) + dynUser.On("GetRolesForUserOrGroup", userId, authentication.AuthTypeDb, false).Return(map[string][]authorization.Policy{"role": {}}, nil) + + var nodes []string + for i := range test.nodeResponses { + nodes = append(nodes, string(rune(i))) + } + 
schemaGetter.On("Nodes").Return(nodes) + + server := httptest.NewServer(&fakeHandler{t: t, counter: atomic.Int32{}, nodeResponses: test.nodeResponses}) + defer server.Close() + + remote := clients.NewRemoteUser(&http.Client{}, FakeNodeResolver{path: server.URL}) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + staticApiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{"static"}, AllowedKeys: []string{"static"}}, + rbacConfig: rbacconf.Config{Enabled: true, RootUsers: []string{"root"}}, dbUserEnabled: true, + nodesGetter: schemaGetter, + remoteUser: remote, + } + + res := h.getUser(users.GetUserInfoParams{UserID: userId, IncludeLastUsedTime: &truep, HTTPRequest: req}, principal) + parsed, ok := res.(*users.GetUserInfoOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + + require.Equal(t, *parsed.Payload.UserID, userId) + require.Equal(t, parsed.Payload.LastUsedAt.String(), strfmt.DateTime(test.expectedTime).String()) + }) + } +} + +func TestNotFound(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.READ, authorization.Users("static")[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", "static").Return(map[string]*apikey.User{}, nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + + staticApiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{"static"}, AllowedKeys: []string{"static"}}, + } + + res := h.getUser(users.GetUserInfoParams{UserID: "static", HTTPRequest: req}, principal) + _, ok := res.(*users.GetUserInfoNotFound) + assert.True(t, ok) +} + +func TestNotFoundStatic(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.READ, authorization.Users("user")[0]).Return(nil) + dynUser := 
NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", "user").Return(map[string]*apikey.User{}, nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + } + + res := h.getUser(users.GetUserInfoParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.GetUserInfoNotFound) + assert.True(t, ok) +} + +func TestGetUserInternalServerError(t *testing.T) { + principal := &models.Principal{} + tests := []struct { + name string + GetUserReturnErr error + GetUserReturnValue map[string]*apikey.User + GetRolesReturn error + }{ + {name: "get user error", GetUserReturnErr: errors.New("some error"), GetUserReturnValue: nil}, + {name: "create user error", GetUserReturnErr: nil, GetUserReturnValue: map[string]*apikey.User{"user": {Id: "user"}}, GetRolesReturn: errors.New("some error")}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.READ, authorization.Users("user")[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", "user").Return(tt.GetUserReturnValue, tt.GetUserReturnErr) + if tt.GetUserReturnErr == nil { + dynUser.On("GetRolesForUserOrGroup", "user", authentication.AuthTypeDb, false).Return(nil, tt.GetRolesReturn) + } + + h := dynUserHandler{ + dbUsers: dynUser, authorizer: authorizer, dbUserEnabled: true, + } + + res := h.getUser(users.GetUserInfoParams{UserID: "user", HTTPRequest: req}, principal) + parsed, ok := res.(*users.GetUserInfoInternalServerError) + assert.True(t, ok) + assert.NotNil(t, parsed) + }) + } +} + +func TestListForbidden(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.READ, authorization.Users("user")[0]).Return(errors.New("some error")) + + dynUser := NewMockDbUserAndRolesGetter(t) + + h := 
dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + } + + res := h.getUser(users.GetUserInfoParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.GetUserInfoForbidden) + assert.True(t, ok) +} + +func TestGetNoDynamic(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.READ, authorization.Users("user")[0]).Return(nil) + + h := dynUserHandler{ + dbUsers: NewMockDbUserAndRolesGetter(t), + authorizer: authorizer, + dbUserEnabled: false, + } + + res := h.getUser(users.GetUserInfoParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.GetUserInfoUnprocessableEntity) + assert.True(t, ok) +} + +func TestGetUserWithNoPrincipal(t *testing.T) { + var ( + principal *models.Principal + userID = "static" + ) + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.READ, authorization.Users(userID)[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", userID).Return(map[string]*apikey.User{userID: {Id: userID, ApiKeyFirstLetters: "abc"}}, nil) + dynUser.On("GetRolesForUserOrGroup", userID, authentication.AuthTypeDb, false).Return(map[string][]authorization.Policy{"role": {}}, nil) + + h := dynUserHandler{dbUsers: dynUser, authorizer: authorizer, dbUserEnabled: true} + + res := h.getUser(users.GetUserInfoParams{UserID: "static", HTTPRequest: req}, principal) + parsed, ok := res.(*users.GetUserInfoOK) + assert.True(t, ok) + assert.NotNil(t, parsed) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/handlers_db_users.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/handlers_db_users.go new file mode 100644 index 0000000000000000000000000000000000000000..245428318ccf84bdb9f296074864b8301abfeb0e --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/handlers_db_users.go @@ -0,0 +1,647 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db_users + +import ( + "context" + "crypto/sha256" + "errors" + "fmt" + "regexp" + "slices" + "sync" + "time" + + "github.com/weaviate/weaviate/usecases/auth/authentication" + + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/weaviate/weaviate/adapters/clients" + + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/usecases/auth/authorization/adminlist" + + "github.com/go-openapi/runtime/middleware" + "github.com/sirupsen/logrus" + cerrors "github.com/weaviate/weaviate/adapters/handlers/rest/errors" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/users" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey/keys" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/conv" + "github.com/weaviate/weaviate/usecases/auth/authorization/filter" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/schema" +) + +type dynUserHandler struct { + authorizer authorization.Authorizer + dbUsers DbUserAndRolesGetter + staticApiKeysConfigs config.StaticAPIKey + rbacConfig rbacconf.Config + adminListConfig adminlist.Config + logger logrus.FieldLogger + dbUserEnabled bool + remoteUser *clients.RemoteUser + nodesGetter schema.SchemaGetter +} + +type 
DbUserAndRolesGetter interface { + apikey.DBUsers + GetRolesForUserOrGroup(user string, authTypes authentication.AuthType, isGroup bool) (map[string][]authorization.Policy, error) + RevokeRolesForUser(userName string, roles ...string) error +} + +var validateUserNameRegex = regexp.MustCompile(`^` + apikey.UserNameRegexCore + `$`) + +func SetupHandlers( + api *operations.WeaviateAPI, dbUsers DbUserAndRolesGetter, authorizer authorization.Authorizer, authNConfig config.Authentication, + authZConfig config.Authorization, remoteUser *clients.RemoteUser, nodesGetter schema.SchemaGetter, logger logrus.FieldLogger, +) { + h := &dynUserHandler{ + authorizer: authorizer, + dbUsers: dbUsers, + staticApiKeysConfigs: authNConfig.APIKey, + dbUserEnabled: authNConfig.DBUsers.Enabled, + rbacConfig: authZConfig.Rbac, + remoteUser: remoteUser, + nodesGetter: nodesGetter, + logger: logger, + } + + api.UsersCreateUserHandler = users.CreateUserHandlerFunc(h.createUser) + api.UsersDeleteUserHandler = users.DeleteUserHandlerFunc(h.deleteUser) + api.UsersGetUserInfoHandler = users.GetUserInfoHandlerFunc(h.getUser) + api.UsersRotateUserAPIKeyHandler = users.RotateUserAPIKeyHandlerFunc(h.rotateKey) + api.UsersDeactivateUserHandler = users.DeactivateUserHandlerFunc(h.deactivateUser) + api.UsersActivateUserHandler = users.ActivateUserHandlerFunc(h.activateUser) + api.UsersListAllUsersHandler = users.ListAllUsersHandlerFunc(h.listUsers) +} + +func (h *dynUserHandler) listUsers(params users.ListAllUsersParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + isRootUser := h.isRequestFromRootUser(principal) + + if !h.dbUserEnabled { + return users.NewListAllUsersOK().WithPayload([]*models.DBUserInfo{}) + } + + allDbUsers, err := h.dbUsers.GetUsers() + if err != nil { + return users.NewListAllUsersInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + allUsers := make([]*apikey.User, 0, len(allDbUsers)) + for _, dbUser := 
range allDbUsers { + allUsers = append(allUsers, dbUser) + } + + resourceFilter := filter.New[*apikey.User](h.authorizer, h.rbacConfig) + filteredUsers := resourceFilter.Filter( + ctx, + h.logger, + principal, + allUsers, + authorization.READ, + func(user *apikey.User) string { + return authorization.Users(user.Id)[0] + }, + ) + + var usersWithTime map[string]time.Time + if params.IncludeLastUsedTime != nil && *params.IncludeLastUsedTime { + usersWithTime = h.getLastUsed(filteredUsers) + } + + allDynamicUsers := map[string]struct{}{} + response := make([]*models.DBUserInfo, 0, len(filteredUsers)) + for _, dbUser := range filteredUsers { + apiKeyFirstLetter := "" + if isRootUser { + apiKeyFirstLetter = dbUser.ApiKeyFirstLetters + } + var lastUsedTime time.Time + if val, ok := usersWithTime[dbUser.Id]; ok { + lastUsedTime = val + } + response, err = h.addToListAllResponse(response, dbUser.Id, string(models.UserTypeOutputDbUser), dbUser.Active, apiKeyFirstLetter, &dbUser.CreatedAt, &lastUsedTime) + if err != nil { + return users.NewListAllUsersInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + if isRootUser { + allDynamicUsers[dbUser.Id] = struct{}{} + } + } + + if isRootUser { + for _, staticUser := range h.staticApiKeysConfigs.Users { + if _, ok := allDynamicUsers[staticUser]; ok { + // don't overwrite dynamic users with the same name. 
Can happen after import + continue + } + response, err = h.addToListAllResponse(response, staticUser, string(models.UserTypeOutputDbEnvUser), true, "", nil, nil) + if err != nil { + return users.NewListAllUsersInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + } + } + + return users.NewListAllUsersOK().WithPayload(response) +} + +func (h *dynUserHandler) addToListAllResponse(response []*models.DBUserInfo, id, userType string, active bool, apiKeyFirstLetter string, createdAt *time.Time, lastusedAt *time.Time) ([]*models.DBUserInfo, error) { + roles, err := h.dbUsers.GetRolesForUserOrGroup(id, authentication.AuthTypeDb, false) + if err != nil { + return response, err + } + + roleNames := make([]string, 0, len(roles)) + for role := range roles { + roleNames = append(roleNames, role) + } + + resp := &models.DBUserInfo{ + Active: &active, + UserID: &id, + DbUserType: &userType, + Roles: roleNames, + APIKeyFirstLetters: apiKeyFirstLetter, + } + if createdAt != nil { + resp.CreatedAt = strfmt.DateTime(*createdAt) + } + if lastusedAt != nil { + resp.LastUsedAt = strfmt.DateTime(*lastusedAt) + } + + response = append(response, resp) + return response, nil +} + +func (h *dynUserHandler) getUser(params users.GetUserInfoParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if err := h.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Users(params.UserID)...); err != nil { + return users.NewGetUserInfoForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if !h.dbUserEnabled { + return users.NewGetUserInfoUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("db user management is not enabled"))) + } + + // also check for existing static users if request comes from root + isRootUser := h.isRequestFromRootUser(principal) + + active := true + response := &models.DBUserInfo{UserID: ¶ms.UserID, Active: &active} + + existingDbUsers, err := 
h.dbUsers.GetUsers(params.UserID) + if err != nil { + return users.NewGetUserInfoInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("checking user existence: %w", err))) + } + var userType string + if len(existingDbUsers) > 0 { + user := existingDbUsers[params.UserID] + response.Active = &user.Active + response.CreatedAt = strfmt.DateTime(user.CreatedAt) + if isRootUser { + response.APIKeyFirstLetters = user.ApiKeyFirstLetters + } + + if params.IncludeLastUsedTime != nil && *params.IncludeLastUsedTime { + usersWithTime := h.getLastUsed([]*apikey.User{user}) + response.LastUsedAt = strfmt.DateTime(usersWithTime[params.UserID]) + } + userType = string(models.UserTypeOutputDbUser) + } else if isRootUser && h.staticUserExists(params.UserID) { + userType = string(models.UserTypeOutputDbEnvUser) + } else { + return users.NewGetUserInfoNotFound() + } + response.DbUserType = &userType + + existingRoles, err := h.dbUsers.GetRolesForUserOrGroup(params.UserID, authentication.AuthTypeDb, false) + if err != nil { + return users.NewGetUserInfoInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("get roles: %w", err))) + } + + roles := make([]string, 0, len(existingRoles)) + for roleName := range existingRoles { + roles = append(roles, roleName) + } + response.Roles = roles + + return users.NewGetUserInfoOK().WithPayload(response) +} + +func (h *dynUserHandler) getLastUsed(users []*apikey.User) map[string]time.Time { + usersWithTime := make(map[string]time.Time, len(users)) + for _, user := range users { + usersWithTime[user.Id] = user.LastUsedAt + } + + nodes := h.nodesGetter.Nodes() + if len(nodes) == 1 { + return usersWithTime + } + + // we tolerate errors in requests to other nodes and don't want to wait too long. 
Last used time is a best-effort + // operation + ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second) + defer cancelFunc() + userStatuses := make([]*apikey.UserStatusResponse, len(nodes)) + wg := &sync.WaitGroup{} + wg.Add(len(nodes)) + for i, nodeName := range nodes { + i, nodeName := i, nodeName + enterrors.GoWrapper(func() { + status, err := h.remoteUser.GetAndUpdateLastUsedTime(ctx, nodeName, usersWithTime, true) + if err == nil { + userStatuses[i] = status + } + wg.Done() + }, h.logger) + } + wg.Wait() + + for _, status := range userStatuses { + if status == nil { + continue + } + for userId, lastUsedTime := range status.Users { + if lastUsedTime.After(usersWithTime[userId]) { + usersWithTime[userId] = lastUsedTime + } + } + } + + // update all other nodes with maximum time so usage does not "jump back" when the node that has the latest time + // recorded is down. + // This is opportunistic (we dont care about errors) and there is no need to keep the request waiting for this + enterrors.GoWrapper(func() { + ctx2, cancelFunc2 := context.WithTimeout(context.Background(), time.Second) + defer cancelFunc2() + wg := &sync.WaitGroup{} + wg.Add(len(nodes)) + for _, nodeName := range nodes { + nodeName := nodeName + enterrors.GoWrapper(func() { + // dont care about returns or errors + _, _ = h.remoteUser.GetAndUpdateLastUsedTime(ctx2, nodeName, usersWithTime, false) + wg.Done() + }, h.logger) + } + wg.Wait() // wait so cancelFunc2 is not executed too early + }, h.logger) + + return usersWithTime +} + +func (h *dynUserHandler) createUser(params users.CreateUserParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if err := validateUserName(params.UserID); err != nil { + return users.NewCreateUserUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if err := h.authorizer.Authorize(ctx, principal, authorization.CREATE, authorization.Users(params.UserID)...); err != nil { + 
return users.NewCreateUserForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if !h.dbUserEnabled { + return users.NewCreateUserUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("db user management is not enabled"))) + } + + if params.Body.Import != nil && *params.Body.Import { + if !h.principalIsRootUser(principal.Username) { + return users.NewCreateUserForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("only root users can import static api keys"))) + } + + if !h.staticUserExists(params.UserID) { + return users.NewCreateUserNotFound().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("static user %v does not exist", params.UserID))) + } + + var apiKey string + for i, user := range h.staticApiKeysConfigs.Users { + if user == params.UserID { + apiKey = h.staticApiKeysConfigs.AllowedKeys[i] + } + } + + createdAt := time.Now() + if !time.Time(params.Body.CreateTime).IsZero() { + createdAt = time.Time(params.Body.CreateTime).UTC() + } + + if err := h.dbUsers.CreateUserWithKey(params.UserID, apiKey[:3], sha256.Sum256([]byte(apiKey)), createdAt); err != nil { + return users.NewCreateUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("creating user: %w", err))) + } + + return users.NewCreateUserCreated().WithPayload(&models.UserAPIKey{Apikey: &apiKey}) + } + + if h.staticUserExists(params.UserID) { + return users.NewCreateUserConflict().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("user '%v' already exists", params.UserID))) + } + if h.isRootUser(params.UserID) { + return users.NewCreateUserUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("cannot create db user with root user name"))) + } + if h.isAdminlistUser(params.UserID) { + return users.NewCreateUserUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("cannot create db user with admin list name"))) + } + + existingUser, err := 
h.dbUsers.GetUsers(params.UserID) + if err != nil { + return users.NewCreateUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("checking user existence: %w", err))) + } + + if len(existingUser) > 0 { + return users.NewCreateUserConflict().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("user '%v' already exists", params.UserID))) + } + + apiKey, hash, userIdentifier, err := h.getApiKey() + if err != nil { + return users.NewCreateUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if err := h.dbUsers.CreateUser(params.UserID, hash, userIdentifier, apiKey[:3], time.Now()); err != nil { + return users.NewCreateUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("creating user: %w", err))) + } + + return users.NewCreateUserCreated().WithPayload(&models.UserAPIKey{Apikey: &apiKey}) +} + +func (h *dynUserHandler) rotateKey(params users.RotateUserAPIKeyParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if err := h.authorizer.Authorize(ctx, principal, authorization.UPDATE, authorization.Users(params.UserID)...); err != nil { + return users.NewRotateUserAPIKeyForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if !h.dbUserEnabled { + return users.NewRotateUserAPIKeyUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("db user management is not enabled"))) + } + + existingUser, err := h.dbUsers.GetUsers(params.UserID) + if err != nil { + return users.NewRotateUserAPIKeyInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("checking user existence: %w", err))) + } + + if len(existingUser) == 0 { + if h.staticUserExists(params.UserID) { + return users.NewRotateUserAPIKeyUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("user '%v' is static user", params.UserID))) + } + return users.NewRotateUserAPIKeyNotFound() + } + + oldUserIdentifier := 
existingUser[params.UserID].InternalIdentifier + + apiKey, hash, newUserIdentifier, err := h.getApiKey() + if err != nil { + return users.NewRotateUserAPIKeyInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if err := h.dbUsers.RotateKey(params.UserID, apiKey[:3], hash, oldUserIdentifier, newUserIdentifier); err != nil { + return users.NewRotateUserAPIKeyInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("rotate key: %w", err))) + } + + return users.NewRotateUserAPIKeyOK().WithPayload(&models.UserAPIKey{Apikey: &apiKey}) +} + +func (h *dynUserHandler) getApiKey() (string, string, string, error) { + // the user identifier is random, and we need to be sure that there is no reuse. Otherwise, an existing apikey would + // become invalid. The chances are minimal, but with a lot of users it can happen (birthday paradox!). + // If we happen to have a collision by chance, simply generate a new key + count := 0 + for { + apiKey, hash, userIdentifier, err := keys.CreateApiKeyAndHash() + if err != nil { + return "", "", "", err + } + + exists, err := h.dbUsers.CheckUserIdentifierExists(userIdentifier) + if err != nil { + return "", "", "", err + } + if !exists { + return apiKey, hash, userIdentifier, nil + } + + // make sure we don't deadlock. The chance for one collision is very small, so this should never happen. But better be safe than sorry. 
+ if count >= 10 { + return "", "", "", errors.New("could not create a new user identifier") + } + count++ + } +} + +func (h *dynUserHandler) deleteUser(params users.DeleteUserParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if err := h.authorizer.Authorize(ctx, principal, authorization.DELETE, authorization.Users(params.UserID)...); err != nil { + return users.NewDeleteUserForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if !h.dbUserEnabled { + return users.NewDeleteUserUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("db user management is not enabled"))) + } + + if h.isRootUser(params.UserID) { + return users.NewDeleteUserUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("cannot delete root user"))) + } + existingUsers, err := h.dbUsers.GetUsers(params.UserID) + if err != nil { + return users.NewDeleteUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + if len(existingUsers) == 0 { + if h.staticUserExists(params.UserID) { + return users.NewDeleteUserUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("user '%v' is static user", params.UserID))) + } + return users.NewDeleteUserNotFound() + } + roles, err := h.dbUsers.GetRolesForUserOrGroup(params.UserID, authentication.AuthTypeDb, false) + if err != nil { + return users.NewDeleteUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + if len(roles) > 0 { + roleNames := make([]string, 0, len(roles)) + for name := range roles { + roleNames = append(roleNames, name) + } + if err := h.dbUsers.RevokeRolesForUser(conv.UserNameWithTypeFromId(params.UserID, authentication.AuthTypeDb), roleNames...); err != nil { + return users.NewDeleteUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + } + + if err := h.dbUsers.DeleteUser(params.UserID); err != nil { + return 
users.NewDeleteUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + return users.NewDeleteUserNoContent() +} + +func (h *dynUserHandler) deactivateUser(params users.DeactivateUserParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if err := h.authorizer.Authorize(ctx, principal, authorization.UPDATE, authorization.Users(params.UserID)...); err != nil { + return users.NewDeactivateUserForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if !h.dbUserEnabled { + return users.NewDeactivateUserUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("db user management is not enabled"))) + } + + if params.UserID == principal.Username { + return users.NewDeactivateUserUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("user '%v' cannot self-deactivate", params.UserID))) + } + + if h.isRootUser(params.UserID) { + return users.NewDeactivateUserUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("cannot deactivate root user"))) + } + + existingUser, err := h.dbUsers.GetUsers(params.UserID) + if err != nil { + return users.NewDeactivateUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("checking user existence: %w", err))) + } + + if len(existingUser) == 0 { + if h.staticUserExists(params.UserID) { + return users.NewDeactivateUserUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("user '%v' is static user", params.UserID))) + } + return users.NewDeactivateUserNotFound() + } + + if !existingUser[params.UserID].Active { + return users.NewDeactivateUserConflict() + } + + revokeKey := false + if params.Body.RevokeKey != nil { + revokeKey = *params.Body.RevokeKey + } + + if err := h.dbUsers.DeactivateUser(params.UserID, revokeKey); err != nil { + return 
users.NewDeactivateUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("deactivate user: %w", err))) + } + + return users.NewDeactivateUserOK() +} + +func (h *dynUserHandler) activateUser(params users.ActivateUserParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + if err := h.authorizer.Authorize(ctx, principal, authorization.UPDATE, authorization.Users(params.UserID)...); err != nil { + return users.NewActivateUserForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if !h.dbUserEnabled { + return users.NewActivateUserUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("db user management is not enabled"))) + } + + if h.isRootUser(params.UserID) { + return users.NewActivateUserUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(errors.New("cannot activate root user"))) + } + + existingUser, err := h.dbUsers.GetUsers(params.UserID) + if err != nil { + return users.NewActivateUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("checking user existence: %w", err))) + } + + if len(existingUser) == 0 { + if h.staticUserExists(params.UserID) { + return users.NewActivateUserUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("user '%v' is static user", params.UserID))) + } + return users.NewActivateUserNotFound() + } + + if existingUser[params.UserID].Active { + return users.NewActivateUserConflict() + } + + if err := h.dbUsers.ActivateUser(params.UserID); err != nil { + return users.NewActivateUserInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("activate user: %w", err))) + } + + return users.NewActivateUserOK() +} + +func (h *dynUserHandler) staticUserExists(newUser string) bool { + if h.staticApiKeysConfigs.Enabled { + for _, staticUser := range h.staticApiKeysConfigs.Users { + if staticUser == newUser { + return true + } + } + } + return false +} + +func 
(h *dynUserHandler) principalIsRootUser(name string) bool { + if !h.rbacConfig.Enabled && !h.adminListConfig.Enabled { + return true + } + for i := range h.rbacConfig.RootUsers { + if h.rbacConfig.RootUsers[i] == name { + return true + } + } + return false +} + +func (h *dynUserHandler) isRootUser(name string) bool { + for i := range h.rbacConfig.RootUsers { + if h.rbacConfig.RootUsers[i] == name { + return true + } + } + return false +} + +func (h *dynUserHandler) isAdminlistUser(name string) bool { + for i := range h.adminListConfig.Users { + if h.adminListConfig.Users[i] == name { + return true + } + } + for i := range h.adminListConfig.ReadOnlyUsers { + if h.adminListConfig.ReadOnlyUsers[i] == name { + return true + } + } + return false +} + +func (h *dynUserHandler) isRequestFromRootUser(principal *models.Principal) bool { + if principal == nil { + return false + } + for _, groupName := range principal.Groups { + if slices.Contains(h.rbacConfig.RootGroups, groupName) { + return true + } + } + return slices.Contains(h.rbacConfig.RootUsers, principal.Username) +} + +// validateUserName validates that this string is a valid user name (format wise) +func validateUserName(name string) error { + if len(name) > apikey.UserNameMaxLength { + return fmt.Errorf("'%s' is not a valid user name. 
Name should not be longer than %d characters", name, apikey.UserNameMaxLength) + } + if !validateUserNameRegex.MatchString(name) { + return fmt.Errorf("'%s' is not a valid user name", name) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/list_all_users_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/list_all_users_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6a0a871ba1670d137fae412f3506407d7f10e9dd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/list_all_users_test.go @@ -0,0 +1,279 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db_users + +import ( + "errors" + "net/http" + "net/http/httptest" + "sync/atomic" + "testing" + "time" + + "github.com/weaviate/weaviate/usecases/auth/authentication" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/adapters/clients" + "github.com/weaviate/weaviate/usecases/schema" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/mock" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/users" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" + "github.com/weaviate/weaviate/usecases/config" +) + +func TestSuccessListAll(t *testing.T) { + dbUser := "user1" + staticUser := "static" + tests := []struct { + name string + principal *models.Principal + includeStatic bool + }{ + { + name: "only db user", + 
principal: &models.Principal{Username: "not-root"}, + includeStatic: false, + }, + { + name: "db + static user", + principal: &models.Principal{Username: "root"}, + includeStatic: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, tt.principal, authorization.READ, authorization.Users()[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers").Return(map[string]*apikey.User{dbUser: {Id: dbUser}}, nil) + dynUser.On("GetRolesForUserOrGroup", dbUser, authentication.AuthTypeDb, false).Return( + map[string][]authorization.Policy{"role": {}}, nil) + if tt.includeStatic { + dynUser.On("GetRolesForUserOrGroup", staticUser, authentication.AuthTypeDb, false).Return( + map[string][]authorization.Policy{"role": {}}, nil) + } + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + staticApiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{staticUser}, AllowedKeys: []string{"static"}}, + rbacConfig: rbacconf.Config{Enabled: true, RootUsers: []string{"root"}}, + dbUserEnabled: true, + } + + res := h.listUsers(users.ListAllUsersParams{HTTPRequest: req}, tt.principal) + parsed, ok := res.(*users.ListAllUsersOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + + if tt.includeStatic { + require.Equal(t, len(parsed.Payload), 2) + } else { + require.Len(t, parsed.Payload, 1) + } + }) + } +} + +func TestSuccessListAllAfterImport(t *testing.T) { + exStaticUser := "static" + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, &models.Principal{Username: "root"}, authorization.READ, authorization.Users()[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers").Return(map[string]*apikey.User{exStaticUser: {Id: exStaticUser, Active: true}}, nil) + dynUser.On("GetRolesForUserOrGroup", exStaticUser, authentication.AuthTypeDb, false).Return( + 
map[string][]authorization.Policy{"role": {}}, nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + staticApiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{exStaticUser}, AllowedKeys: []string{"static"}}, + rbacConfig: rbacconf.Config{Enabled: true, RootUsers: []string{"root"}}, + dbUserEnabled: true, + } + + res := h.listUsers(users.ListAllUsersParams{HTTPRequest: req}, &models.Principal{Username: "root"}) + parsed, ok := res.(*users.ListAllUsersOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + require.Len(t, parsed.Payload, 1) + user := parsed.Payload[0] + require.Equal(t, *user.UserID, exStaticUser) + require.Equal(t, *user.Active, true) + require.Equal(t, *user.DbUserType, string(models.UserTypeOutputDbUser)) +} + +func TestSuccessListAllUserMultiNode(t *testing.T) { + baseTime := time.Now() + + usersIds := []string{"user1", "user2", "user3", "user4", "user5", "user6"} + + trueptr := true + tests := []struct { + name string + nodeResponses []map[string]time.Time + expectedTime map[string]time.Time + userIds []string + }{ + {name: "single node, single user", nodeResponses: []map[string]time.Time{{}}, expectedTime: map[string]time.Time{usersIds[0]: baseTime}, userIds: usersIds[:1]}, + {name: "single node, multi user", nodeResponses: []map[string]time.Time{{}}, expectedTime: map[string]time.Time{usersIds[0]: baseTime, usersIds[1]: baseTime}, userIds: usersIds[:2]}, + { + name: "multi node, latest time local node, single user", + userIds: usersIds[:1], + expectedTime: map[string]time.Time{usersIds[0]: baseTime}, + nodeResponses: []map[string]time.Time{{usersIds[0]: baseTime.Add(-time.Second)}, {usersIds[0]: baseTime.Add(-time.Second)}}, + }, + { + name: "multi node, latest time local node, multi user", + userIds: usersIds[:2], + expectedTime: map[string]time.Time{usersIds[0]: baseTime, usersIds[1]: baseTime}, + nodeResponses: []map[string]time.Time{ + {usersIds[0]: baseTime.Add(-time.Second), usersIds[1]: 
baseTime.Add(-2 * time.Second)}, + {usersIds[0]: baseTime.Add(-time.Second), usersIds[1]: baseTime.Add(-2 * time.Second)}, + }, + }, + { + name: "multi node, latest time other node, single user", + userIds: usersIds[:1], + expectedTime: map[string]time.Time{usersIds[0]: baseTime.Add(time.Hour)}, + nodeResponses: []map[string]time.Time{{usersIds[0]: baseTime.Add(time.Hour)}, {usersIds[0]: baseTime.Add(time.Minute)}}, + }, + { + name: "multi node, latest time other node, multi user", + userIds: usersIds[:2], + expectedTime: map[string]time.Time{usersIds[0]: baseTime.Add(time.Hour), usersIds[1]: baseTime.Add(2 * time.Hour)}, + nodeResponses: []map[string]time.Time{ + {usersIds[0]: baseTime.Add(time.Hour), usersIds[1]: baseTime.Add(time.Minute)}, + {usersIds[0]: baseTime.Add(time.Minute), usersIds[1]: baseTime.Add(2 * time.Hour)}, + }, + }, + { + name: "six node, six user", + userIds: usersIds, + expectedTime: map[string]time.Time{ + usersIds[0]: baseTime.Add(time.Hour), + usersIds[1]: baseTime.Add(2 * time.Hour), + usersIds[2]: baseTime.Add(3 * time.Hour), + usersIds[3]: baseTime.Add(4 * time.Hour), + usersIds[4]: baseTime.Add(5 * time.Hour), + usersIds[5]: baseTime.Add(6 * time.Hour), + }, + nodeResponses: []map[string]time.Time{ + {usersIds[0]: baseTime.Add(time.Hour), usersIds[1]: baseTime.Add(time.Minute)}, + {usersIds[0]: baseTime.Add(time.Minute), usersIds[1]: baseTime.Add(2 * time.Hour)}, + {usersIds[2]: baseTime.Add(3 * time.Hour), usersIds[3]: baseTime.Add(time.Minute), usersIds[1]: baseTime.Add(time.Minute)}, + {usersIds[2]: baseTime.Add(-time.Minute), usersIds[3]: baseTime.Add(4 * time.Hour)}, + {usersIds[4]: baseTime.Add(5 * time.Hour), usersIds[5]: baseTime.Add(time.Minute), usersIds[1]: baseTime.Add(time.Minute)}, + {usersIds[4]: baseTime.Add(-time.Minute), usersIds[5]: baseTime.Add(6 * time.Hour)}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + principal := &models.Principal{Username: "non-root"} + authorizer := 
authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.READ, authorization.Users()[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + schemaGetter := schema.NewMockSchemaGetter(t) + + usersRet := make(map[string]*apikey.User) + for _, user := range tt.userIds { + usersRet[user] = &apikey.User{Id: user, LastUsedAt: baseTime} + } + + dynUser.On("GetUsers").Return(usersRet, nil) + for _, user := range tt.userIds { + dynUser.On("GetRolesForUserOrGroup", user, authentication.AuthTypeDb, false).Return(map[string][]authorization.Policy{"role": {}}, nil) + } + + var nodes []string + for i := range tt.nodeResponses { + nodes = append(nodes, string(rune(i))) + } + schemaGetter.On("Nodes").Return(nodes) + + server := httptest.NewServer(&fakeHandler{t: t, counter: atomic.Int32{}, nodeResponses: tt.nodeResponses}) + defer server.Close() + + remote := clients.NewRemoteUser(&http.Client{}, FakeNodeResolver{path: server.URL}) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + staticApiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{"static"}, AllowedKeys: []string{"static"}}, + rbacConfig: rbacconf.Config{Enabled: true, RootUsers: []string{"root"}}, dbUserEnabled: true, + nodesGetter: schemaGetter, + remoteUser: remote, + } + + res := h.listUsers(users.ListAllUsersParams{IncludeLastUsedTime: &trueptr, HTTPRequest: req}, principal) + parsed, ok := res.(*users.ListAllUsersOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + + for i := range tt.userIds { + uid := *parsed.Payload[i].UserID + require.Equal(t, parsed.Payload[i].LastUsedAt.String(), strfmt.DateTime(tt.expectedTime[uid]).String()) + } + }) + } +} + +func TestSuccessListForbidden(t *testing.T) { + principal := &models.Principal{Username: "not-root"} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.READ, mock.Anything).Return(errors.New("some error")) + 
dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers").Return(map[string]*apikey.User{"test": {Id: "test"}}, nil) + + log, _ := test.NewNullLogger() + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + logger: log, + dbUserEnabled: true, + } + + // no authorization for anything => response will be empty + res := h.listUsers(users.ListAllUsersParams{HTTPRequest: req}, principal) + parsed, ok := res.(*users.ListAllUsersOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + require.Len(t, parsed.Payload, 0) +} + +func TestListNoDynamic(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + + h := dynUserHandler{ + dbUsers: NewMockDbUserAndRolesGetter(t), + authorizer: authorizer, + dbUserEnabled: false, + } + + res := h.listUsers(users.ListAllUsersParams{HTTPRequest: req}, principal) + parsed, ok := res.(*users.ListAllUsersOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + require.Len(t, parsed.Payload, 0) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/mock_db_user_and_roles_getter.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/mock_db_user_and_roles_getter.go new file mode 100644 index 0000000000000000000000000000000000000000..429869e916cd46c0cf34f49820fd50ee8b323135 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/mock_db_user_and_roles_getter.go @@ -0,0 +1,588 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package db_users + +import ( + authentication "github.com/weaviate/weaviate/usecases/auth/authentication" + apikey "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + + authorization "github.com/weaviate/weaviate/usecases/auth/authorization" + + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// MockDbUserAndRolesGetter is an autogenerated mock type for the DbUserAndRolesGetter type +type MockDbUserAndRolesGetter struct { + mock.Mock +} + +type MockDbUserAndRolesGetter_Expecter struct { + mock *mock.Mock +} + +func (_m *MockDbUserAndRolesGetter) EXPECT() *MockDbUserAndRolesGetter_Expecter { + return &MockDbUserAndRolesGetter_Expecter{mock: &_m.Mock} +} + +// ActivateUser provides a mock function with given fields: userId +func (_m *MockDbUserAndRolesGetter) ActivateUser(userId string) error { + ret := _m.Called(userId) + + if len(ret) == 0 { + panic("no return value specified for ActivateUser") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(userId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockDbUserAndRolesGetter_ActivateUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ActivateUser' +type MockDbUserAndRolesGetter_ActivateUser_Call struct { + *mock.Call +} + +// ActivateUser is a helper method to define mock.On call +// - userId string +func (_e *MockDbUserAndRolesGetter_Expecter) ActivateUser(userId interface{}) *MockDbUserAndRolesGetter_ActivateUser_Call { + return &MockDbUserAndRolesGetter_ActivateUser_Call{Call: _e.mock.On("ActivateUser", userId)} +} + +func (_c *MockDbUserAndRolesGetter_ActivateUser_Call) Run(run func(userId string)) *MockDbUserAndRolesGetter_ActivateUser_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockDbUserAndRolesGetter_ActivateUser_Call) Return(_a0 error) *MockDbUserAndRolesGetter_ActivateUser_Call { + _c.Call.Return(_a0) + return _c +} + +func 
(_c *MockDbUserAndRolesGetter_ActivateUser_Call) RunAndReturn(run func(string) error) *MockDbUserAndRolesGetter_ActivateUser_Call { + _c.Call.Return(run) + return _c +} + +// CheckUserIdentifierExists provides a mock function with given fields: userIdentifier +func (_m *MockDbUserAndRolesGetter) CheckUserIdentifierExists(userIdentifier string) (bool, error) { + ret := _m.Called(userIdentifier) + + if len(ret) == 0 { + panic("no return value specified for CheckUserIdentifierExists") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(string) (bool, error)); ok { + return rf(userIdentifier) + } + if rf, ok := ret.Get(0).(func(string) bool); ok { + r0 = rf(userIdentifier) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(userIdentifier) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockDbUserAndRolesGetter_CheckUserIdentifierExists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckUserIdentifierExists' +type MockDbUserAndRolesGetter_CheckUserIdentifierExists_Call struct { + *mock.Call +} + +// CheckUserIdentifierExists is a helper method to define mock.On call +// - userIdentifier string +func (_e *MockDbUserAndRolesGetter_Expecter) CheckUserIdentifierExists(userIdentifier interface{}) *MockDbUserAndRolesGetter_CheckUserIdentifierExists_Call { + return &MockDbUserAndRolesGetter_CheckUserIdentifierExists_Call{Call: _e.mock.On("CheckUserIdentifierExists", userIdentifier)} +} + +func (_c *MockDbUserAndRolesGetter_CheckUserIdentifierExists_Call) Run(run func(userIdentifier string)) *MockDbUserAndRolesGetter_CheckUserIdentifierExists_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockDbUserAndRolesGetter_CheckUserIdentifierExists_Call) Return(_a0 bool, _a1 error) *MockDbUserAndRolesGetter_CheckUserIdentifierExists_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c 
*MockDbUserAndRolesGetter_CheckUserIdentifierExists_Call) RunAndReturn(run func(string) (bool, error)) *MockDbUserAndRolesGetter_CheckUserIdentifierExists_Call { + _c.Call.Return(run) + return _c +} + +// CreateUser provides a mock function with given fields: userId, secureHash, userIdentifier, apiKeyFirstLetters, createdAt +func (_m *MockDbUserAndRolesGetter) CreateUser(userId string, secureHash string, userIdentifier string, apiKeyFirstLetters string, createdAt time.Time) error { + ret := _m.Called(userId, secureHash, userIdentifier, apiKeyFirstLetters, createdAt) + + if len(ret) == 0 { + panic("no return value specified for CreateUser") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, string, string, string, time.Time) error); ok { + r0 = rf(userId, secureHash, userIdentifier, apiKeyFirstLetters, createdAt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockDbUserAndRolesGetter_CreateUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateUser' +type MockDbUserAndRolesGetter_CreateUser_Call struct { + *mock.Call +} + +// CreateUser is a helper method to define mock.On call +// - userId string +// - secureHash string +// - userIdentifier string +// - apiKeyFirstLetters string +// - createdAt time.Time +func (_e *MockDbUserAndRolesGetter_Expecter) CreateUser(userId interface{}, secureHash interface{}, userIdentifier interface{}, apiKeyFirstLetters interface{}, createdAt interface{}) *MockDbUserAndRolesGetter_CreateUser_Call { + return &MockDbUserAndRolesGetter_CreateUser_Call{Call: _e.mock.On("CreateUser", userId, secureHash, userIdentifier, apiKeyFirstLetters, createdAt)} +} + +func (_c *MockDbUserAndRolesGetter_CreateUser_Call) Run(run func(userId string, secureHash string, userIdentifier string, apiKeyFirstLetters string, createdAt time.Time)) *MockDbUserAndRolesGetter_CreateUser_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(string), 
args[3].(string), args[4].(time.Time)) + }) + return _c +} + +func (_c *MockDbUserAndRolesGetter_CreateUser_Call) Return(_a0 error) *MockDbUserAndRolesGetter_CreateUser_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockDbUserAndRolesGetter_CreateUser_Call) RunAndReturn(run func(string, string, string, string, time.Time) error) *MockDbUserAndRolesGetter_CreateUser_Call { + _c.Call.Return(run) + return _c +} + +// CreateUserWithKey provides a mock function with given fields: userId, apiKeyFirstLetters, weakHash, createdAt +func (_m *MockDbUserAndRolesGetter) CreateUserWithKey(userId string, apiKeyFirstLetters string, weakHash [32]byte, createdAt time.Time) error { + ret := _m.Called(userId, apiKeyFirstLetters, weakHash, createdAt) + + if len(ret) == 0 { + panic("no return value specified for CreateUserWithKey") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, string, [32]byte, time.Time) error); ok { + r0 = rf(userId, apiKeyFirstLetters, weakHash, createdAt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockDbUserAndRolesGetter_CreateUserWithKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateUserWithKey' +type MockDbUserAndRolesGetter_CreateUserWithKey_Call struct { + *mock.Call +} + +// CreateUserWithKey is a helper method to define mock.On call +// - userId string +// - apiKeyFirstLetters string +// - weakHash [32]byte +// - createdAt time.Time +func (_e *MockDbUserAndRolesGetter_Expecter) CreateUserWithKey(userId interface{}, apiKeyFirstLetters interface{}, weakHash interface{}, createdAt interface{}) *MockDbUserAndRolesGetter_CreateUserWithKey_Call { + return &MockDbUserAndRolesGetter_CreateUserWithKey_Call{Call: _e.mock.On("CreateUserWithKey", userId, apiKeyFirstLetters, weakHash, createdAt)} +} + +func (_c *MockDbUserAndRolesGetter_CreateUserWithKey_Call) Run(run func(userId string, apiKeyFirstLetters string, weakHash [32]byte, createdAt time.Time)) 
*MockDbUserAndRolesGetter_CreateUserWithKey_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].([32]byte), args[3].(time.Time)) + }) + return _c +} + +func (_c *MockDbUserAndRolesGetter_CreateUserWithKey_Call) Return(_a0 error) *MockDbUserAndRolesGetter_CreateUserWithKey_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockDbUserAndRolesGetter_CreateUserWithKey_Call) RunAndReturn(run func(string, string, [32]byte, time.Time) error) *MockDbUserAndRolesGetter_CreateUserWithKey_Call { + _c.Call.Return(run) + return _c +} + +// DeactivateUser provides a mock function with given fields: userId, revokeKey +func (_m *MockDbUserAndRolesGetter) DeactivateUser(userId string, revokeKey bool) error { + ret := _m.Called(userId, revokeKey) + + if len(ret) == 0 { + panic("no return value specified for DeactivateUser") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, bool) error); ok { + r0 = rf(userId, revokeKey) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockDbUserAndRolesGetter_DeactivateUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeactivateUser' +type MockDbUserAndRolesGetter_DeactivateUser_Call struct { + *mock.Call +} + +// DeactivateUser is a helper method to define mock.On call +// - userId string +// - revokeKey bool +func (_e *MockDbUserAndRolesGetter_Expecter) DeactivateUser(userId interface{}, revokeKey interface{}) *MockDbUserAndRolesGetter_DeactivateUser_Call { + return &MockDbUserAndRolesGetter_DeactivateUser_Call{Call: _e.mock.On("DeactivateUser", userId, revokeKey)} +} + +func (_c *MockDbUserAndRolesGetter_DeactivateUser_Call) Run(run func(userId string, revokeKey bool)) *MockDbUserAndRolesGetter_DeactivateUser_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(bool)) + }) + return _c +} + +func (_c *MockDbUserAndRolesGetter_DeactivateUser_Call) Return(_a0 error) 
*MockDbUserAndRolesGetter_DeactivateUser_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockDbUserAndRolesGetter_DeactivateUser_Call) RunAndReturn(run func(string, bool) error) *MockDbUserAndRolesGetter_DeactivateUser_Call { + _c.Call.Return(run) + return _c +} + +// DeleteUser provides a mock function with given fields: userId +func (_m *MockDbUserAndRolesGetter) DeleteUser(userId string) error { + ret := _m.Called(userId) + + if len(ret) == 0 { + panic("no return value specified for DeleteUser") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(userId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockDbUserAndRolesGetter_DeleteUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteUser' +type MockDbUserAndRolesGetter_DeleteUser_Call struct { + *mock.Call +} + +// DeleteUser is a helper method to define mock.On call +// - userId string +func (_e *MockDbUserAndRolesGetter_Expecter) DeleteUser(userId interface{}) *MockDbUserAndRolesGetter_DeleteUser_Call { + return &MockDbUserAndRolesGetter_DeleteUser_Call{Call: _e.mock.On("DeleteUser", userId)} +} + +func (_c *MockDbUserAndRolesGetter_DeleteUser_Call) Run(run func(userId string)) *MockDbUserAndRolesGetter_DeleteUser_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockDbUserAndRolesGetter_DeleteUser_Call) Return(_a0 error) *MockDbUserAndRolesGetter_DeleteUser_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockDbUserAndRolesGetter_DeleteUser_Call) RunAndReturn(run func(string) error) *MockDbUserAndRolesGetter_DeleteUser_Call { + _c.Call.Return(run) + return _c +} + +// GetRolesForUserOrGroup provides a mock function with given fields: user, authTyoes, isGroup +func (_m *MockDbUserAndRolesGetter) GetRolesForUserOrGroup(user string, authTyoes authentication.AuthType, isGroup bool) (map[string][]authorization.Policy, error) { + ret := _m.Called(user, 
authTyoes, isGroup) + + if len(ret) == 0 { + panic("no return value specified for GetRolesForUserOrGroup") + } + + var r0 map[string][]authorization.Policy + var r1 error + if rf, ok := ret.Get(0).(func(string, authentication.AuthType, bool) (map[string][]authorization.Policy, error)); ok { + return rf(user, authTyoes, isGroup) + } + if rf, ok := ret.Get(0).(func(string, authentication.AuthType, bool) map[string][]authorization.Policy); ok { + r0 = rf(user, authTyoes, isGroup) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string][]authorization.Policy) + } + } + + if rf, ok := ret.Get(1).(func(string, authentication.AuthType, bool) error); ok { + r1 = rf(user, authTyoes, isGroup) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockDbUserAndRolesGetter_GetRolesForUserOrGroup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRolesForUserOrGroup' +type MockDbUserAndRolesGetter_GetRolesForUserOrGroup_Call struct { + *mock.Call +} + +// GetRolesForUserOrGroup is a helper method to define mock.On call +// - user string +// - authTyoes authentication.AuthType +// - isGroup bool +func (_e *MockDbUserAndRolesGetter_Expecter) GetRolesForUserOrGroup(user interface{}, authTyoes interface{}, isGroup interface{}) *MockDbUserAndRolesGetter_GetRolesForUserOrGroup_Call { + return &MockDbUserAndRolesGetter_GetRolesForUserOrGroup_Call{Call: _e.mock.On("GetRolesForUserOrGroup", user, authTyoes, isGroup)} +} + +func (_c *MockDbUserAndRolesGetter_GetRolesForUserOrGroup_Call) Run(run func(user string, authTyoes authentication.AuthType, isGroup bool)) *MockDbUserAndRolesGetter_GetRolesForUserOrGroup_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(authentication.AuthType), args[2].(bool)) + }) + return _c +} + +func (_c *MockDbUserAndRolesGetter_GetRolesForUserOrGroup_Call) Return(_a0 map[string][]authorization.Policy, _a1 error) *MockDbUserAndRolesGetter_GetRolesForUserOrGroup_Call { 
+ _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockDbUserAndRolesGetter_GetRolesForUserOrGroup_Call) RunAndReturn(run func(string, authentication.AuthType, bool) (map[string][]authorization.Policy, error)) *MockDbUserAndRolesGetter_GetRolesForUserOrGroup_Call { + _c.Call.Return(run) + return _c +} + +// GetUsers provides a mock function with given fields: userIds +func (_m *MockDbUserAndRolesGetter) GetUsers(userIds ...string) (map[string]*apikey.User, error) { + _va := make([]interface{}, len(userIds)) + for _i := range userIds { + _va[_i] = userIds[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetUsers") + } + + var r0 map[string]*apikey.User + var r1 error + if rf, ok := ret.Get(0).(func(...string) (map[string]*apikey.User, error)); ok { + return rf(userIds...) + } + if rf, ok := ret.Get(0).(func(...string) map[string]*apikey.User); ok { + r0 = rf(userIds...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]*apikey.User) + } + } + + if rf, ok := ret.Get(1).(func(...string) error); ok { + r1 = rf(userIds...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockDbUserAndRolesGetter_GetUsers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUsers' +type MockDbUserAndRolesGetter_GetUsers_Call struct { + *mock.Call +} + +// GetUsers is a helper method to define mock.On call +// - userIds ...string +func (_e *MockDbUserAndRolesGetter_Expecter) GetUsers(userIds ...interface{}) *MockDbUserAndRolesGetter_GetUsers_Call { + return &MockDbUserAndRolesGetter_GetUsers_Call{Call: _e.mock.On("GetUsers", + append([]interface{}{}, userIds...)...)} +} + +func (_c *MockDbUserAndRolesGetter_GetUsers_Call) Run(run func(userIds ...string)) *MockDbUserAndRolesGetter_GetUsers_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]string, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(string) + } + } + run(variadicArgs...) + }) + return _c +} + +func (_c *MockDbUserAndRolesGetter_GetUsers_Call) Return(_a0 map[string]*apikey.User, _a1 error) *MockDbUserAndRolesGetter_GetUsers_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockDbUserAndRolesGetter_GetUsers_Call) RunAndReturn(run func(...string) (map[string]*apikey.User, error)) *MockDbUserAndRolesGetter_GetUsers_Call { + _c.Call.Return(run) + return _c +} + +// RevokeRolesForUser provides a mock function with given fields: userName, roles +func (_m *MockDbUserAndRolesGetter) RevokeRolesForUser(userName string, roles ...string) error { + _va := make([]interface{}, len(roles)) + for _i := range roles { + _va[_i] = roles[_i] + } + var _ca []interface{} + _ca = append(_ca, userName) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for RevokeRolesForUser") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, ...string) error); ok { + r0 = rf(userName, roles...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockDbUserAndRolesGetter_RevokeRolesForUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RevokeRolesForUser' +type MockDbUserAndRolesGetter_RevokeRolesForUser_Call struct { + *mock.Call +} + +// RevokeRolesForUser is a helper method to define mock.On call +// - userName string +// - roles ...string +func (_e *MockDbUserAndRolesGetter_Expecter) RevokeRolesForUser(userName interface{}, roles ...interface{}) *MockDbUserAndRolesGetter_RevokeRolesForUser_Call { + return &MockDbUserAndRolesGetter_RevokeRolesForUser_Call{Call: _e.mock.On("RevokeRolesForUser", + append([]interface{}{userName}, roles...)...)} +} + +func (_c *MockDbUserAndRolesGetter_RevokeRolesForUser_Call) Run(run func(userName string, roles ...string)) *MockDbUserAndRolesGetter_RevokeRolesForUser_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]string, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(string) + } + } + run(args[0].(string), variadicArgs...) 
+ }) + return _c +} + +func (_c *MockDbUserAndRolesGetter_RevokeRolesForUser_Call) Return(_a0 error) *MockDbUserAndRolesGetter_RevokeRolesForUser_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockDbUserAndRolesGetter_RevokeRolesForUser_Call) RunAndReturn(run func(string, ...string) error) *MockDbUserAndRolesGetter_RevokeRolesForUser_Call { + _c.Call.Return(run) + return _c +} + +// RotateKey provides a mock function with given fields: userId, apiKeyFirstLetters, secureHash, oldIdentifier, newIdentifier +func (_m *MockDbUserAndRolesGetter) RotateKey(userId string, apiKeyFirstLetters string, secureHash string, oldIdentifier string, newIdentifier string) error { + ret := _m.Called(userId, apiKeyFirstLetters, secureHash, oldIdentifier, newIdentifier) + + if len(ret) == 0 { + panic("no return value specified for RotateKey") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, string, string, string, string) error); ok { + r0 = rf(userId, apiKeyFirstLetters, secureHash, oldIdentifier, newIdentifier) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockDbUserAndRolesGetter_RotateKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RotateKey' +type MockDbUserAndRolesGetter_RotateKey_Call struct { + *mock.Call +} + +// RotateKey is a helper method to define mock.On call +// - userId string +// - apiKeyFirstLetters string +// - secureHash string +// - oldIdentifier string +// - newIdentifier string +func (_e *MockDbUserAndRolesGetter_Expecter) RotateKey(userId interface{}, apiKeyFirstLetters interface{}, secureHash interface{}, oldIdentifier interface{}, newIdentifier interface{}) *MockDbUserAndRolesGetter_RotateKey_Call { + return &MockDbUserAndRolesGetter_RotateKey_Call{Call: _e.mock.On("RotateKey", userId, apiKeyFirstLetters, secureHash, oldIdentifier, newIdentifier)} +} + +func (_c *MockDbUserAndRolesGetter_RotateKey_Call) Run(run func(userId string, apiKeyFirstLetters string, secureHash string, 
oldIdentifier string, newIdentifier string)) *MockDbUserAndRolesGetter_RotateKey_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(string), args[3].(string), args[4].(string)) + }) + return _c +} + +func (_c *MockDbUserAndRolesGetter_RotateKey_Call) Return(_a0 error) *MockDbUserAndRolesGetter_RotateKey_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockDbUserAndRolesGetter_RotateKey_Call) RunAndReturn(run func(string, string, string, string, string) error) *MockDbUserAndRolesGetter_RotateKey_Call { + _c.Call.Return(run) + return _c +} + +// NewMockDbUserAndRolesGetter creates a new instance of MockDbUserAndRolesGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockDbUserAndRolesGetter(t interface { + mock.TestingT + Cleanup(func()) +}) *MockDbUserAndRolesGetter { + mock := &MockDbUserAndRolesGetter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/multi_node_helper.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/multi_node_helper.go new file mode 100644 index 0000000000000000000000000000000000000000..4a234e39d192b419e9c5b41528ea458348a8577c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/multi_node_helper.go @@ -0,0 +1,66 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db_users + +import ( + "encoding/json" + "io" + "net/http" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" +) + +type FakeNodeResolver struct { + path string +} + +func (f FakeNodeResolver) NodeHostname(nodeName string) (string, bool) { + return strings.TrimPrefix(f.path, "http://"), true +} + +type fakeHandler struct { + t *testing.T + nodeResponses []map[string]time.Time + counter atomic.Int32 +} + +func (f *fakeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + require.Equal(f.t, http.MethodPost, r.Method) + + bodyBytes, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "Error reading request body", http.StatusBadRequest) + return + } + defer r.Body.Close() + + var body apikey.UserStatusRequest + if err := json.Unmarshal(bodyBytes, &body); err != nil { + http.Error(w, "Error parsing JSON body", http.StatusBadRequest) + return + } + if !body.ReturnStatus { + return + } + counter := f.counter.Add(1) - 1 + + ret := apikey.UserStatusResponse{Users: f.nodeResponses[counter]} + outBytes, err := json.Marshal(ret) + require.Nil(f.t, err) + + w.Write(outBytes) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/rotate_key_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/rotate_key_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b2b3a5d2fa0c5b76123791692e85f1d9a0d5c9b2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/db_users/rotate_key_test.go @@ -0,0 +1,156 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db_users + +import ( + "errors" + "testing" + + "github.com/weaviate/weaviate/usecases/config" + + "github.com/weaviate/weaviate/usecases/auth/authorization" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/users" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" +) + +func TestSuccessRotate(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users("user")[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", "user").Return(map[string]*apikey.User{"user": {Id: "user"}}, nil) + dynUser.On("CheckUserIdentifierExists", mock.Anything).Return(false, nil) + dynUser.On("RotateKey", "user", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, + dbUserEnabled: true, + } + + res := h.rotateKey(users.RotateUserAPIKeyParams{UserID: "user", HTTPRequest: req}, principal) + parsed, ok := res.(*users.RotateUserAPIKeyOK) + assert.True(t, ok) + assert.NotNil(t, parsed) + + require.Len(t, *parsed.Payload.Apikey, 88) +} + +func TestRotateInternalServerError(t *testing.T) { + principal := &models.Principal{} + tests := []struct { + name string + GetUserReturnErr error + GetUserReturnValue map[string]*apikey.User + RotateKeyError error + }{ + {name: "get user error", GetUserReturnErr: errors.New("some error"), GetUserReturnValue: nil}, + {name: "rotate key error", GetUserReturnErr: nil, GetUserReturnValue: map[string]*apikey.User{"user": {Id: "user", InternalIdentifier: "abc"}}, RotateKeyError: errors.New("some error")}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users("user")[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", "user").Return(tt.GetUserReturnValue, tt.GetUserReturnErr) + if tt.GetUserReturnErr == nil { + dynUser.On("CheckUserIdentifierExists", mock.Anything).Return(false, nil) + dynUser.On("RotateKey", "user", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tt.RotateKeyError) + } + + h := dynUserHandler{ + dbUsers: dynUser, authorizer: authorizer, dbUserEnabled: true, + } + + res := h.rotateKey(users.RotateUserAPIKeyParams{UserID: "user", HTTPRequest: req}, principal) + parsed, ok := res.(*users.RotateUserAPIKeyInternalServerError) + assert.True(t, ok) + assert.NotNil(t, parsed) + }) + } +} + +func TestRotateNotFound(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users("user")[0]).Return(nil) + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", "user").Return(map[string]*apikey.User{}, nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + } + + res := h.rotateKey(users.RotateUserAPIKeyParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.RotateUserAPIKeyNotFound) + assert.True(t, ok) +} + +func TestRotateForbidden(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users("user")[0]).Return(errors.New("some error")) + + dynUser := NewMockDbUserAndRolesGetter(t) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + } + + res := h.rotateKey(users.RotateUserAPIKeyParams{UserID: "user", HTTPRequest: req}, 
principal) + _, ok := res.(*users.RotateUserAPIKeyForbidden) + assert.True(t, ok) +} + +func TestRotateUnprocessableEntity(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users("user")[0]).Return(nil) + + dynUser := NewMockDbUserAndRolesGetter(t) + dynUser.On("GetUsers", "user").Return(map[string]*apikey.User{}, nil) + + h := dynUserHandler{ + dbUsers: dynUser, + authorizer: authorizer, dbUserEnabled: true, + + staticApiKeysConfigs: config.StaticAPIKey{Enabled: true, Users: []string{"user"}, AllowedKeys: []string{"key"}}, + } + + res := h.rotateKey(users.RotateUserAPIKeyParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.RotateUserAPIKeyUnprocessableEntity) + assert.True(t, ok) +} + +func TestRotateNoDynamic(t *testing.T) { + principal := &models.Principal{} + authorizer := authorization.NewMockAuthorizer(t) + authorizer.On("Authorize", mock.Anything, principal, authorization.UPDATE, authorization.Users("user")[0]).Return(nil) + + h := dynUserHandler{ + dbUsers: NewMockDbUserAndRolesGetter(t), + authorizer: authorizer, + dbUserEnabled: false, + } + + res := h.rotateKey(users.RotateUserAPIKeyParams{UserID: "user", HTTPRequest: req}, principal) + _, ok := res.(*users.RotateUserAPIKeyUnprocessableEntity) + assert.True(t, ok) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/doc.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..17035873d60939ba511d40aa38ca855517b0632f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/doc.go @@ -0,0 +1,41 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate 
B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +// Package rest Weaviate +// +// # Introduction +// Weaviate is an open source, AI-native vector database that helps developers create intuitive and reliable AI-powered applications. +// ### Base Path +// The base path for the Weaviate server is structured as `[YOUR-WEAVIATE-HOST]:[PORT]/v1`. As an example, if you wish to access the `schema` endpoint on a local instance, you would navigate to `http://localhost:8080/v1/schema`. Ensure you replace `[YOUR-WEAVIATE-HOST]` and `[PORT]` with your actual server host and port number respectively. +// ### Questions? +// If you have any comments or questions, please feel free to reach out to us at the community forum [https://forum.weaviate.io/](https://forum.weaviate.io/). +// ### Issues? +// If you find a bug or want to file a feature request, please open an issue on our GitHub repository for [Weaviate](https://github.com/weaviate/weaviate). +// ### Want more documentation? +// For a quickstart, code examples, concepts and more, please visit our [documentation page](https://weaviate.io/developers/weaviate). 
+// Schemes: +// https +// Host: localhost +// BasePath: /v1 +// Version: 1.33.0-rc.1 +// Contact: Weaviate https://github.com/weaviate +// +// Consumes: +// - application/json +// - application/yaml +// +// Produces: +// - application/json +// +// swagger:meta +package rest diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/embedded_spec.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/embedded_spec.go new file mode 100644 index 0000000000000000000000000000000000000000..ea2b53719e727d82a966529c52f9960b63871462 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/embedded_spec.go @@ -0,0 +1,18196 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package rest + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" +) + +var ( + // SwaggerJSON embedded version of the swagger document used at generation time + SwaggerJSON json.RawMessage + // FlatSwaggerJSON embedded flattened version of the swagger document used at generation time + FlatSwaggerJSON json.RawMessage +) + +func init() { + SwaggerJSON = json.RawMessage([]byte(`{ + "consumes": [ + "application/yaml", + "application/json" + ], + "produces": [ + "application/json" + ], + "schemes": [ + "https" + ], + "swagger": "2.0", + "info": { + "description": "# Introduction\n Weaviate is an open source, AI-native vector database that helps developers create intuitive and reliable AI-powered applications. \n ### Base Path \nThe base path for the Weaviate server is structured as ` + "`" + `[YOUR-WEAVIATE-HOST]:[PORT]/v1` + "`" + `. 
As an example, if you wish to access the ` + "`" + `schema` + "`" + ` endpoint on a local instance, you would navigate to ` + "`" + `http://localhost:8080/v1/schema` + "`" + `. Ensure you replace ` + "`" + `[YOUR-WEAVIATE-HOST]` + "`" + ` and ` + "`" + `[PORT]` + "`" + ` with your actual server host and port number respectively. \n ### Questions? \nIf you have any comments or questions, please feel free to reach out to us at the community forum [https://forum.weaviate.io/](https://forum.weaviate.io/). \n### Issues? \nIf you find a bug or want to file a feature request, please open an issue on our GitHub repository for [Weaviate](https://github.com/weaviate/weaviate). \n### Want more documentation? \nFor a quickstart, code examples, concepts and more, please visit our [documentation page](https://weaviate.io/developers/weaviate).", + "title": "Weaviate", + "contact": { + "name": "Weaviate", + "url": "https://github.com/weaviate", + "email": "hello@weaviate.io" + }, + "version": "1.33.0-rc.1" + }, + "basePath": "/v1", + "paths": { + "/": { + "get": { + "description": "Get links to other endpoints to help discover the REST API", + "summary": "List available endpoints", + "operationId": "weaviate.root", + "responses": { + "200": { + "description": "Weaviate is alive and ready to serve content", + "schema": { + "type": "object", + "properties": { + "links": { + "type": "array", + "items": { + "$ref": "#/definitions/Link" + } + } + } + } + } + } + } + }, + "/.well-known/live": { + "get": { + "description": "Determines whether the application is alive. 
Can be used for kubernetes liveness probe", + "summary": "Get application liveness.", + "operationId": "weaviate.wellknown.liveness", + "responses": { + "200": { + "description": "The application is able to respond to HTTP requests" + } + } + } + }, + "/.well-known/openid-configuration": { + "get": { + "description": "OIDC Discovery page, redirects to the token issuer if one is configured", + "tags": [ + "well-known", + "oidc", + "discovery" + ], + "summary": "OIDC discovery information if OIDC auth is enabled", + "responses": { + "200": { + "description": "Successful response, inspect body", + "schema": { + "type": "object", + "properties": { + "clientId": { + "description": "OAuth Client ID", + "type": "string" + }, + "href": { + "description": "The Location to redirect to", + "type": "string" + }, + "scopes": { + "description": "OAuth Scopes", + "type": "array", + "items": { + "type": "string" + }, + "x-omitempty": true + } + } + } + }, + "404": { + "description": "Not found, no oidc provider present" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false + } + }, + "/.well-known/ready": { + "get": { + "description": "Determines whether the application is ready to receive traffic. Can be used for kubernetes readiness probe.", + "summary": "Get application readiness.", + "operationId": "weaviate.wellknown.readiness", + "responses": { + "200": { + "description": "The application has completed its start-up routine and is ready to accept traffic." + }, + "503": { + "description": "The application is currently not able to serve traffic. If other horizontal replicas of weaviate are available and they are capable of receiving traffic, all traffic should be redirected there instead." 
+ } + } + } + }, + "/aliases": { + "get": { + "description": "Retrieve a list of all aliases in the system. Results can be filtered by specifying a collection (class) name to get aliases for a specific collection only.", + "tags": [ + "schema" + ], + "summary": "List aliases", + "operationId": "aliases.get", + "parameters": [ + { + "type": "string", + "description": "Optional filter to retrieve aliases for a specific collection (class) only. If not provided, returns all aliases.", + "name": "class", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the list of aliases", + "schema": { + "$ref": "#/definitions/AliasResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid collection (class) parameter provided", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "post": { + "description": "Create a new alias mapping between an alias name and a collection (class). The alias acts as an alternative name for accessing the collection.", + "tags": [ + "schema" + ], + "summary": "Create a new alias", + "operationId": "aliases.create", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Alias" + } + } + ], + "responses": { + "200": { + "description": "Successfully created a new alias for the specified collection (class)", + "schema": { + "$ref": "#/definitions/Alias" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid create alias request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/aliases/{aliasName}": { + "get": { + "description": "Retrieve details about a specific alias by its name, including which collection (class) it points to.", + "tags": [ + "schema" + ], + "summary": "Get an alias", + "operationId": "aliases.get.alias", + "parameters": [ + { + "type": "string", + "name": "aliasName", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the alias details.", + "schema": { + "$ref": "#/definitions/Alias" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Alias does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid alias name provided.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "put": { + "description": "Update an existing alias to point to a different collection (class). 
This allows you to redirect an alias from one collection to another without changing the alias name.", + "tags": [ + "schema" + ], + "summary": "Update an alias", + "operationId": "aliases.update", + "parameters": [ + { + "type": "string", + "name": "aliasName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "class": { + "description": "The new collection (class) that the alias should point to.", + "type": "string" + } + } + } + } + ], + "responses": { + "200": { + "description": "Successfully updated the alias to point to the new collection (class).", + "schema": { + "$ref": "#/definitions/Alias" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Alias does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid update alias request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "delete": { + "description": "Remove an existing alias from the system. This will delete the alias mapping but will not affect the underlying collection (class).", + "tags": [ + "schema" + ], + "summary": "Delete an alias", + "operationId": "aliases.delete", + "parameters": [ + { + "type": "string", + "name": "aliasName", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Successfully deleted the alias." + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Alias does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid delete alias request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/groups/{groupType}": { + "get": { + "description": "Retrieves a list of all available group names for a specified group type (` + "`" + `oidc` + "`" + ` or ` + "`" + `db` + "`" + `).", + "tags": [ + "authz" + ], + "summary": "List all groups of a specific type", + "operationId": "getGroups", + "parameters": [ + { + "enum": [ + "oidc" + ], + "type": "string", + "description": "The type of group to retrieve.", + "name": "groupType", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A list of group names for the specified type.", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "The request syntax is correct, but the server couldn't process it due to semantic issues.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.groups" + ] + } + }, + "/authz/groups/{id}/assign": { + "post": { + "tags": [ + "authz" + ], + "summary": "Assign a role to a group", + "operationId": "assignRoleToGroup", + "parameters": [ + { + "type": "string", + "description": "group name", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "groupType": { + "$ref": "#/definitions/GroupType" + }, + "roles": { + "description": "the roles that assigned to group", + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + ], + "responses": { + "200": { + "description": "Role assigned successfully" + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "role or group is not found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.assign.role" + ] + } + }, + "/authz/groups/{id}/revoke": { + "post": { + "tags": [ + "authz" + ], + "summary": "Revoke a role from a group", + "operationId": "revokeRoleFromGroup", + "parameters": [ + { + "type": "string", + "description": "group name", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "groupType": { + "$ref": "#/definitions/GroupType" + }, + "roles": { + "description": "the roles that revoked from group", + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + ], + "responses": { + "200": { + "description": "Role revoked successfully" + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "role or group is not found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.revoke.role.group" + ] + } + }, + "/authz/groups/{id}/roles/{groupType}": { + "get": { + "description": "Retrieves a list of all roles assigned to a specific group. 
The group must be identified by both its name (` + "`" + `id` + "`" + `) and its type (` + "`" + `db` + "`" + ` or ` + "`" + `oidc` + "`" + `).", + "tags": [ + "authz" + ], + "summary": "Get roles assigned to a specific group", + "operationId": "getRolesForGroup", + "parameters": [ + { + "type": "string", + "description": "The unique name of the group.", + "name": "id", + "in": "path", + "required": true + }, + { + "enum": [ + "oidc" + ], + "type": "string", + "description": "The type of the group.", + "name": "groupType", + "in": "path", + "required": true + }, + { + "type": "boolean", + "default": false, + "description": "If true, the response will include the full role definitions with all associated permissions. If false, only role names are returned.", + "name": "includeFullRoles", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A list of roles assigned to the specified group.", + "schema": { + "$ref": "#/definitions/RolesListResponse" + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "The specified group was not found." + }, + "422": { + "description": "The request syntax is correct, but the server couldn't process it due to semantic issues.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.groups.roles" + ] + } + }, + "/authz/roles": { + "get": { + "tags": [ + "authz" + ], + "summary": "Get all roles", + "operationId": "getRoles", + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/RolesListResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.roles" + ] + }, + "post": { + "tags": [ + "authz" + ], + "summary": "create new role", + "operationId": "createRole", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Role" + } + } + ], + "responses": { + "201": { + "description": "Role created successfully" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "409": { + "description": "Role already exists", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.create.role" + ] + } + }, + "/authz/roles/{id}": { + "get": { + "tags": [ + "authz" + ], + "summary": "Get a role", + "operationId": "getRole", + "parameters": [ + { + "type": "string", + "description": "role name", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/Role" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.role" + ] + }, + "delete": { + "tags": [ + "authz" + ], + "summary": "Delete role", + "operationId": "deleteRole", + "parameters": [ + { + "type": "string", + "description": "role name", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.delete.role" + ] + } + }, + "/authz/roles/{id}/add-permissions": { + "post": { + "tags": [ + "authz" + ], + "summary": "Add permission to a given role.", + "operationId": "addPermissions", + "parameters": [ + { + "type": "string", + "description": "role name", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "required": [ + "name", + "permissions" + ], + "properties": { + "permissions": { + "description": "permissions to be added to the role", + "type": "array", + "items": { + "$ref": "#/definitions/Permission" + } + } + } + } + } + ], + "responses": { + "200": { + "description": "Permissions added successfully" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.add.role.permissions" + ] + } + }, + "/authz/roles/{id}/group-assignments": { + "get": { + "description": "Retrieves a list of all groups that have been assigned a specific role, identified by its name.", + "tags": [ + "authz" + ], + "summary": "Get groups that have a specific role assigned", + "operationId": "getGroupsForRole", + "parameters": [ + { + "type": "string", + "description": "The unique name of the role.", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the list of groups that have the role assigned.", + "schema": { + "type": "array", + "items": { + "type": "object", + "required": [ + "name", + "groupType" + ], + "properties": { + "groupId": { + "type": "string" + }, + "groupType": { + "$ref": "#/definitions/GroupType" + } + } + } + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "The specified role was not found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.roles.groups" + ] + } + }, + "/authz/roles/{id}/has-permission": { + "post": { + "tags": [ + "authz" + ], + "summary": "Check whether role possesses this permission.", + "operationId": "hasPermission", + "parameters": [ + { + "type": "string", + "description": "role name", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Permission" + } + } + ], + "responses": { + "200": { + "description": "Permission check was successful", + "schema": { + "type": "boolean" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.has.role.permission" + ] + } + }, + "/authz/roles/{id}/remove-permissions": { + "post": { + "tags": [ + "authz" + ], + "summary": "Remove permissions from a role. 
If this results in an empty role, the role will be deleted.", + "operationId": "removePermissions", + "parameters": [ + { + "type": "string", + "description": "role name", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "required": [ + "permissions" + ], + "properties": { + "permissions": { + "description": "permissions to remove from the role", + "type": "array", + "items": { + "$ref": "#/definitions/Permission" + } + } + } + } + } + ], + "responses": { + "200": { + "description": "Permissions removed successfully" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.remove.role.permissions" + ] + } + }, + "/authz/roles/{id}/user-assignments": { + "get": { + "tags": [ + "authz" + ], + "summary": "get users assigned to role", + "operationId": "getUsersForRole", + "parameters": [ + { + "type": "string", + "description": "role name", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Users assigned to this role", + "schema": { + "type": "array", + "items": { + "type": "object", + "required": [ + "name", + "userType" + ], + "properties": { + "userId": { + "type": "string" + }, + "userType": { + "$ref": "#/definitions/UserTypeOutput" + } + } + } + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.roles.users" + ] + } + }, + "/authz/roles/{id}/users": { + "get": { + "tags": [ + "authz" + ], + "summary": "get users (db + OIDC) assigned to role. 
Deprecated, will be removed when 1.29 is not supported anymore", + "operationId": "getUsersForRoleDeprecated", + "deprecated": true, + "parameters": [ + { + "type": "string", + "description": "role name", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Users assigned to this role", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.roles.users" + ] + } + }, + "/authz/users/{id}/assign": { + "post": { + "tags": [ + "authz" + ], + "summary": "Assign a role to a user", + "operationId": "assignRoleToUser", + "parameters": [ + { + "type": "string", + "description": "user name", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "roles": { + "description": "the roles that assigned to user", + "type": "array", + "items": { + "type": "string" + } + }, + "userType": { + "$ref": "#/definitions/UserTypeInput" + } + } + } + } + ], + "responses": { + "200": { + "description": "Role assigned successfully" + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "role or user is not found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.assign.role.user" + ] + } + }, + "/authz/users/{id}/revoke": { + "post": { + "tags": [ + "authz" + ], + "summary": "Revoke a role from a user", + "operationId": "revokeRoleFromUser", + "parameters": [ + { + "type": "string", + "description": "user name", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "roles": { + "description": "the roles that revoked from the key or user", + "type": "array", + "items": { + "type": "string" + } + }, + "userType": { + "$ref": "#/definitions/UserTypeInput" + } + } + } + } + ], + "responses": { + "200": { + "description": "Role revoked successfully" + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "role or user is not found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.revoke.role.user" + ] + } + }, + "/authz/users/{id}/roles": { + "get": { + "tags": [ + "authz" + ], + "summary": "get roles assigned to user (DB + OIDC). Deprecated, will be removed when 1.29 is not supported anymore", + "operationId": "getRolesForUserDeprecated", + "deprecated": true, + "parameters": [ + { + "type": "string", + "description": "user name", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Role assigned users", + "schema": { + "$ref": "#/definitions/RolesListResponse" + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found for user" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.users.roles" + ] + } + }, + "/authz/users/{id}/roles/{userType}": { + "get": { + "tags": [ + "authz" + ], + "summary": "get roles assigned to user", + "operationId": "getRolesForUser", + "parameters": [ + { + "type": "string", + "description": "user name", + "name": "id", + "in": "path", + "required": true + }, + { + "enum": [ + "oidc", + "db" + ], + "type": "string", + "description": "The type of user", + "name": "userType", + "in": "path", + "required": true + }, + { + "type": "boolean", + "default": false, + "description": "Whether to include detailed role information needed the roles permission", + "name": "includeFullRoles", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Role assigned users", + "schema": { + "$ref": "#/definitions/RolesListResponse" + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found for user" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.users.roles" + ] + } + }, + "/backups/{backend}": { + "get": { + "description": "[Coming soon] List all backups in progress not implemented yet.", + "tags": [ + "backups" + ], + "summary": "List backups in progress", + "operationId": "backups.list", + "parameters": [ + { + "type": "string", + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "name": "backend", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Existed backups", + "schema": { + "$ref": "#/definitions/BackupListResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup list.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.backup" + ] + }, + "post": { + "description": "Start creating a backup for a set of collections. \u003cbr/\u003e\u003cbr/\u003eNotes: \u003cbr/\u003e- Weaviate uses gzip compression by default. \u003cbr/\u003e- Weaviate stays usable while a backup process is ongoing.", + "tags": [ + "backups" + ], + "summary": "Start a backup process", + "operationId": "backups.create", + "parameters": [ + { + "type": "string", + "description": "Backup backend name e.g. 
` + "`" + `filesystem` + "`" + `, ` + "`" + `gcs` + "`" + `, ` + "`" + `s3` + "`" + `, ` + "`" + `azure` + "`" + `.", + "name": "backend", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/BackupCreateRequest" + } + } + ], + "responses": { + "200": { + "description": "Backup create process successfully started.", + "schema": { + "$ref": "#/definitions/BackupCreateResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup creation attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.backup" + ] + } + }, + "/backups/{backend}/{id}": { + "get": { + "description": "Returns status of backup creation attempt for a set of collections. \u003cbr/\u003e\u003cbr/\u003eAll client implementations have a ` + "`" + `wait for completion` + "`" + ` option which will poll the backup status in the background and only return once the backup has completed (successfully or unsuccessfully). If you set the ` + "`" + `wait for completion` + "`" + ` option to false, you can also check the status yourself using this endpoint.", + "tags": [ + "backups" + ], + "summary": "Get backup process status", + "operationId": "backups.create.status", + "parameters": [ + { + "type": "string", + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "name": "backend", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The ID of a backup. 
Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Name of the bucket, container, volume, etc", + "name": "bucket", + "in": "query" + }, + { + "type": "string", + "description": "The path within the bucket", + "name": "path", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Backup creation status successfully returned", + "schema": { + "$ref": "#/definitions/BackupCreateStatusResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration status attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.backup" + ] + }, + "delete": { + "description": "Cancel created backup with specified ID", + "tags": [ + "backups" + ], + "summary": "Cancel backup", + "operationId": "backups.cancel", + "parameters": [ + { + "type": "string", + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "name": "backend", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The ID of a backup. 
Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Name of the bucket, container, volume, etc", + "name": "bucket", + "in": "query" + }, + { + "type": "string", + "description": "The path within the bucket", + "name": "path", + "in": "query" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup cancellation attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.backup" + ] + } + }, + "/backups/{backend}/{id}/restore": { + "get": { + "description": "Returns status of a backup restoration attempt for a set of classes. \u003cbr/\u003e\u003cbr/\u003eAll client implementations have a ` + "`" + `wait for completion` + "`" + ` option which will poll the backup status in the background and only return once the backup has completed (successfully or unsuccessfully). If you set the ` + "`" + `wait for completion` + "`" + ` option to false, you can also check the status yourself using the this endpoint.", + "tags": [ + "backups" + ], + "summary": "Get restore process status", + "operationId": "backups.restore.status", + "parameters": [ + { + "type": "string", + "description": "Backup backend name e.g. 
` + "`" + `filesystem` + "`" + `, ` + "`" + `gcs` + "`" + `, ` + "`" + `s3` + "`" + `, ` + "`" + `azure` + "`" + `.", + "name": "backend", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Name of the bucket, container, volume, etc", + "name": "bucket", + "in": "query" + }, + { + "type": "string", + "description": "The path within the bucket", + "name": "path", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Backup restoration status successfully returned", + "schema": { + "$ref": "#/definitions/BackupRestoreStatusResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.backup" + ] + }, + "post": { + "description": "Starts a process of restoring a backup for a set of collections. 
\u003cbr/\u003e\u003cbr/\u003eAny backup can be restored to any machine, as long as the number of nodes between source and target are identical.\u003cbr/\u003e\u003cbr/\u003eRequrements:\u003cbr/\u003e\u003cbr/\u003e- None of the collections to be restored already exist on the target restoration node(s).\u003cbr/\u003e- The node names of the backed-up collections' must match those of the target restoration node(s).", + "tags": [ + "backups" + ], + "summary": "Start a restoration process", + "operationId": "backups.restore", + "parameters": [ + { + "type": "string", + "description": "Backup backend name e.g. ` + "`" + `filesystem` + "`" + `, ` + "`" + `gcs` + "`" + `, ` + "`" + `s3` + "`" + `, ` + "`" + `azure` + "`" + `.", + "name": "backend", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/BackupRestoreRequest" + } + } + ], + "responses": { + "200": { + "description": "Backup restoration process successfully started.", + "schema": { + "$ref": "#/definitions/BackupRestoreResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.backup" + ] + } + }, + "/batch/objects": { + "post": { + "description": "Create new objects in bulk. \u003cbr/\u003e\u003cbr/\u003eMeta-data and schema values are validated. \u003cbr/\u003e\u003cbr/\u003e**Note: idempotence of ` + "`" + `/batch/objects` + "`" + `**: \u003cbr/\u003e` + "`" + `POST /batch/objects` + "`" + ` is idempotent, and will overwrite any existing object given the same id.", + "tags": [ + "batch", + "objects" + ], + "summary": "Creates new Objects based on a Object template as a batch.", + "operationId": "batch.objects.create", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "fields": { + "description": "Define which fields need to be returned. Default value is ALL", + "type": "array", + "items": { + "type": "string", + "default": "ALL", + "enum": [ + "ALL", + "class", + "schema", + "id", + "creationTimeUnix" + ] + } + }, + "objects": { + "type": "array", + "items": { + "$ref": "#/definitions/Object" + } + } + } + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + } + ], + "responses": { + "200": { + "description": "Request succeeded, see response body to get detailed information about each batched item.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/ObjectsGetResponse" + } + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.add" + ] + }, + "delete": { + "description": "Batch delete objects that match a particular filter. \u003cbr/\u003e\u003cbr/\u003eThe request body takes a single ` + "`" + `where` + "`" + ` filter and will delete all objects matched. \u003cbr/\u003e\u003cbr/\u003eNote that there is a limit to the number of objects to be deleted at once using this filter, in order to protect against unexpected memory surges and very-long-running requests. The default limit is 10,000 and may be configured by setting the ` + "`" + `QUERY_MAXIMUM_RESULTS` + "`" + ` environment variable. \u003cbr/\u003e\u003cbr/\u003eObjects are deleted in the same order that they would be returned in an equivalent Get query. 
To delete more objects than the limit, run the same query multiple times.", + "tags": [ + "batch", + "objects" + ], + "summary": "Deletes Objects based on a match filter as a batch.", + "operationId": "batch.objects.delete", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/BatchDelete" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "200": { + "description": "Request succeeded, see response body to get detailed information about each batched item.", + "schema": { + "$ref": "#/definitions/BatchDeleteResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + } + }, + "/batch/references": { + "post": { + "description": "Batch create cross-references between collections items (objects or objects) in bulk.", + "tags": [ + "batch", + "references" + ], + "summary": "Creates new Cross-References between arbitrary classes in bulk.", + "operationId": "batch.references.create", + "parameters": [ + { + "description": "A list of references to be batched. 
The ideal size depends on the used database connector. Please see the documentation of the used connector for help", + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/BatchReference" + } + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + } + ], + "responses": { + "200": { + "description": "Request Successful. Warning: A successful request does not guarantee that every batched reference was successfully created. Inspect the response body to see which references succeeded and which failed.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/BatchReferenceResponse" + } + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.add" + ] + } + }, + "/classifications/": { + "post": { + "description": "Trigger a classification based on the specified params. 
Classifications will run in the background, use GET /classifications/\u003cid\u003e to retrieve the status of your classification.", + "tags": [ + "classifications" + ], + "summary": "Starts a classification.", + "operationId": "classifications.post", + "parameters": [ + { + "description": "parameters to start a classification", + "name": "params", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Classification" + } + } + ], + "responses": { + "201": { + "description": "Successfully started classification.", + "schema": { + "$ref": "#/definitions/Classification" + } + }, + "400": { + "description": "Incorrect request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.classifications.post" + ] + } + }, + "/classifications/{id}": { + "get": { + "description": "Get status, results and metadata of a previously created classification", + "tags": [ + "classifications" + ], + "summary": "View previously created classification", + "operationId": "classifications.get", + "parameters": [ + { + "type": "string", + "description": "classification id", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Found the classification, returned as body", + "schema": { + "$ref": "#/definitions/Classification" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Classification does not exist" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.classifications.get" + ] + } + }, + "/cluster/statistics": { + "get": { + "description": "Returns Raft cluster statistics of Weaviate DB.", + "tags": [ + "cluster" + ], + "summary": "See Raft cluster statistics", + "operationId": "cluster.get.statistics", + "responses": { + "200": { + "description": "Cluster statistics successfully returned", + "schema": { + "$ref": "#/definitions/ClusterStatisticsResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration status attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.cluster.statistics.get" + ] + } + }, + "/graphql": { + "post": { + "description": "Get a response based on a GraphQL query", + "tags": [ + "graphql" + ], + "summary": "Get a response based on GraphQL", + "operationId": "graphql.post", + "parameters": [ + { + "description": "The GraphQL query request parameters.", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/GraphQLQuery" + } + } + ], + "responses": { + "200": { + "description": "Successful query (with select).", + "schema": { + "$ref": "#/definitions/GraphQLResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.query", + "weaviate.local.query.meta", + "weaviate.network.query", + "weaviate.network.query.meta" + ] + } + }, + "/graphql/batch": { + "post": { + "description": "Perform a batched GraphQL query", + "tags": [ + "graphql" + ], + "summary": "Get a response based on GraphQL.", + "operationId": "graphql.batch", + "parameters": [ + { + "description": "The GraphQL queries.", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/GraphQLQueries" + } + } + ], + "responses": { + "200": { + "description": "Successful query (with select).", + "schema": { + "$ref": "#/definitions/GraphQLResponses" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.query", + "weaviate.local.query.meta", + "weaviate.network.query", + "weaviate.network.query.meta" + ] + } + }, + "/meta": { + "get": { + "description": "Returns meta information about the server. 
Can be used to provide information to another Weaviate instance that wants to interact with the current instance.", + "tags": [ + "meta" + ], + "summary": "Returns meta information of the current Weaviate instance.", + "operationId": "meta.get", + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/Meta" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.query.meta" + ] + } + }, + "/nodes": { + "get": { + "description": "Returns node information for the entire database.", + "tags": [ + "nodes" + ], + "summary": "Node information for the database.", + "operationId": "nodes.get", + "parameters": [ + { + "$ref": "#/parameters/CommonOutputVerbosityParameterQuery" + } + ], + "responses": { + "200": { + "description": "Nodes status successfully returned", + "schema": { + "$ref": "#/definitions/NodesStatusResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration status attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.nodes.status.get" + ] + } + }, + "/nodes/{className}": { + "get": { + "description": "Returns node information for the nodes relevant to the collection.", + "tags": [ + "nodes" + ], + "summary": "Node information for a collection.", + "operationId": "nodes.get.class", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "shardName", + "in": "query" + }, + { + "$ref": "#/parameters/CommonOutputVerbosityParameterQuery" + } + ], + "responses": { + "200": { + "description": "Nodes status successfully returned", + "schema": { + "$ref": "#/definitions/NodesStatusResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration status attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.nodes.status.get.class" + ] + } + }, + "/objects": { + "get": { + "description": "Lists all Objects in reverse order of creation, owned by the user that belongs to the used token.", + "tags": [ + "objects" + ], + "summary": "Get a list of Objects.", + "operationId": "objects.list", + "parameters": [ + { + "$ref": "#/parameters/CommonAfterParameterQuery" + }, + { + "$ref": "#/parameters/CommonOffsetParameterQuery" + }, + { + "$ref": "#/parameters/CommonLimitParameterQuery" + }, + { + "$ref": "#/parameters/CommonIncludeParameterQuery" + }, + { + "$ref": "#/parameters/CommonSortParameterQuery" + }, + { + "$ref": "#/parameters/CommonOrderParameterQuery" + }, + { + "$ref": "#/parameters/CommonClassParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successful response. \u003cbr/\u003e\u003cbr/\u003eIf ` + "`" + `class` + "`" + ` is not provided, the response will not include any objects.", + "schema": { + "$ref": "#/definitions/ObjectsListResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.query" + ] + }, + "post": { + "description": "Create a new object. \u003cbr/\u003e\u003cbr/\u003eMeta-data and schema values are validated. \u003cbr/\u003e\u003cbr/\u003e**Note: Use ` + "`" + `/batch` + "`" + ` for importing many objects**: \u003cbr/\u003eIf you plan on importing a large number of objects, it's much more efficient to use the ` + "`" + `/batch` + "`" + ` endpoint. Otherwise, sending multiple single requests sequentially would incur a large performance penalty. \u003cbr/\u003e\u003cbr/\u003e**Note: idempotence of ` + "`" + `/objects` + "`" + `**: \u003cbr/\u003ePOST /objects will fail if an id is provided which already exists in the class. To update an existing object with the objects endpoint, use the PUT or PATCH method.", + "tags": [ + "objects" + ], + "summary": "Create a new object.", + "operationId": "objects.create", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + } + ], + "responses": { + "200": { + "description": "Object created.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.add" + ] + } + }, + "/objects/validate": { + "post": { + "description": "Validate an object's schema and meta-data without creating it. \u003cbr/\u003e\u003cbr/\u003eIf the schema of the object is valid, the request should return nothing with a plain RESTful request. Otherwise, an error object will be returned.", + "tags": [ + "objects" + ], + "summary": "Validate an Object based on a schema.", + "operationId": "objects.validate", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Object" + } + } + ], + "responses": { + "200": { + "description": "Successfully validated." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.query.meta" + ] + } + }, + "/objects/{className}/{id}": { + "get": { + "description": "Get a data object based on its collection and UUID.", + "tags": [ + "objects" + ], + "summary": "Get a specific Object based on its class and UUID. Also available as Websocket bus.", + "operationId": "objects.class.get", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "$ref": "#/parameters/CommonIncludeParameterQuery" + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonNodeNameParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "Request is well-formed (i.e., syntactically correct), but erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.query" + ] + }, + "put": { + "description": "Update an object based on its uuid and collection. This (` + "`" + `put` + "`" + `) method replaces the object with the provided object.", + "tags": [ + "objects" + ], + "summary": "Update a class object based on its uuid", + "operationId": "objects.class.put", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "The uuid of the data object to update.", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successfully received.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "delete": { + "description": "Delete an object based on its collection and UUID. \u003cbr/\u003e\u003cbr/\u003eNote: For backward compatibility, beacons also support an older, deprecated format without the collection name. As a result, when deleting a reference, the beacon specified has to match the beacon to be deleted exactly. In other words, if a beacon is present using the old format (without collection name) you also need to specify it the same way. \u003cbr/\u003e\u003cbr/\u003eIn the beacon format, you need to always use ` + "`" + `localhost` + "`" + ` as the host, rather than the actual hostname. ` + "`" + `localhost` + "`" + ` here refers to the fact that the beacon's target is on the same Weaviate instance, as opposed to a foreign instance.", + "tags": [ + "objects" + ], + "summary": "Delete object based on its class and UUID.", + "operationId": "objects.class.delete", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." 
+ }, + "422": { + "description": "Request is well-formed (i.e., syntactically correct), but erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": true, + "x-available-in-websocket": true, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "head": { + "description": "Checks if a data object exists based on its collection and uuid without retrieving it. \u003cbr/\u003e\u003cbr/\u003eInternally it skips reading the object from disk other than checking if it is present. Thus it does not use resources on marshalling, parsing, etc., and is faster. Note the resulting HTTP request has no body; the existence of an object is indicated solely by the status code.", + "tags": [ + "objects" + ], + "summary": "Checks object's existence based on its class and uuid.", + "operationId": "objects.class.head", + "parameters": [ + { + "type": "string", + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "The uuid of the data object", + "name": "id", + "in": "path", + "required": true + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "204": { + "description": "Object exists." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Object doesn't exist." 
+ }, + "422": { + "description": "Request is well-formed (i.e., syntactically correct), but erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": true, + "x-available-in-websocket": true, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "patch": { + "description": "Update an individual data object based on its class and uuid. This method supports json-merge style patch semantics (RFC 7396). Provided meta-data and schema values are validated. LastUpdateTime is set to the time this function is called.", + "tags": [ + "objects" + ], + "summary": "Update an Object based on its UUID (using patch semantics).", + "operationId": "objects.class.patch", + "parameters": [ + { + "type": "string", + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "The uuid of the data object to update.", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "RFC 7396-style patch, the body contains the object to merge into the existing object.", + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + } + ], + "responses": { + "204": { + "description": "Successfully applied. No content provided." + }, + "400": { + "description": "The patch-JSON is malformed.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "The patch-JSON is valid but unprocessable.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + } + }, + "/objects/{className}/{id}/references/{propertyName}": { + "put": { + "description": "Replace **all** references in cross-reference property of an object.", + "tags": [ + "objects" + ], + "summary": "Replace all references to a class-property.", + "operationId": "objects.class.references.put", + "parameters": [ + { + "type": "string", + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Unique name of the property related to the Object.", + "name": "propertyName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/MultipleRef" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successfully replaced all the references." + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Source object doesn't exist." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "post": { + "description": "Add a single reference to an object. This adds a reference to the array of cross-references of the given property in the source object specified by its collection name and id", + "tags": [ + "objects" + ], + "summary": "Add a single reference to a class-property.", + "operationId": "objects.class.references.create", + "parameters": [ + { + "type": "string", + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Unique name of the property related to the Object.", + "name": "propertyName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/SingleRef" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successfully added the reference." 
+ }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Source object doesn't exist." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "delete": { + "description": "Delete the single reference that is given in the body from the list of references that this property has.", + "tags": [ + "objects" + ], + "summary": "Delete a single reference from the list of references.", + "operationId": "objects.class.references.delete", + "parameters": [ + { + "type": "string", + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Unique name of the property related to the Object.", + "name": "propertyName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/SingleRef" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "204": 
{ + "description": "Successfully deleted." + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + } + }, + "/objects/{id}": { + "get": { + "description": "Get a specific object based on its UUID. Also available as Websocket bus. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. 
Use the ` + "`" + `/objects/{className}/{id}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Get a specific Object based on its UUID.", + "operationId": "objects.get", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "$ref": "#/parameters/CommonIncludeParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.query" + ] + }, + "put": { + "description": "Updates an object based on its UUID. Given meta-data and schema values are validated. LastUpdateTime is set to the time this function is called. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. 
Use the ` + "`" + `/objects/{className}/{id}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Update an Object based on its UUID.", + "operationId": "objects.update", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successfully received.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "delete": { + "description": "Deletes an object from the database based on its UUID. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. 
Use the ` + "`" + `/objects/{className}/{id}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Delete an Object based on its UUID.", + "operationId": "objects.delete", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": true, + "x-available-in-websocket": true, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "head": { + "description": "Checks if an object exists in the system based on its UUID. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. Use the ` + "`" + `/objects/{className}/{id}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Checks Object's existence based on its UUID.", + "operationId": "objects.head", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Object exists." + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Object doesn't exist." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": true, + "x-available-in-websocket": true, + "x-serviceIds": [ + "weaviate.objects.check" + ] + }, + "patch": { + "description": "Update an object based on its UUID (using patch semantics). This method supports json-merge style patch semantics (RFC 7396). Provided meta-data and schema values are validated. LastUpdateTime is set to the time this function is called. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. Use the ` + "`" + `/objects/{className}/{id}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Update an Object based on its UUID (using patch semantics).", + "operationId": "objects.patch", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "RFC 7396-style patch, the body contains the object to merge into the existing object.", + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + } + ], + "responses": { + "204": { + "description": "Successfully applied. No content provided." + }, + "400": { + "description": "The patch-JSON is malformed." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." 
+ }, + "422": { + "description": "The patch-JSON is valid but unprocessable.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + } + }, + "/objects/{id}/references/{propertyName}": { + "put": { + "description": "Replace all references in cross-reference property of an object. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. Use the ` + "`" + `/objects/{className}/{id}/references/{propertyName}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Replace all references to a class-property.", + "operationId": "objects.references.update", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Unique name of the property related to the Object.", + "name": "propertyName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/MultipleRef" + } + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successfully replaced all the references." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "post": { + "description": "Add a cross-reference. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. Use the ` + "`" + `/objects/{className}/{id}/references/{propertyName}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Add a single reference to a class-property.", + "operationId": "objects.references.create", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Unique name of the property related to the Object.", + "name": "propertyName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/SingleRef" + } + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successfully added the reference." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "delete": { + "description": "Delete the single reference that is given in the body from the list of references that this property has. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. Use the ` + "`" + `/objects/{className}/{id}/references/{propertyName}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Delete a single reference from the list of references.", + "operationId": "objects.references.delete", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Unique name of the property related to the Object.", + "name": "propertyName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/SingleRef" + } + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + } + }, + "/replication/replicate": { + "post": { + "description": "Begins an asynchronous operation to move or copy a specific shard replica from its current node to a designated target node. The operation involves copying data, synchronizing, and potentially decommissioning the source replica.", + "tags": [ + "replication" + ], + "summary": "Initiate a replica movement", + "operationId": "replicate", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ReplicationReplicateReplicaRequest" + } + } + ], + "responses": { + "200": { + "description": "Replication operation registered successfully. ID of the operation is returned.", + "schema": { + "$ref": "#/definitions/ReplicationReplicateReplicaResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.replicate" + ] + }, + "delete": { + "tags": [ + "replication" + ], + "summary": "Schedules all replication operations for deletion across all collections, shards, and nodes.", + "operationId": "deleteAllReplications", + "responses": { + "204": { + "description": "Replication operation registered successfully" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.deleteAllReplications" + ] + } + }, + "/replication/replicate/force-delete": { + "post": { + "description": "USE AT OWN RISK! Synchronously force delete operations from the FSM. This will not perform any checks on which state the operation is in so may lead to data corruption or loss. 
It is recommended to first scale the number of replication engine workers to 0 before calling this endpoint to ensure no operations are in-flight.", + "tags": [ + "replication" + ], + "summary": "Force delete replication operations", + "operationId": "forceDeleteReplications", + "parameters": [ + { + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/ReplicationReplicateForceDeleteRequest" + } + } + ], + "responses": { + "200": { + "description": "Replication operations force deleted successfully.", + "schema": { + "$ref": "#/definitions/ReplicationReplicateForceDeleteResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.forceDeleteReplications" + ] + } + }, + "/replication/replicate/list": { + "get": { + "description": "Retrieves a list of currently registered replication operations, optionally filtered by collection, shard, or node ID.", + "tags": [ + "replication" + ], + "summary": "List replication operations", + "operationId": "listReplication", + "parameters": [ + { + "type": "string", + "description": "The name of the target node to get details for.", + "name": "targetNode", + "in": "query" + }, + { + "type": "string", + "description": "The name of the collection to get details for.", + "name": "collection", + "in": "query" + }, + { + "type": "string", + "description": "The shard to get details for.", + "name": "shard", + "in": "query" + }, + { + "type": "boolean", + "description": "Whether to include the history of the replication operation.", + "name": "includeHistory", + "in": "query" + } + ], + "responses": { + "200": { + "description": "The details of the replication operations.", + "schema": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/ReplicationReplicateDetailsReplicaResponse" + } + } + }, + "400": { + "description": "Bad request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.replicate.details" + ] + } + }, + "/replication/replicate/{id}": { + "get": { + "description": "Fetches the current status and detailed information for a specific replication operation, identified by its unique ID. Optionally includes historical data of the operation's progress if requested.", + "tags": [ + "replication" + ], + "summary": "Retrieve a replication operation", + "operationId": "replicationDetails", + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "The ID of the replication operation to get details for.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "boolean", + "description": "Whether to include the history of the replication operation.", + "name": "includeHistory", + "in": "query" + } + ], + "responses": { + "200": { + "description": "The details of the replication operation.", + "schema": { + "$ref": "#/definitions/ReplicationReplicateDetailsReplicaResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Shard replica operation not found." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.replicate.details" + ] + }, + "delete": { + "description": "Removes a specific replication operation. If the operation is currently active, it will be cancelled and its resources cleaned up before the operation is deleted.", + "tags": [ + "replication" + ], + "summary": "Delete a replication operation", + "operationId": "deleteReplication", + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "The ID of the replication operation to delete.", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Shard replica operation not found." + }, + "409": { + "description": "The operation is not in a deletable state, e.g. it is a MOVE op in the DEHYDRATING state.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.replicate.delete" + ] + } + }, + "/replication/replicate/{id}/cancel": { + "post": { + "description": "Requests the cancellation of an active replication operation identified by its ID. The operation will be stopped, but its record will remain in the 'CANCELLED' state (can't be resumed) and will not be automatically deleted.", + "tags": [ + "replication" + ], + "summary": "Cancel a replication operation", + "operationId": "cancelReplication", + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "The ID of the replication operation to cancel.", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Successfully cancelled." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Shard replica operation not found." + }, + "409": { + "description": "The operation is not in a cancellable state, e.g. it is READY or is a MOVE op in the DEHYDRATING state.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.replicate.cancel" + ] + } + }, + "/replication/sharding-state": { + "get": { + "description": "Fetches the current sharding state, including replica locations and statuses, for all collections or a specified collection. If a shard name is provided along with a collection, the state for that specific shard is returned.", + "tags": [ + "replication" + ], + "summary": "Get sharding state", + "operationId": "getCollectionShardingState", + "parameters": [ + { + "type": "string", + "description": "The collection name to get the sharding state for.", + "name": "collection", + "in": "query" + }, + { + "type": "string", + "description": "The shard to get the sharding state for.", + "name": "shard", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved sharding state.", + "schema": { + "$ref": "#/definitions/ReplicationShardingStateResponse" + } + }, + "400": { + "description": "Bad request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Collection or shard not found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.shardingstate.collection.get" + ] + } + }, + "/schema": { + "get": { + "description": "Fetch an array of all collection definitions from the schema.", + "tags": [ + "schema" + ], + "summary": "Dump the current the database schema.", + "operationId": "schema.dump", + "parameters": [ + { + "type": "boolean", + "default": true, + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency", + "name": "consistency", + "in": "header" + } + ], + "responses": { + "200": { + "description": "Successfully dumped the database schema.", + "schema": { + "$ref": "#/definitions/Schema" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.query.meta" + ] + }, + "post": { + "description": "Create a new data object collection. \u003cbr/\u003e\u003cbr/\u003eIf AutoSchema is enabled, Weaviate will attempt to infer the schema from the data at import time. 
However, manual schema definition is recommended for production environments.", + "tags": [ + "schema" + ], + "summary": "Create a new Object class in the schema.", + "operationId": "schema.objects.create", + "parameters": [ + { + "name": "objectClass", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Class" + } + } + ], + "responses": { + "200": { + "description": "Added the new Object class to the schema.", + "schema": { + "$ref": "#/definitions/Class" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Object class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.add.meta" + ] + } + }, + "/schema/{className}": { + "get": { + "tags": [ + "schema" + ], + "summary": "Get a single class from the schema", + "operationId": "schema.objects.get", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "boolean", + "default": true, + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency", + "name": "consistency", + "in": "header" + } + ], + "responses": { + "200": { + "description": "Found the Class, returned as body", + "schema": { + "$ref": "#/definitions/Class" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "This class does not exist" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.get.meta" + ] + }, + "put": { + "description": "Add a property to an existing collection.", + "tags": [ + "schema" + ], + "summary": "Update settings of an existing schema class", + "operationId": "schema.objects.update", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "name": "objectClass", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Class" + } + } + ], + "responses": { + "200": { + "description": "Class was updated successfully", + "schema": { + "$ref": "#/definitions/Class" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Class to be updated does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid update attempt", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.manipulate.meta" + ] + }, + "delete": { + "description": "Remove a collection from the schema. 
This will also delete all the objects in the collection.", + "tags": [ + "schema" + ], + "summary": "Remove an Object class (and all data in the instances) from the schema.", + "operationId": "schema.objects.delete", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Removed the Object class from the schema." + }, + "400": { + "description": "Could not delete the Object class.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.manipulate.meta" + ] + } + }, + "/schema/{className}/properties": { + "post": { + "tags": [ + "schema" + ], + "summary": "Add a property to an Object class.", + "operationId": "schema.objects.properties.add", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Property" + } + } + ], + "responses": { + "200": { + "description": "Added the property.", + "schema": { + "$ref": "#/definitions/Property" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid property.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.manipulate.meta" + ] + } + }, + "/schema/{className}/shards": { + "get": { + "description": "Get the status of every shard in the cluster.", + "tags": [ + "schema" + ], + "summary": "Get the shards status of an Object class", + "operationId": "schema.objects.shards.get", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "tenant", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Found the status of the shards, returned as body", + "schema": { + "$ref": "#/definitions/ShardStatusList" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "This class does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.get.meta" + ] + } + }, + "/schema/{className}/shards/{shardName}": { + "put": { + "description": "Update a shard status for a collection. For example, a shard may have been marked as ` + "`" + `READONLY` + "`" + ` because its disk was full. After providing more disk space, use this endpoint to set the shard status to ` + "`" + `READY` + "`" + ` again. 
There is also a convenience function in each client to set the status of all shards of a collection.", + "tags": [ + "schema" + ], + "summary": "Update a shard status.", + "operationId": "schema.objects.shards.update", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "shardName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ShardStatus" + } + } + ], + "responses": { + "200": { + "description": "Shard status was updated successfully", + "schema": { + "$ref": "#/definitions/ShardStatus" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Shard to be updated does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid update attempt", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.manipulate.meta" + ] + } + }, + "/schema/{className}/tenants": { + "get": { + "description": "get all tenants from a specific class", + "tags": [ + "schema" + ], + "summary": "Get the list of tenants.", + "operationId": "tenants.get", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "boolean", + "default": true, + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency", + "name": "consistency", + "in": "header" + } + ], + "responses": { + "200": { + "description": "tenants from specified class.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "put": { + "description": "Update tenant of a specific class", + "tags": [ + "schema" + ], + "summary": "Update a tenant.", + "operationId": "tenants.update", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + } + ], + "responses": { + "200": { + "description": "Updated tenants of the specified class", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "post": { + "description": "Create a new tenant for a collection. 
Multi-tenancy must be enabled in the collection definition.", + "tags": [ + "schema" + ], + "summary": "Create a new tenant", + "operationId": "tenants.create", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + } + ], + "responses": { + "200": { + "description": "Added new tenants to the specified class", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "delete": { + "description": "delete tenants from a specific class", + "tags": [ + "schema" + ], + "operationId": "tenants.delete", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "name": "tenants", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + ], + "responses": { + "200": { + "description": "Deleted tenants from specified class." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/schema/{className}/tenants/{tenantName}": { + "get": { + "description": "get a specific tenant for the given class", + "tags": [ + "schema" + ], + "summary": "Get a specific tenant", + "operationId": "tenants.get.one", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "tenantName", + "in": "path", + "required": true + }, + { + "type": "boolean", + "default": true, + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency", + "name": "consistency", + "in": "header" + } + ], + "responses": { + "200": { + "description": "load the tenant given the specified class", + "schema": { + "$ref": "#/definitions/Tenant" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Tenant not found" + }, + "422": { + "description": "Invalid tenant or class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "head": { + "description": "Check if a tenant exists for a specific class", + "tags": [ + "schema" + ], + "summary": "Check whether a tenant exists", + "operationId": "tenant.exists", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "tenantName", + "in": "path", + "required": true + }, + { + "type": "boolean", + "default": true, + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency", + "name": "consistency", + "in": "header" + } + ], + "responses": { + "200": { + "description": "The tenant exists in the specified class" + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "The tenant not found" + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/tasks": { + "get": { + "tags": [ + "distributedTasks" + ], + "summary": "Lists all distributed tasks in the cluster.", + "operationId": "distributedTasks.get", + "responses": { + "200": { + "description": "Distributed tasks successfully returned", + "schema": { + "$ref": "#/definitions/DistributedTasks" + } + }, + "403": { + "description": "Unauthorized or invalid credentials.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.distributedTasks.get" + ] + } + }, + "/users/db": { + "get": { + "tags": [ + "users" + ], + "summary": "list all db users", + "operationId": "listAllUsers", + "parameters": [ + { + "type": "boolean", + "default": false, + "description": "Whether to include the last used time of the users", + "name": "includeLastUsedTime", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Info about the users", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/DBUserInfo" + } + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.db.list_all" + ] + } + }, + "/users/db/{user_id}": { + "get": { + "tags": [ + "users" + ], + "summary": "get info relevant to user, e.g. username, roles", + "operationId": "getUserInfo", + "parameters": [ + { + "type": "string", + "description": "user id", + "name": "user_id", + "in": "path", + "required": true + }, + { + "type": "boolean", + "default": false, + "description": "Whether to include the last used time of the given user", + "name": "includeLastUsedTime", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Info about the user", + "schema": { + "$ref": "#/definitions/DBUserInfo" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.db.get" + ] + }, + "post": { + "tags": [ + "users" + ], + "summary": "create new user", + "operationId": "createUser", + "parameters": [ + { + "type": "string", + "description": "user id", + "name": "user_id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "createTime": { + "description": "EXPERIMENTAL, DONT USE. THIS WILL BE REMOVED AGAIN. - set the given time as creation time", + "type": "string", + "format": "date-time" + }, + "import": { + "description": "EXPERIMENTAL, DONT USE. THIS WILL BE REMOVED AGAIN. - import api key from static user", + "type": "boolean", + "default": false + } + } + } + } + ], + "responses": { + "201": { + "description": "User created successfully", + "schema": { + "$ref": "#/definitions/UserApiKey" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "409": { + "description": "User already exists", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.db.create" + ] + }, + "delete": { + "tags": [ + "users" + ], + "summary": "Delete User", + "operationId": "deleteUser", + "parameters": [ + { + "type": "string", + "description": "user name", + "name": "user_id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.db.delete" + ] + } + }, + "/users/db/{user_id}/activate": { + "post": { + "tags": [ + "users" + ], + "summary": "activate a deactivated user", + "operationId": "activateUser", + "parameters": [ + { + "type": "string", + "description": "user id", + "name": "user_id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "User successfully activated" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "409": { + "description": "user already activated" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.db.activateUser" + ] + } + }, + "/users/db/{user_id}/deactivate": { + "post": { + "tags": [ + "users" + ], + "summary": "deactivate a user", + "operationId": "deactivateUser", + "parameters": [ + { + "type": "string", + "description": "user id", + "name": "user_id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "revoke_key": { + "description": "if the key should be revoked when deactivating the user", + "type": "boolean", + "default": false + } + } + } + } + ], + "responses": { + "200": { + "description": "users successfully deactivated" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "409": { + "description": "user already deactivated" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.db.deactivateUser" + ] + } + }, + "/users/db/{user_id}/rotate-key": { + "post": { + "tags": [ + "users" + ], + "summary": "rotate user api key", + "operationId": "rotateUserApiKey", + "parameters": [ + { + "type": "string", + "description": "user id", + "name": "user_id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "ApiKey successfully changed", + "schema": { + "$ref": "#/definitions/UserApiKey" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.db.rotateApiKey" + ] + } + }, + "/users/own-info": { + "get": { + "tags": [ + "users" + ], + "summary": "get info relevant to own user, e.g. username, roles", + "operationId": "getOwnInfo", + "responses": { + "200": { + "description": "Info about the user", + "schema": { + "$ref": "#/definitions/UserOwnInfo" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.get.own-info" + ] + } + } + }, + "definitions": { + "AdditionalProperties": { + "description": "(Response only) Additional meta information about a single object.", + "type": "object", + "additionalProperties": { + "type": "object" + } + }, + "Alias": { + "description": "Represents the mapping between an alias name and a collection. An alias provides an alternative name for accessing a collection.", + "type": "object", + "properties": { + "alias": { + "description": "The unique name of the alias that serves as an alternative identifier for the collection.", + "type": "string" + }, + "class": { + "description": "The name of the collection (class) to which this alias is mapped.", + "type": "string" + } + } + }, + "AliasResponse": { + "description": "Response object containing a list of alias mappings.", + "type": "object", + "properties": { + "aliases": { + "description": "Array of alias objects, each containing an alias-to-collection mapping.", + "type": "array", + "items": { + "$ref": "#/definitions/Alias" + } + } + } + }, + "AsyncReplicationStatus": { + "description": "The status of the async replication.", + "properties": { + "objectsPropagated": { + "description": "The number of objects propagated in the most recent iteration.", + "type": "number", + "format": "uint64" + }, + "startDiffTimeUnixMillis": { + "description": "The start time of the most recent iteration.", + "type": "number", + "format": "int64" + }, + "targetNode": { + "description": "The target node of the replication, if set, otherwise empty.", + "type": "string" + } + } + }, + "BM25Config": { + "description": "tuning parameters for the BM25 algorithm", + "type": "object", + 
"properties": { + "b": { + "description": "Calibrates term-weight scaling based on the document length (default: 0.75).", + "type": "number", + "format": "float" + }, + "k1": { + "description": "Calibrates term-weight scaling based on the term frequency within a document (default: 1.2).", + "type": "number", + "format": "float" + } + } + }, + "BackupConfig": { + "description": "Backup custom configuration", + "type": "object", + "properties": { + "Bucket": { + "description": "Name of the bucket, container, volume, etc", + "type": "string" + }, + "CPUPercentage": { + "description": "Desired CPU core utilization ranging from 1%-80%", + "type": "integer", + "default": 50, + "maximum": 80, + "minimum": 1, + "x-nullable": false + }, + "ChunkSize": { + "description": "Aimed chunk size, with a minimum of 2MB, default of 128MB, and a maximum of 512MB. The actual chunk size may vary.", + "type": "integer", + "default": 128, + "maximum": 512, + "minimum": 2, + "x-nullable": false + }, + "CompressionLevel": { + "description": "compression level used by compression algorithm", + "type": "string", + "default": "DefaultCompression", + "enum": [ + "DefaultCompression", + "BestSpeed", + "BestCompression" + ], + "x-nullable": false + }, + "Endpoint": { + "description": "name of the endpoint, e.g. s3.amazonaws.com", + "type": "string" + }, + "Path": { + "description": "Path or key within the bucket", + "type": "string" + } + } + }, + "BackupCreateRequest": { + "description": "Request body for creating a backup of a set of classes", + "properties": { + "config": { + "description": "Custom configuration for the backup creation process", + "type": "object", + "$ref": "#/definitions/BackupConfig" + }, + "exclude": { + "description": "List of collections to exclude from the backup creation process. If not set, all collections are included. 
Cannot be used together with ` + "`" + `include` + "`" + `.", + "type": "array", + "items": { + "type": "string" + } + }, + "id": { + "description": "The ID of the backup (required). Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "include": { + "description": "List of collections to include in the backup creation process. If not set, all collections are included. Cannot be used together with ` + "`" + `exclude` + "`" + `.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "BackupCreateResponse": { + "description": "The definition of a backup create response body", + "properties": { + "backend": { + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "type": "string" + }, + "bucket": { + "description": "Name of the bucket, container, volume, etc", + "type": "string" + }, + "classes": { + "description": "The list of classes for which the backup creation process was started", + "type": "array", + "items": { + "type": "string" + } + }, + "error": { + "description": "error message if creation failed", + "type": "string" + }, + "id": { + "description": "The ID of the backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "path": { + "description": "Path within bucket of backup", + "type": "string" + }, + "status": { + "description": "phase of backup creation process", + "type": "string", + "default": "STARTED", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + }, + "BackupCreateStatusResponse": { + "description": "The definition of a backup create metadata", + "properties": { + "backend": { + "description": "Backup backend name e.g. 
filesystem, gcs, s3.", + "type": "string" + }, + "error": { + "description": "error message if creation failed", + "type": "string" + }, + "id": { + "description": "The ID of the backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "path": { + "description": "destination path of backup files proper to selected backend", + "type": "string" + }, + "status": { + "description": "phase of backup creation process", + "type": "string", + "default": "STARTED", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + }, + "BackupListResponse": { + "description": "The definition of a backup create response body", + "type": "array", + "items": { + "type": "object", + "properties": { + "classes": { + "description": "The list of classes for which the existed backup process", + "type": "array", + "items": { + "type": "string" + } + }, + "id": { + "description": "The ID of the backup. 
Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "status": { + "description": "status of backup process", + "type": "string", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + } + }, + "BackupRestoreRequest": { + "description": "Request body for restoring a backup for a set of classes", + "properties": { + "config": { + "description": "Custom configuration for the backup restoration process", + "type": "object", + "$ref": "#/definitions/RestoreConfig" + }, + "exclude": { + "description": "List of classes to exclude from the backup restoration process", + "type": "array", + "items": { + "type": "string" + } + }, + "include": { + "description": "List of classes to include in the backup restoration process", + "type": "array", + "items": { + "type": "string" + } + }, + "node_mapping": { + "description": "Allows overriding the node names stored in the backup with different ones. Useful when restoring backups to a different environment.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "overwriteAlias": { + "description": "Allows overwriting the collection alias if there is a conflict", + "type": "boolean" + } + } + }, + "BackupRestoreResponse": { + "description": "The definition of a backup restore response body", + "properties": { + "backend": { + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "type": "string" + }, + "classes": { + "description": "The list of classes for which the backup restoration process was started", + "type": "array", + "items": { + "type": "string" + } + }, + "error": { + "description": "error message if restoration failed", + "type": "string" + }, + "id": { + "description": "The ID of the backup. 
Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "path": { + "description": "destination path of backup files proper to selected backend", + "type": "string" + }, + "status": { + "description": "phase of backup restoration process", + "type": "string", + "default": "STARTED", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + }, + "BackupRestoreStatusResponse": { + "description": "The definition of a backup restore metadata", + "properties": { + "backend": { + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "type": "string" + }, + "error": { + "description": "error message if restoration failed", + "type": "string" + }, + "id": { + "description": "The ID of the backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "path": { + "description": "destination path of backup files proper to selected backup backend, contains bucket and path", + "type": "string" + }, + "status": { + "description": "phase of backup restoration process", + "type": "string", + "default": "STARTED", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + }, + "BatchDelete": { + "type": "object", + "properties": { + "deletionTimeUnixMilli": { + "description": "Timestamp of deletion in milliseconds since epoch UTC.", + "type": "integer", + "format": "int64", + "x-nullable": true + }, + "dryRun": { + "description": "If true, the call will show which objects would be matched using the specified filter without deleting any objects. 
\u003cbr/\u003e\u003cbr/\u003eDepending on the configured verbosity, you will either receive a count of affected objects, or a list of IDs.", + "type": "boolean", + "default": false + }, + "match": { + "description": "Outlines how to find the objects to be deleted.", + "type": "object", + "properties": { + "class": { + "description": "Class (name) which objects will be deleted.", + "type": "string", + "example": "City" + }, + "where": { + "description": "Filter to limit the objects to be deleted.", + "type": "object", + "$ref": "#/definitions/WhereFilter" + } + } + }, + "output": { + "description": "Controls the verbosity of the output, possible values are: \"minimal\", \"verbose\". Defaults to \"minimal\".", + "type": "string", + "default": "minimal" + } + } + }, + "BatchDeleteResponse": { + "description": "Delete Objects response.", + "type": "object", + "properties": { + "deletionTimeUnixMilli": { + "description": "Timestamp of deletion in milliseconds since epoch UTC.", + "type": "integer", + "format": "int64", + "x-nullable": true + }, + "dryRun": { + "description": "If true, objects will not be deleted yet, but merely listed. Defaults to false.", + "type": "boolean", + "default": false + }, + "match": { + "description": "Outlines how to find the objects to be deleted.", + "type": "object", + "properties": { + "class": { + "description": "Class (name) which objects will be deleted.", + "type": "string", + "example": "City" + }, + "where": { + "description": "Filter to limit the objects to be deleted.", + "type": "object", + "$ref": "#/definitions/WhereFilter" + } + } + }, + "output": { + "description": "Controls the verbosity of the output, possible values are: \"minimal\", \"verbose\". 
Defaults to \"minimal\".", + "type": "string", + "default": "minimal" + }, + "results": { + "type": "object", + "properties": { + "failed": { + "description": "How many objects should have been deleted but could not be deleted.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "limit": { + "description": "The most amount of objects that can be deleted in a single query, equals QUERY_MAXIMUM_RESULTS.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "matches": { + "description": "How many objects were matched by the filter.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "objects": { + "description": "With output set to \"minimal\" only objects with error occurred will be described. Successfully deleted objects would be omitted. Output set to \"verbose\" will list all of the objects with their respective statuses.", + "type": "array", + "items": { + "description": "Results for this specific Object.", + "format": "object", + "properties": { + "errors": { + "$ref": "#/definitions/ErrorResponse" + }, + "id": { + "description": "ID of the Object.", + "type": "string", + "format": "uuid" + }, + "status": { + "type": "string", + "default": "SUCCESS", + "enum": [ + "SUCCESS", + "DRYRUN", + "FAILED" + ] + } + } + } + }, + "successful": { + "description": "How many objects were successfully deleted in this round.", + "type": "number", + "format": "int64", + "x-omitempty": false + } + } + } + } + }, + "BatchReference": { + "properties": { + "from": { + "description": "Long-form beacon-style URI to identify the source of the cross-ref including the property name. 
Should be in the form of weaviate://localhost/\u003ckinds\u003e/\u003cuuid\u003e/\u003cclassName\u003e/\u003cpropertyName\u003e, where \u003ckinds\u003e must be one of 'objects', 'objects' and \u003cclassName\u003e and \u003cpropertyName\u003e must represent the cross-ref property of source class to be used.", + "type": "string", + "format": "uri", + "example": "weaviate://localhost/Zoo/a5d09582-4239-4702-81c9-92a6e0122bb4/hasAnimals" + }, + "tenant": { + "description": "Name of the reference tenant.", + "type": "string" + }, + "to": { + "description": "Short-form URI to point to the cross-ref. Should be in the form of weaviate://localhost/\u003cuuid\u003e for the example of a local cross-ref to an object", + "type": "string", + "format": "uri", + "example": "weaviate://localhost/97525810-a9a5-4eb0-858a-71449aeb007f" + } + } + }, + "BatchReferenceResponse": { + "type": "object", + "allOf": [ + { + "$ref": "#/definitions/BatchReference" + }, + { + "properties": { + "result": { + "description": "Results for this specific reference.", + "format": "object", + "properties": { + "errors": { + "$ref": "#/definitions/ErrorResponse" + }, + "status": { + "type": "string", + "default": "SUCCESS", + "enum": [ + "SUCCESS", + "FAILED" + ] + } + } + } + } + } + ] + }, + "BatchStats": { + "description": "The summary of a nodes batch queue congestion status.", + "properties": { + "queueLength": { + "description": "How many objects are currently in the batch queue.", + "type": "number", + "format": "int", + "x-nullable": true, + "x-omitempty": true + }, + "ratePerSecond": { + "description": "How many objects are approximately processed from the batch queue per second.", + "type": "number", + "format": "int", + "x-omitempty": false + } + } + }, + "C11yExtension": { + "description": "A resource describing an extension to the contextinoary, containing both the identifier and the definition of the extension", + "properties": { + "concept": { + "description": "The new concept you want to 
extend. Must be an all-lowercase single word, or a space delimited compound word. Examples: 'foobarium', 'my custom concept'", + "type": "string", + "example": "foobarium" + }, + "definition": { + "description": "A list of space-delimited words or a sentence describing what the custom concept is about. Avoid using the custom concept itself. An Example definition for the custom concept 'foobarium': would be 'a naturally occurring element which can only be seen by programmers'", + "type": "string" + }, + "weight": { + "description": "Weight of the definition of the new concept where 1='override existing definition entirely' and 0='ignore custom definition'. Note that if the custom concept is not present in the contextionary yet, the weight cannot be less than 1.", + "type": "number", + "format": "float" + } + } + }, + "C11yNearestNeighbors": { + "description": "C11y function to show the nearest neighbors to a word.", + "type": "array", + "items": { + "type": "object", + "properties": { + "distance": { + "type": "number", + "format": "float" + }, + "word": { + "type": "string" + } + } + } + }, + "C11yVector": { + "description": "A vector representation of the object in the Contextionary. 
If provided at object creation, this will take precedence over any vectorizer setting.", + "type": "array", + "items": { + "type": "number", + "format": "float" + } + }, + "C11yVectorBasedQuestion": { + "description": "Receive question based on array of classes, properties and values.", + "type": "array", + "items": { + "type": "object", + "properties": { + "classProps": { + "description": "Vectorized properties.", + "type": "array", + "maxItems": 300, + "minItems": 300, + "items": { + "type": "object", + "properties": { + "propsVectors": { + "type": "array", + "items": { + "type": "number", + "format": "float" + } + }, + "value": { + "description": "String with valuename.", + "type": "string" + } + } + } + }, + "classVectors": { + "description": "Vectorized classname.", + "type": "array", + "maxItems": 300, + "minItems": 300, + "items": { + "type": "number", + "format": "float" + } + } + } + } + }, + "C11yWordsResponse": { + "description": "An array of available words and contexts.", + "properties": { + "concatenatedWord": { + "description": "Weighted results for all words", + "type": "object", + "properties": { + "concatenatedNearestNeighbors": { + "$ref": "#/definitions/C11yNearestNeighbors" + }, + "concatenatedVector": { + "$ref": "#/definitions/C11yVector" + }, + "concatenatedWord": { + "type": "string" + }, + "singleWords": { + "type": "array", + "items": { + "format": "string" + } + } + } + }, + "individualWords": { + "description": "Weighted results for per individual word", + "type": "array", + "items": { + "type": "object", + "properties": { + "info": { + "type": "object", + "properties": { + "nearestNeighbors": { + "$ref": "#/definitions/C11yNearestNeighbors" + }, + "vector": { + "$ref": "#/definitions/C11yVector" + } + } + }, + "present": { + "type": "boolean" + }, + "word": { + "type": "string" + } + } + } + } + } + }, + "Class": { + "type": "object", + "properties": { + "class": { + "description": "Name of the class (a.k.a. 'collection') (required). 
Multiple words should be concatenated in CamelCase, e.g. ` + "`" + `ArticleAuthor` + "`" + `.", + "type": "string" + }, + "description": { + "description": "Description of the collection for metadata purposes.", + "type": "string" + }, + "invertedIndexConfig": { + "$ref": "#/definitions/InvertedIndexConfig" + }, + "moduleConfig": { + "description": "Configuration specific to modules in a collection context.", + "type": "object" + }, + "multiTenancyConfig": { + "$ref": "#/definitions/MultiTenancyConfig" + }, + "properties": { + "description": "Define properties of the collection.", + "type": "array", + "items": { + "$ref": "#/definitions/Property" + } + }, + "replicationConfig": { + "$ref": "#/definitions/ReplicationConfig" + }, + "shardingConfig": { + "description": "Manage how the index should be sharded and distributed in the cluster", + "type": "object" + }, + "vectorConfig": { + "description": "Configure named vectors. Either use this field or ` + "`" + `vectorizer` + "`" + `, ` + "`" + `vectorIndexType` + "`" + `, and ` + "`" + `vectorIndexConfig` + "`" + ` fields. Available from ` + "`" + `v1.24.0` + "`" + `.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/VectorConfig" + } + }, + "vectorIndexConfig": { + "description": "Vector-index config, that is specific to the type of index selected in vectorIndexType", + "type": "object" + }, + "vectorIndexType": { + "description": "Name of the vector index to use, eg. (HNSW)", + "type": "string" + }, + "vectorizer": { + "description": "Specify how the vectors for this class should be determined. The options are either 'none' - this means you have to import a vector with each object yourself - or the name of a module that provides vectorization capabilities, such as 'text2vec-contextionary'. 
If left empty, it will use the globally configured default which can itself either be 'none' or a specific module.", + "type": "string" + } + } + }, + "Classification": { + "description": "Manage classifications, trigger them and view status of past classifications.", + "type": "object", + "properties": { + "basedOnProperties": { + "description": "base the text-based classification on these fields (of type text)", + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "description" + ] + }, + "class": { + "description": "class (name) which is used in this classification", + "type": "string", + "example": "City" + }, + "classifyProperties": { + "description": "which ref-property to set as part of the classification", + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "inCountry" + ] + }, + "error": { + "description": "error message if status == failed", + "type": "string", + "default": "", + "example": "classify xzy: something went wrong" + }, + "filters": { + "type": "object", + "properties": { + "sourceWhere": { + "description": "limit the objects to be classified", + "type": "object", + "$ref": "#/definitions/WhereFilter" + }, + "targetWhere": { + "description": "Limit the possible sources when using an algorithm which doesn't rely on training data, e.g. 'contextual'. When using an algorithm with a training set, such as 'knn', limit the training set instead", + "type": "object", + "$ref": "#/definitions/WhereFilter" + }, + "trainingSetWhere": { + "description": "Limit the training objects to be considered during the classification. 
Can only be used on types with explicit training sets, such as 'knn'", + "type": "object", + "$ref": "#/definitions/WhereFilter" + } + } + }, + "id": { + "description": "ID to uniquely identify this classification run", + "type": "string", + "format": "uuid", + "example": "ee722219-b8ec-4db1-8f8d-5150bb1a9e0c" + }, + "meta": { + "description": "additional meta information about the classification", + "type": "object", + "$ref": "#/definitions/ClassificationMeta" + }, + "settings": { + "description": "classification-type specific settings", + "type": "object" + }, + "status": { + "description": "status of this classification", + "type": "string", + "enum": [ + "running", + "completed", + "failed" + ], + "example": "running" + }, + "type": { + "description": "which algorithm to use for classifications", + "type": "string" + } + } + }, + "ClassificationMeta": { + "description": "Additional information to a specific classification", + "type": "object", + "properties": { + "completed": { + "description": "time when this classification finished", + "type": "string", + "format": "date-time", + "example": "2017-07-21T17:32:28Z" + }, + "count": { + "description": "number of objects which were taken into consideration for classification", + "type": "integer", + "example": 147 + }, + "countFailed": { + "description": "number of objects which could not be classified - see error message for details", + "type": "integer", + "example": 7 + }, + "countSucceeded": { + "description": "number of objects successfully classified", + "type": "integer", + "example": 140 + }, + "started": { + "description": "time when this classification was started", + "type": "string", + "format": "date-time", + "example": "2017-07-21T17:32:28Z" + } + } + }, + "ClusterStatisticsResponse": { + "description": "The cluster statistics of all of the Weaviate nodes", + "type": "object", + "properties": { + "statistics": { + "type": "array", + "items": { + "$ref": "#/definitions/Statistics" + } + }, + 
"synchronized": { + "type": "boolean", + "x-omitempty": false + } + } + }, + "DBUserInfo": { + "type": "object", + "required": [ + "userId", + "dbUserType", + "roles", + "active" + ], + "properties": { + "active": { + "description": "activity status of the returned user", + "type": "boolean" + }, + "apiKeyFirstLetters": { + "description": "First 3 letters of the associated API-key", + "type": [ + "string", + "null" + ], + "maxLength": 3 + }, + "createdAt": { + "description": "Date and time in ISO 8601 format (YYYY-MM-DDTHH:MM:SSZ)", + "type": [ + "string", + "null" + ], + "format": "date-time" + }, + "dbUserType": { + "description": "type of the returned user", + "type": "string", + "enum": [ + "db_user", + "db_env_user" + ] + }, + "lastUsedAt": { + "description": "Date and time in ISO 8601 format (YYYY-MM-DDTHH:MM:SSZ)", + "type": [ + "string", + "null" + ], + "format": "date-time" + }, + "roles": { + "description": "The role names associated to the user", + "type": "array", + "items": { + "type": "string" + } + }, + "userId": { + "description": "The user id of the given user", + "type": "string" + } + } + }, + "Deprecation": { + "type": "object", + "properties": { + "apiType": { + "description": "Describes which API is effected, usually one of: REST, GraphQL", + "type": "string" + }, + "id": { + "description": "The id that uniquely identifies this particular deprecations (mostly used internally)", + "type": "string" + }, + "locations": { + "description": "The locations within the specified API affected by this deprecation", + "type": "array", + "items": { + "type": "string" + } + }, + "mitigation": { + "description": "User-required object to not be affected by the (planned) removal", + "type": "string" + }, + "msg": { + "description": "What this deprecation is about", + "type": "string" + }, + "plannedRemovalVersion": { + "description": "A best-effort guess of which upcoming version will remove the feature entirely", + "type": "string" + }, + "removedIn": { + 
"description": "If the feature has already been removed, it was removed in this version", + "type": "string", + "x-nullable": true + }, + "removedTime": { + "description": "If the feature has already been removed, it was removed at this timestamp", + "type": "string", + "format": "date-time", + "x-nullable": true + }, + "sinceTime": { + "description": "The deprecation was introduced in this version", + "type": "string", + "format": "date-time" + }, + "sinceVersion": { + "description": "The deprecation was introduced in this version", + "type": "string" + }, + "status": { + "description": "Whether the problematic API functionality is deprecated (planned to be removed) or already removed", + "type": "string" + } + } + }, + "DistributedTask": { + "description": "Distributed task metadata.", + "type": "object", + "properties": { + "error": { + "description": "The high level reason why the task failed.", + "type": "string", + "x-omitempty": true + }, + "finishedAt": { + "description": "The time when the task was finished.", + "type": "string", + "format": "date-time" + }, + "finishedNodes": { + "description": "The nodes that finished the task.", + "type": "array", + "items": { + "type": "string" + } + }, + "id": { + "description": "The ID of the task.", + "type": "string" + }, + "payload": { + "description": "The payload of the task.", + "type": "object" + }, + "startedAt": { + "description": "The time when the task was created.", + "type": "string", + "format": "date-time" + }, + "status": { + "description": "The status of the task.", + "type": "string" + }, + "version": { + "description": "The version of the task.", + "type": "integer" + } + } + }, + "DistributedTasks": { + "description": "Active distributed tasks by namespace.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/definitions/DistributedTask" + } + } + }, + "ErrorResponse": { + "description": "An error response given by Weaviate end-points.", + "type": 
"object", + "properties": { + "error": { + "type": "array", + "items": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + } + } + } + } + }, + "GeoCoordinates": { + "properties": { + "latitude": { + "description": "The latitude of the point on earth in decimal form", + "type": "number", + "format": "float", + "x-nullable": true + }, + "longitude": { + "description": "The longitude of the point on earth in decimal form", + "type": "number", + "format": "float", + "x-nullable": true + } + } + }, + "GraphQLError": { + "description": "An error response caused by a GraphQL query.", + "properties": { + "locations": { + "type": "array", + "items": { + "type": "object", + "properties": { + "column": { + "type": "integer", + "format": "int64" + }, + "line": { + "type": "integer", + "format": "int64" + } + } + } + }, + "message": { + "type": "string" + }, + "path": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "GraphQLQueries": { + "description": "A list of GraphQL queries.", + "type": "array", + "items": { + "$ref": "#/definitions/GraphQLQuery" + } + }, + "GraphQLQuery": { + "description": "GraphQL query based on: http://facebook.github.io/graphql/.", + "type": "object", + "properties": { + "operationName": { + "description": "The name of the operation if multiple exist in the query.", + "type": "string" + }, + "query": { + "description": "Query based on GraphQL syntax.", + "type": "string" + }, + "variables": { + "description": "Additional variables for the query.", + "type": "object" + } + } + }, + "GraphQLResponse": { + "description": "GraphQL based response: http://facebook.github.io/graphql/.", + "properties": { + "data": { + "description": "GraphQL data object.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/JsonObject" + } + }, + "errors": { + "description": "Array with errors.", + "type": "array", + "items": { + "$ref": "#/definitions/GraphQLError" + }, + "x-omitempty": true + } + } + 
}, + "GraphQLResponses": { + "description": "A list of GraphQL responses.", + "type": "array", + "items": { + "$ref": "#/definitions/GraphQLResponse" + } + }, + "GroupType": { + "description": "If the group contains OIDC or database users.", + "type": "string", + "enum": [ + "db", + "oidc" + ] + }, + "InvertedIndexConfig": { + "description": "Configure the inverted index built into Weaviate (default: 60).", + "type": "object", + "properties": { + "bm25": { + "$ref": "#/definitions/BM25Config" + }, + "cleanupIntervalSeconds": { + "description": "Asynchronous index clean up happens every n seconds", + "type": "number", + "format": "int" + }, + "indexNullState": { + "description": "Index each object with the null state (default: 'false').", + "type": "boolean" + }, + "indexPropertyLength": { + "description": "Index length of properties (default: 'false').", + "type": "boolean" + }, + "indexTimestamps": { + "description": "Index each object by its internal timestamps (default: 'false').", + "type": "boolean" + }, + "stopwords": { + "$ref": "#/definitions/StopwordConfig" + }, + "usingBlockMaxWAND": { + "description": "Using BlockMax WAND for query execution (default: 'false', will be 'true' for new collections created after 1.30).", + "type": "boolean" + } + } + }, + "JsonObject": { + "description": "JSON object value.", + "type": "object" + }, + "Link": { + "type": "object", + "properties": { + "documentationHref": { + "description": "weaviate documentation about this resource group", + "type": "string" + }, + "href": { + "description": "target of the link", + "type": "string" + }, + "name": { + "description": "human readable name of the resource group", + "type": "string" + }, + "rel": { + "description": "relationship if both resources are related, e.g. 
'next', 'previous', 'parent', etc.", + "type": "string" + } + } + }, + "Meta": { + "description": "Contains meta information of the current Weaviate instance.", + "type": "object", + "properties": { + "grpcMaxMessageSize": { + "description": "Max message size for GRPC connection in bytes.", + "type": "integer" + }, + "hostname": { + "description": "The url of the host.", + "type": "string", + "format": "url" + }, + "modules": { + "description": "Module-specific meta information.", + "type": "object" + }, + "version": { + "description": "The Weaviate server version.", + "type": "string" + } + } + }, + "MultiTenancyConfig": { + "description": "Configuration related to multi-tenancy within a class", + "properties": { + "autoTenantActivation": { + "description": "Existing tenants should (not) be turned HOT implicitly when they are accessed and in another activity status (default: false).", + "type": "boolean", + "x-omitempty": false + }, + "autoTenantCreation": { + "description": "Nonexistent tenants should (not) be created implicitly (default: false).", + "type": "boolean", + "x-omitempty": false + }, + "enabled": { + "description": "Whether or not multi-tenancy is enabled for this class (default: false).", + "type": "boolean", + "x-omitempty": false + } + } + }, + "MultipleRef": { + "description": "Multiple instances of references to other objects.", + "type": "array", + "items": { + "$ref": "#/definitions/SingleRef" + } + }, + "NestedProperty": { + "type": "object", + "properties": { + "dataType": { + "type": "array", + "items": { + "type": "string" + } + }, + "description": { + "type": "string" + }, + "indexFilterable": { + "type": "boolean", + "x-nullable": true + }, + "indexRangeFilters": { + "type": "boolean", + "x-nullable": true + }, + "indexSearchable": { + "type": "boolean", + "x-nullable": true + }, + "name": { + "type": "string" + }, + "nestedProperties": { + "description": "The properties of the nested object(s). 
Applies to object and object[] data types.", + "type": "array", + "items": { + "$ref": "#/definitions/NestedProperty" + }, + "x-omitempty": true + }, + "tokenization": { + "type": "string", + "enum": [ + "word", + "lowercase", + "whitespace", + "field", + "trigram", + "gse", + "kagome_kr", + "kagome_ja", + "gse_ch" + ] + } + } + }, + "NodeShardStatus": { + "description": "The definition of a node shard status response body", + "properties": { + "asyncReplicationStatus": { + "description": "The status of the async replication.", + "type": "array", + "items": { + "$ref": "#/definitions/AsyncReplicationStatus" + } + }, + "class": { + "description": "The name of shard's class.", + "type": "string", + "x-omitempty": false + }, + "compressed": { + "description": "The status of vector compression/quantization.", + "format": "boolean", + "x-omitempty": false + }, + "loaded": { + "description": "The load status of the shard.", + "type": "boolean", + "x-omitempty": false + }, + "name": { + "description": "The name of the shard.", + "type": "string", + "x-omitempty": false + }, + "numberOfReplicas": { + "description": "Number of replicas for the shard.", + "type": [ + "integer", + "null" + ], + "format": "int64", + "x-omitempty": true + }, + "objectCount": { + "description": "The number of objects in shard.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "replicationFactor": { + "description": "Minimum number of replicas for the shard.", + "type": [ + "integer", + "null" + ], + "format": "int64", + "x-omitempty": true + }, + "vectorIndexingStatus": { + "description": "The status of the vector indexing process.", + "format": "string", + "x-omitempty": false + }, + "vectorQueueLength": { + "description": "The length of the vector indexing queue.", + "type": "number", + "format": "int64", + "x-omitempty": false + } + } + }, + "NodeStats": { + "description": "The summary of Weaviate's statistics.", + "properties": { + "objectCount": { + "description": 
"The total number of objects in DB.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "shardCount": { + "description": "The count of Weaviate's shards. To see this value, set ` + "`" + `output` + "`" + ` to ` + "`" + `verbose` + "`" + `.", + "type": "number", + "format": "int", + "x-omitempty": false + } + } + }, + "NodeStatus": { + "description": "The definition of a backup node status response body", + "properties": { + "batchStats": { + "description": "Weaviate batch statistics.", + "type": "object", + "$ref": "#/definitions/BatchStats" + }, + "gitHash": { + "description": "The gitHash of Weaviate.", + "type": "string" + }, + "name": { + "description": "The name of the node.", + "type": "string" + }, + "shards": { + "description": "The list of the shards with it's statistics.", + "type": "array", + "items": { + "$ref": "#/definitions/NodeShardStatus" + } + }, + "stats": { + "description": "Weaviate overall statistics.", + "type": "object", + "$ref": "#/definitions/NodeStats" + }, + "status": { + "description": "Node's status.", + "type": "string", + "default": "HEALTHY", + "enum": [ + "HEALTHY", + "UNHEALTHY", + "UNAVAILABLE", + "TIMEOUT" + ] + }, + "version": { + "description": "The version of Weaviate.", + "type": "string" + } + } + }, + "NodesStatusResponse": { + "description": "The status of all of the Weaviate nodes", + "type": "object", + "properties": { + "nodes": { + "type": "array", + "items": { + "$ref": "#/definitions/NodeStatus" + } + } + } + }, + "Object": { + "type": "object", + "properties": { + "additional": { + "$ref": "#/definitions/AdditionalProperties" + }, + "class": { + "description": "Class of the Object, defined in the schema.", + "type": "string" + }, + "creationTimeUnix": { + "description": "(Response only) Timestamp of creation of this object in milliseconds since epoch UTC.", + "type": "integer", + "format": "int64" + }, + "id": { + "description": "ID of the Object.", + "type": "string", + "format": "uuid" + }, 
+ "lastUpdateTimeUnix": { + "description": "(Response only) Timestamp of the last object update in milliseconds since epoch UTC.", + "type": "integer", + "format": "int64" + }, + "properties": { + "$ref": "#/definitions/PropertySchema" + }, + "tenant": { + "description": "Name of the Objects tenant.", + "type": "string" + }, + "vector": { + "description": "This field returns vectors associated with the Object. C11yVector, Vector or Vectors values are possible.", + "$ref": "#/definitions/C11yVector" + }, + "vectorWeights": { + "$ref": "#/definitions/VectorWeights" + }, + "vectors": { + "description": "This field returns vectors associated with the Object.", + "$ref": "#/definitions/Vectors" + } + } + }, + "ObjectsGetResponse": { + "type": "object", + "allOf": [ + { + "$ref": "#/definitions/Object" + }, + { + "properties": { + "deprecations": { + "type": "array", + "items": { + "$ref": "#/definitions/Deprecation" + } + } + } + }, + { + "properties": { + "result": { + "description": "Results for this specific Object.", + "format": "object", + "properties": { + "errors": { + "$ref": "#/definitions/ErrorResponse" + }, + "status": { + "type": "string", + "default": "SUCCESS", + "enum": [ + "SUCCESS", + "FAILED" + ] + } + } + } + } + } + ] + }, + "ObjectsListResponse": { + "description": "List of Objects.", + "type": "object", + "properties": { + "deprecations": { + "type": "array", + "items": { + "$ref": "#/definitions/Deprecation" + } + }, + "objects": { + "description": "The actual list of Objects.", + "type": "array", + "items": { + "$ref": "#/definitions/Object" + } + }, + "totalResults": { + "description": "The total number of Objects for the query. 
The number of items in a response may be smaller due to paging.", + "type": "integer", + "format": "int64" + } + } + }, + "PatchDocumentAction": { + "description": "Either a JSONPatch document as defined by RFC 6902 (from, op, path, value), or a merge document (RFC 7396).", + "required": [ + "op", + "path" + ], + "properties": { + "from": { + "description": "A string containing a JSON Pointer value.", + "type": "string" + }, + "merge": { + "$ref": "#/definitions/Object" + }, + "op": { + "description": "The operation to be performed.", + "type": "string", + "enum": [ + "add", + "remove", + "replace", + "move", + "copy", + "test" + ] + }, + "path": { + "description": "A JSON-Pointer.", + "type": "string" + }, + "value": { + "description": "The value to be used within the operations.", + "type": "object" + } + } + }, + "PatchDocumentObject": { + "description": "Either a JSONPatch document as defined by RFC 6902 (from, op, path, value), or a merge document (RFC 7396).", + "required": [ + "op", + "path" + ], + "properties": { + "from": { + "description": "A string containing a JSON Pointer value.", + "type": "string" + }, + "merge": { + "$ref": "#/definitions/Object" + }, + "op": { + "description": "The operation to be performed.", + "type": "string", + "enum": [ + "add", + "remove", + "replace", + "move", + "copy", + "test" + ] + }, + "path": { + "description": "A JSON-Pointer.", + "type": "string" + }, + "value": { + "description": "The value to be used within the operations.", + "type": "object" + } + } + }, + "PeerUpdate": { + "description": "A single peer in the network.", + "properties": { + "id": { + "description": "The session ID of the peer.", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "Human readable name.", + "type": "string" + }, + "schemaHash": { + "description": "The latest known hash of the peer's schema.", + "type": "string" + }, + "uri": { + "description": "The location where the peer is exposed to the internet.", + "type": 
"string", + "format": "uri" + } + } + }, + "PeerUpdateList": { + "description": "List of known peers.", + "type": "array", + "items": { + "$ref": "#/definitions/PeerUpdate" + } + }, + "Permission": { + "description": "permissions attached to a role.", + "type": "object", + "required": [ + "action" + ], + "properties": { + "action": { + "description": "allowed actions in weaviate.", + "type": "string", + "enum": [ + "manage_backups", + "read_cluster", + "create_data", + "read_data", + "update_data", + "delete_data", + "read_nodes", + "create_roles", + "read_roles", + "update_roles", + "delete_roles", + "create_collections", + "read_collections", + "update_collections", + "delete_collections", + "assign_and_revoke_users", + "create_users", + "read_users", + "update_users", + "delete_users", + "create_tenants", + "read_tenants", + "update_tenants", + "delete_tenants", + "create_replicate", + "read_replicate", + "update_replicate", + "delete_replicate", + "create_aliases", + "read_aliases", + "update_aliases", + "delete_aliases", + "assign_and_revoke_groups", + "read_groups" + ] + }, + "aliases": { + "description": "Resource definition for alias-related actions and permissions. Used to specify which aliases and collections can be accessed or modified.", + "type": "object", + "properties": { + "alias": { + "description": "A string that specifies which aliases this permission applies to. Can be an exact alias name or a regex pattern. The default value ` + "`" + `*` + "`" + ` applies the permission to all aliases.", + "type": "string", + "default": "*" + }, + "collection": { + "description": "A string that specifies which collections this permission applies to. Can be an exact collection name or a regex pattern. 
The default value ` + "`" + `*` + "`" + ` applies the permission to all collections.", + "type": "string", + "default": "*" + } + } + }, + "backups": { + "description": "resources applicable for backup actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "collections": { + "description": "resources applicable for collection and/or tenant actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "data": { + "description": "resources applicable for data actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "object": { + "description": "string or regex. if a specific object ID, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "tenant": { + "description": "string or regex. if a specific tenant name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "groups": { + "description": "Resources applicable for group actions.", + "type": "object", + "properties": { + "group": { + "description": "A string that specifies which groups this permission applies to. Can be an exact group name or a regex pattern. The default value ` + "`" + `*` + "`" + ` applies the permission to all groups.", + "type": "string", + "default": "*" + }, + "groupType": { + "$ref": "#/definitions/GroupType" + } + } + }, + "nodes": { + "description": "resources applicable for cluster actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. 
if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "verbosity": { + "description": "whether to allow (verbose) returning shards and stats data in the response", + "type": "string", + "default": "minimal", + "enum": [ + "verbose", + "minimal" + ] + } + } + }, + "replicate": { + "description": "resources applicable for replicate actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "shard": { + "description": "string or regex. if a specific shard name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "roles": { + "description": "resources applicable for role actions", + "type": "object", + "properties": { + "role": { + "description": "string or regex. if a specific role name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "scope": { + "description": "set the scope for the manage role permission", + "type": "string", + "default": "match", + "enum": [ + "all", + "match" + ] + } + } + }, + "tenants": { + "description": "resources applicable for tenant actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "tenant": { + "description": "string or regex. if a specific tenant name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "users": { + "description": "resources applicable for user actions", + "type": "object", + "properties": { + "users": { + "description": "string or regex. if a specific name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + } + } + }, + "PhoneNumber": { + "properties": { + "countryCode": { + "description": "Read-only. 
The numerical country code (e.g. 49)", + "type": "number", + "format": "uint64" + }, + "defaultCountry": { + "description": "Optional. The ISO 3166-1 alpha-2 country code. This is used to figure out the correct countryCode and international format if only a national number (e.g. 0123 4567) is provided", + "type": "string" + }, + "input": { + "description": "The raw input as the phone number is present in your raw data set. It will be parsed into the standardized formats if valid.", + "type": "string" + }, + "internationalFormatted": { + "description": "Read-only. Parsed result in the international format (e.g. +49 123 ...)", + "type": "string" + }, + "national": { + "description": "Read-only. The numerical representation of the national part", + "type": "number", + "format": "uint64" + }, + "nationalFormatted": { + "description": "Read-only. Parsed result in the national format (e.g. 0123 456789)", + "type": "string" + }, + "valid": { + "description": "Read-only. Indicates whether the parsed number is a valid phone number", + "type": "boolean" + } + } + }, + "Principal": { + "type": "object", + "properties": { + "groups": { + "type": "array", + "items": { + "type": "string" + } + }, + "userType": { + "$ref": "#/definitions/UserTypeInput" + }, + "username": { + "description": "The username that was extracted either from the authentication information", + "type": "string" + } + } + }, + "Property": { + "type": "object", + "properties": { + "dataType": { + "description": "Data type of the property (required). If it starts with a capital (for example Person), may be a reference to another type.", + "type": "array", + "items": { + "type": "string" + } + }, + "description": { + "description": "Description of the property.", + "type": "string" + }, + "indexFilterable": { + "description": "Whether to include this property in the filterable, Roaring Bitmap index. If ` + "`" + `false` + "`" + `, this property cannot be used in ` + "`" + `where` + "`" + ` filters. 
\u003cbr/\u003e\u003cbr/\u003eNote: Unrelated to vectorization behavior.", + "type": "boolean", + "x-nullable": true + }, + "indexInverted": { + "description": "(Deprecated). Whether to include this property in the inverted index. If ` + "`" + `false` + "`" + `, this property cannot be used in ` + "`" + `where` + "`" + ` filters, ` + "`" + `bm25` + "`" + ` or ` + "`" + `hybrid` + "`" + ` search. \u003cbr/\u003e\u003cbr/\u003eUnrelated to vectorization behavior (deprecated as of v1.19; use indexFilterable or/and indexSearchable instead)", + "type": "boolean", + "x-nullable": true + }, + "indexRangeFilters": { + "description": "Whether to include this property in the filterable, range-based Roaring Bitmap index. Provides better performance for range queries compared to filterable index in large datasets. Applicable only to properties of data type int, number, date.", + "type": "boolean", + "x-nullable": true + }, + "indexSearchable": { + "description": "Optional. Should this property be indexed in the inverted index. Defaults to true. Applicable only to properties of data type text and text[]. If you choose false, you will not be able to use this property in bm25 or hybrid search. This property has no affect on vectorization decisions done by modules", + "type": "boolean", + "x-nullable": true + }, + "moduleConfig": { + "description": "Configuration specific to modules this Weaviate instance has installed", + "type": "object" + }, + "name": { + "description": "The name of the property (required). Multiple words should be concatenated in camelCase, e.g. ` + "`" + `nameOfAuthor` + "`" + `.", + "type": "string" + }, + "nestedProperties": { + "description": "The properties of the nested object(s). Applies to object and object[] data types.", + "type": "array", + "items": { + "$ref": "#/definitions/NestedProperty" + }, + "x-omitempty": true + }, + "tokenization": { + "description": "Determines tokenization of the property as separate words or whole field. Optional. 
Applies to text and text[] data types. Allowed values are ` + "`" + `word` + "`" + ` (default; splits on any non-alphanumerical, lowercases), ` + "`" + `lowercase` + "`" + ` (splits on white spaces, lowercases), ` + "`" + `whitespace` + "`" + ` (splits on white spaces), ` + "`" + `field` + "`" + ` (trims). Not supported for remaining data types", + "type": "string", + "enum": [ + "word", + "lowercase", + "whitespace", + "field", + "trigram", + "gse", + "kagome_kr", + "kagome_ja", + "gse_ch" + ] + } + } + }, + "PropertySchema": { + "description": "Names and values of an individual property. A returned response may also contain additional metadata, such as from classification or feature projection.", + "type": "object" + }, + "RaftStatistics": { + "description": "The definition of Raft statistics.", + "properties": { + "appliedIndex": { + "type": "string" + }, + "commitIndex": { + "type": "string" + }, + "fsmPending": { + "type": "string" + }, + "lastContact": { + "type": "string" + }, + "lastLogIndex": { + "type": "string" + }, + "lastLogTerm": { + "type": "string" + }, + "lastSnapshotIndex": { + "type": "string" + }, + "lastSnapshotTerm": { + "type": "string" + }, + "latestConfiguration": { + "description": "Weaviate Raft nodes.", + "type": "object" + }, + "latestConfigurationIndex": { + "type": "string" + }, + "numPeers": { + "type": "string" + }, + "protocolVersion": { + "type": "string" + }, + "protocolVersionMax": { + "type": "string" + }, + "protocolVersionMin": { + "type": "string" + }, + "snapshotVersionMax": { + "type": "string" + }, + "snapshotVersionMin": { + "type": "string" + }, + "state": { + "type": "string" + }, + "term": { + "type": "string" + } + } + }, + "ReferenceMetaClassification": { + "description": "This meta field contains additional info about the classified reference property", + "properties": { + "closestLosingDistance": { + "description": "The lowest distance of a neighbor in the losing group. Optional. 
If k equals the size of the winning group, there is no losing group", + "type": "number", + "format": "float32", + "x-nullable": true + }, + "closestOverallDistance": { + "description": "The lowest distance of any neighbor, regardless of whether they were in the winning or losing group", + "type": "number", + "format": "float32" + }, + "closestWinningDistance": { + "description": "Closest distance of a neighbor from the winning group", + "type": "number", + "format": "float32" + }, + "losingCount": { + "description": "size of the losing group, can be 0 if the winning group size equals k", + "type": "number", + "format": "int64" + }, + "losingDistance": { + "description": "deprecated - do not use, to be removed in 0.23.0", + "type": "number", + "format": "float32", + "x-nullable": true + }, + "meanLosingDistance": { + "description": "Mean distance of all neighbors from the losing group. Optional. If k equals the size of the winning group, there is no losing group.", + "type": "number", + "format": "float32", + "x-nullable": true + }, + "meanWinningDistance": { + "description": "Mean distance of all neighbors from the winning group", + "type": "number", + "format": "float32" + }, + "overallCount": { + "description": "overall neighbors checked as part of the classification. 
In most cases this will equal k, but could be lower than k - for example if not enough data was present", + "type": "number", + "format": "int64" + }, + "winningCount": { + "description": "size of the winning group, a number between 1..k", + "type": "number", + "format": "int64" + }, + "winningDistance": { + "description": "deprecated - do not use, to be removed in 0.23.0", + "type": "number", + "format": "float32" + } + } + }, + "ReplicationConfig": { + "description": "Configure how replication is executed in a cluster", + "type": "object", + "properties": { + "asyncEnabled": { + "description": "Enable asynchronous replication (default: false).", + "type": "boolean", + "x-omitempty": false + }, + "deletionStrategy": { + "description": "Conflict resolution strategy for deleted objects.", + "type": "string", + "enum": [ + "NoAutomatedResolution", + "DeleteOnConflict", + "TimeBasedResolution" + ], + "x-omitempty": true + }, + "factor": { + "description": "Number of times a class is replicated (default: 1).", + "type": "integer" + } + } + }, + "ReplicationDeleteReplicaRequest": { + "description": "Specifies the parameters required to permanently delete a specific shard replica from a particular node. This action will remove the replica's data from the node.", + "type": "object", + "required": [ + "node", + "collection", + "shard" + ], + "properties": { + "collection": { + "description": "The name of the collection to which the shard replica belongs.", + "type": "string" + }, + "node": { + "description": "The name of the Weaviate node from which the shard replica will be deleted.", + "type": "string" + }, + "shard": { + "description": "The ID of the shard whose replica is to be deleted.", + "type": "string" + } + } + }, + "ReplicationDisableReplicaRequest": { + "description": "Specifies the parameters required to mark a specific shard replica as inactive (soft-delete) on a particular node. 
This action typically prevents the replica from serving requests but does not immediately remove its data.", + "type": "object", + "required": [ + "node", + "collection", + "shard" + ], + "properties": { + "collection": { + "description": "The name of the collection to which the shard replica belongs.", + "type": "string" + }, + "node": { + "description": "The name of the Weaviate node hosting the shard replica that is to be disabled.", + "type": "string" + }, + "shard": { + "description": "The ID of the shard whose replica is to be disabled.", + "type": "string" + } + } + }, + "ReplicationReplicateDetailsReplicaResponse": { + "description": "Provides a comprehensive overview of a specific replication operation, detailing its unique ID, the involved collection, shard, source and target nodes, transfer type, current status, and optionally, its status history.", + "required": [ + "id", + "shard", + "sourceNode", + "targetNode", + "collection", + "status", + "type" + ], + "properties": { + "collection": { + "description": "The name of the collection to which the shard being replicated belongs.", + "type": "string" + }, + "id": { + "description": "The unique identifier (ID) of this specific replication operation.", + "type": "string", + "format": "uuid" + }, + "scheduledForCancel": { + "description": "Whether the replica operation is scheduled for cancellation.", + "type": "boolean" + }, + "scheduledForDelete": { + "description": "Whether the replica operation is scheduled for deletion.", + "type": "boolean" + }, + "shard": { + "description": "The name of the shard involved in this replication operation.", + "type": "string" + }, + "sourceNode": { + "description": "The identifier of the node from which the replica is being moved or copied (the source node).", + "type": "string" + }, + "status": { + "description": "An object detailing the current operational state of the replica movement and any errors encountered.", + "type": "object", + "$ref": 
"#/definitions/ReplicationReplicateDetailsReplicaStatus" + }, + "statusHistory": { + "description": "An array detailing the historical sequence of statuses the replication operation has transitioned through, if requested and available.", + "type": "array", + "items": { + "$ref": "#/definitions/ReplicationReplicateDetailsReplicaStatus" + } + }, + "targetNode": { + "description": "The identifier of the node to which the replica is being moved or copied (the target node).", + "type": "string" + }, + "type": { + "description": "Indicates whether the operation is a 'COPY' (source replica remains) or a 'MOVE' (source replica is removed after successful transfer).", + "type": "string", + "enum": [ + "COPY", + "MOVE" + ] + }, + "uncancelable": { + "description": "Whether the replica operation is uncancelable.", + "type": "boolean" + }, + "whenStartedUnixMs": { + "description": "The UNIX timestamp in ms when the replication operation was initiated. This is an approximate time and so should not be used for precise timing.", + "type": "integer", + "format": "int64" + } + } + }, + "ReplicationReplicateDetailsReplicaStatus": { + "description": "Represents the current or historical status of a shard replica involved in a replication operation, including its operational state and any associated errors.", + "type": "object", + "properties": { + "errors": { + "description": "A list of error messages encountered by this replica during the replication operation, if any.", + "type": "array", + "items": { + "$ref": "#/definitions/ReplicationReplicateDetailsReplicaStatusError" + } + }, + "state": { + "description": "The current operational state of the replica during the replication process.", + "type": "string", + "enum": [ + "REGISTERED", + "HYDRATING", + "FINALIZING", + "DEHYDRATING", + "READY", + "CANCELLED" + ] + }, + "whenStartedUnixMs": { + "description": "The UNIX timestamp in ms when this state was first entered. 
This is an approximate time and so should not be used for precise timing.", + "type": "integer", + "format": "int64" + } + } + }, + "ReplicationReplicateDetailsReplicaStatusError": { + "description": "Represents an error encountered during a replication operation, including its timestamp and a human-readable message.", + "type": "object", + "properties": { + "message": { + "description": "A human-readable message describing the error.", + "type": "string" + }, + "whenErroredUnixMs": { + "description": "The unix timestamp in ms when the error occurred. This is an approximate time and so should not be used for precise timing.", + "type": "integer", + "format": "int64" + } + } + }, + "ReplicationReplicateForceDeleteRequest": { + "description": "Specifies the parameters available when force deleting replication operations.", + "type": "object", + "properties": { + "collection": { + "description": "The name of the collection to which the shard being replicated belongs.", + "type": "string" + }, + "dryRun": { + "description": "If true, the operation will not actually delete anything but will return the expected outcome of the deletion.", + "type": "boolean", + "default": false + }, + "id": { + "description": "The unique identifier (ID) of the replication operation to be forcefully deleted.", + "type": "string", + "format": "uuid" + }, + "node": { + "description": "The name of the target node where the replication operations are registered.", + "type": "string" + }, + "shard": { + "description": "The identifier of the shard involved in the replication operations.", + "type": "string" + } + } + }, + "ReplicationReplicateForceDeleteResponse": { + "description": "Provides the UUIDs that were successfully force deleted as part of the replication operation. 
If dryRun is true, this will return the expected outcome without actually deleting anything.", + "type": "object", + "properties": { + "deleted": { + "description": "The unique identifiers (IDs) of the replication operations that were forcefully deleted.", + "type": "array", + "items": { + "type": "string", + "format": "uuid" + } + }, + "dryRun": { + "description": "Indicates whether the operation was a dry run (true) or an actual deletion (false).", + "type": "boolean" + } + } + }, + "ReplicationReplicateReplicaRequest": { + "description": "Specifies the parameters required to initiate a shard replica movement operation between two nodes for a given collection and shard. This request defines the source and target node, the collection and type of transfer.", + "type": "object", + "required": [ + "sourceNode", + "targetNode", + "collection", + "shard" + ], + "properties": { + "collection": { + "description": "The name of the collection to which the target shard belongs.", + "type": "string" + }, + "shard": { + "description": "The name of the shard whose replica is to be moved or copied.", + "type": "string" + }, + "sourceNode": { + "description": "The name of the Weaviate node currently hosting the shard replica that needs to be moved or copied.", + "type": "string" + }, + "targetNode": { + "description": "The name of the Weaviate node where the new shard replica will be created as part of the movement or copy operation.", + "type": "string" + }, + "type": { + "description": "Specifies the type of replication operation to perform. 'COPY' creates a new replica on the target node while keeping the source replica. 'MOVE' creates a new replica on the target node and then removes the source replica upon successful completion. 
Defaults to 'COPY' if omitted.", + "type": "string", + "default": "COPY", + "enum": [ + "COPY", + "MOVE" + ] + } + } + }, + "ReplicationReplicateReplicaResponse": { + "description": "Contains the unique identifier for a successfully initiated asynchronous replica movement operation. This ID can be used to track the progress of the operation.", + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "description": "The unique identifier (ID) assigned to the registered replication operation.", + "type": "string", + "format": "uuid" + } + } + }, + "ReplicationShardReplicas": { + "description": "Represents a shard and lists the nodes that currently host its replicas.", + "type": "object", + "properties": { + "replicas": { + "type": "array", + "items": { + "type": "string" + } + }, + "shard": { + "type": "string" + } + } + }, + "ReplicationShardingState": { + "description": "Details the sharding layout for a specific collection, mapping each shard to its set of replicas across the cluster.", + "type": "object", + "properties": { + "collection": { + "description": "The name of the collection.", + "type": "string" + }, + "shards": { + "description": "An array detailing each shard within the collection and the nodes hosting its replicas.", + "type": "array", + "items": { + "$ref": "#/definitions/ReplicationShardReplicas" + } + } + } + }, + "ReplicationShardingStateResponse": { + "description": "Provides the detailed sharding state for one or more collections, including the distribution of shards and their replicas across the cluster nodes.", + "type": "object", + "properties": { + "shardingState": { + "$ref": "#/definitions/ReplicationShardingState" + } + } + }, + "RestoreConfig": { + "description": "Backup custom configuration", + "type": "object", + "properties": { + "Bucket": { + "description": "Name of the bucket, container, volume, etc", + "type": "string" + }, + "CPUPercentage": { + "description": "Desired CPU core utilization ranging from 
1%-80%", + "type": "integer", + "default": 50, + "maximum": 80, + "minimum": 1, + "x-nullable": false + }, + "Endpoint": { + "description": "name of the endpoint, e.g. s3.amazonaws.com", + "type": "string" + }, + "Path": { + "description": "Path within the bucket", + "type": "string" + }, + "rolesOptions": { + "description": "How roles should be restored", + "type": "string", + "default": "noRestore", + "enum": [ + "noRestore", + "all" + ] + }, + "usersOptions": { + "description": "How users should be restored", + "type": "string", + "default": "noRestore", + "enum": [ + "noRestore", + "all" + ] + } + } + }, + "Role": { + "type": "object", + "required": [ + "name", + "permissions" + ], + "properties": { + "name": { + "description": "role name", + "type": "string" + }, + "permissions": { + "type": "array", + "items": { + "description": "list of permissions (level, action, resource)", + "type": "object", + "$ref": "#/definitions/Permission" + } + } + } + }, + "RolesListResponse": { + "description": "list of roles", + "type": "array", + "items": { + "$ref": "#/definitions/Role" + } + }, + "Schema": { + "description": "Definitions of semantic schemas (also see: https://github.com/weaviate/weaviate-semantic-schemas).", + "type": "object", + "properties": { + "classes": { + "description": "Semantic classes that are available.", + "type": "array", + "items": { + "$ref": "#/definitions/Class" + } + }, + "maintainer": { + "description": "Email of the maintainer.", + "type": "string", + "format": "email" + }, + "name": { + "description": "Name of the schema.", + "type": "string" + } + } + }, + "SchemaClusterStatus": { + "description": "Indicates the health of the schema in a cluster.", + "type": "object", + "properties": { + "error": { + "description": "Contains the sync check error if one occurred", + "type": "string", + "x-omitempty": true + }, + "healthy": { + "description": "True if the cluster is in sync, false if there is an issue (see error).", + "type": "boolean", + 
"x-omitempty": false + }, + "hostname": { + "description": "Hostname of the coordinating node, i.e. the one that received the cluster. This can be useful information if the error message contains phrases such as 'other nodes agree, but local does not', etc.", + "type": "string" + }, + "ignoreSchemaSync": { + "description": "The cluster check at startup can be ignored (to recover from an out-of-sync situation).", + "type": "boolean", + "x-omitempty": false + }, + "nodeCount": { + "description": "Number of nodes that participated in the sync check", + "type": "number", + "format": "int" + } + } + }, + "SchemaHistory": { + "description": "This is an open object, with OpenAPI Specification 3.0 this will be more detailed. See Weaviate docs for more info. In the future this will become a key/value OR a SingleRef definition.", + "type": "object" + }, + "ShardStatus": { + "description": "The status of a single shard", + "properties": { + "status": { + "description": "Status of the shard", + "type": "string" + } + } + }, + "ShardStatusGetResponse": { + "description": "Response body of shard status get request", + "properties": { + "name": { + "description": "Name of the shard", + "type": "string" + }, + "status": { + "description": "Status of the shard", + "type": "string" + }, + "vectorQueueSize": { + "description": "Size of the vector queue of the shard", + "type": "integer", + "x-omitempty": false + } + } + }, + "ShardStatusList": { + "description": "The status of all the shards of a Class", + "type": "array", + "items": { + "$ref": "#/definitions/ShardStatusGetResponse" + } + }, + "SingleRef": { + "description": "Either set beacon (direct reference) or set class and schema (concept reference)", + "properties": { + "beacon": { + "description": "If using a direct reference, specify the URI to point to the cross-ref here. 
Should be in the form of weaviate://localhost/\u003cuuid\u003e for the example of a local cross-ref to an object", + "type": "string", + "format": "uri" + }, + "class": { + "description": "If using a concept reference (rather than a direct reference), specify the desired class name here", + "type": "string", + "format": "uri" + }, + "classification": { + "description": "Additional Meta information about classifications if the item was part of one", + "$ref": "#/definitions/ReferenceMetaClassification" + }, + "href": { + "description": "If using a direct reference, this read-only fields provides a link to the referenced resource. If 'origin' is globally configured, an absolute URI is shown - a relative URI otherwise.", + "type": "string", + "format": "uri" + }, + "schema": { + "description": "If using a concept reference (rather than a direct reference), specify the desired properties here", + "$ref": "#/definitions/PropertySchema" + } + } + }, + "Statistics": { + "description": "The definition of node statistics.", + "properties": { + "bootstrapped": { + "type": "boolean" + }, + "candidates": { + "type": "object" + }, + "dbLoaded": { + "type": "boolean" + }, + "initialLastAppliedIndex": { + "type": "number", + "format": "uint64" + }, + "isVoter": { + "type": "boolean" + }, + "lastAppliedIndex": { + "type": "number" + }, + "leaderAddress": { + "type": "object" + }, + "leaderId": { + "type": "object" + }, + "name": { + "description": "The name of the node.", + "type": "string" + }, + "open": { + "type": "boolean" + }, + "raft": { + "description": "Weaviate Raft statistics.", + "type": "object", + "$ref": "#/definitions/RaftStatistics" + }, + "ready": { + "type": "boolean" + }, + "status": { + "description": "Node's status.", + "type": "string", + "default": "HEALTHY", + "enum": [ + "HEALTHY", + "UNHEALTHY", + "UNAVAILABLE", + "TIMEOUT" + ] + } + } + }, + "StopwordConfig": { + "description": "fine-grained control over stopword list usage", + "type": "object", + 
"properties": { + "additions": { + "description": "Stopwords to be considered additionally (default: []). Can be any array of custom strings.", + "type": "array", + "items": { + "type": "string" + } + }, + "preset": { + "description": "Pre-existing list of common words by language (default: 'en'). Options: ['en', 'none'].", + "type": "string" + }, + "removals": { + "description": "Stopwords to be removed from consideration (default: []). Can be any array of custom strings.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "Tenant": { + "description": "attributes representing a single tenant within weaviate", + "type": "object", + "properties": { + "activityStatus": { + "description": "activity status of the tenant's shard. Optional for creating tenant (implicit ` + "`" + `ACTIVE` + "`" + `) and required for updating tenant. For creation, allowed values are ` + "`" + `ACTIVE` + "`" + ` - tenant is fully active and ` + "`" + `INACTIVE` + "`" + ` - tenant is inactive; no actions can be performed on tenant, tenant's files are stored locally. For updating, ` + "`" + `ACTIVE` + "`" + `, ` + "`" + `INACTIVE` + "`" + ` and also ` + "`" + `OFFLOADED` + "`" + ` - as INACTIVE, but files are stored on cloud storage. The following values are read-only and are set by the server for internal use: ` + "`" + `OFFLOADING` + "`" + ` - tenant is transitioning from ACTIVE/INACTIVE to OFFLOADED, ` + "`" + `ONLOADING` + "`" + ` - tenant is transitioning from OFFLOADED to ACTIVE/INACTIVE. 
We still accept deprecated names ` + "`" + `HOT` + "`" + ` (now ` + "`" + `ACTIVE` + "`" + `), ` + "`" + `COLD` + "`" + ` (now ` + "`" + `INACTIVE` + "`" + `), ` + "`" + `FROZEN` + "`" + ` (now ` + "`" + `OFFLOADED` + "`" + `), ` + "`" + `FREEZING` + "`" + ` (now ` + "`" + `OFFLOADING` + "`" + `), ` + "`" + `UNFREEZING` + "`" + ` (now ` + "`" + `ONLOADING` + "`" + `).", + "type": "string", + "enum": [ + "ACTIVE", + "INACTIVE", + "OFFLOADED", + "OFFLOADING", + "ONLOADING", + "HOT", + "COLD", + "FROZEN", + "FREEZING", + "UNFREEZING" + ] + }, + "name": { + "description": "The name of the tenant (required).", + "type": "string" + } + } + }, + "UserApiKey": { + "type": "object", + "required": [ + "apikey" + ], + "properties": { + "apikey": { + "description": "The apikey", + "type": "string" + } + } + }, + "UserOwnInfo": { + "type": "object", + "required": [ + "username" + ], + "properties": { + "groups": { + "description": "The groups associated to the user", + "type": "array", + "items": { + "type": "string" + } + }, + "roles": { + "type": "array", + "items": { + "description": "The roles assigned to own user", + "type": "object", + "$ref": "#/definitions/Role" + } + }, + "username": { + "description": "The username associated with the provided key", + "type": "string" + } + } + }, + "UserTypeInput": { + "description": "the type of user", + "type": "string", + "enum": [ + "db", + "oidc" + ] + }, + "UserTypeOutput": { + "description": "the type of user", + "type": "string", + "enum": [ + "db_user", + "db_env_user", + "oidc" + ] + }, + "Vector": { + "description": "A vector representation of the object. 
If provided at object creation, this wil take precedence over any vectorizer setting.", + "type": "object" + }, + "VectorConfig": { + "type": "object", + "properties": { + "vectorIndexConfig": { + "description": "Vector-index config, that is specific to the type of index selected in vectorIndexType", + "type": "object" + }, + "vectorIndexType": { + "description": "Name of the vector index to use, eg. (HNSW)", + "type": "string" + }, + "vectorizer": { + "description": "Configuration of a specific vectorizer used by this vector", + "type": "object" + } + } + }, + "VectorWeights": { + "description": "Allow custom overrides of vector weights as math expressions. E.g. \"pancake\": \"7\" will set the weight for the word pancake to 7 in the vectorization, whereas \"w * 3\" would triple the originally calculated word. This is an open object, with OpenAPI Specification 3.0 this will be more detailed. See Weaviate docs for more info. In the future this will become a key/value (string/string) object.", + "type": "object" + }, + "Vectors": { + "description": "A map of named vectors for multi-vector representations.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Vector" + } + }, + "WhereFilter": { + "description": "Filter search results using a where filter", + "type": "object", + "properties": { + "operands": { + "description": "combine multiple where filters, requires 'And' or 'Or' operator", + "type": "array", + "items": { + "$ref": "#/definitions/WhereFilter" + } + }, + "operator": { + "description": "operator to use", + "type": "string", + "enum": [ + "And", + "Or", + "Equal", + "Like", + "NotEqual", + "GreaterThan", + "GreaterThanEqual", + "LessThan", + "LessThanEqual", + "WithinGeoRange", + "IsNull", + "ContainsAny", + "ContainsAll", + "ContainsNone", + "Not" + ], + "example": "GreaterThanEqual" + }, + "path": { + "description": "path to the property currently being filtered", + "type": "array", + "items": { + "type": "string" + }, + 
"example": [ + "inCity", + "City", + "name" + ] + }, + "valueBoolean": { + "description": "value as boolean", + "type": "boolean", + "x-nullable": true, + "example": false + }, + "valueBooleanArray": { + "description": "value as boolean", + "type": "array", + "items": { + "type": "boolean" + }, + "x-nullable": true, + "x-omitempty": true, + "example": [ + true, + false + ] + }, + "valueDate": { + "description": "value as date (as string)", + "type": "string", + "x-nullable": true, + "example": "TODO" + }, + "valueDateArray": { + "description": "value as date (as string)", + "type": "array", + "items": { + "type": "string" + }, + "x-nullable": true, + "x-omitempty": true, + "example": "TODO" + }, + "valueGeoRange": { + "description": "value as geo coordinates and distance", + "type": "object", + "x-nullable": true, + "$ref": "#/definitions/WhereFilterGeoRange" + }, + "valueInt": { + "description": "value as integer", + "type": "integer", + "format": "int64", + "x-nullable": true, + "example": 2000 + }, + "valueIntArray": { + "description": "value as integer", + "type": "array", + "items": { + "type": "integer", + "format": "int64" + }, + "x-nullable": true, + "x-omitempty": true, + "example": "[100, 200]" + }, + "valueNumber": { + "description": "value as number/float", + "type": "number", + "format": "float64", + "x-nullable": true, + "example": 3.14 + }, + "valueNumberArray": { + "description": "value as number/float", + "type": "array", + "items": { + "type": "number", + "format": "float64" + }, + "x-nullable": true, + "x-omitempty": true, + "example": [ + 3.14 + ] + }, + "valueString": { + "description": "value as text (deprecated as of v1.19; alias for valueText)", + "type": "string", + "x-nullable": true, + "example": "my search term" + }, + "valueStringArray": { + "description": "value as text (deprecated as of v1.19; alias for valueText)", + "type": "array", + "items": { + "type": "string" + }, + "x-nullable": true, + "x-omitempty": true, + "example": [ + 
"my search term" + ] + }, + "valueText": { + "description": "value as text", + "type": "string", + "x-nullable": true, + "example": "my search term" + }, + "valueTextArray": { + "description": "value as text", + "type": "array", + "items": { + "type": "string" + }, + "x-nullable": true, + "x-omitempty": true, + "example": [ + "my search term" + ] + } + } + }, + "WhereFilterGeoRange": { + "description": "filter within a distance of a georange", + "type": "object", + "properties": { + "distance": { + "type": "object", + "properties": { + "max": { + "type": "number", + "format": "float64" + } + } + }, + "geoCoordinates": { + "x-nullable": false, + "$ref": "#/definitions/GeoCoordinates" + } + } + } + }, + "parameters": { + "CommonAfterParameterQuery": { + "type": "string", + "description": "A threshold UUID of the objects to retrieve after, using an UUID-based ordering. This object is not part of the set. \u003cbr/\u003e\u003cbr/\u003eMust be used with ` + "`" + `class` + "`" + `, typically in conjunction with ` + "`" + `limit` + "`" + `. \u003cbr/\u003e\u003cbr/\u003eNote ` + "`" + `after` + "`" + ` cannot be used with ` + "`" + `offset` + "`" + ` or ` + "`" + `sort` + "`" + `. \u003cbr/\u003e\u003cbr/\u003eFor a null value similar to offset=0, set an empty string in the request, i.e. ` + "`" + `after=` + "`" + ` or ` + "`" + `after` + "`" + `.", + "name": "after", + "in": "query" + }, + "CommonClassParameterQuery": { + "type": "string", + "description": "The collection from which to query objects. 
\u003cbr/\u003e\u003cbr/\u003eNote that if ` + "`" + `class` + "`" + ` is not provided, the response will not include any objects.", + "name": "class", + "in": "query" + }, + "CommonConsistencyLevelParameterQuery": { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + }, + "CommonIncludeParameterQuery": { + "type": "string", + "description": "Include additional information, such as classification infos. Allowed values include: classification, vector, interpretation", + "name": "include", + "in": "query" + }, + "CommonLimitParameterQuery": { + "type": "integer", + "format": "int64", + "description": "The maximum number of items to be returned per page. The default is 25 unless set otherwise as an environment variable.", + "name": "limit", + "in": "query" + }, + "CommonNodeNameParameterQuery": { + "type": "string", + "description": "The target node which should fulfill the request", + "name": "node_name", + "in": "query" + }, + "CommonOffsetParameterQuery": { + "type": "integer", + "format": "int64", + "default": 0, + "description": "The starting index of the result window. Note ` + "`" + `offset` + "`" + ` will retrieve ` + "`" + `offset+limit` + "`" + ` results and return ` + "`" + `limit` + "`" + ` results from the object with index ` + "`" + `offset` + "`" + ` onwards. Limited by the value of ` + "`" + `QUERY_MAXIMUM_RESULTS` + "`" + `. \u003cbr/\u003e\u003cbr/\u003eShould be used in conjunction with ` + "`" + `limit` + "`" + `. \u003cbr/\u003e\u003cbr/\u003eCannot be used with ` + "`" + `after` + "`" + `.", + "name": "offset", + "in": "query" + }, + "CommonOrderParameterQuery": { + "type": "string", + "description": "Order parameter to tell how to order (asc or desc) data within given field. Should be used in conjunction with ` + "`" + `sort` + "`" + ` parameter. 
If providing multiple ` + "`" + `sort` + "`" + ` values, provide multiple ` + "`" + `order` + "`" + ` values in corresponding order, e.g.: ` + "`" + `sort=author_name,title\u0026order=desc,asc` + "`" + `.", + "name": "order", + "in": "query" + }, + "CommonOutputVerbosityParameterQuery": { + "type": "string", + "default": "minimal", + "description": "Controls the verbosity of the output, possible values are: \"minimal\", \"verbose\". Defaults to \"minimal\".", + "name": "output", + "in": "query" + }, + "CommonSortParameterQuery": { + "type": "string", + "description": "Name(s) of the property to sort by - e.g. ` + "`" + `city` + "`" + `, or ` + "`" + `country,city` + "`" + `.", + "name": "sort", + "in": "query" + }, + "CommonTenantParameterQuery": { + "type": "string", + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "name": "tenant", + "in": "query" + } + }, + "securityDefinitions": { + "oidc": { + "description": "OIDC (OpenConnect ID - based on OAuth2)", + "type": "oauth2", + "flow": "implicit", + "authorizationUrl": "http://to-be-configured-in-the-application-config" + } + }, + "security": [ + {}, + { + "oidc": [] + } + ], + "tags": [ + { + "name": "objects" + }, + { + "description": "These operations allow to execute batch requests for Objects and Objects. 
Mostly used for importing large datasets.", + "name": "batch" + }, + { + "name": "graphql" + }, + { + "name": "meta" + }, + { + "name": "P2P" + }, + { + "description": "All functions related to the Contextionary.", + "name": "contextionary-API" + }, + { + "description": "These operations enable manipulation of the schema in Weaviate schema.", + "name": "schema" + }, + { + "description": "Operations related to managing data replication, including initiating and monitoring shard replica movements between nodes, querying current sharding states, and managing the lifecycle of replication tasks.", + "name": "replication" + } + ], + "externalDocs": { + "url": "https://github.com/weaviate/weaviate" + } +}`)) + FlatSwaggerJSON = json.RawMessage([]byte(`{ + "consumes": [ + "application/json", + "application/yaml" + ], + "produces": [ + "application/json" + ], + "schemes": [ + "https" + ], + "swagger": "2.0", + "info": { + "description": "# Introduction\n Weaviate is an open source, AI-native vector database that helps developers create intuitive and reliable AI-powered applications. \n ### Base Path \nThe base path for the Weaviate server is structured as ` + "`" + `[YOUR-WEAVIATE-HOST]:[PORT]/v1` + "`" + `. As an example, if you wish to access the ` + "`" + `schema` + "`" + ` endpoint on a local instance, you would navigate to ` + "`" + `http://localhost:8080/v1/schema` + "`" + `. Ensure you replace ` + "`" + `[YOUR-WEAVIATE-HOST]` + "`" + ` and ` + "`" + `[PORT]` + "`" + ` with your actual server host and port number respectively. \n ### Questions? \nIf you have any comments or questions, please feel free to reach out to us at the community forum [https://forum.weaviate.io/](https://forum.weaviate.io/). \n### Issues? \nIf you find a bug or want to file a feature request, please open an issue on our GitHub repository for [Weaviate](https://github.com/weaviate/weaviate). \n### Want more documentation? 
\nFor a quickstart, code examples, concepts and more, please visit our [documentation page](https://weaviate.io/developers/weaviate).", + "title": "Weaviate", + "contact": { + "name": "Weaviate", + "url": "https://github.com/weaviate", + "email": "hello@weaviate.io" + }, + "version": "1.33.0-rc.1" + }, + "basePath": "/v1", + "paths": { + "/": { + "get": { + "description": "Get links to other endpoints to help discover the REST API", + "summary": "List available endpoints", + "operationId": "weaviate.root", + "responses": { + "200": { + "description": "Weaviate is alive and ready to serve content", + "schema": { + "type": "object", + "properties": { + "links": { + "type": "array", + "items": { + "$ref": "#/definitions/Link" + } + } + } + } + } + } + } + }, + "/.well-known/live": { + "get": { + "description": "Determines whether the application is alive. Can be used for kubernetes liveness probe", + "summary": "Get application liveness.", + "operationId": "weaviate.wellknown.liveness", + "responses": { + "200": { + "description": "The application is able to respond to HTTP requests" + } + } + } + }, + "/.well-known/openid-configuration": { + "get": { + "description": "OIDC Discovery page, redirects to the token issuer if one is configured", + "tags": [ + "well-known", + "oidc", + "discovery" + ], + "summary": "OIDC discovery information if OIDC auth is enabled", + "responses": { + "200": { + "description": "Successful response, inspect body", + "schema": { + "type": "object", + "properties": { + "clientId": { + "description": "OAuth Client ID", + "type": "string" + }, + "href": { + "description": "The Location to redirect to", + "type": "string" + }, + "scopes": { + "description": "OAuth Scopes", + "type": "array", + "items": { + "type": "string" + }, + "x-omitempty": true + } + } + } + }, + "404": { + "description": "Not found, no oidc provider present" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false + } + }, + "/.well-known/ready": { + "get": { + "description": "Determines whether the application is ready to receive traffic. Can be used for kubernetes readiness probe.", + "summary": "Get application readiness.", + "operationId": "weaviate.wellknown.readiness", + "responses": { + "200": { + "description": "The application has completed its start-up routine and is ready to accept traffic." + }, + "503": { + "description": "The application is currently not able to serve traffic. If other horizontal replicas of weaviate are available and they are capable of receiving traffic, all traffic should be redirected there instead." + } + } + } + }, + "/aliases": { + "get": { + "description": "Retrieve a list of all aliases in the system. Results can be filtered by specifying a collection (class) name to get aliases for a specific collection only.", + "tags": [ + "schema" + ], + "summary": "List aliases", + "operationId": "aliases.get", + "parameters": [ + { + "type": "string", + "description": "Optional filter to retrieve aliases for a specific collection (class) only. If not provided, returns all aliases.", + "name": "class", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the list of aliases", + "schema": { + "$ref": "#/definitions/AliasResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid collection (class) parameter provided", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "post": { + "description": "Create a new alias mapping between an alias name and a collection (class). The alias acts as an alternative name for accessing the collection.", + "tags": [ + "schema" + ], + "summary": "Create a new alias", + "operationId": "aliases.create", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Alias" + } + } + ], + "responses": { + "200": { + "description": "Successfully created a new alias for the specified collection (class)", + "schema": { + "$ref": "#/definitions/Alias" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid create alias request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/aliases/{aliasName}": { + "get": { + "description": "Retrieve details about a specific alias by its name, including which collection (class) it points to.", + "tags": [ + "schema" + ], + "summary": "Get an alias", + "operationId": "aliases.get.alias", + "parameters": [ + { + "type": "string", + "name": "aliasName", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the alias details.", + "schema": { + "$ref": "#/definitions/Alias" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Alias does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid alias name provided.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "put": { + "description": "Update an existing alias to point to a different collection (class). This allows you to redirect an alias from one collection to another without changing the alias name.", + "tags": [ + "schema" + ], + "summary": "Update an alias", + "operationId": "aliases.update", + "parameters": [ + { + "type": "string", + "name": "aliasName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "class": { + "description": "The new collection (class) that the alias should point to.", + "type": "string" + } + } + } + } + ], + "responses": { + "200": { + "description": "Successfully updated the alias to point to the new collection (class).", + "schema": { + "$ref": "#/definitions/Alias" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Alias does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid update alias request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "delete": { + "description": "Remove an existing alias from the system. This will delete the alias mapping but will not affect the underlying collection (class).", + "tags": [ + "schema" + ], + "summary": "Delete an alias", + "operationId": "aliases.delete", + "parameters": [ + { + "type": "string", + "name": "aliasName", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Successfully deleted the alias." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Alias does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid delete alias request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/groups/{groupType}": { + "get": { + "description": "Retrieves a list of all available group names for a specified group type (` + "`" + `oidc` + "`" + ` or ` + "`" + `db` + "`" + `).", + "tags": [ + "authz" + ], + "summary": "List all groups of a specific type", + "operationId": "getGroups", + "parameters": [ + { + "enum": [ + "oidc" + ], + "type": "string", + "description": "The type of group to retrieve.", + "name": "groupType", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A list of group names for the specified type.", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "The request syntax is correct, but the server couldn't process it due to semantic issues.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.groups" + ] + } + }, + "/authz/groups/{id}/assign": { + "post": { + "tags": [ + "authz" + ], + "summary": "Assign a role to a group", + "operationId": "assignRoleToGroup", + "parameters": [ + { + "type": "string", + "description": "group name", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "groupType": { + "$ref": "#/definitions/GroupType" + }, + "roles": { + "description": "the roles that assigned to group", + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + ], + "responses": { + "200": { + "description": "Role assigned successfully" + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "role or group is not found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.assign.role" + ] + } + }, + "/authz/groups/{id}/revoke": { + "post": { + "tags": [ + "authz" + ], + "summary": "Revoke a role from a group", + "operationId": "revokeRoleFromGroup", + "parameters": [ + { + "type": "string", + "description": "group name", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "groupType": { + "$ref": "#/definitions/GroupType" + }, + "roles": { + "description": "the roles that revoked from group", + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + ], + "responses": { + "200": { + "description": "Role revoked successfully" + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "role or group is not found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.revoke.role.group" + ] + } + }, + "/authz/groups/{id}/roles/{groupType}": { + "get": { + "description": "Retrieves a list of all roles assigned to a specific group. 
The group must be identified by both its name (` + "`" + `id` + "`" + `) and its type (` + "`" + `db` + "`" + ` or ` + "`" + `oidc` + "`" + `).", + "tags": [ + "authz" + ], + "summary": "Get roles assigned to a specific group", + "operationId": "getRolesForGroup", + "parameters": [ + { + "type": "string", + "description": "The unique name of the group.", + "name": "id", + "in": "path", + "required": true + }, + { + "enum": [ + "oidc" + ], + "type": "string", + "description": "The type of the group.", + "name": "groupType", + "in": "path", + "required": true + }, + { + "type": "boolean", + "default": false, + "description": "If true, the response will include the full role definitions with all associated permissions. If false, only role names are returned.", + "name": "includeFullRoles", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A list of roles assigned to the specified group.", + "schema": { + "$ref": "#/definitions/RolesListResponse" + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "The specified group was not found." + }, + "422": { + "description": "The request syntax is correct, but the server couldn't process it due to semantic issues.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.groups.roles" + ] + } + }, + "/authz/roles": { + "get": { + "tags": [ + "authz" + ], + "summary": "Get all roles", + "operationId": "getRoles", + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/RolesListResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.roles" + ] + }, + "post": { + "tags": [ + "authz" + ], + "summary": "create new role", + "operationId": "createRole", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Role" + } + } + ], + "responses": { + "201": { + "description": "Role created successfully" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "409": { + "description": "Role already exists", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.create.role" + ] + } + }, + "/authz/roles/{id}": { + "get": { + "tags": [ + "authz" + ], + "summary": "Get a role", + "operationId": "getRole", + "parameters": [ + { + "type": "string", + "description": "role name", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/Role" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.role" + ] + }, + "delete": { + "tags": [ + "authz" + ], + "summary": "Delete role", + "operationId": "deleteRole", + "parameters": [ + { + "type": "string", + "description": "role name", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.delete.role" + ] + } + }, + "/authz/roles/{id}/add-permissions": { + "post": { + "tags": [ + "authz" + ], + "summary": "Add permission to a given role.", + "operationId": "addPermissions", + "parameters": [ + { + "type": "string", + "description": "role name", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "required": [ + "name", + "permissions" + ], + "properties": { + "permissions": { + "description": "permissions to be added to the role", + "type": "array", + "items": { + "$ref": "#/definitions/Permission" + } + } + } + } + } + ], + "responses": { + "200": { + "description": "Permissions added successfully" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.add.role.permissions" + ] + } + }, + "/authz/roles/{id}/group-assignments": { + "get": { + "description": "Retrieves a list of all groups that have been assigned a specific role, identified by its name.", + "tags": [ + "authz" + ], + "summary": "Get groups that have a specific role assigned", + "operationId": "getGroupsForRole", + "parameters": [ + { + "type": "string", + "description": "The unique name of the role.", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the list of groups that have the role assigned.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/GetGroupsForRoleOKBodyItems0" + } + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "The specified role was not found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.roles.groups" + ] + } + }, + "/authz/roles/{id}/has-permission": { + "post": { + "tags": [ + "authz" + ], + "summary": "Check whether role possesses this permission.", + "operationId": "hasPermission", + "parameters": [ + { + "type": "string", + "description": "role name", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Permission" + } + } + ], + "responses": { + "200": { + "description": "Permission check was successful", + "schema": { + "type": "boolean" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.has.role.permission" + ] + } + }, + "/authz/roles/{id}/remove-permissions": { + "post": { + "tags": [ + "authz" + ], + "summary": "Remove permissions from a role. 
If this results in an empty role, the role will be deleted.", + "operationId": "removePermissions", + "parameters": [ + { + "type": "string", + "description": "role name", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "required": [ + "permissions" + ], + "properties": { + "permissions": { + "description": "permissions to remove from the role", + "type": "array", + "items": { + "$ref": "#/definitions/Permission" + } + } + } + } + } + ], + "responses": { + "200": { + "description": "Permissions removed successfully" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.remove.role.permissions" + ] + } + }, + "/authz/roles/{id}/user-assignments": { + "get": { + "tags": [ + "authz" + ], + "summary": "get users assigned to role", + "operationId": "getUsersForRole", + "parameters": [ + { + "type": "string", + "description": "role name", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Users assigned to this role", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/GetUsersForRoleOKBodyItems0" + } + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.roles.users" + ] + } + }, + "/authz/roles/{id}/users": { + "get": { + "tags": [ + "authz" + ], + "summary": "get users (db + OIDC) assigned to role. 
Deprecated, will be removed when 1.29 is not supported anymore", + "operationId": "getUsersForRoleDeprecated", + "deprecated": true, + "parameters": [ + { + "type": "string", + "description": "role name", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Users assigned to this role", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.roles.users" + ] + } + }, + "/authz/users/{id}/assign": { + "post": { + "tags": [ + "authz" + ], + "summary": "Assign a role to a user", + "operationId": "assignRoleToUser", + "parameters": [ + { + "type": "string", + "description": "user name", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "roles": { + "description": "the roles that assigned to user", + "type": "array", + "items": { + "type": "string" + } + }, + "userType": { + "$ref": "#/definitions/UserTypeInput" + } + } + } + } + ], + "responses": { + "200": { + "description": "Role assigned successfully" + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "role or user is not found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.assign.role.user" + ] + } + }, + "/authz/users/{id}/revoke": { + "post": { + "tags": [ + "authz" + ], + "summary": "Revoke a role from a user", + "operationId": "revokeRoleFromUser", + "parameters": [ + { + "type": "string", + "description": "user name", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "roles": { + "description": "the roles that revoked from the key or user", + "type": "array", + "items": { + "type": "string" + } + }, + "userType": { + "$ref": "#/definitions/UserTypeInput" + } + } + } + } + ], + "responses": { + "200": { + "description": "Role revoked successfully" + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "role or user is not found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.revoke.role.user" + ] + } + }, + "/authz/users/{id}/roles": { + "get": { + "tags": [ + "authz" + ], + "summary": "get roles assigned to user (DB + OIDC). Deprecated, will be removed when 1.29 is not supported anymore", + "operationId": "getRolesForUserDeprecated", + "deprecated": true, + "parameters": [ + { + "type": "string", + "description": "user name", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Role assigned users", + "schema": { + "$ref": "#/definitions/RolesListResponse" + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found for user" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.users.roles" + ] + } + }, + "/authz/users/{id}/roles/{userType}": { + "get": { + "tags": [ + "authz" + ], + "summary": "get roles assigned to user", + "operationId": "getRolesForUser", + "parameters": [ + { + "type": "string", + "description": "user name", + "name": "id", + "in": "path", + "required": true + }, + { + "enum": [ + "oidc", + "db" + ], + "type": "string", + "description": "The type of user", + "name": "userType", + "in": "path", + "required": true + }, + { + "type": "boolean", + "default": false, + "description": "Whether to include detailed role information needed the roles permission", + "name": "includeFullRoles", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Role assigned users", + "schema": { + "$ref": "#/definitions/RolesListResponse" + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found for user" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.authz.get.users.roles" + ] + } + }, + "/backups/{backend}": { + "get": { + "description": "[Coming soon] List all backups in progress not implemented yet.", + "tags": [ + "backups" + ], + "summary": "List backups in progress", + "operationId": "backups.list", + "parameters": [ + { + "type": "string", + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "name": "backend", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Existed backups", + "schema": { + "$ref": "#/definitions/BackupListResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup list.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.backup" + ] + }, + "post": { + "description": "Start creating a backup for a set of collections. \u003cbr/\u003e\u003cbr/\u003eNotes: \u003cbr/\u003e- Weaviate uses gzip compression by default. \u003cbr/\u003e- Weaviate stays usable while a backup process is ongoing.", + "tags": [ + "backups" + ], + "summary": "Start a backup process", + "operationId": "backups.create", + "parameters": [ + { + "type": "string", + "description": "Backup backend name e.g. 
` + "`" + `filesystem` + "`" + `, ` + "`" + `gcs` + "`" + `, ` + "`" + `s3` + "`" + `, ` + "`" + `azure` + "`" + `.", + "name": "backend", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/BackupCreateRequest" + } + } + ], + "responses": { + "200": { + "description": "Backup create process successfully started.", + "schema": { + "$ref": "#/definitions/BackupCreateResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup creation attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.backup" + ] + } + }, + "/backups/{backend}/{id}": { + "get": { + "description": "Returns status of backup creation attempt for a set of collections. \u003cbr/\u003e\u003cbr/\u003eAll client implementations have a ` + "`" + `wait for completion` + "`" + ` option which will poll the backup status in the background and only return once the backup has completed (successfully or unsuccessfully). If you set the ` + "`" + `wait for completion` + "`" + ` option to false, you can also check the status yourself using this endpoint.", + "tags": [ + "backups" + ], + "summary": "Get backup process status", + "operationId": "backups.create.status", + "parameters": [ + { + "type": "string", + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "name": "backend", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The ID of a backup. 
Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Name of the bucket, container, volume, etc", + "name": "bucket", + "in": "query" + }, + { + "type": "string", + "description": "The path within the bucket", + "name": "path", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Backup creation status successfully returned", + "schema": { + "$ref": "#/definitions/BackupCreateStatusResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration status attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.backup" + ] + }, + "delete": { + "description": "Cancel created backup with specified ID", + "tags": [ + "backups" + ], + "summary": "Cancel backup", + "operationId": "backups.cancel", + "parameters": [ + { + "type": "string", + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "name": "backend", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The ID of a backup. 
Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Name of the bucket, container, volume, etc", + "name": "bucket", + "in": "query" + }, + { + "type": "string", + "description": "The path within the bucket", + "name": "path", + "in": "query" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup cancellation attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.backup" + ] + } + }, + "/backups/{backend}/{id}/restore": { + "get": { + "description": "Returns status of a backup restoration attempt for a set of classes. \u003cbr/\u003e\u003cbr/\u003eAll client implementations have a ` + "`" + `wait for completion` + "`" + ` option which will poll the backup status in the background and only return once the backup has completed (successfully or unsuccessfully). If you set the ` + "`" + `wait for completion` + "`" + ` option to false, you can also check the status yourself using the this endpoint.", + "tags": [ + "backups" + ], + "summary": "Get restore process status", + "operationId": "backups.restore.status", + "parameters": [ + { + "type": "string", + "description": "Backup backend name e.g. 
` + "`" + `filesystem` + "`" + `, ` + "`" + `gcs` + "`" + `, ` + "`" + `s3` + "`" + `, ` + "`" + `azure` + "`" + `.", + "name": "backend", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Name of the bucket, container, volume, etc", + "name": "bucket", + "in": "query" + }, + { + "type": "string", + "description": "The path within the bucket", + "name": "path", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Backup restoration status successfully returned", + "schema": { + "$ref": "#/definitions/BackupRestoreStatusResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.backup" + ] + }, + "post": { + "description": "Starts a process of restoring a backup for a set of collections. 
\u003cbr/\u003e\u003cbr/\u003eAny backup can be restored to any machine, as long as the number of nodes between source and target are identical.\u003cbr/\u003e\u003cbr/\u003eRequrements:\u003cbr/\u003e\u003cbr/\u003e- None of the collections to be restored already exist on the target restoration node(s).\u003cbr/\u003e- The node names of the backed-up collections' must match those of the target restoration node(s).", + "tags": [ + "backups" + ], + "summary": "Start a restoration process", + "operationId": "backups.restore", + "parameters": [ + { + "type": "string", + "description": "Backup backend name e.g. ` + "`" + `filesystem` + "`" + `, ` + "`" + `gcs` + "`" + `, ` + "`" + `s3` + "`" + `, ` + "`" + `azure` + "`" + `.", + "name": "backend", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/BackupRestoreRequest" + } + } + ], + "responses": { + "200": { + "description": "Backup restoration process successfully started.", + "schema": { + "$ref": "#/definitions/BackupRestoreResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.backup" + ] + } + }, + "/batch/objects": { + "post": { + "description": "Create new objects in bulk. \u003cbr/\u003e\u003cbr/\u003eMeta-data and schema values are validated. \u003cbr/\u003e\u003cbr/\u003e**Note: idempotence of ` + "`" + `/batch/objects` + "`" + `**: \u003cbr/\u003e` + "`" + `POST /batch/objects` + "`" + ` is idempotent, and will overwrite any existing object given the same id.", + "tags": [ + "batch", + "objects" + ], + "summary": "Creates new Objects based on a Object template as a batch.", + "operationId": "batch.objects.create", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "fields": { + "description": "Define which fields need to be returned. Default value is ALL", + "type": "array", + "items": { + "type": "string", + "default": "ALL", + "enum": [ + "ALL", + "class", + "schema", + "id", + "creationTimeUnix" + ] + } + }, + "objects": { + "type": "array", + "items": { + "$ref": "#/definitions/Object" + } + } + } + } + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Request succeeded, see response body to get detailed information about each batched item.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/ObjectsGetResponse" + } + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.add" + ] + }, + "delete": { + "description": "Batch delete objects that match a particular filter. \u003cbr/\u003e\u003cbr/\u003eThe request body takes a single ` + "`" + `where` + "`" + ` filter and will delete all objects matched. \u003cbr/\u003e\u003cbr/\u003eNote that there is a limit to the number of objects to be deleted at once using this filter, in order to protect against unexpected memory surges and very-long-running requests. The default limit is 10,000 and may be configured by setting the ` + "`" + `QUERY_MAXIMUM_RESULTS` + "`" + ` environment variable. \u003cbr/\u003e\u003cbr/\u003eObjects are deleted in the same order that they would be returned in an equivalent Get query. 
To delete more objects than the limit, run the same query multiple times.", + "tags": [ + "batch", + "objects" + ], + "summary": "Deletes Objects based on a match filter as a batch.", + "operationId": "batch.objects.delete", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/BatchDelete" + } + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + }, + { + "type": "string", + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "name": "tenant", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Request succeeded, see response body to get detailed information about each batched item.", + "schema": { + "$ref": "#/definitions/BatchDeleteResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + } + }, + "/batch/references": { + "post": { + "description": "Batch create cross-references between collections items (objects or objects) in bulk.", + "tags": [ + "batch", + "references" + ], + "summary": "Creates new Cross-References between arbitrary classes in bulk.", + "operationId": "batch.references.create", + "parameters": [ + { + "description": "A list of references to be batched. The ideal size depends on the used database connector. Please see the documentation of the used connector for help", + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/BatchReference" + } + } + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Request Successful. Warning: A successful request does not guarantee that every batched reference was successfully created. Inspect the response body to see which references succeeded and which failed.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/BatchReferenceResponse" + } + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.add" + ] + } + }, + "/classifications/": { + "post": { + "description": "Trigger a classification based on the specified params. Classifications will run in the background, use GET /classifications/\u003cid\u003e to retrieve the status of your classification.", + "tags": [ + "classifications" + ], + "summary": "Starts a classification.", + "operationId": "classifications.post", + "parameters": [ + { + "description": "parameters to start a classification", + "name": "params", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Classification" + } + } + ], + "responses": { + "201": { + "description": "Successfully started classification.", + "schema": { + "$ref": "#/definitions/Classification" + } + }, + "400": { + "description": "Incorrect request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.classifications.post" + ] + } + }, + "/classifications/{id}": { + "get": { + "description": "Get status, results and metadata of a previously created classification", + "tags": [ + "classifications" + ], + "summary": "View previously created classification", + "operationId": "classifications.get", + "parameters": [ + { + "type": "string", + "description": "classification id", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Found the classification, returned as body", + "schema": { + "$ref": "#/definitions/Classification" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Classification does not exist" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.classifications.get" + ] + } + }, + "/cluster/statistics": { + "get": { + "description": "Returns Raft cluster statistics of Weaviate DB.", + "tags": [ + "cluster" + ], + "summary": "See Raft cluster statistics", + "operationId": "cluster.get.statistics", + "responses": { + "200": { + "description": "Cluster statistics successfully returned", + "schema": { + "$ref": "#/definitions/ClusterStatisticsResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration status attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.cluster.statistics.get" + ] + } + }, + "/graphql": { + "post": { + "description": "Get a response based on a GraphQL query", + "tags": [ + "graphql" + ], + "summary": "Get a response based on GraphQL", + "operationId": "graphql.post", + "parameters": [ + { + "description": "The GraphQL query request parameters.", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/GraphQLQuery" + } + } + ], + "responses": { + "200": { + "description": "Successful query (with select).", + "schema": { + "$ref": "#/definitions/GraphQLResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.query", + "weaviate.local.query.meta", + "weaviate.network.query", + "weaviate.network.query.meta" + ] + } + }, + "/graphql/batch": { + "post": { + "description": "Perform a batched GraphQL query", + "tags": [ + "graphql" + ], + "summary": "Get a response based on GraphQL.", + "operationId": "graphql.batch", + "parameters": [ + { + "description": "The GraphQL queries.", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/GraphQLQueries" + } + } + ], + "responses": { + "200": { + "description": "Successful query (with select).", + "schema": { + "$ref": "#/definitions/GraphQLResponses" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.query", + "weaviate.local.query.meta", + "weaviate.network.query", + "weaviate.network.query.meta" + ] + } + }, + "/meta": { + "get": { + "description": "Returns meta information about the server. 
Can be used to provide information to another Weaviate instance that wants to interact with the current instance.", + "tags": [ + "meta" + ], + "summary": "Returns meta information of the current Weaviate instance.", + "operationId": "meta.get", + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/Meta" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.query.meta" + ] + } + }, + "/nodes": { + "get": { + "description": "Returns node information for the entire database.", + "tags": [ + "nodes" + ], + "summary": "Node information for the database.", + "operationId": "nodes.get", + "parameters": [ + { + "type": "string", + "default": "minimal", + "description": "Controls the verbosity of the output, possible values are: \"minimal\", \"verbose\". Defaults to \"minimal\".", + "name": "output", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Nodes status successfully returned", + "schema": { + "$ref": "#/definitions/NodesStatusResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration status attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.nodes.status.get" + ] + } + }, + "/nodes/{className}": { + "get": { + "description": "Returns node information for the nodes relevant to the collection.", + "tags": [ + "nodes" + ], + "summary": "Node information for a collection.", + "operationId": "nodes.get.class", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "shardName", + "in": "query" + }, + { + "type": "string", + "default": "minimal", + "description": "Controls the verbosity of the output, possible values are: \"minimal\", \"verbose\". Defaults to \"minimal\".", + "name": "output", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Nodes status successfully returned", + "schema": { + "$ref": "#/definitions/NodesStatusResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration status attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.nodes.status.get.class" + ] + } + }, + "/objects": { + "get": { + "description": "Lists all Objects in reverse order of creation, owned by the user that belongs to the used token.", + "tags": [ + "objects" + ], + "summary": "Get a list of Objects.", + "operationId": "objects.list", + "parameters": [ + { + "type": "string", + "description": "A threshold UUID of the objects to retrieve after, using an UUID-based ordering. This object is not part of the set. \u003cbr/\u003e\u003cbr/\u003eMust be used with ` + "`" + `class` + "`" + `, typically in conjunction with ` + "`" + `limit` + "`" + `. \u003cbr/\u003e\u003cbr/\u003eNote ` + "`" + `after` + "`" + ` cannot be used with ` + "`" + `offset` + "`" + ` or ` + "`" + `sort` + "`" + `. \u003cbr/\u003e\u003cbr/\u003eFor a null value similar to offset=0, set an empty string in the request, i.e. ` + "`" + `after=` + "`" + ` or ` + "`" + `after` + "`" + `.", + "name": "after", + "in": "query" + }, + { + "type": "integer", + "format": "int64", + "default": 0, + "description": "The starting index of the result window. Note ` + "`" + `offset` + "`" + ` will retrieve ` + "`" + `offset+limit` + "`" + ` results and return ` + "`" + `limit` + "`" + ` results from the object with index ` + "`" + `offset` + "`" + ` onwards. Limited by the value of ` + "`" + `QUERY_MAXIMUM_RESULTS` + "`" + `. \u003cbr/\u003e\u003cbr/\u003eShould be used in conjunction with ` + "`" + `limit` + "`" + `. \u003cbr/\u003e\u003cbr/\u003eCannot be used with ` + "`" + `after` + "`" + `.", + "name": "offset", + "in": "query" + }, + { + "type": "integer", + "format": "int64", + "description": "The maximum number of items to be returned per page. 
The default is 25 unless set otherwise as an environment variable.", + "name": "limit", + "in": "query" + }, + { + "type": "string", + "description": "Include additional information, such as classification infos. Allowed values include: classification, vector, interpretation", + "name": "include", + "in": "query" + }, + { + "type": "string", + "description": "Name(s) of the property to sort by - e.g. ` + "`" + `city` + "`" + `, or ` + "`" + `country,city` + "`" + `.", + "name": "sort", + "in": "query" + }, + { + "type": "string", + "description": "Order parameter to tell how to order (asc or desc) data within given field. Should be used in conjunction with ` + "`" + `sort` + "`" + ` parameter. If providing multiple ` + "`" + `sort` + "`" + ` values, provide multiple ` + "`" + `order` + "`" + ` values in corresponding order, e.g.: ` + "`" + `sort=author_name,title\u0026order=desc,asc` + "`" + `.", + "name": "order", + "in": "query" + }, + { + "type": "string", + "description": "The collection from which to query objects. \u003cbr/\u003e\u003cbr/\u003eNote that if ` + "`" + `class` + "`" + ` is not provided, the response will not include any objects.", + "name": "class", + "in": "query" + }, + { + "type": "string", + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "name": "tenant", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Successful response. \u003cbr/\u003e\u003cbr/\u003eIf ` + "`" + `class` + "`" + ` is not provided, the response will not include any objects.", + "schema": { + "$ref": "#/definitions/ObjectsListResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." 
+ }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.query" + ] + }, + "post": { + "description": "Create a new object. \u003cbr/\u003e\u003cbr/\u003eMeta-data and schema values are validated. \u003cbr/\u003e\u003cbr/\u003e**Note: Use ` + "`" + `/batch` + "`" + ` for importing many objects**: \u003cbr/\u003eIf you plan on importing a large number of objects, it's much more efficient to use the ` + "`" + `/batch` + "`" + ` endpoint. Otherwise, sending multiple single requests sequentially would incur a large performance penalty. \u003cbr/\u003e\u003cbr/\u003e**Note: idempotence of ` + "`" + `/objects` + "`" + `**: \u003cbr/\u003ePOST /objects will fail if an id is provided which already exists in the class. 
To update an existing object with the objects endpoint, use the PUT or PATCH method.", + "tags": [ + "objects" + ], + "summary": "Create a new object.", + "operationId": "objects.create", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Object created.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.add" + ] + } + }, + "/objects/validate": { + "post": { + "description": "Validate an object's schema and meta-data without creating it. \u003cbr/\u003e\u003cbr/\u003eIf the schema of the object is valid, the request should return nothing with a plain RESTful request. 
Otherwise, an error object will be returned.", + "tags": [ + "objects" + ], + "summary": "Validate an Object based on a schema.", + "operationId": "objects.validate", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Object" + } + } + ], + "responses": { + "200": { + "description": "Successfully validated." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.query.meta" + ] + } + }, + "/objects/{className}/{id}": { + "get": { + "description": "Get a data object based on its collection and UUID.", + "tags": [ + "objects" + ], + "summary": "Get a specific Object based on its class and UUID. Also available as Websocket bus.", + "operationId": "objects.class.get", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Include additional information, such as classification infos. 
Allowed values include: classification, vector, interpretation", + "name": "include", + "in": "query" + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + }, + { + "type": "string", + "description": "The target node which should fulfill the request", + "name": "node_name", + "in": "query" + }, + { + "type": "string", + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "name": "tenant", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "Request is well-formed (i.e., syntactically correct), but erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.query" + ] + }, + "put": { + "description": "Update an object based on its uuid and collection. 
This (` + "`" + `put` + "`" + `) method replaces the object with the provided object.", + "tags": [ + "objects" + ], + "summary": "Update a class object based on its uuid", + "operationId": "objects.class.put", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "The uuid of the data object to update.", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Successfully received.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "delete": { + "description": "Delete an object based on its collection and UUID. \u003cbr/\u003e\u003cbr/\u003eNote: For backward compatibility, beacons also support an older, deprecated format without the collection name. 
As a result, when deleting a reference, the beacon specified has to match the beacon to be deleted exactly. In other words, if a beacon is present using the old format (without collection name) you also need to specify it the same way. \u003cbr/\u003e\u003cbr/\u003eIn the beacon format, you need to always use ` + "`" + `localhost` + "`" + ` as the host, rather than the actual hostname. ` + "`" + `localhost` + "`" + ` here refers to the fact that the beacon's target is on the same Weaviate instance, as opposed to a foreign instance.", + "tags": [ + "objects" + ], + "summary": "Delete object based on its class and UUID.", + "operationId": "objects.class.delete", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + }, + { + "type": "string", + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "name": "tenant", + "in": "query" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "Request is well-formed (i.e., syntactically correct), but erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": true, + "x-available-in-websocket": true, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "head": { + "description": "Checks if a data object exists based on its collection and uuid without retrieving it. \u003cbr/\u003e\u003cbr/\u003eInternally it skips reading the object from disk other than checking if it is present. Thus it does not use resources on marshalling, parsing, etc., and is faster. Note the resulting HTTP request has no body; the existence of an object is indicated solely by the status code.", + "tags": [ + "objects" + ], + "summary": "Checks object's existence based on its class and uuid.", + "operationId": "objects.class.head", + "parameters": [ + { + "type": "string", + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "The uuid of the data object", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + }, + { + "type": "string", + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "name": "tenant", + "in": "query" + } + ], + "responses": { + "204": { + "description": "Object exists." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Object doesn't exist." 
+ }, + "422": { + "description": "Request is well-formed (i.e., syntactically correct), but erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": true, + "x-available-in-websocket": true, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "patch": { + "description": "Update an individual data object based on its class and uuid. This method supports json-merge style patch semantics (RFC 7396). Provided meta-data and schema values are validated. LastUpdateTime is set to the time this function is called.", + "tags": [ + "objects" + ], + "summary": "Update an Object based on its UUID (using patch semantics).", + "operationId": "objects.class.patch", + "parameters": [ + { + "type": "string", + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "The uuid of the data object to update.", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "RFC 7396-style patch, the body contains the object to merge into the existing object.", + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + } + ], + "responses": { + "204": { + "description": "Successfully applied. No content provided." + }, + "400": { + "description": "The patch-JSON is malformed.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "The patch-JSON is valid but unprocessable.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + } + }, + "/objects/{className}/{id}/references/{propertyName}": { + "put": { + "description": "Replace **all** references in cross-reference property of an object.", + "tags": [ + "objects" + ], + "summary": "Replace all references to a class-property.", + "operationId": "objects.class.references.put", + "parameters": [ + { + "type": "string", + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Unique name of the property related to the Object.", + "name": "propertyName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/MultipleRef" + } + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + }, + { + "type": "string", + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "name": "tenant", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Successfully replaced all 
the references." + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Source object doesn't exist." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "post": { + "description": "Add a single reference to an object. 
This adds a reference to the array of cross-references of the given property in the source object specified by its collection name and id", + "tags": [ + "objects" + ], + "summary": "Add a single reference to a class-property.", + "operationId": "objects.class.references.create", + "parameters": [ + { + "type": "string", + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Unique name of the property related to the Object.", + "name": "propertyName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/SingleRef" + } + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + }, + { + "type": "string", + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "name": "tenant", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Successfully added the reference." + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Source object doesn't exist." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "delete": { + "description": "Delete the single reference that is given in the body from the list of references that this property has.", + "tags": [ + "objects" + ], + "summary": "Delete a single reference from the list of references.", + "operationId": "objects.class.references.delete", + "parameters": [ + { + "type": "string", + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Unique name of the property related to the Object.", + "name": "propertyName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/SingleRef" + } + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + }, + { + "type": "string", + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "name": "tenant", + "in": "query" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + } + }, + "/objects/{id}": { + "get": { + "description": "Get a specific object based on its UUID. Also available as Websocket bus. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. Use the ` + "`" + `/objects/{className}/{id}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Get a specific Object based on its UUID.", + "operationId": "objects.get", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Include additional information, such as classification infos. Allowed values include: classification, vector, interpretation", + "name": "include", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.query" + ] + }, + "put": { + "description": "Updates an object based on its UUID. Given meta-data and schema values are validated. LastUpdateTime is set to the time this function is called. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. Use the ` + "`" + `/objects/{className}/{id}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Update an Object based on its UUID.", + "operationId": "objects.update", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Successfully received.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." 
+ }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "delete": { + "description": "Deletes an object from the database based on its UUID. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. Use the ` + "`" + `/objects/{className}/{id}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Delete an Object based on its UUID.", + "operationId": "objects.delete", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + }, + { + "type": "string", + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "name": "tenant", + "in": "query" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": true, + "x-available-in-websocket": true, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "head": { + "description": "Checks if an object exists in the system based on its UUID. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. Use the ` + "`" + `/objects/{className}/{id}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Checks Object's existence based on its UUID.", + "operationId": "objects.head", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Object exists." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Object doesn't exist." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": true, + "x-available-in-websocket": true, + "x-serviceIds": [ + "weaviate.objects.check" + ] + }, + "patch": { + "description": "Update an object based on its UUID (using patch semantics). This method supports json-merge style patch semantics (RFC 7396). Provided meta-data and schema values are validated. LastUpdateTime is set to the time this function is called. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. 
Use the ` + "`" + `/objects/{className}/{id}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Update an Object based on its UUID (using patch semantics).", + "operationId": "objects.patch", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "RFC 7396-style patch, the body contains the object to merge into the existing object.", + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + } + ], + "responses": { + "204": { + "description": "Successfully applied. No content provided." + }, + "400": { + "description": "The patch-JSON is malformed." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "The patch-JSON is valid but unprocessable.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + } + }, + "/objects/{id}/references/{propertyName}": { + "put": { + "description": "Replace all references in cross-reference property of an object. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. 
Use the ` + "`" + `/objects/{className}/{id}/references/{propertyName}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Replace all references to a class-property.", + "operationId": "objects.references.update", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Unique name of the property related to the Object.", + "name": "propertyName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/MultipleRef" + } + }, + { + "type": "string", + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "name": "tenant", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Successfully replaced all the references." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "post": { + "description": "Add a cross-reference. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. 
Use the ` + "`" + `/objects/{className}/{id}/references/{propertyName}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Add a single reference to a class-property.", + "operationId": "objects.references.create", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Unique name of the property related to the Object.", + "name": "propertyName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/SingleRef" + } + }, + { + "type": "string", + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "name": "tenant", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Successfully added the reference." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + }, + "delete": { + "description": "Delete the single reference that is given in the body from the list of references that this property has. \u003cbr/\u003e\u003cbr/\u003e**Note**: This endpoint is deprecated and will be removed in a future version. 
Use the ` + "`" + `/objects/{className}/{id}/references/{propertyName}` + "`" + ` endpoint instead.", + "tags": [ + "objects" + ], + "summary": "Delete a single reference from the list of references.", + "operationId": "objects.references.delete", + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Unique ID of the Object.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Unique name of the property related to the Object.", + "name": "propertyName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/SingleRef" + } + }, + { + "type": "string", + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "name": "tenant", + "in": "query" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "x-serviceIds": [ + "weaviate.local.manipulate" + ] + } + }, + "/replication/replicate": { + "post": { + "description": "Begins an asynchronous operation to move or copy a specific shard replica from its current node to a designated target node. 
The operation involves copying data, synchronizing, and potentially decommissioning the source replica.", + "tags": [ + "replication" + ], + "summary": "Initiate a replica movement", + "operationId": "replicate", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ReplicationReplicateReplicaRequest" + } + } + ], + "responses": { + "200": { + "description": "Replication operation registered successfully. ID of the operation is returned.", + "schema": { + "$ref": "#/definitions/ReplicationReplicateReplicaResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.replicate" + ] + }, + "delete": { + "tags": [ + "replication" + ], + "summary": "Schedules all replication operations for deletion across all collections, shards, and nodes.", + "operationId": "deleteAllReplications", + "responses": { + "204": { + "description": "Replication operation registered successfully" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.deleteAllReplications" + ] + } + }, + "/replication/replicate/force-delete": { + "post": { + "description": "USE AT OWN RISK! Synchronously force delete operations from the FSM. This will not perform any checks on which state the operation is in so may lead to data corruption or loss. It is recommended to first scale the number of replication engine workers to 0 before calling this endpoint to ensure no operations are in-flight.", + "tags": [ + "replication" + ], + "summary": "Force delete replication operations", + "operationId": "forceDeleteReplications", + "parameters": [ + { + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/ReplicationReplicateForceDeleteRequest" + } + } + ], + "responses": { + "200": { + "description": "Replication operations force deleted successfully.", + "schema": { + "$ref": "#/definitions/ReplicationReplicateForceDeleteResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.forceDeleteReplications" + ] + } + }, + "/replication/replicate/list": { + "get": { + "description": "Retrieves a list of currently registered replication operations, optionally filtered by collection, shard, or node ID.", + "tags": [ + "replication" + ], + "summary": "List replication operations", + "operationId": "listReplication", + "parameters": [ + { + "type": "string", + "description": "The name of the target node to get details for.", + "name": "targetNode", + "in": "query" + }, + { + "type": "string", + "description": "The name of the collection to get details for.", + "name": "collection", + "in": "query" + }, + { + "type": "string", + "description": "The shard to get details for.", + "name": "shard", + "in": "query" + }, + { + "type": "boolean", + "description": "Whether to include the history of the replication operation.", + "name": "includeHistory", + "in": "query" + } + ], + "responses": { + "200": { + "description": "The details of the replication operations.", + "schema": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/ReplicationReplicateDetailsReplicaResponse" + } + } + }, + "400": { + "description": "Bad request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.replicate.details" + ] + } + }, + "/replication/replicate/{id}": { + "get": { + "description": "Fetches the current status and detailed information for a specific replication operation, identified by its unique ID. Optionally includes historical data of the operation's progress if requested.", + "tags": [ + "replication" + ], + "summary": "Retrieve a replication operation", + "operationId": "replicationDetails", + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "The ID of the replication operation to get details for.", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "boolean", + "description": "Whether to include the history of the replication operation.", + "name": "includeHistory", + "in": "query" + } + ], + "responses": { + "200": { + "description": "The details of the replication operation.", + "schema": { + "$ref": "#/definitions/ReplicationReplicateDetailsReplicaResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Shard replica operation not found." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.replicate.details" + ] + }, + "delete": { + "description": "Removes a specific replication operation. If the operation is currently active, it will be cancelled and its resources cleaned up before the operation is deleted.", + "tags": [ + "replication" + ], + "summary": "Delete a replication operation", + "operationId": "deleteReplication", + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "The ID of the replication operation to delete.", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Shard replica operation not found." + }, + "409": { + "description": "The operation is not in a deletable state, e.g. it is a MOVE op in the DEHYDRATING state.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.replicate.delete" + ] + } + }, + "/replication/replicate/{id}/cancel": { + "post": { + "description": "Requests the cancellation of an active replication operation identified by its ID. The operation will be stopped, but its record will remain in the 'CANCELLED' state (can't be resumed) and will not be automatically deleted.", + "tags": [ + "replication" + ], + "summary": "Cancel a replication operation", + "operationId": "cancelReplication", + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "The ID of the replication operation to cancel.", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Successfully cancelled." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Shard replica operation not found." + }, + "409": { + "description": "The operation is not in a cancellable state, e.g. it is READY or is a MOVE op in the DEHYDRATING state.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.replicate.cancel" + ] + } + }, + "/replication/sharding-state": { + "get": { + "description": "Fetches the current sharding state, including replica locations and statuses, for all collections or a specified collection. If a shard name is provided along with a collection, the state for that specific shard is returned.", + "tags": [ + "replication" + ], + "summary": "Get sharding state", + "operationId": "getCollectionShardingState", + "parameters": [ + { + "type": "string", + "description": "The collection name to get the sharding state for.", + "name": "collection", + "in": "query" + }, + { + "type": "string", + "description": "The shard to get the sharding state for.", + "name": "shard", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved sharding state.", + "schema": { + "$ref": "#/definitions/ReplicationShardingStateResponse" + } + }, + "400": { + "description": "Bad request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Collection or shard not found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.replication.shardingstate.collection.get" + ] + } + }, + "/schema": { + "get": { + "description": "Fetch an array of all collection definitions from the schema.", + "tags": [ + "schema" + ], + "summary": "Dump the current database schema.", + "operationId": "schema.dump", + "parameters": [ + { + "type": "boolean", + "default": true, + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency", + "name": "consistency", + "in": "header" + } + ], + "responses": { + "200": { + "description": "Successfully dumped the database schema.", + "schema": { + "$ref": "#/definitions/Schema" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.query.meta" + ] + }, + "post": { + "description": "Create a new data object collection. \u003cbr/\u003e\u003cbr/\u003eIf AutoSchema is enabled, Weaviate will attempt to infer the schema from the data at import time. 
However, manual schema definition is recommended for production environments.", + "tags": [ + "schema" + ], + "summary": "Create a new Object class in the schema.", + "operationId": "schema.objects.create", + "parameters": [ + { + "name": "objectClass", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Class" + } + } + ], + "responses": { + "200": { + "description": "Added the new Object class to the schema.", + "schema": { + "$ref": "#/definitions/Class" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Object class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.add.meta" + ] + } + }, + "/schema/{className}": { + "get": { + "tags": [ + "schema" + ], + "summary": "Get a single class from the schema", + "operationId": "schema.objects.get", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "boolean", + "default": true, + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency", + "name": "consistency", + "in": "header" + } + ], + "responses": { + "200": { + "description": "Found the Class, returned as body", + "schema": { + "$ref": "#/definitions/Class" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "This class does not exist" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.get.meta" + ] + }, + "put": { + "description": "Add a property to an existing collection.", + "tags": [ + "schema" + ], + "summary": "Update settings of an existing schema class", + "operationId": "schema.objects.update", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "name": "objectClass", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Class" + } + } + ], + "responses": { + "200": { + "description": "Class was updated successfully", + "schema": { + "$ref": "#/definitions/Class" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Class to be updated does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid update attempt", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.manipulate.meta" + ] + }, + "delete": { + "description": "Remove a collection from the schema. 
This will also delete all the objects in the collection.", + "tags": [ + "schema" + ], + "summary": "Remove an Object class (and all data in the instances) from the schema.", + "operationId": "schema.objects.delete", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Removed the Object class from the schema." + }, + "400": { + "description": "Could not delete the Object class.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.manipulate.meta" + ] + } + }, + "/schema/{className}/properties": { + "post": { + "tags": [ + "schema" + ], + "summary": "Add a property to an Object class.", + "operationId": "schema.objects.properties.add", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Property" + } + } + ], + "responses": { + "200": { + "description": "Added the property.", + "schema": { + "$ref": "#/definitions/Property" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid property.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.manipulate.meta" + ] + } + }, + "/schema/{className}/shards": { + "get": { + "description": "Get the status of every shard in the cluster.", + "tags": [ + "schema" + ], + "summary": "Get the shards status of an Object class", + "operationId": "schema.objects.shards.get", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "tenant", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Found the status of the shards, returned as body", + "schema": { + "$ref": "#/definitions/ShardStatusList" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "This class does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.get.meta" + ] + } + }, + "/schema/{className}/shards/{shardName}": { + "put": { + "description": "Update a shard status for a collection. For example, a shard may have been marked as ` + "`" + `READONLY` + "`" + ` because its disk was full. After providing more disk space, use this endpoint to set the shard status to ` + "`" + `READY` + "`" + ` again. 
There is also a convenience function in each client to set the status of all shards of a collection.", + "tags": [ + "schema" + ], + "summary": "Update a shard status.", + "operationId": "schema.objects.shards.update", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "shardName", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ShardStatus" + } + } + ], + "responses": { + "200": { + "description": "Shard status was updated successfully", + "schema": { + "$ref": "#/definitions/ShardStatus" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Shard to be updated does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid update attempt", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.local.manipulate.meta" + ] + } + }, + "/schema/{className}/tenants": { + "get": { + "description": "get all tenants from a specific class", + "tags": [ + "schema" + ], + "summary": "Get the list of tenants.", + "operationId": "tenants.get", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "boolean", + "default": true, + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency", + "name": "consistency", + "in": "header" + } + ], + "responses": { + "200": { + "description": "tenants from specified class.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "put": { + "description": "Update tenant of a specific class", + "tags": [ + "schema" + ], + "summary": "Update a tenant.", + "operationId": "tenants.update", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + } + ], + "responses": { + "200": { + "description": "Updated tenants of the specified class", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "post": { + "description": "Create a new tenant for a collection. 
Multi-tenancy must be enabled in the collection definition.", + "tags": [ + "schema" + ], + "summary": "Create a new tenant", + "operationId": "tenants.create", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + } + ], + "responses": { + "200": { + "description": "Added new tenants to the specified class", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "delete": { + "description": "delete tenants from a specific class", + "tags": [ + "schema" + ], + "operationId": "tenants.delete", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "name": "tenants", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + ], + "responses": { + "200": { + "description": "Deleted tenants from specified class." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/schema/{className}/tenants/{tenantName}": { + "get": { + "description": "get a specific tenant for the given class", + "tags": [ + "schema" + ], + "summary": "Get a specific tenant", + "operationId": "tenants.get.one", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "tenantName", + "in": "path", + "required": true + }, + { + "type": "boolean", + "default": true, + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency", + "name": "consistency", + "in": "header" + } + ], + "responses": { + "200": { + "description": "load the tenant given the specified class", + "schema": { + "$ref": "#/definitions/Tenant" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Tenant not found" + }, + "422": { + "description": "Invalid tenant or class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "head": { + "description": "Check if a tenant exists for a specific class", + "tags": [ + "schema" + ], + "summary": "Check whether a tenant exists", + "operationId": "tenant.exists", + "parameters": [ + { + "type": "string", + "name": "className", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "tenantName", + "in": "path", + "required": true + }, + { + "type": "boolean", + "default": true, + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency", + "name": "consistency", + "in": "header" + } + ], + "responses": { + "200": { + "description": "The tenant exists in the specified class" + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Tenant not found" + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/tasks": { + "get": { + "tags": [ + "distributedTasks" + ], + "summary": "Lists all distributed tasks in the cluster.", + "operationId": "distributedTasks.get", + "responses": { + "200": { + "description": "Distributed tasks successfully returned", + "schema": { + "$ref": "#/definitions/DistributedTasks" + } + }, + "403": { + "description": "Unauthorized or invalid credentials.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.distributedTasks.get" + ] + } + }, + "/users/db": { + "get": { + "tags": [ + "users" + ], + "summary": "list all db users", + "operationId": "listAllUsers", + "parameters": [ + { + "type": "boolean", + "default": false, + "description": "Whether to include the last used time of the users", + "name": "includeLastUsedTime", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Info about the users", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/DBUserInfo" + } + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.db.list_all" + ] + } + }, + "/users/db/{user_id}": { + "get": { + "tags": [ + "users" + ], + "summary": "get info relevant to user, e.g. username, roles", + "operationId": "getUserInfo", + "parameters": [ + { + "type": "string", + "description": "user id", + "name": "user_id", + "in": "path", + "required": true + }, + { + "type": "boolean", + "default": false, + "description": "Whether to include the last used time of the given user", + "name": "includeLastUsedTime", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Info about the user", + "schema": { + "$ref": "#/definitions/DBUserInfo" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.db.get" + ] + }, + "post": { + "tags": [ + "users" + ], + "summary": "create new user", + "operationId": "createUser", + "parameters": [ + { + "type": "string", + "description": "user id", + "name": "user_id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "createTime": { + "description": "EXPERIMENTAL, DONT USE. THIS WILL BE REMOVED AGAIN. - set the given time as creation time", + "type": "string", + "format": "date-time" + }, + "import": { + "description": "EXPERIMENTAL, DONT USE. THIS WILL BE REMOVED AGAIN. - import api key from static user", + "type": "boolean", + "default": false + } + } + } + } + ], + "responses": { + "201": { + "description": "User created successfully", + "schema": { + "$ref": "#/definitions/UserApiKey" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "409": { + "description": "User already exists", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.db.create" + ] + }, + "delete": { + "tags": [ + "users" + ], + "summary": "Delete User", + "operationId": "deleteUser", + "parameters": [ + { + "type": "string", + "description": "user name", + "name": "user_id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.db.delete" + ] + } + }, + "/users/db/{user_id}/activate": { + "post": { + "tags": [ + "users" + ], + "summary": "activate a deactivated user", + "operationId": "activateUser", + "parameters": [ + { + "type": "string", + "description": "user id", + "name": "user_id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "User successfully activated" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "409": { + "description": "user already activated" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.db.activateUser" + ] + } + }, + "/users/db/{user_id}/deactivate": { + "post": { + "tags": [ + "users" + ], + "summary": "deactivate a user", + "operationId": "deactivateUser", + "parameters": [ + { + "type": "string", + "description": "user id", + "name": "user_id", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "revoke_key": { + "description": "if the key should be revoked when deactivating the user", + "type": "boolean", + "default": false + } + } + } + } + ], + "responses": { + "200": { + "description": "users successfully deactivated" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "409": { + "description": "user already deactivated" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.db.deactivateUser" + ] + } + }, + "/users/db/{user_id}/rotate-key": { + "post": { + "tags": [ + "users" + ], + "summary": "rotate user api key", + "operationId": "rotateUserApiKey", + "parameters": [ + { + "type": "string", + "description": "user id", + "name": "user_id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "ApiKey successfully changed", + "schema": { + "$ref": "#/definitions/UserApiKey" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.db.rotateApiKey" + ] + } + }, + "/users/own-info": { + "get": { + "tags": [ + "users" + ], + "summary": "get info relevant to own user, e.g. username, roles", + "operationId": "getOwnInfo", + "responses": { + "200": { + "description": "Info about the user", + "schema": { + "$ref": "#/definitions/UserOwnInfo" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "x-serviceIds": [ + "weaviate.users.get.own-info" + ] + } + } + }, + "definitions": { + "AdditionalProperties": { + "description": "(Response only) Additional meta information about a single object.", + "type": "object", + "additionalProperties": { + "type": "object" + } + }, + "Alias": { + "description": "Represents the mapping between an alias name and a collection. An alias provides an alternative name for accessing a collection.", + "type": "object", + "properties": { + "alias": { + "description": "The unique name of the alias that serves as an alternative identifier for the collection.", + "type": "string" + }, + "class": { + "description": "The name of the collection (class) to which this alias is mapped.", + "type": "string" + } + } + }, + "AliasResponse": { + "description": "Response object containing a list of alias mappings.", + "type": "object", + "properties": { + "aliases": { + "description": "Array of alias objects, each containing an alias-to-collection mapping.", + "type": "array", + "items": { + "$ref": "#/definitions/Alias" + } + } + } + }, + "AsyncReplicationStatus": { + "description": "The status of the async replication.", + "properties": { + "objectsPropagated": { + "description": "The number of objects propagated in the most recent iteration.", + "type": "number", + "format": "uint64" + }, + "startDiffTimeUnixMillis": { + "description": "The start time of the most recent iteration.", + "type": "number", + "format": "int64" + }, + "targetNode": { + "description": "The target node of the replication, if set, otherwise empty.", + "type": "string" + } + } + }, + "BM25Config": { + "description": "tuning parameters for the BM25 algorithm", + "type": "object", + 
"properties": { + "b": { + "description": "Calibrates term-weight scaling based on the document length (default: 0.75).", + "type": "number", + "format": "float" + }, + "k1": { + "description": "Calibrates term-weight scaling based on the term frequency within a document (default: 1.2).", + "type": "number", + "format": "float" + } + } + }, + "BackupConfig": { + "description": "Backup custom configuration", + "type": "object", + "properties": { + "Bucket": { + "description": "Name of the bucket, container, volume, etc", + "type": "string" + }, + "CPUPercentage": { + "description": "Desired CPU core utilization ranging from 1%-80%", + "type": "integer", + "default": 50, + "maximum": 80, + "minimum": 1, + "x-nullable": false + }, + "ChunkSize": { + "description": "Aimed chunk size, with a minimum of 2MB, default of 128MB, and a maximum of 512MB. The actual chunk size may vary.", + "type": "integer", + "default": 128, + "maximum": 512, + "minimum": 2, + "x-nullable": false + }, + "CompressionLevel": { + "description": "compression level used by compression algorithm", + "type": "string", + "default": "DefaultCompression", + "enum": [ + "DefaultCompression", + "BestSpeed", + "BestCompression" + ], + "x-nullable": false + }, + "Endpoint": { + "description": "name of the endpoint, e.g. s3.amazonaws.com", + "type": "string" + }, + "Path": { + "description": "Path or key within the bucket", + "type": "string" + } + } + }, + "BackupCreateRequest": { + "description": "Request body for creating a backup of a set of classes", + "properties": { + "config": { + "description": "Custom configuration for the backup creation process", + "type": "object", + "$ref": "#/definitions/BackupConfig" + }, + "exclude": { + "description": "List of collections to exclude from the backup creation process. If not set, all collections are included. 
Cannot be used together with ` + "`" + `include` + "`" + `.", + "type": "array", + "items": { + "type": "string" + } + }, + "id": { + "description": "The ID of the backup (required). Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "include": { + "description": "List of collections to include in the backup creation process. If not set, all collections are included. Cannot be used together with ` + "`" + `exclude` + "`" + `.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "BackupCreateResponse": { + "description": "The definition of a backup create response body", + "properties": { + "backend": { + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "type": "string" + }, + "bucket": { + "description": "Name of the bucket, container, volume, etc", + "type": "string" + }, + "classes": { + "description": "The list of classes for which the backup creation process was started", + "type": "array", + "items": { + "type": "string" + } + }, + "error": { + "description": "error message if creation failed", + "type": "string" + }, + "id": { + "description": "The ID of the backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "path": { + "description": "Path within bucket of backup", + "type": "string" + }, + "status": { + "description": "phase of backup creation process", + "type": "string", + "default": "STARTED", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + }, + "BackupCreateStatusResponse": { + "description": "The definition of a backup create metadata", + "properties": { + "backend": { + "description": "Backup backend name e.g. 
filesystem, gcs, s3.", + "type": "string" + }, + "error": { + "description": "error message if creation failed", + "type": "string" + }, + "id": { + "description": "The ID of the backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "path": { + "description": "destination path of backup files proper to selected backend", + "type": "string" + }, + "status": { + "description": "phase of backup creation process", + "type": "string", + "default": "STARTED", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + }, + "BackupListResponse": { + "description": "The definition of a backup create response body", + "type": "array", + "items": { + "$ref": "#/definitions/BackupListResponseItems0" + } + }, + "BackupListResponseItems0": { + "type": "object", + "properties": { + "classes": { + "description": "The list of classes for which the existed backup process", + "type": "array", + "items": { + "type": "string" + } + }, + "id": { + "description": "The ID of the backup. 
Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "status": { + "description": "status of backup process", + "type": "string", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + }, + "BackupRestoreRequest": { + "description": "Request body for restoring a backup for a set of classes", + "properties": { + "config": { + "description": "Custom configuration for the backup restoration process", + "type": "object", + "$ref": "#/definitions/RestoreConfig" + }, + "exclude": { + "description": "List of classes to exclude from the backup restoration process", + "type": "array", + "items": { + "type": "string" + } + }, + "include": { + "description": "List of classes to include in the backup restoration process", + "type": "array", + "items": { + "type": "string" + } + }, + "node_mapping": { + "description": "Allows overriding the node names stored in the backup with different ones. Useful when restoring backups to a different environment.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "overwriteAlias": { + "description": "Allows ovewriting the collection alias if there is a conflict", + "type": "boolean" + } + } + }, + "BackupRestoreResponse": { + "description": "The definition of a backup restore response body", + "properties": { + "backend": { + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "type": "string" + }, + "classes": { + "description": "The list of classes for which the backup restoration process was started", + "type": "array", + "items": { + "type": "string" + } + }, + "error": { + "description": "error message if restoration failed", + "type": "string" + }, + "id": { + "description": "The ID of the backup. 
Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "path": { + "description": "destination path of backup files proper to selected backend", + "type": "string" + }, + "status": { + "description": "phase of backup restoration process", + "type": "string", + "default": "STARTED", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + }, + "BackupRestoreStatusResponse": { + "description": "The definition of a backup restore metadata", + "properties": { + "backend": { + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "type": "string" + }, + "error": { + "description": "error message if restoration failed", + "type": "string" + }, + "id": { + "description": "The ID of the backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "path": { + "description": "destination path of backup files proper to selected backup backend, contains bucket and path", + "type": "string" + }, + "status": { + "description": "phase of backup restoration process", + "type": "string", + "default": "STARTED", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + }, + "BatchDelete": { + "type": "object", + "properties": { + "deletionTimeUnixMilli": { + "description": "Timestamp of deletion in milliseconds since epoch UTC.", + "type": "integer", + "format": "int64", + "x-nullable": true + }, + "dryRun": { + "description": "If true, the call will show which objects would be matched using the specified filter without deleting any objects. 
\u003cbr/\u003e\u003cbr/\u003eDepending on the configured verbosity, you will either receive a count of affected objects, or a list of IDs.", + "type": "boolean", + "default": false + }, + "match": { + "description": "Outlines how to find the objects to be deleted.", + "type": "object", + "properties": { + "class": { + "description": "Class (name) which objects will be deleted.", + "type": "string", + "example": "City" + }, + "where": { + "description": "Filter to limit the objects to be deleted.", + "type": "object", + "$ref": "#/definitions/WhereFilter" + } + } + }, + "output": { + "description": "Controls the verbosity of the output, possible values are: \"minimal\", \"verbose\". Defaults to \"minimal\".", + "type": "string", + "default": "minimal" + } + } + }, + "BatchDeleteMatch": { + "description": "Outlines how to find the objects to be deleted.", + "type": "object", + "properties": { + "class": { + "description": "Class (name) which objects will be deleted.", + "type": "string", + "example": "City" + }, + "where": { + "description": "Filter to limit the objects to be deleted.", + "type": "object", + "$ref": "#/definitions/WhereFilter" + } + } + }, + "BatchDeleteResponse": { + "description": "Delete Objects response.", + "type": "object", + "properties": { + "deletionTimeUnixMilli": { + "description": "Timestamp of deletion in milliseconds since epoch UTC.", + "type": "integer", + "format": "int64", + "x-nullable": true + }, + "dryRun": { + "description": "If true, objects will not be deleted yet, but merely listed. 
Defaults to false.", + "type": "boolean", + "default": false + }, + "match": { + "description": "Outlines how to find the objects to be deleted.", + "type": "object", + "properties": { + "class": { + "description": "Class (name) which objects will be deleted.", + "type": "string", + "example": "City" + }, + "where": { + "description": "Filter to limit the objects to be deleted.", + "type": "object", + "$ref": "#/definitions/WhereFilter" + } + } + }, + "output": { + "description": "Controls the verbosity of the output, possible values are: \"minimal\", \"verbose\". Defaults to \"minimal\".", + "type": "string", + "default": "minimal" + }, + "results": { + "type": "object", + "properties": { + "failed": { + "description": "How many objects should have been deleted but could not be deleted.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "limit": { + "description": "The most amount of objects that can be deleted in a single query, equals QUERY_MAXIMUM_RESULTS.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "matches": { + "description": "How many objects were matched by the filter.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "objects": { + "description": "With output set to \"minimal\" only objects with error occurred will the be described. Successfully deleted objects would be omitted. 
Output set to \"verbose\" will list all of the objets with their respective statuses.", + "type": "array", + "items": { + "$ref": "#/definitions/BatchDeleteResponseResultsObjectsItems0" + } + }, + "successful": { + "description": "How many objects were successfully deleted in this round.", + "type": "number", + "format": "int64", + "x-omitempty": false + } + } + } + } + }, + "BatchDeleteResponseMatch": { + "description": "Outlines how to find the objects to be deleted.", + "type": "object", + "properties": { + "class": { + "description": "Class (name) which objects will be deleted.", + "type": "string", + "example": "City" + }, + "where": { + "description": "Filter to limit the objects to be deleted.", + "type": "object", + "$ref": "#/definitions/WhereFilter" + } + } + }, + "BatchDeleteResponseResults": { + "type": "object", + "properties": { + "failed": { + "description": "How many objects should have been deleted but could not be deleted.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "limit": { + "description": "The most amount of objects that can be deleted in a single query, equals QUERY_MAXIMUM_RESULTS.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "matches": { + "description": "How many objects were matched by the filter.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "objects": { + "description": "With output set to \"minimal\" only objects with error occurred will the be described. Successfully deleted objects would be omitted. 
Output set to \"verbose\" will list all of the objets with their respective statuses.", + "type": "array", + "items": { + "$ref": "#/definitions/BatchDeleteResponseResultsObjectsItems0" + } + }, + "successful": { + "description": "How many objects were successfully deleted in this round.", + "type": "number", + "format": "int64", + "x-omitempty": false + } + } + }, + "BatchDeleteResponseResultsObjectsItems0": { + "description": "Results for this specific Object.", + "format": "object", + "properties": { + "errors": { + "$ref": "#/definitions/ErrorResponse" + }, + "id": { + "description": "ID of the Object.", + "type": "string", + "format": "uuid" + }, + "status": { + "type": "string", + "default": "SUCCESS", + "enum": [ + "SUCCESS", + "DRYRUN", + "FAILED" + ] + } + } + }, + "BatchReference": { + "properties": { + "from": { + "description": "Long-form beacon-style URI to identify the source of the cross-ref including the property name. Should be in the form of weaviate://localhost/\u003ckinds\u003e/\u003cuuid\u003e/\u003cclassName\u003e/\u003cpropertyName\u003e, where \u003ckinds\u003e must be one of 'objects', 'objects' and \u003cclassName\u003e and \u003cpropertyName\u003e must represent the cross-ref property of source class to be used.", + "type": "string", + "format": "uri", + "example": "weaviate://localhost/Zoo/a5d09582-4239-4702-81c9-92a6e0122bb4/hasAnimals" + }, + "tenant": { + "description": "Name of the reference tenant.", + "type": "string" + }, + "to": { + "description": "Short-form URI to point to the cross-ref. 
Should be in the form of weaviate://localhost/\u003cuuid\u003e for the example of a local cross-ref to an object", + "type": "string", + "format": "uri", + "example": "weaviate://localhost/97525810-a9a5-4eb0-858a-71449aeb007f" + } + } + }, + "BatchReferenceResponse": { + "type": "object", + "allOf": [ + { + "$ref": "#/definitions/BatchReference" + }, + { + "properties": { + "result": { + "description": "Results for this specific reference.", + "format": "object", + "properties": { + "errors": { + "$ref": "#/definitions/ErrorResponse" + }, + "status": { + "type": "string", + "default": "SUCCESS", + "enum": [ + "SUCCESS", + "FAILED" + ] + } + } + } + } + } + ] + }, + "BatchReferenceResponseAO1Result": { + "description": "Results for this specific reference.", + "format": "object", + "properties": { + "errors": { + "$ref": "#/definitions/ErrorResponse" + }, + "status": { + "type": "string", + "default": "SUCCESS", + "enum": [ + "SUCCESS", + "FAILED" + ] + } + } + }, + "BatchStats": { + "description": "The summary of a nodes batch queue congestion status.", + "properties": { + "queueLength": { + "description": "How many objects are currently in the batch queue.", + "type": "number", + "format": "int", + "x-nullable": true, + "x-omitempty": true + }, + "ratePerSecond": { + "description": "How many objects are approximately processed from the batch queue per second.", + "type": "number", + "format": "int", + "x-omitempty": false + } + } + }, + "C11yExtension": { + "description": "A resource describing an extension to the contextinoary, containing both the identifier and the definition of the extension", + "properties": { + "concept": { + "description": "The new concept you want to extend. Must be an all-lowercase single word, or a space delimited compound word. 
Examples: 'foobarium', 'my custom concept'", + "type": "string", + "example": "foobarium" + }, + "definition": { + "description": "A list of space-delimited words or a sentence describing what the custom concept is about. Avoid using the custom concept itself. An Example definition for the custom concept 'foobarium': would be 'a naturally occurring element which can only be seen by programmers'", + "type": "string" + }, + "weight": { + "description": "Weight of the definition of the new concept where 1='override existing definition entirely' and 0='ignore custom definition'. Note that if the custom concept is not present in the contextionary yet, the weight cannot be less than 1.", + "type": "number", + "format": "float" + } + } + }, + "C11yNearestNeighbors": { + "description": "C11y function to show the nearest neighbors to a word.", + "type": "array", + "items": { + "$ref": "#/definitions/C11yNearestNeighborsItems0" + } + }, + "C11yNearestNeighborsItems0": { + "type": "object", + "properties": { + "distance": { + "type": "number", + "format": "float" + }, + "word": { + "type": "string" + } + } + }, + "C11yVector": { + "description": "A vector representation of the object in the Contextionary. 
If provided at object creation, this wil take precedence over any vectorizer setting.", + "type": "array", + "items": { + "type": "number", + "format": "float" + } + }, + "C11yVectorBasedQuestion": { + "description": "Receive question based on array of classes, properties and values.", + "type": "array", + "items": { + "$ref": "#/definitions/C11yVectorBasedQuestionItems0" + } + }, + "C11yVectorBasedQuestionItems0": { + "type": "object", + "properties": { + "classProps": { + "description": "Vectorized properties.", + "type": "array", + "maxItems": 300, + "minItems": 300, + "items": { + "$ref": "#/definitions/C11yVectorBasedQuestionItems0ClassPropsItems0" + } + }, + "classVectors": { + "description": "Vectorized classname.", + "type": "array", + "maxItems": 300, + "minItems": 300, + "items": { + "type": "number", + "format": "float" + } + } + } + }, + "C11yVectorBasedQuestionItems0ClassPropsItems0": { + "type": "object", + "properties": { + "propsVectors": { + "type": "array", + "items": { + "type": "number", + "format": "float" + } + }, + "value": { + "description": "String with valuename.", + "type": "string" + } + } + }, + "C11yWordsResponse": { + "description": "An array of available words and contexts.", + "properties": { + "concatenatedWord": { + "description": "Weighted results for all words", + "type": "object", + "properties": { + "concatenatedNearestNeighbors": { + "$ref": "#/definitions/C11yNearestNeighbors" + }, + "concatenatedVector": { + "$ref": "#/definitions/C11yVector" + }, + "concatenatedWord": { + "type": "string" + }, + "singleWords": { + "type": "array", + "items": { + "format": "string" + } + } + } + }, + "individualWords": { + "description": "Weighted results for per individual word", + "type": "array", + "items": { + "$ref": "#/definitions/C11yWordsResponseIndividualWordsItems0" + } + } + } + }, + "C11yWordsResponseConcatenatedWord": { + "description": "Weighted results for all words", + "type": "object", + "properties": { + 
"concatenatedNearestNeighbors": { + "$ref": "#/definitions/C11yNearestNeighbors" + }, + "concatenatedVector": { + "$ref": "#/definitions/C11yVector" + }, + "concatenatedWord": { + "type": "string" + }, + "singleWords": { + "type": "array", + "items": { + "format": "string" + } + } + } + }, + "C11yWordsResponseIndividualWordsItems0": { + "type": "object", + "properties": { + "info": { + "type": "object", + "properties": { + "nearestNeighbors": { + "$ref": "#/definitions/C11yNearestNeighbors" + }, + "vector": { + "$ref": "#/definitions/C11yVector" + } + } + }, + "present": { + "type": "boolean" + }, + "word": { + "type": "string" + } + } + }, + "C11yWordsResponseIndividualWordsItems0Info": { + "type": "object", + "properties": { + "nearestNeighbors": { + "$ref": "#/definitions/C11yNearestNeighbors" + }, + "vector": { + "$ref": "#/definitions/C11yVector" + } + } + }, + "Class": { + "type": "object", + "properties": { + "class": { + "description": "Name of the class (a.k.a. 'collection') (required). Multiple words should be concatenated in CamelCase, e.g. ` + "`" + `ArticleAuthor` + "`" + `.", + "type": "string" + }, + "description": { + "description": "Description of the collection for metadata purposes.", + "type": "string" + }, + "invertedIndexConfig": { + "$ref": "#/definitions/InvertedIndexConfig" + }, + "moduleConfig": { + "description": "Configuration specific to modules in a collection context.", + "type": "object" + }, + "multiTenancyConfig": { + "$ref": "#/definitions/MultiTenancyConfig" + }, + "properties": { + "description": "Define properties of the collection.", + "type": "array", + "items": { + "$ref": "#/definitions/Property" + } + }, + "replicationConfig": { + "$ref": "#/definitions/ReplicationConfig" + }, + "shardingConfig": { + "description": "Manage how the index should be sharded and distributed in the cluster", + "type": "object" + }, + "vectorConfig": { + "description": "Configure named vectors. 
Either use this field or ` + "`" + `vectorizer` + "`" + `, ` + "`" + `vectorIndexType` + "`" + `, and ` + "`" + `vectorIndexConfig` + "`" + ` fields. Available from ` + "`" + `v1.24.0` + "`" + `.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/VectorConfig" + } + }, + "vectorIndexConfig": { + "description": "Vector-index config, that is specific to the type of index selected in vectorIndexType", + "type": "object" + }, + "vectorIndexType": { + "description": "Name of the vector index to use, eg. (HNSW)", + "type": "string" + }, + "vectorizer": { + "description": "Specify how the vectors for this class should be determined. The options are either 'none' - this means you have to import a vector with each object yourself - or the name of a module that provides vectorization capabilities, such as 'text2vec-contextionary'. If left empty, it will use the globally configured default which can itself either be 'none' or a specific module.", + "type": "string" + } + } + }, + "Classification": { + "description": "Manage classifications, trigger them and view status of past classifications.", + "type": "object", + "properties": { + "basedOnProperties": { + "description": "base the text-based classification on these fields (of type text)", + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "description" + ] + }, + "class": { + "description": "class (name) which is used in this classification", + "type": "string", + "example": "City" + }, + "classifyProperties": { + "description": "which ref-property to set as part of the classification", + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "inCountry" + ] + }, + "error": { + "description": "error message if status == failed", + "type": "string", + "default": "", + "example": "classify xzy: something went wrong" + }, + "filters": { + "type": "object", + "properties": { + "sourceWhere": { + "description": "limit the objects to be classified", + "type": 
"object", + "$ref": "#/definitions/WhereFilter" + }, + "targetWhere": { + "description": "Limit the possible sources when using an algorithm which doesn't really on training data, e.g. 'contextual'. When using an algorithm with a training set, such as 'knn', limit the training set instead", + "type": "object", + "$ref": "#/definitions/WhereFilter" + }, + "trainingSetWhere": { + "description": "Limit the training objects to be considered during the classification. Can only be used on types with explicit training sets, such as 'knn'", + "type": "object", + "$ref": "#/definitions/WhereFilter" + } + } + }, + "id": { + "description": "ID to uniquely identify this classification run", + "type": "string", + "format": "uuid", + "example": "ee722219-b8ec-4db1-8f8d-5150bb1a9e0c" + }, + "meta": { + "description": "additional meta information about the classification", + "type": "object", + "$ref": "#/definitions/ClassificationMeta" + }, + "settings": { + "description": "classification-type specific settings", + "type": "object" + }, + "status": { + "description": "status of this classification", + "type": "string", + "enum": [ + "running", + "completed", + "failed" + ], + "example": "running" + }, + "type": { + "description": "which algorithm to use for classifications", + "type": "string" + } + } + }, + "ClassificationFilters": { + "type": "object", + "properties": { + "sourceWhere": { + "description": "limit the objects to be classified", + "type": "object", + "$ref": "#/definitions/WhereFilter" + }, + "targetWhere": { + "description": "Limit the possible sources when using an algorithm which doesn't really on training data, e.g. 'contextual'. When using an algorithm with a training set, such as 'knn', limit the training set instead", + "type": "object", + "$ref": "#/definitions/WhereFilter" + }, + "trainingSetWhere": { + "description": "Limit the training objects to be considered during the classification. 
Can only be used on types with explicit training sets, such as 'knn'", + "type": "object", + "$ref": "#/definitions/WhereFilter" + } + } + }, + "ClassificationMeta": { + "description": "Additional information to a specific classification", + "type": "object", + "properties": { + "completed": { + "description": "time when this classification finished", + "type": "string", + "format": "date-time", + "example": "2017-07-21T17:32:28Z" + }, + "count": { + "description": "number of objects which were taken into consideration for classification", + "type": "integer", + "example": 147 + }, + "countFailed": { + "description": "number of objects which could not be classified - see error message for details", + "type": "integer", + "example": 7 + }, + "countSucceeded": { + "description": "number of objects successfully classified", + "type": "integer", + "example": 140 + }, + "started": { + "description": "time when this classification was started", + "type": "string", + "format": "date-time", + "example": "2017-07-21T17:32:28Z" + } + } + }, + "ClusterStatisticsResponse": { + "description": "The cluster statistics of all of the Weaviate nodes", + "type": "object", + "properties": { + "statistics": { + "type": "array", + "items": { + "$ref": "#/definitions/Statistics" + } + }, + "synchronized": { + "type": "boolean", + "x-omitempty": false + } + } + }, + "DBUserInfo": { + "type": "object", + "required": [ + "userId", + "dbUserType", + "roles", + "active" + ], + "properties": { + "active": { + "description": "activity status of the returned user", + "type": "boolean" + }, + "apiKeyFirstLetters": { + "description": "First 3 letters of the associated API-key", + "type": [ + "string", + "null" + ], + "maxLength": 3 + }, + "createdAt": { + "description": "Date and time in ISO 8601 format (YYYY-MM-DDTHH:MM:SSZ)", + "type": [ + "string", + "null" + ], + "format": "date-time" + }, + "dbUserType": { + "description": "type of the returned user", + "type": "string", + "enum": [ + 
"db_user", + "db_env_user" + ] + }, + "lastUsedAt": { + "description": "Date and time in ISO 8601 format (YYYY-MM-DDTHH:MM:SSZ)", + "type": [ + "string", + "null" + ], + "format": "date-time" + }, + "roles": { + "description": "The role names associated to the user", + "type": "array", + "items": { + "type": "string" + } + }, + "userId": { + "description": "The user id of the given user", + "type": "string" + } + } + }, + "Deprecation": { + "type": "object", + "properties": { + "apiType": { + "description": "Describes which API is effected, usually one of: REST, GraphQL", + "type": "string" + }, + "id": { + "description": "The id that uniquely identifies this particular deprecations (mostly used internally)", + "type": "string" + }, + "locations": { + "description": "The locations within the specified API affected by this deprecation", + "type": "array", + "items": { + "type": "string" + } + }, + "mitigation": { + "description": "User-required object to not be affected by the (planned) removal", + "type": "string" + }, + "msg": { + "description": "What this deprecation is about", + "type": "string" + }, + "plannedRemovalVersion": { + "description": "A best-effort guess of which upcoming version will remove the feature entirely", + "type": "string" + }, + "removedIn": { + "description": "If the feature has already been removed, it was removed in this version", + "type": "string", + "x-nullable": true + }, + "removedTime": { + "description": "If the feature has already been removed, it was removed at this timestamp", + "type": "string", + "format": "date-time", + "x-nullable": true + }, + "sinceTime": { + "description": "The deprecation was introduced in this version", + "type": "string", + "format": "date-time" + }, + "sinceVersion": { + "description": "The deprecation was introduced in this version", + "type": "string" + }, + "status": { + "description": "Whether the problematic API functionality is deprecated (planned to be removed) or already removed", + "type": 
"string" + } + } + }, + "DistributedTask": { + "description": "Distributed task metadata.", + "type": "object", + "properties": { + "error": { + "description": "The high level reason why the task failed.", + "type": "string", + "x-omitempty": true + }, + "finishedAt": { + "description": "The time when the task was finished.", + "type": "string", + "format": "date-time" + }, + "finishedNodes": { + "description": "The nodes that finished the task.", + "type": "array", + "items": { + "type": "string" + } + }, + "id": { + "description": "The ID of the task.", + "type": "string" + }, + "payload": { + "description": "The payload of the task.", + "type": "object" + }, + "startedAt": { + "description": "The time when the task was created.", + "type": "string", + "format": "date-time" + }, + "status": { + "description": "The status of the task.", + "type": "string" + }, + "version": { + "description": "The version of the task.", + "type": "integer" + } + } + }, + "DistributedTasks": { + "description": "Active distributed tasks by namespace.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/definitions/DistributedTask" + } + } + }, + "ErrorResponse": { + "description": "An error response given by Weaviate end-points.", + "type": "object", + "properties": { + "error": { + "type": "array", + "items": { + "$ref": "#/definitions/ErrorResponseErrorItems0" + } + } + } + }, + "ErrorResponseErrorItems0": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + } + }, + "GeoCoordinates": { + "properties": { + "latitude": { + "description": "The latitude of the point on earth in decimal form", + "type": "number", + "format": "float", + "x-nullable": true + }, + "longitude": { + "description": "The longitude of the point on earth in decimal form", + "type": "number", + "format": "float", + "x-nullable": true + } + } + }, + "GetGroupsForRoleOKBodyItems0": { + "type": "object", + "required": [ + "name", + "groupType" + 
], + "properties": { + "groupId": { + "type": "string" + }, + "groupType": { + "$ref": "#/definitions/GroupType" + } + } + }, + "GetUsersForRoleOKBodyItems0": { + "type": "object", + "required": [ + "name", + "userType" + ], + "properties": { + "userId": { + "type": "string" + }, + "userType": { + "$ref": "#/definitions/UserTypeOutput" + } + } + }, + "GraphQLError": { + "description": "An error response caused by a GraphQL query.", + "properties": { + "locations": { + "type": "array", + "items": { + "$ref": "#/definitions/GraphQLErrorLocationsItems0" + } + }, + "message": { + "type": "string" + }, + "path": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "GraphQLErrorLocationsItems0": { + "type": "object", + "properties": { + "column": { + "type": "integer", + "format": "int64" + }, + "line": { + "type": "integer", + "format": "int64" + } + } + }, + "GraphQLQueries": { + "description": "A list of GraphQL queries.", + "type": "array", + "items": { + "$ref": "#/definitions/GraphQLQuery" + } + }, + "GraphQLQuery": { + "description": "GraphQL query based on: http://facebook.github.io/graphql/.", + "type": "object", + "properties": { + "operationName": { + "description": "The name of the operation if multiple exist in the query.", + "type": "string" + }, + "query": { + "description": "Query based on GraphQL syntax.", + "type": "string" + }, + "variables": { + "description": "Additional variables for the query.", + "type": "object" + } + } + }, + "GraphQLResponse": { + "description": "GraphQL based response: http://facebook.github.io/graphql/.", + "properties": { + "data": { + "description": "GraphQL data object.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/JsonObject" + } + }, + "errors": { + "description": "Array with errors.", + "type": "array", + "items": { + "$ref": "#/definitions/GraphQLError" + }, + "x-omitempty": true + } + } + }, + "GraphQLResponses": { + "description": "A list of GraphQL responses.", + 
"type": "array", + "items": { + "$ref": "#/definitions/GraphQLResponse" + } + }, + "GroupType": { + "description": "If the group contains OIDC or database users.", + "type": "string", + "enum": [ + "db", + "oidc" + ] + }, + "InvertedIndexConfig": { + "description": "Configure the inverted index built into Weaviate (default: 60).", + "type": "object", + "properties": { + "bm25": { + "$ref": "#/definitions/BM25Config" + }, + "cleanupIntervalSeconds": { + "description": "Asynchronous index clean up happens every n seconds", + "type": "number", + "format": "int" + }, + "indexNullState": { + "description": "Index each object with the null state (default: 'false').", + "type": "boolean" + }, + "indexPropertyLength": { + "description": "Index length of properties (default: 'false').", + "type": "boolean" + }, + "indexTimestamps": { + "description": "Index each object by its internal timestamps (default: 'false').", + "type": "boolean" + }, + "stopwords": { + "$ref": "#/definitions/StopwordConfig" + }, + "usingBlockMaxWAND": { + "description": "Using BlockMax WAND for query execution (default: 'false', will be 'true' for new collections created after 1.30).", + "type": "boolean" + } + } + }, + "JsonObject": { + "description": "JSON object value.", + "type": "object" + }, + "Link": { + "type": "object", + "properties": { + "documentationHref": { + "description": "weaviate documentation about this resource group", + "type": "string" + }, + "href": { + "description": "target of the link", + "type": "string" + }, + "name": { + "description": "human readable name of the resource group", + "type": "string" + }, + "rel": { + "description": "relationship if both resources are related, e.g. 
'next', 'previous', 'parent', etc.", + "type": "string" + } + } + }, + "Meta": { + "description": "Contains meta information of the current Weaviate instance.", + "type": "object", + "properties": { + "grpcMaxMessageSize": { + "description": "Max message size for GRPC connection in bytes.", + "type": "integer" + }, + "hostname": { + "description": "The url of the host.", + "type": "string", + "format": "url" + }, + "modules": { + "description": "Module-specific meta information.", + "type": "object" + }, + "version": { + "description": "The Weaviate server version.", + "type": "string" + } + } + }, + "MultiTenancyConfig": { + "description": "Configuration related to multi-tenancy within a class", + "properties": { + "autoTenantActivation": { + "description": "Existing tenants should (not) be turned HOT implicitly when they are accessed and in another activity status (default: false).", + "type": "boolean", + "x-omitempty": false + }, + "autoTenantCreation": { + "description": "Nonexistent tenants should (not) be created implicitly (default: false).", + "type": "boolean", + "x-omitempty": false + }, + "enabled": { + "description": "Whether or not multi-tenancy is enabled for this class (default: false).", + "type": "boolean", + "x-omitempty": false + } + } + }, + "MultipleRef": { + "description": "Multiple instances of references to other objects.", + "type": "array", + "items": { + "$ref": "#/definitions/SingleRef" + } + }, + "NestedProperty": { + "type": "object", + "properties": { + "dataType": { + "type": "array", + "items": { + "type": "string" + } + }, + "description": { + "type": "string" + }, + "indexFilterable": { + "type": "boolean", + "x-nullable": true + }, + "indexRangeFilters": { + "type": "boolean", + "x-nullable": true + }, + "indexSearchable": { + "type": "boolean", + "x-nullable": true + }, + "name": { + "type": "string" + }, + "nestedProperties": { + "description": "The properties of the nested object(s). 
Applies to object and object[] data types.", + "type": "array", + "items": { + "$ref": "#/definitions/NestedProperty" + }, + "x-omitempty": true + }, + "tokenization": { + "type": "string", + "enum": [ + "word", + "lowercase", + "whitespace", + "field", + "trigram", + "gse", + "kagome_kr", + "kagome_ja", + "gse_ch" + ] + } + } + }, + "NodeShardStatus": { + "description": "The definition of a node shard status response body", + "properties": { + "asyncReplicationStatus": { + "description": "The status of the async replication.", + "type": "array", + "items": { + "$ref": "#/definitions/AsyncReplicationStatus" + } + }, + "class": { + "description": "The name of shard's class.", + "type": "string", + "x-omitempty": false + }, + "compressed": { + "description": "The status of vector compression/quantization.", + "format": "boolean", + "x-omitempty": false + }, + "loaded": { + "description": "The load status of the shard.", + "type": "boolean", + "x-omitempty": false + }, + "name": { + "description": "The name of the shard.", + "type": "string", + "x-omitempty": false + }, + "numberOfReplicas": { + "description": "Number of replicas for the shard.", + "type": [ + "integer", + "null" + ], + "format": "int64", + "x-omitempty": true + }, + "objectCount": { + "description": "The number of objects in shard.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "replicationFactor": { + "description": "Minimum number of replicas for the shard.", + "type": [ + "integer", + "null" + ], + "format": "int64", + "x-omitempty": true + }, + "vectorIndexingStatus": { + "description": "The status of the vector indexing process.", + "format": "string", + "x-omitempty": false + }, + "vectorQueueLength": { + "description": "The length of the vector indexing queue.", + "type": "number", + "format": "int64", + "x-omitempty": false + } + } + }, + "NodeStats": { + "description": "The summary of Weaviate's statistics.", + "properties": { + "objectCount": { + "description": 
"The total number of objects in DB.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "shardCount": { + "description": "The count of Weaviate's shards. To see this value, set ` + "`" + `output` + "`" + ` to ` + "`" + `verbose` + "`" + `.", + "type": "number", + "format": "int", + "x-omitempty": false + } + } + }, + "NodeStatus": { + "description": "The definition of a backup node status response body", + "properties": { + "batchStats": { + "description": "Weaviate batch statistics.", + "type": "object", + "$ref": "#/definitions/BatchStats" + }, + "gitHash": { + "description": "The gitHash of Weaviate.", + "type": "string" + }, + "name": { + "description": "The name of the node.", + "type": "string" + }, + "shards": { + "description": "The list of the shards with it's statistics.", + "type": "array", + "items": { + "$ref": "#/definitions/NodeShardStatus" + } + }, + "stats": { + "description": "Weaviate overall statistics.", + "type": "object", + "$ref": "#/definitions/NodeStats" + }, + "status": { + "description": "Node's status.", + "type": "string", + "default": "HEALTHY", + "enum": [ + "HEALTHY", + "UNHEALTHY", + "UNAVAILABLE", + "TIMEOUT" + ] + }, + "version": { + "description": "The version of Weaviate.", + "type": "string" + } + } + }, + "NodesStatusResponse": { + "description": "The status of all of the Weaviate nodes", + "type": "object", + "properties": { + "nodes": { + "type": "array", + "items": { + "$ref": "#/definitions/NodeStatus" + } + } + } + }, + "Object": { + "type": "object", + "properties": { + "additional": { + "$ref": "#/definitions/AdditionalProperties" + }, + "class": { + "description": "Class of the Object, defined in the schema.", + "type": "string" + }, + "creationTimeUnix": { + "description": "(Response only) Timestamp of creation of this object in milliseconds since epoch UTC.", + "type": "integer", + "format": "int64" + }, + "id": { + "description": "ID of the Object.", + "type": "string", + "format": "uuid" + }, 
+ "lastUpdateTimeUnix": { + "description": "(Response only) Timestamp of the last object update in milliseconds since epoch UTC.", + "type": "integer", + "format": "int64" + }, + "properties": { + "$ref": "#/definitions/PropertySchema" + }, + "tenant": { + "description": "Name of the Objects tenant.", + "type": "string" + }, + "vector": { + "description": "This field returns vectors associated with the Object. C11yVector, Vector or Vectors values are possible.", + "$ref": "#/definitions/C11yVector" + }, + "vectorWeights": { + "$ref": "#/definitions/VectorWeights" + }, + "vectors": { + "description": "This field returns vectors associated with the Object.", + "$ref": "#/definitions/Vectors" + } + } + }, + "ObjectsGetResponse": { + "type": "object", + "allOf": [ + { + "$ref": "#/definitions/Object" + }, + { + "properties": { + "deprecations": { + "type": "array", + "items": { + "$ref": "#/definitions/Deprecation" + } + } + } + }, + { + "properties": { + "result": { + "description": "Results for this specific Object.", + "format": "object", + "properties": { + "errors": { + "$ref": "#/definitions/ErrorResponse" + }, + "status": { + "type": "string", + "default": "SUCCESS", + "enum": [ + "SUCCESS", + "FAILED" + ] + } + } + } + } + } + ] + }, + "ObjectsGetResponseAO2Result": { + "description": "Results for this specific Object.", + "format": "object", + "properties": { + "errors": { + "$ref": "#/definitions/ErrorResponse" + }, + "status": { + "type": "string", + "default": "SUCCESS", + "enum": [ + "SUCCESS", + "FAILED" + ] + } + } + }, + "ObjectsListResponse": { + "description": "List of Objects.", + "type": "object", + "properties": { + "deprecations": { + "type": "array", + "items": { + "$ref": "#/definitions/Deprecation" + } + }, + "objects": { + "description": "The actual list of Objects.", + "type": "array", + "items": { + "$ref": "#/definitions/Object" + } + }, + "totalResults": { + "description": "The total number of Objects for the query. 
The number of items in a response may be smaller due to paging.", + "type": "integer", + "format": "int64" + } + } + }, + "PatchDocumentAction": { + "description": "Either a JSONPatch document as defined by RFC 6902 (from, op, path, value), or a merge document (RFC 7396).", + "required": [ + "op", + "path" + ], + "properties": { + "from": { + "description": "A string containing a JSON Pointer value.", + "type": "string" + }, + "merge": { + "$ref": "#/definitions/Object" + }, + "op": { + "description": "The operation to be performed.", + "type": "string", + "enum": [ + "add", + "remove", + "replace", + "move", + "copy", + "test" + ] + }, + "path": { + "description": "A JSON-Pointer.", + "type": "string" + }, + "value": { + "description": "The value to be used within the operations.", + "type": "object" + } + } + }, + "PatchDocumentObject": { + "description": "Either a JSONPatch document as defined by RFC 6902 (from, op, path, value), or a merge document (RFC 7396).", + "required": [ + "op", + "path" + ], + "properties": { + "from": { + "description": "A string containing a JSON Pointer value.", + "type": "string" + }, + "merge": { + "$ref": "#/definitions/Object" + }, + "op": { + "description": "The operation to be performed.", + "type": "string", + "enum": [ + "add", + "remove", + "replace", + "move", + "copy", + "test" + ] + }, + "path": { + "description": "A JSON-Pointer.", + "type": "string" + }, + "value": { + "description": "The value to be used within the operations.", + "type": "object" + } + } + }, + "PeerUpdate": { + "description": "A single peer in the network.", + "properties": { + "id": { + "description": "The session ID of the peer.", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "Human readable name.", + "type": "string" + }, + "schemaHash": { + "description": "The latest known hash of the peer's schema.", + "type": "string" + }, + "uri": { + "description": "The location where the peer is exposed to the internet.", + "type": 
"string", + "format": "uri" + } + } + }, + "PeerUpdateList": { + "description": "List of known peers.", + "type": "array", + "items": { + "$ref": "#/definitions/PeerUpdate" + } + }, + "Permission": { + "description": "permissions attached to a role.", + "type": "object", + "required": [ + "action" + ], + "properties": { + "action": { + "description": "allowed actions in weaviate.", + "type": "string", + "enum": [ + "manage_backups", + "read_cluster", + "create_data", + "read_data", + "update_data", + "delete_data", + "read_nodes", + "create_roles", + "read_roles", + "update_roles", + "delete_roles", + "create_collections", + "read_collections", + "update_collections", + "delete_collections", + "assign_and_revoke_users", + "create_users", + "read_users", + "update_users", + "delete_users", + "create_tenants", + "read_tenants", + "update_tenants", + "delete_tenants", + "create_replicate", + "read_replicate", + "update_replicate", + "delete_replicate", + "create_aliases", + "read_aliases", + "update_aliases", + "delete_aliases", + "assign_and_revoke_groups", + "read_groups" + ] + }, + "aliases": { + "description": "Resource definition for alias-related actions and permissions. Used to specify which aliases and collections can be accessed or modified.", + "type": "object", + "properties": { + "alias": { + "description": "A string that specifies which aliases this permission applies to. Can be an exact alias name or a regex pattern. The default value ` + "`" + `*` + "`" + ` applies the permission to all aliases.", + "type": "string", + "default": "*" + }, + "collection": { + "description": "A string that specifies which collections this permission applies to. Can be an exact collection name or a regex pattern. 
The default value ` + "`" + `*` + "`" + ` applies the permission to all collections.", + "type": "string", + "default": "*" + } + } + }, + "backups": { + "description": "resources applicable for backup actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "collections": { + "description": "resources applicable for collection and/or tenant actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "data": { + "description": "resources applicable for data actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "object": { + "description": "string or regex. if a specific object ID, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "tenant": { + "description": "string or regex. if a specific tenant name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "groups": { + "description": "Resources applicable for group actions.", + "type": "object", + "properties": { + "group": { + "description": "A string that specifies which groups this permission applies to. Can be an exact group name or a regex pattern. The default value ` + "`" + `*` + "`" + ` applies the permission to all groups.", + "type": "string", + "default": "*" + }, + "groupType": { + "$ref": "#/definitions/GroupType" + } + } + }, + "nodes": { + "description": "resources applicable for cluster actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. 
if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "verbosity": { + "description": "whether to allow (verbose) returning shards and stats data in the response", + "type": "string", + "default": "minimal", + "enum": [ + "verbose", + "minimal" + ] + } + } + }, + "replicate": { + "description": "resources applicable for replicate actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "shard": { + "description": "string or regex. if a specific shard name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "roles": { + "description": "resources applicable for role actions", + "type": "object", + "properties": { + "role": { + "description": "string or regex. if a specific role name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "scope": { + "description": "set the scope for the manage role permission", + "type": "string", + "default": "match", + "enum": [ + "all", + "match" + ] + } + } + }, + "tenants": { + "description": "resources applicable for tenant actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "tenant": { + "description": "string or regex. if a specific tenant name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "users": { + "description": "resources applicable for user actions", + "type": "object", + "properties": { + "users": { + "description": "string or regex. if a specific name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + } + } + }, + "PermissionAliases": { + "description": "Resource definition for alias-related actions and permissions. 
Used to specify which aliases and collections can be accessed or modified.", + "type": "object", + "properties": { + "alias": { + "description": "A string that specifies which aliases this permission applies to. Can be an exact alias name or a regex pattern. The default value ` + "`" + `*` + "`" + ` applies the permission to all aliases.", + "type": "string", + "default": "*" + }, + "collection": { + "description": "A string that specifies which collections this permission applies to. Can be an exact collection name or a regex pattern. The default value ` + "`" + `*` + "`" + ` applies the permission to all collections.", + "type": "string", + "default": "*" + } + } + }, + "PermissionBackups": { + "description": "resources applicable for backup actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "PermissionCollections": { + "description": "resources applicable for collection and/or tenant actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "PermissionData": { + "description": "resources applicable for data actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "object": { + "description": "string or regex. if a specific object ID, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "tenant": { + "description": "string or regex. 
if a specific tenant name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "PermissionGroups": { + "description": "Resources applicable for group actions.", + "type": "object", + "properties": { + "group": { + "description": "A string that specifies which groups this permission applies to. Can be an exact group name or a regex pattern. The default value ` + "`" + `*` + "`" + ` applies the permission to all groups.", + "type": "string", + "default": "*" + }, + "groupType": { + "$ref": "#/definitions/GroupType" + } + } + }, + "PermissionNodes": { + "description": "resources applicable for cluster actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "verbosity": { + "description": "whether to allow (verbose) returning shards and stats data in the response", + "type": "string", + "default": "minimal", + "enum": [ + "verbose", + "minimal" + ] + } + } + }, + "PermissionReplicate": { + "description": "resources applicable for replicate actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "shard": { + "description": "string or regex. if a specific shard name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "PermissionRoles": { + "description": "resources applicable for role actions", + "type": "object", + "properties": { + "role": { + "description": "string or regex. 
if a specific role name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "scope": { + "description": "set the scope for the manage role permission", + "type": "string", + "default": "match", + "enum": [ + "all", + "match" + ] + } + } + }, + "PermissionTenants": { + "description": "resources applicable for tenant actions", + "type": "object", + "properties": { + "collection": { + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + }, + "tenant": { + "description": "string or regex. if a specific tenant name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "PermissionUsers": { + "description": "resources applicable for user actions", + "type": "object", + "properties": { + "users": { + "description": "string or regex. if a specific name, if left empty it will be ALL or *", + "type": "string", + "default": "*" + } + } + }, + "PhoneNumber": { + "properties": { + "countryCode": { + "description": "Read-only. The numerical country code (e.g. 49)", + "type": "number", + "format": "uint64" + }, + "defaultCountry": { + "description": "Optional. The ISO 3166-1 alpha-2 country code. This is used to figure out the correct countryCode and international format if only a national number (e.g. 0123 4567) is provided", + "type": "string" + }, + "input": { + "description": "The raw input as the phone number is present in your raw data set. It will be parsed into the standardized formats if valid.", + "type": "string" + }, + "internationalFormatted": { + "description": "Read-only. Parsed result in the international format (e.g. +49 123 ...)", + "type": "string" + }, + "national": { + "description": "Read-only. The numerical representation of the national part", + "type": "number", + "format": "uint64" + }, + "nationalFormatted": { + "description": "Read-only. Parsed result in the national format (e.g. 
0123 456789)", + "type": "string" + }, + "valid": { + "description": "Read-only. Indicates whether the parsed number is a valid phone number", + "type": "boolean" + } + } + }, + "Principal": { + "type": "object", + "properties": { + "groups": { + "type": "array", + "items": { + "type": "string" + } + }, + "userType": { + "$ref": "#/definitions/UserTypeInput" + }, + "username": { + "description": "The username that was extracted either from the authentication information", + "type": "string" + } + } + }, + "Property": { + "type": "object", + "properties": { + "dataType": { + "description": "Data type of the property (required). If it starts with a capital (for example Person), may be a reference to another type.", + "type": "array", + "items": { + "type": "string" + } + }, + "description": { + "description": "Description of the property.", + "type": "string" + }, + "indexFilterable": { + "description": "Whether to include this property in the filterable, Roaring Bitmap index. If ` + "`" + `false` + "`" + `, this property cannot be used in ` + "`" + `where` + "`" + ` filters. \u003cbr/\u003e\u003cbr/\u003eNote: Unrelated to vectorization behavior.", + "type": "boolean", + "x-nullable": true + }, + "indexInverted": { + "description": "(Deprecated). Whether to include this property in the inverted index. If ` + "`" + `false` + "`" + `, this property cannot be used in ` + "`" + `where` + "`" + ` filters, ` + "`" + `bm25` + "`" + ` or ` + "`" + `hybrid` + "`" + ` search. \u003cbr/\u003e\u003cbr/\u003eUnrelated to vectorization behavior (deprecated as of v1.19; use indexFilterable or/and indexSearchable instead)", + "type": "boolean", + "x-nullable": true + }, + "indexRangeFilters": { + "description": "Whether to include this property in the filterable, range-based Roaring Bitmap index. Provides better performance for range queries compared to filterable index in large datasets. 
Applicable only to properties of data type int, number, date.", + "type": "boolean", + "x-nullable": true + }, + "indexSearchable": { + "description": "Optional. Should this property be indexed in the inverted index. Defaults to true. Applicable only to properties of data type text and text[]. If you choose false, you will not be able to use this property in bm25 or hybrid search. This property has no affect on vectorization decisions done by modules", + "type": "boolean", + "x-nullable": true + }, + "moduleConfig": { + "description": "Configuration specific to modules this Weaviate instance has installed", + "type": "object" + }, + "name": { + "description": "The name of the property (required). Multiple words should be concatenated in camelCase, e.g. ` + "`" + `nameOfAuthor` + "`" + `.", + "type": "string" + }, + "nestedProperties": { + "description": "The properties of the nested object(s). Applies to object and object[] data types.", + "type": "array", + "items": { + "$ref": "#/definitions/NestedProperty" + }, + "x-omitempty": true + }, + "tokenization": { + "description": "Determines tokenization of the property as separate words or whole field. Optional. Applies to text and text[] data types. Allowed values are ` + "`" + `word` + "`" + ` (default; splits on any non-alphanumerical, lowercases), ` + "`" + `lowercase` + "`" + ` (splits on white spaces, lowercases), ` + "`" + `whitespace` + "`" + ` (splits on white spaces), ` + "`" + `field` + "`" + ` (trims). Not supported for remaining data types", + "type": "string", + "enum": [ + "word", + "lowercase", + "whitespace", + "field", + "trigram", + "gse", + "kagome_kr", + "kagome_ja", + "gse_ch" + ] + } + } + }, + "PropertySchema": { + "description": "Names and values of an individual property. 
A returned response may also contain additional metadata, such as from classification or feature projection.", + "type": "object" + }, + "RaftStatistics": { + "description": "The definition of Raft statistics.", + "properties": { + "appliedIndex": { + "type": "string" + }, + "commitIndex": { + "type": "string" + }, + "fsmPending": { + "type": "string" + }, + "lastContact": { + "type": "string" + }, + "lastLogIndex": { + "type": "string" + }, + "lastLogTerm": { + "type": "string" + }, + "lastSnapshotIndex": { + "type": "string" + }, + "lastSnapshotTerm": { + "type": "string" + }, + "latestConfiguration": { + "description": "Weaviate Raft nodes.", + "type": "object" + }, + "latestConfigurationIndex": { + "type": "string" + }, + "numPeers": { + "type": "string" + }, + "protocolVersion": { + "type": "string" + }, + "protocolVersionMax": { + "type": "string" + }, + "protocolVersionMin": { + "type": "string" + }, + "snapshotVersionMax": { + "type": "string" + }, + "snapshotVersionMin": { + "type": "string" + }, + "state": { + "type": "string" + }, + "term": { + "type": "string" + } + } + }, + "ReferenceMetaClassification": { + "description": "This meta field contains additional info about the classified reference property", + "properties": { + "closestLosingDistance": { + "description": "The lowest distance of a neighbor in the losing group. Optional. 
If k equals the size of the winning group, there is no losing group", + "type": "number", + "format": "float32", + "x-nullable": true + }, + "closestOverallDistance": { + "description": "The lowest distance of any neighbor, regardless of whether they were in the winning or losing group", + "type": "number", + "format": "float32" + }, + "closestWinningDistance": { + "description": "Closest distance of a neighbor from the winning group", + "type": "number", + "format": "float32" + }, + "losingCount": { + "description": "size of the losing group, can be 0 if the winning group size equals k", + "type": "number", + "format": "int64" + }, + "losingDistance": { + "description": "deprecated - do not use, to be removed in 0.23.0", + "type": "number", + "format": "float32", + "x-nullable": true + }, + "meanLosingDistance": { + "description": "Mean distance of all neighbors from the losing group. Optional. If k equals the size of the winning group, there is no losing group.", + "type": "number", + "format": "float32", + "x-nullable": true + }, + "meanWinningDistance": { + "description": "Mean distance of all neighbors from the winning group", + "type": "number", + "format": "float32" + }, + "overallCount": { + "description": "overall neighbors checked as part of the classification. 
In most cases this will equal k, but could be lower than k - for example if not enough data was present", + "type": "number", + "format": "int64" + }, + "winningCount": { + "description": "size of the winning group, a number between 1..k", + "type": "number", + "format": "int64" + }, + "winningDistance": { + "description": "deprecated - do not use, to be removed in 0.23.0", + "type": "number", + "format": "float32" + } + } + }, + "ReplicationConfig": { + "description": "Configure how replication is executed in a cluster", + "type": "object", + "properties": { + "asyncEnabled": { + "description": "Enable asynchronous replication (default: false).", + "type": "boolean", + "x-omitempty": false + }, + "deletionStrategy": { + "description": "Conflict resolution strategy for deleted objects.", + "type": "string", + "enum": [ + "NoAutomatedResolution", + "DeleteOnConflict", + "TimeBasedResolution" + ], + "x-omitempty": true + }, + "factor": { + "description": "Number of times a class is replicated (default: 1).", + "type": "integer" + } + } + }, + "ReplicationDeleteReplicaRequest": { + "description": "Specifies the parameters required to permanently delete a specific shard replica from a particular node. This action will remove the replica's data from the node.", + "type": "object", + "required": [ + "node", + "collection", + "shard" + ], + "properties": { + "collection": { + "description": "The name of the collection to which the shard replica belongs.", + "type": "string" + }, + "node": { + "description": "The name of the Weaviate node from which the shard replica will be deleted.", + "type": "string" + }, + "shard": { + "description": "The ID of the shard whose replica is to be deleted.", + "type": "string" + } + } + }, + "ReplicationDisableReplicaRequest": { + "description": "Specifies the parameters required to mark a specific shard replica as inactive (soft-delete) on a particular node. 
This action typically prevents the replica from serving requests but does not immediately remove its data.", + "type": "object", + "required": [ + "node", + "collection", + "shard" + ], + "properties": { + "collection": { + "description": "The name of the collection to which the shard replica belongs.", + "type": "string" + }, + "node": { + "description": "The name of the Weaviate node hosting the shard replica that is to be disabled.", + "type": "string" + }, + "shard": { + "description": "The ID of the shard whose replica is to be disabled.", + "type": "string" + } + } + }, + "ReplicationReplicateDetailsReplicaResponse": { + "description": "Provides a comprehensive overview of a specific replication operation, detailing its unique ID, the involved collection, shard, source and target nodes, transfer type, current status, and optionally, its status history.", + "required": [ + "id", + "shard", + "sourceNode", + "targetNode", + "collection", + "status", + "type" + ], + "properties": { + "collection": { + "description": "The name of the collection to which the shard being replicated belongs.", + "type": "string" + }, + "id": { + "description": "The unique identifier (ID) of this specific replication operation.", + "type": "string", + "format": "uuid" + }, + "scheduledForCancel": { + "description": "Whether the replica operation is scheduled for cancellation.", + "type": "boolean" + }, + "scheduledForDelete": { + "description": "Whether the replica operation is scheduled for deletion.", + "type": "boolean" + }, + "shard": { + "description": "The name of the shard involved in this replication operation.", + "type": "string" + }, + "sourceNode": { + "description": "The identifier of the node from which the replica is being moved or copied (the source node).", + "type": "string" + }, + "status": { + "description": "An object detailing the current operational state of the replica movement and any errors encountered.", + "type": "object", + "$ref": 
"#/definitions/ReplicationReplicateDetailsReplicaStatus" + }, + "statusHistory": { + "description": "An array detailing the historical sequence of statuses the replication operation has transitioned through, if requested and available.", + "type": "array", + "items": { + "$ref": "#/definitions/ReplicationReplicateDetailsReplicaStatus" + } + }, + "targetNode": { + "description": "The identifier of the node to which the replica is being moved or copied (the target node).", + "type": "string" + }, + "type": { + "description": "Indicates whether the operation is a 'COPY' (source replica remains) or a 'MOVE' (source replica is removed after successful transfer).", + "type": "string", + "enum": [ + "COPY", + "MOVE" + ] + }, + "uncancelable": { + "description": "Whether the replica operation is uncancelable.", + "type": "boolean" + }, + "whenStartedUnixMs": { + "description": "The UNIX timestamp in ms when the replication operation was initiated. This is an approximate time and so should not be used for precise timing.", + "type": "integer", + "format": "int64" + } + } + }, + "ReplicationReplicateDetailsReplicaStatus": { + "description": "Represents the current or historical status of a shard replica involved in a replication operation, including its operational state and any associated errors.", + "type": "object", + "properties": { + "errors": { + "description": "A list of error messages encountered by this replica during the replication operation, if any.", + "type": "array", + "items": { + "$ref": "#/definitions/ReplicationReplicateDetailsReplicaStatusError" + } + }, + "state": { + "description": "The current operational state of the replica during the replication process.", + "type": "string", + "enum": [ + "REGISTERED", + "HYDRATING", + "FINALIZING", + "DEHYDRATING", + "READY", + "CANCELLED" + ] + }, + "whenStartedUnixMs": { + "description": "The UNIX timestamp in ms when this state was first entered. 
This is an approximate time and so should not be used for precise timing.", + "type": "integer", + "format": "int64" + } + } + }, + "ReplicationReplicateDetailsReplicaStatusError": { + "description": "Represents an error encountered during a replication operation, including its timestamp and a human-readable message.", + "type": "object", + "properties": { + "message": { + "description": "A human-readable message describing the error.", + "type": "string" + }, + "whenErroredUnixMs": { + "description": "The unix timestamp in ms when the error occurred. This is an approximate time and so should not be used for precise timing.", + "type": "integer", + "format": "int64" + } + } + }, + "ReplicationReplicateForceDeleteRequest": { + "description": "Specifies the parameters available when force deleting replication operations.", + "type": "object", + "properties": { + "collection": { + "description": "The name of the collection to which the shard being replicated belongs.", + "type": "string" + }, + "dryRun": { + "description": "If true, the operation will not actually delete anything but will return the expected outcome of the deletion.", + "type": "boolean", + "default": false + }, + "id": { + "description": "The unique identifier (ID) of the replication operation to be forcefully deleted.", + "type": "string", + "format": "uuid" + }, + "node": { + "description": "The name of the target node where the replication operations are registered.", + "type": "string" + }, + "shard": { + "description": "The identifier of the shard involved in the replication operations.", + "type": "string" + } + } + }, + "ReplicationReplicateForceDeleteResponse": { + "description": "Provides the UUIDs that were successfully force deleted as part of the replication operation. 
If dryRun is true, this will return the expected outcome without actually deleting anything.", + "type": "object", + "properties": { + "deleted": { + "description": "The unique identifiers (IDs) of the replication operations that were forcefully deleted.", + "type": "array", + "items": { + "type": "string", + "format": "uuid" + } + }, + "dryRun": { + "description": "Indicates whether the operation was a dry run (true) or an actual deletion (false).", + "type": "boolean" + } + } + }, + "ReplicationReplicateReplicaRequest": { + "description": "Specifies the parameters required to initiate a shard replica movement operation between two nodes for a given collection and shard. This request defines the source and target node, the collection and type of transfer.", + "type": "object", + "required": [ + "sourceNode", + "targetNode", + "collection", + "shard" + ], + "properties": { + "collection": { + "description": "The name of the collection to which the target shard belongs.", + "type": "string" + }, + "shard": { + "description": "The name of the shard whose replica is to be moved or copied.", + "type": "string" + }, + "sourceNode": { + "description": "The name of the Weaviate node currently hosting the shard replica that needs to be moved or copied.", + "type": "string" + }, + "targetNode": { + "description": "The name of the Weaviate node where the new shard replica will be created as part of the movement or copy operation.", + "type": "string" + }, + "type": { + "description": "Specifies the type of replication operation to perform. 'COPY' creates a new replica on the target node while keeping the source replica. 'MOVE' creates a new replica on the target node and then removes the source replica upon successful completion. 
Defaults to 'COPY' if omitted.", + "type": "string", + "default": "COPY", + "enum": [ + "COPY", + "MOVE" + ] + } + } + }, + "ReplicationReplicateReplicaResponse": { + "description": "Contains the unique identifier for a successfully initiated asynchronous replica movement operation. This ID can be used to track the progress of the operation.", + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "description": "The unique identifier (ID) assigned to the registered replication operation.", + "type": "string", + "format": "uuid" + } + } + }, + "ReplicationShardReplicas": { + "description": "Represents a shard and lists the nodes that currently host its replicas.", + "type": "object", + "properties": { + "replicas": { + "type": "array", + "items": { + "type": "string" + } + }, + "shard": { + "type": "string" + } + } + }, + "ReplicationShardingState": { + "description": "Details the sharding layout for a specific collection, mapping each shard to its set of replicas across the cluster.", + "type": "object", + "properties": { + "collection": { + "description": "The name of the collection.", + "type": "string" + }, + "shards": { + "description": "An array detailing each shard within the collection and the nodes hosting its replicas.", + "type": "array", + "items": { + "$ref": "#/definitions/ReplicationShardReplicas" + } + } + } + }, + "ReplicationShardingStateResponse": { + "description": "Provides the detailed sharding state for one or more collections, including the distribution of shards and their replicas across the cluster nodes.", + "type": "object", + "properties": { + "shardingState": { + "$ref": "#/definitions/ReplicationShardingState" + } + } + }, + "RestoreConfig": { + "description": "Backup custom configuration", + "type": "object", + "properties": { + "Bucket": { + "description": "Name of the bucket, container, volume, etc", + "type": "string" + }, + "CPUPercentage": { + "description": "Desired CPU core utilization ranging from 
1%-80%", + "type": "integer", + "default": 50, + "maximum": 80, + "minimum": 1, + "x-nullable": false + }, + "Endpoint": { + "description": "name of the endpoint, e.g. s3.amazonaws.com", + "type": "string" + }, + "Path": { + "description": "Path within the bucket", + "type": "string" + }, + "rolesOptions": { + "description": "How roles should be restored", + "type": "string", + "default": "noRestore", + "enum": [ + "noRestore", + "all" + ] + }, + "usersOptions": { + "description": "How users should be restored", + "type": "string", + "default": "noRestore", + "enum": [ + "noRestore", + "all" + ] + } + } + }, + "Role": { + "type": "object", + "required": [ + "name", + "permissions" + ], + "properties": { + "name": { + "description": "role name", + "type": "string" + }, + "permissions": { + "type": "array", + "items": { + "description": "list of permissions (level, action, resource)", + "type": "object", + "$ref": "#/definitions/Permission" + } + } + } + }, + "RolesListResponse": { + "description": "list of roles", + "type": "array", + "items": { + "$ref": "#/definitions/Role" + } + }, + "Schema": { + "description": "Definitions of semantic schemas (also see: https://github.com/weaviate/weaviate-semantic-schemas).", + "type": "object", + "properties": { + "classes": { + "description": "Semantic classes that are available.", + "type": "array", + "items": { + "$ref": "#/definitions/Class" + } + }, + "maintainer": { + "description": "Email of the maintainer.", + "type": "string", + "format": "email" + }, + "name": { + "description": "Name of the schema.", + "type": "string" + } + } + }, + "SchemaClusterStatus": { + "description": "Indicates the health of the schema in a cluster.", + "type": "object", + "properties": { + "error": { + "description": "Contains the sync check error if one occurred", + "type": "string", + "x-omitempty": true + }, + "healthy": { + "description": "True if the cluster is in sync, false if there is an issue (see error).", + "type": "boolean", + 
"x-omitempty": false + }, + "hostname": { + "description": "Hostname of the coordinating node, i.e. the one that received the cluster. This can be useful information if the error message contains phrases such as 'other nodes agree, but local does not', etc.", + "type": "string" + }, + "ignoreSchemaSync": { + "description": "The cluster check at startup can be ignored (to recover from an out-of-sync situation).", + "type": "boolean", + "x-omitempty": false + }, + "nodeCount": { + "description": "Number of nodes that participated in the sync check", + "type": "number", + "format": "int" + } + } + }, + "SchemaHistory": { + "description": "This is an open object, with OpenAPI Specification 3.0 this will be more detailed. See Weaviate docs for more info. In the future this will become a key/value OR a SingleRef definition.", + "type": "object" + }, + "ShardStatus": { + "description": "The status of a single shard", + "properties": { + "status": { + "description": "Status of the shard", + "type": "string" + } + } + }, + "ShardStatusGetResponse": { + "description": "Response body of shard status get request", + "properties": { + "name": { + "description": "Name of the shard", + "type": "string" + }, + "status": { + "description": "Status of the shard", + "type": "string" + }, + "vectorQueueSize": { + "description": "Size of the vector queue of the shard", + "type": "integer", + "x-omitempty": false + } + } + }, + "ShardStatusList": { + "description": "The status of all the shards of a Class", + "type": "array", + "items": { + "$ref": "#/definitions/ShardStatusGetResponse" + } + }, + "SingleRef": { + "description": "Either set beacon (direct reference) or set class and schema (concept reference)", + "properties": { + "beacon": { + "description": "If using a direct reference, specify the URI to point to the cross-ref here. 
Should be in the form of weaviate://localhost/\u003cuuid\u003e for the example of a local cross-ref to an object", + "type": "string", + "format": "uri" + }, + "class": { + "description": "If using a concept reference (rather than a direct reference), specify the desired class name here", + "type": "string", + "format": "uri" + }, + "classification": { + "description": "Additional Meta information about classifications if the item was part of one", + "$ref": "#/definitions/ReferenceMetaClassification" + }, + "href": { + "description": "If using a direct reference, this read-only fields provides a link to the referenced resource. If 'origin' is globally configured, an absolute URI is shown - a relative URI otherwise.", + "type": "string", + "format": "uri" + }, + "schema": { + "description": "If using a concept reference (rather than a direct reference), specify the desired properties here", + "$ref": "#/definitions/PropertySchema" + } + } + }, + "Statistics": { + "description": "The definition of node statistics.", + "properties": { + "bootstrapped": { + "type": "boolean" + }, + "candidates": { + "type": "object" + }, + "dbLoaded": { + "type": "boolean" + }, + "initialLastAppliedIndex": { + "type": "number", + "format": "uint64" + }, + "isVoter": { + "type": "boolean" + }, + "lastAppliedIndex": { + "type": "number" + }, + "leaderAddress": { + "type": "object" + }, + "leaderId": { + "type": "object" + }, + "name": { + "description": "The name of the node.", + "type": "string" + }, + "open": { + "type": "boolean" + }, + "raft": { + "description": "Weaviate Raft statistics.", + "type": "object", + "$ref": "#/definitions/RaftStatistics" + }, + "ready": { + "type": "boolean" + }, + "status": { + "description": "Node's status.", + "type": "string", + "default": "HEALTHY", + "enum": [ + "HEALTHY", + "UNHEALTHY", + "UNAVAILABLE", + "TIMEOUT" + ] + } + } + }, + "StopwordConfig": { + "description": "fine-grained control over stopword list usage", + "type": "object", + 
"properties": { + "additions": { + "description": "Stopwords to be considered additionally (default: []). Can be any array of custom strings.", + "type": "array", + "items": { + "type": "string" + } + }, + "preset": { + "description": "Pre-existing list of common words by language (default: 'en'). Options: ['en', 'none'].", + "type": "string" + }, + "removals": { + "description": "Stopwords to be removed from consideration (default: []). Can be any array of custom strings.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "Tenant": { + "description": "attributes representing a single tenant within weaviate", + "type": "object", + "properties": { + "activityStatus": { + "description": "activity status of the tenant's shard. Optional for creating tenant (implicit ` + "`" + `ACTIVE` + "`" + `) and required for updating tenant. For creation, allowed values are ` + "`" + `ACTIVE` + "`" + ` - tenant is fully active and ` + "`" + `INACTIVE` + "`" + ` - tenant is inactive; no actions can be performed on tenant, tenant's files are stored locally. For updating, ` + "`" + `ACTIVE` + "`" + `, ` + "`" + `INACTIVE` + "`" + ` and also ` + "`" + `OFFLOADED` + "`" + ` - as INACTIVE, but files are stored on cloud storage. The following values are read-only and are set by the server for internal use: ` + "`" + `OFFLOADING` + "`" + ` - tenant is transitioning from ACTIVE/INACTIVE to OFFLOADED, ` + "`" + `ONLOADING` + "`" + ` - tenant is transitioning from OFFLOADED to ACTIVE/INACTIVE. 
We still accept deprecated names ` + "`" + `HOT` + "`" + ` (now ` + "`" + `ACTIVE` + "`" + `), ` + "`" + `COLD` + "`" + ` (now ` + "`" + `INACTIVE` + "`" + `), ` + "`" + `FROZEN` + "`" + ` (now ` + "`" + `OFFLOADED` + "`" + `), ` + "`" + `FREEZING` + "`" + ` (now ` + "`" + `OFFLOADING` + "`" + `), ` + "`" + `UNFREEZING` + "`" + ` (now ` + "`" + `ONLOADING` + "`" + `).", + "type": "string", + "enum": [ + "ACTIVE", + "INACTIVE", + "OFFLOADED", + "OFFLOADING", + "ONLOADING", + "HOT", + "COLD", + "FROZEN", + "FREEZING", + "UNFREEZING" + ] + }, + "name": { + "description": "The name of the tenant (required).", + "type": "string" + } + } + }, + "UserApiKey": { + "type": "object", + "required": [ + "apikey" + ], + "properties": { + "apikey": { + "description": "The apikey", + "type": "string" + } + } + }, + "UserOwnInfo": { + "type": "object", + "required": [ + "username" + ], + "properties": { + "groups": { + "description": "The groups associated to the user", + "type": "array", + "items": { + "type": "string" + } + }, + "roles": { + "type": "array", + "items": { + "description": "The roles assigned to own user", + "type": "object", + "$ref": "#/definitions/Role" + } + }, + "username": { + "description": "The username associated with the provided key", + "type": "string" + } + } + }, + "UserTypeInput": { + "description": "the type of user", + "type": "string", + "enum": [ + "db", + "oidc" + ] + }, + "UserTypeOutput": { + "description": "the type of user", + "type": "string", + "enum": [ + "db_user", + "db_env_user", + "oidc" + ] + }, + "Vector": { + "description": "A vector representation of the object. 
If provided at object creation, this wil take precedence over any vectorizer setting.", + "type": "object" + }, + "VectorConfig": { + "type": "object", + "properties": { + "vectorIndexConfig": { + "description": "Vector-index config, that is specific to the type of index selected in vectorIndexType", + "type": "object" + }, + "vectorIndexType": { + "description": "Name of the vector index to use, eg. (HNSW)", + "type": "string" + }, + "vectorizer": { + "description": "Configuration of a specific vectorizer used by this vector", + "type": "object" + } + } + }, + "VectorWeights": { + "description": "Allow custom overrides of vector weights as math expressions. E.g. \"pancake\": \"7\" will set the weight for the word pancake to 7 in the vectorization, whereas \"w * 3\" would triple the originally calculated word. This is an open object, with OpenAPI Specification 3.0 this will be more detailed. See Weaviate docs for more info. In the future this will become a key/value (string/string) object.", + "type": "object" + }, + "Vectors": { + "description": "A map of named vectors for multi-vector representations.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Vector" + } + }, + "WhereFilter": { + "description": "Filter search results using a where filter", + "type": "object", + "properties": { + "operands": { + "description": "combine multiple where filters, requires 'And' or 'Or' operator", + "type": "array", + "items": { + "$ref": "#/definitions/WhereFilter" + } + }, + "operator": { + "description": "operator to use", + "type": "string", + "enum": [ + "And", + "Or", + "Equal", + "Like", + "NotEqual", + "GreaterThan", + "GreaterThanEqual", + "LessThan", + "LessThanEqual", + "WithinGeoRange", + "IsNull", + "ContainsAny", + "ContainsAll", + "ContainsNone", + "Not" + ], + "example": "GreaterThanEqual" + }, + "path": { + "description": "path to the property currently being filtered", + "type": "array", + "items": { + "type": "string" + }, + 
"example": [ + "inCity", + "City", + "name" + ] + }, + "valueBoolean": { + "description": "value as boolean", + "type": "boolean", + "x-nullable": true, + "example": false + }, + "valueBooleanArray": { + "description": "value as boolean", + "type": "array", + "items": { + "type": "boolean" + }, + "x-nullable": true, + "x-omitempty": true, + "example": [ + true, + false + ] + }, + "valueDate": { + "description": "value as date (as string)", + "type": "string", + "x-nullable": true, + "example": "TODO" + }, + "valueDateArray": { + "description": "value as date (as string)", + "type": "array", + "items": { + "type": "string" + }, + "x-nullable": true, + "x-omitempty": true, + "example": "TODO" + }, + "valueGeoRange": { + "description": "value as geo coordinates and distance", + "type": "object", + "x-nullable": true, + "$ref": "#/definitions/WhereFilterGeoRange" + }, + "valueInt": { + "description": "value as integer", + "type": "integer", + "format": "int64", + "x-nullable": true, + "example": 2000 + }, + "valueIntArray": { + "description": "value as integer", + "type": "array", + "items": { + "type": "integer", + "format": "int64" + }, + "x-nullable": true, + "x-omitempty": true, + "example": "[100, 200]" + }, + "valueNumber": { + "description": "value as number/float", + "type": "number", + "format": "float64", + "x-nullable": true, + "example": 3.14 + }, + "valueNumberArray": { + "description": "value as number/float", + "type": "array", + "items": { + "type": "number", + "format": "float64" + }, + "x-nullable": true, + "x-omitempty": true, + "example": [ + 3.14 + ] + }, + "valueString": { + "description": "value as text (deprecated as of v1.19; alias for valueText)", + "type": "string", + "x-nullable": true, + "example": "my search term" + }, + "valueStringArray": { + "description": "value as text (deprecated as of v1.19; alias for valueText)", + "type": "array", + "items": { + "type": "string" + }, + "x-nullable": true, + "x-omitempty": true, + "example": [ + 
"my search term" + ] + }, + "valueText": { + "description": "value as text", + "type": "string", + "x-nullable": true, + "example": "my search term" + }, + "valueTextArray": { + "description": "value as text", + "type": "array", + "items": { + "type": "string" + }, + "x-nullable": true, + "x-omitempty": true, + "example": [ + "my search term" + ] + } + } + }, + "WhereFilterGeoRange": { + "description": "filter within a distance of a georange", + "type": "object", + "properties": { + "distance": { + "type": "object", + "properties": { + "max": { + "type": "number", + "format": "float64" + } + } + }, + "geoCoordinates": { + "x-nullable": false, + "$ref": "#/definitions/GeoCoordinates" + } + } + }, + "WhereFilterGeoRangeDistance": { + "type": "object", + "properties": { + "max": { + "type": "number", + "format": "float64" + } + } + } + }, + "parameters": { + "CommonAfterParameterQuery": { + "type": "string", + "description": "A threshold UUID of the objects to retrieve after, using an UUID-based ordering. This object is not part of the set. \u003cbr/\u003e\u003cbr/\u003eMust be used with ` + "`" + `class` + "`" + `, typically in conjunction with ` + "`" + `limit` + "`" + `. \u003cbr/\u003e\u003cbr/\u003eNote ` + "`" + `after` + "`" + ` cannot be used with ` + "`" + `offset` + "`" + ` or ` + "`" + `sort` + "`" + `. \u003cbr/\u003e\u003cbr/\u003eFor a null value similar to offset=0, set an empty string in the request, i.e. ` + "`" + `after=` + "`" + ` or ` + "`" + `after` + "`" + `.", + "name": "after", + "in": "query" + }, + "CommonClassParameterQuery": { + "type": "string", + "description": "The collection from which to query objects. 
\u003cbr/\u003e\u003cbr/\u003eNote that if ` + "`" + `class` + "`" + ` is not provided, the response will not include any objects.", + "name": "class", + "in": "query" + }, + "CommonConsistencyLevelParameterQuery": { + "type": "string", + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "name": "consistency_level", + "in": "query" + }, + "CommonIncludeParameterQuery": { + "type": "string", + "description": "Include additional information, such as classification infos. Allowed values include: classification, vector, interpretation", + "name": "include", + "in": "query" + }, + "CommonLimitParameterQuery": { + "type": "integer", + "format": "int64", + "description": "The maximum number of items to be returned per page. The default is 25 unless set otherwise as an environment variable.", + "name": "limit", + "in": "query" + }, + "CommonNodeNameParameterQuery": { + "type": "string", + "description": "The target node which should fulfill the request", + "name": "node_name", + "in": "query" + }, + "CommonOffsetParameterQuery": { + "type": "integer", + "format": "int64", + "default": 0, + "description": "The starting index of the result window. Note ` + "`" + `offset` + "`" + ` will retrieve ` + "`" + `offset+limit` + "`" + ` results and return ` + "`" + `limit` + "`" + ` results from the object with index ` + "`" + `offset` + "`" + ` onwards. Limited by the value of ` + "`" + `QUERY_MAXIMUM_RESULTS` + "`" + `. \u003cbr/\u003e\u003cbr/\u003eShould be used in conjunction with ` + "`" + `limit` + "`" + `. \u003cbr/\u003e\u003cbr/\u003eCannot be used with ` + "`" + `after` + "`" + `.", + "name": "offset", + "in": "query" + }, + "CommonOrderParameterQuery": { + "type": "string", + "description": "Order parameter to tell how to order (asc or desc) data within given field. Should be used in conjunction with ` + "`" + `sort` + "`" + ` parameter. 
If providing multiple ` + "`" + `sort` + "`" + ` values, provide multiple ` + "`" + `order` + "`" + ` values in corresponding order, e.g.: ` + "`" + `sort=author_name,title\u0026order=desc,asc` + "`" + `.", + "name": "order", + "in": "query" + }, + "CommonOutputVerbosityParameterQuery": { + "type": "string", + "default": "minimal", + "description": "Controls the verbosity of the output, possible values are: \"minimal\", \"verbose\". Defaults to \"minimal\".", + "name": "output", + "in": "query" + }, + "CommonSortParameterQuery": { + "type": "string", + "description": "Name(s) of the property to sort by - e.g. ` + "`" + `city` + "`" + `, or ` + "`" + `country,city` + "`" + `.", + "name": "sort", + "in": "query" + }, + "CommonTenantParameterQuery": { + "type": "string", + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "name": "tenant", + "in": "query" + } + }, + "securityDefinitions": { + "oidc": { + "description": "OIDC (OpenConnect ID - based on OAuth2)", + "type": "oauth2", + "flow": "implicit", + "authorizationUrl": "http://to-be-configured-in-the-application-config" + } + }, + "security": [ + {}, + { + "oidc": [] + } + ], + "tags": [ + { + "name": "objects" + }, + { + "description": "These operations allow to execute batch requests for Objects and Objects. 
Mostly used for importing large datasets.", + "name": "batch" + }, + { + "name": "graphql" + }, + { + "name": "meta" + }, + { + "name": "P2P" + }, + { + "description": "All functions related to the Contextionary.", + "name": "contextionary-API" + }, + { + "description": "These operations enable manipulation of the schema in Weaviate schema.", + "name": "schema" + }, + { + "description": "Operations related to managing data replication, including initiating and monitoring shard replica movements between nodes, querying current sharding states, and managing the lifecycle of replication tasks.", + "name": "replication" + } + ], + "externalDocs": { + "url": "https://github.com/weaviate/weaviate" + } +}`)) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/errors/errors.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/errors/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..4f0a0c2e2f811f9430fb928292d7f43e03d4a6f3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/errors/errors.go @@ -0,0 +1,24 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package errors + +import ( + "fmt" + + "github.com/weaviate/weaviate/entities/models" +) + +func ErrPayloadFromSingleErr(err error) *models.ErrorResponse { + return &models.ErrorResponse{Error: []*models.ErrorResponseErrorItems0{{ + Message: fmt.Sprintf("%s", err), + }}} +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/filterext/parse.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/filterext/parse.go new file mode 100644 index 0000000000000000000000000000000000000000..4be9ac1aaf5444071e59648d803af445693d8291 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/filterext/parse.go @@ -0,0 +1,184 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package filterext + +import ( + "fmt" + + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" +) + +// Parse Filter from REST construct to entities filter +func Parse(in *models.WhereFilter, rootClass string) (*filters.LocalFilter, error) { + if in == nil { + return nil, nil + } + + operator, err := parseOperator(in.Operator) + if err != nil { + return nil, err + } + + if operator.OnValue() { + filter, err := parseValueFilter(in, operator, rootClass) + if err != nil { + return nil, fmt.Errorf("invalid where filter: %w", err) + } + return filter, nil + } + + filter, err := parseNestedFilter(in, operator, rootClass) + if err != nil { + return nil, fmt.Errorf("invalid where filter: %w", err) + } + return filter, nil +} + +func parseValueFilter(in *models.WhereFilter, + operator filters.Operator, rootClass string, +) (*filters.LocalFilter, error) { + value, err := parseValue(in) + if err != nil { + return nil, err + } + + path, err := 
parsePath(in.Path, rootClass) + if err != nil { + return nil, err + } + + return &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: operator, + Value: value, + On: path, + }, + }, nil +} + +func parseNestedFilter(in *models.WhereFilter, + operator filters.Operator, rootClass string, +) (*filters.LocalFilter, error) { + if in.Path != nil { + return nil, fmt.Errorf( + "operator '%s' not compatible with field 'path', remove 'path' "+ + "or switch to compare operator (eg. Equal, NotEqual, etc.)", + operator.Name()) + } + + if !allValuesNil(in) { + return nil, fmt.Errorf( + "operator '%s' not compatible with field 'value', "+ + "remove value field or switch to compare operator "+ + "(eg. Equal, NotEqual, etc.)", + operator.Name()) + } + + if len(in.Operands) == 0 { + return nil, fmt.Errorf( + "operator '%s', but no operands set - add at least one operand", + operator.Name()) + } + + operands, err := parseOperands(in.Operands, rootClass) + if err != nil { + return nil, err + } + + return &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: operator, + Operands: operands, + }, + }, nil +} + +func parseOperands(ops []*models.WhereFilter, rootClass string) ([]filters.Clause, error) { + out := make([]filters.Clause, len(ops)) + for i, operand := range ops { + res, err := Parse(operand, rootClass) + if err != nil { + return nil, fmt.Errorf("operand %d: %w", i, err) + } + + out[i] = *res.Root + } + + return out, nil +} + +func parseOperator(in string) (filters.Operator, error) { + switch in { + case models.WhereFilterOperatorEqual: + return filters.OperatorEqual, nil + case models.WhereFilterOperatorLike: + return filters.OperatorLike, nil + case models.WhereFilterOperatorLessThan: + return filters.OperatorLessThan, nil + case models.WhereFilterOperatorLessThanEqual: + return filters.OperatorLessThanEqual, nil + case models.WhereFilterOperatorGreaterThan: + return filters.OperatorGreaterThan, nil + case models.WhereFilterOperatorGreaterThanEqual: + return 
filters.OperatorGreaterThanEqual, nil + case models.WhereFilterOperatorNotEqual: + return filters.OperatorNotEqual, nil + case models.WhereFilterOperatorWithinGeoRange: + return filters.OperatorWithinGeoRange, nil + case models.WhereFilterOperatorAnd: + return filters.OperatorAnd, nil + case models.WhereFilterOperatorOr: + return filters.OperatorOr, nil + case models.WhereFilterOperatorIsNull: + return filters.OperatorIsNull, nil + case models.WhereFilterOperatorContainsAny: + return filters.ContainsAny, nil + case models.WhereFilterOperatorContainsAll: + return filters.ContainsAll, nil + case models.WhereFilterOperatorContainsNone: + return filters.ContainsNone, nil + case models.WhereFilterOperatorNot: + return filters.OperatorNot, nil + default: + return -1, fmt.Errorf("unrecognized operator: %s", in) + } +} + +func parsePath(in []string, rootClass string) (*filters.Path, error) { + if len(in) == 0 { + return nil, fmt.Errorf("field 'path': must have at least one element") + } + + pathElements := make([]interface{}, len(in)) + for i, elem := range in { + pathElements[i] = elem + } + + return filters.ParsePath(pathElements, rootClass) +} + +func allValuesNil(in *models.WhereFilter) bool { + return in.ValueBoolean == nil && + in.ValueDate == nil && + in.ValueString == nil && + in.ValueText == nil && + in.ValueInt == nil && + in.ValueNumber == nil && + in.ValueGeoRange == nil && + len(in.ValueBooleanArray) == 0 && + len(in.ValueDateArray) == 0 && + len(in.ValueStringArray) == 0 && + len(in.ValueTextArray) == 0 && + len(in.ValueIntArray) == 0 && + len(in.ValueNumberArray) == 0 +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/filterext/parse_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/filterext/parse_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b1a7042eb2c51bd2c99025856defc0d3a9a8bcfb --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/filterext/parse_test.go @@ -0,0 +1,553 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package filterext + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +func Test_ExtractFlatFilters(t *testing.T) { + t.Parallel() + + type test struct { + name string + input *models.WhereFilter + expectedFilter *filters.LocalFilter + expectedErr error + } + + t.Run("all value types", func(t *testing.T) { + tests := []test{ + { + name: "no filter", + }, + { + name: "valid int filter", + input: &models.WhereFilter{ + Operator: "Equal", + ValueInt: ptInt(42), + Path: []string{"intField"}, + }, + expectedFilter: &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("Todo"), + Property: schema.AssertValidPropertyName("intField"), + }, + Value: &filters.Value{ + Value: 42, + Type: schema.DataTypeInt, + }, + }}, + }, + { + name: "valid date filter", + input: &models.WhereFilter{ + Operator: "Equal", + ValueDate: ptString("foo bar"), + Path: []string{"dateField"}, + }, + expectedFilter: &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("Todo"), + Property: schema.AssertValidPropertyName("dateField"), + }, + Value: &filters.Value{ + Value: "foo bar", + Type: schema.DataTypeDate, + }, + }}, + }, + { + name: "valid text filter", + input: &models.WhereFilter{ + Operator: "Equal", + ValueText: ptString("foo bar"), + Path: []string{"textField"}, + }, + 
expectedFilter: &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("Todo"), + Property: schema.AssertValidPropertyName("textField"), + }, + Value: &filters.Value{ + Value: "foo bar", + Type: schema.DataTypeText, + }, + }}, + }, + { + name: "valid number filter", + input: &models.WhereFilter{ + Operator: "Equal", + ValueNumber: ptFloat(20.20), + Path: []string{"numberField"}, + }, + expectedFilter: &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("Todo"), + Property: schema.AssertValidPropertyName("numberField"), + }, + Value: &filters.Value{ + Value: 20.20, + Type: schema.DataTypeNumber, + }, + }}, + }, + { + name: "valid bool filter", + input: &models.WhereFilter{ + Operator: "Equal", + ValueBoolean: ptBool(true), + Path: []string{"booleanField"}, + }, + expectedFilter: &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("Todo"), + Property: schema.AssertValidPropertyName("booleanField"), + }, + Value: &filters.Value{ + Value: true, + Type: schema.DataTypeBoolean, + }, + }}, + }, + { + name: "valid geo range filter", + input: &models.WhereFilter{ + Operator: "WithinGeoRange", + ValueGeoRange: inputGeoRangeFilter(0.5, 0.6, 2.0), + Path: []string{"geoField"}, + }, + expectedFilter: &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorWithinGeoRange, + On: &filters.Path{ + Class: schema.AssertValidClassName("Todo"), + Property: schema.AssertValidPropertyName("geoField"), + }, + Value: &filters.Value{ + Value: filters.GeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + Latitude: ptFloat32(0.5), + Longitude: ptFloat32(0.6), + }, + Distance: 2.0, + }, + Type: schema.DataTypeGeoCoordinates, + }, + }}, + }, + { + name: "[deprecated string] valid string filter", + input: &models.WhereFilter{ + 
Operator: "Equal", + ValueString: ptString("foo bar"), + Path: []string{"stringField"}, + }, + expectedFilter: &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("Todo"), + Property: schema.AssertValidPropertyName("stringField"), + }, + Value: &filters.Value{ + Value: "foo bar", + Type: schema.DataTypeString, + }, + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + filter, err := Parse(test.input, "Todo") + assert.Equal(t, test.expectedErr, err) + assert.Equal(t, test.expectedFilter, filter) + }) + } + }) + + t.Run("invalid cases", func(t *testing.T) { + tests := []test{ + { + name: "geo missing coordinates", + input: &models.WhereFilter{ + Operator: "WithinGeoRange", + ValueGeoRange: &models.WhereFilterGeoRange{ + Distance: &models.WhereFilterGeoRangeDistance{ + Max: 20.0, + }, + }, + Path: []string{"geoField"}, + }, + expectedErr: fmt.Errorf("invalid where filter: valueGeoRange: " + + "field 'geoCoordinates' must be set"), + }, + { + name: "geo missing distance object", + input: &models.WhereFilter{ + Operator: "WithinGeoRange", + ValueGeoRange: &models.WhereFilterGeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + Latitude: ptFloat32(4.5), + Longitude: ptFloat32(3.7), + }, + }, + Path: []string{"geoField"}, + }, + expectedErr: fmt.Errorf("invalid where filter: valueGeoRange: " + + "field 'distance' must be set"), + }, + { + name: "geo having negative distance", + input: &models.WhereFilter{ + Operator: "WithinGeoRange", + ValueGeoRange: &models.WhereFilterGeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + Latitude: ptFloat32(4.5), + Longitude: ptFloat32(3.7), + }, + Distance: &models.WhereFilterGeoRangeDistance{ + Max: -20.0, + }, + }, + Path: []string{"geoField"}, + }, + expectedErr: fmt.Errorf("invalid where filter: valueGeoRange: " + + "field 'distance.max' must be a positive number"), + }, + { + name: "and operator and path set", + 
input: &models.WhereFilter{ + Operator: "And", + Path: []string{"some field"}, + }, + expectedErr: fmt.Errorf("invalid where filter: " + + "operator 'And' not compatible with field 'path', remove 'path' " + + "or switch to compare operator (eg. Equal, NotEqual, etc.)"), + }, + { + name: "and operator and value set", + input: &models.WhereFilter{ + Operator: "And", + ValueInt: ptInt(43), + }, + expectedErr: fmt.Errorf("invalid where filter: " + + "operator 'And' not compatible with field 'value', " + + "remove value field or switch to compare operator " + + "(eg. Equal, NotEqual, etc.)"), + }, + { + name: "and operator and no operands set", + input: &models.WhereFilter{ + Operator: "And", + }, + expectedErr: fmt.Errorf("invalid where filter: " + + "operator 'And', but no operands set - add at least one operand"), + }, + { + name: "equal operator and no values set", + input: &models.WhereFilter{ + Operator: "Equal", + }, + expectedErr: fmt.Errorf("invalid where filter: " + + "got operator 'Equal', but no value field set"), + }, + { + name: "equal operator and no path set", + input: &models.WhereFilter{ + Operator: "Equal", + ValueInt: ptInt(43), + }, + expectedErr: fmt.Errorf("invalid where filter: " + + "field 'path': must have at least one element"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + filter, err := Parse(test.input, "Todo") + assert.ErrorAs(t, err, &test.expectedErr) + assert.Equal(t, test.expectedFilter, filter) + }) + } + }) + + t.Run("all operator types", func(t *testing.T) { + // all tests use int as the value type, value types are tested separately + tests := []test{ + { + name: "equal", + input: inputIntFilterWithOp("Equal"), + expectedFilter: intFilterWithOp(filters.OperatorEqual), + }, + { + name: "like", // doesn't make sense on an int, but that's irrelevant for parsing + input: inputIntFilterWithOp("Like"), + expectedFilter: intFilterWithOp(filters.OperatorLike), + }, + { + name: "not equal", + input: 
inputIntFilterWithOp("NotEqual"), + expectedFilter: intFilterWithOp(filters.OperatorNotEqual), + }, + { + name: "greater than", + input: inputIntFilterWithOp("GreaterThan"), + expectedFilter: intFilterWithOp(filters.OperatorGreaterThan), + }, + { + name: "greater than/equal", + input: inputIntFilterWithOp("GreaterThanEqual"), + expectedFilter: intFilterWithOp(filters.OperatorGreaterThanEqual), + }, + { + name: "less than", + input: inputIntFilterWithOp("LessThan"), + expectedFilter: intFilterWithOp(filters.OperatorLessThan), + }, + { + name: "less than/equal", + input: inputIntFilterWithOp("LessThanEqual"), + expectedFilter: intFilterWithOp(filters.OperatorLessThanEqual), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + filter, err := Parse(test.input, "Todo") + assert.Equal(t, test.expectedErr, err) + assert.Equal(t, test.expectedFilter, filter) + }) + } + }) + + t.Run("nested filters", func(t *testing.T) { + // all tests use int as the value type, value types are tested separately + tests := []test{ + { + name: "chained together using and", + input: &models.WhereFilter{ + Operator: "And", + Operands: []*models.WhereFilter{ + inputIntFilterWithValue(42), + inputIntFilterWithValueAndPath(43, + []string{"hasAction", "SomeAction", "intField"}), + }, + }, + expectedFilter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("Todo"), + Property: schema.AssertValidPropertyName("intField"), + }, + Value: &filters.Value{ + Value: 42, + Type: schema.DataTypeInt, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("Todo"), + Property: schema.AssertValidPropertyName("hasAction"), + Child: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("intField"), + }, + }, + Value: 
&filters.Value{ + Value: 43, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + }, + { + name: "chained together using or", + input: &models.WhereFilter{ + Operator: "Or", + Operands: []*models.WhereFilter{ + inputIntFilterWithValue(42), + inputIntFilterWithValueAndPath(43, + []string{"hasAction", "SomeAction", "intField"}), + }, + }, + expectedFilter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("Todo"), + Property: schema.AssertValidPropertyName("intField"), + }, + Value: &filters.Value{ + Value: 42, + Type: schema.DataTypeInt, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("Todo"), + Property: schema.AssertValidPropertyName("hasAction"), + Child: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("intField"), + }, + }, + Value: &filters.Value{ + Value: 43, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + }, + { + name: "chained together using not", + input: &models.WhereFilter{ + Operator: "Not", + Operands: []*models.WhereFilter{ + inputIntFilterWithValueAndPath(44, + []string{"hasAction", "SomeAction", "intField"}), + }, + }, + expectedFilter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorNot, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.AssertValidClassName("Todo"), + Property: schema.AssertValidPropertyName("hasAction"), + Child: &filters.Path{ + Class: schema.AssertValidClassName("SomeAction"), + Property: schema.AssertValidPropertyName("intField"), + }, + }, + Value: &filters.Value{ + Value: 44, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + filter, err := Parse(test.input, "Todo") + 
assert.Equal(t, test.expectedErr, err) + assert.Equal(t, test.expectedFilter, filter) + }) + } + }) +} + +func ptInt(in int) *int64 { + a := int64(in) + return &a +} + +func ptFloat(in float64) *float64 { + return &in +} + +func ptString(in string) *string { + return &in +} + +func ptBool(in bool) *bool { + return &in +} + +func intFilterWithOp(op filters.Operator) *filters.LocalFilter { + return &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: op, + On: &filters.Path{ + Class: schema.AssertValidClassName("Todo"), + Property: schema.AssertValidPropertyName("intField"), + }, + Value: &filters.Value{ + Value: 42, + Type: schema.DataTypeInt, + }, + }, + } +} + +func inputIntFilterWithOp(op string) *models.WhereFilter { + return &models.WhereFilter{ + Operator: op, + ValueInt: ptInt(42), + Path: []string{"intField"}, + } +} + +func inputIntFilterWithValue(value int) *models.WhereFilter { + return &models.WhereFilter{ + Operator: "Equal", + ValueInt: ptInt(value), + Path: []string{"intField"}, + } +} + +func inputIntFilterWithValueAndPath(value int, + path []string, +) *models.WhereFilter { + return &models.WhereFilter{ + Operator: "Equal", + ValueInt: ptInt(value), + Path: path, + } +} + +func inputGeoRangeFilter(lat, lon, max float64) *models.WhereFilterGeoRange { + return &models.WhereFilterGeoRange{ + Distance: &models.WhereFilterGeoRangeDistance{ + Max: max, + }, + GeoCoordinates: &models.GeoCoordinates{ + Latitude: ptFloat32(float32(lat)), + Longitude: ptFloat32(float32(lon)), + }, + } +} + +func ptFloat32(in float32) *float32 { + return &in +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/filterext/parse_value.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/filterext/parse_value.go new file mode 100644 index 0000000000000000000000000000000000000000..0b69ecd16455723fdb8bbeb95e3fd960a920de17 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/filterext/parse_value.go @@ -0,0 +1,194 @@ 
+// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package filterext + +import ( + "encoding/json" + "fmt" + + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +func parseValue(in *models.WhereFilter) (*filters.Value, error) { + var value *filters.Value + + for _, extractor := range valueExtractors { + foundValue, err := extractor(in) + // Abort if we found a value, but it's for being passed a string to an int value. + if err != nil { + return nil, err + } + + if foundValue != nil { + if value != nil { + return nil, fmt.Errorf("found more than one values the clause '%s'", jsonify(in)) + } else { + value = foundValue + } + } + } + + if value == nil { + return nil, fmt.Errorf("got operator '%s', but no value field set", + in.Operator) + } + + return value, nil +} + +type valueExtractorFunc func(*models.WhereFilter) (*filters.Value, error) + +var valueExtractors = []valueExtractorFunc{ + // int + func(in *models.WhereFilter) (*filters.Value, error) { + if in.ValueInt == nil { + return nil, nil + } + + return valueFilter(int(*in.ValueInt), schema.DataTypeInt), nil + }, + // number + func(in *models.WhereFilter) (*filters.Value, error) { + if in.ValueNumber == nil { + return nil, nil + } + + return valueFilter(*in.ValueNumber, schema.DataTypeNumber), nil + }, + // text + func(in *models.WhereFilter) (*filters.Value, error) { + if in.ValueText == nil { + return nil, nil + } + + return valueFilter(*in.ValueText, schema.DataTypeText), nil + }, + // date (as string) + func(in *models.WhereFilter) (*filters.Value, error) { + if in.ValueDate == nil { + return nil, nil + } + + return valueFilter(*in.ValueDate, schema.DataTypeDate), nil + }, + 
// boolean + func(in *models.WhereFilter) (*filters.Value, error) { + if in.ValueBoolean == nil { + return nil, nil + } + + return valueFilter(*in.ValueBoolean, schema.DataTypeBoolean), nil + }, + + // int array + func(in *models.WhereFilter) (*filters.Value, error) { + if len(in.ValueIntArray) == 0 { + return nil, nil + } + + valueInts := make([]int, len(in.ValueIntArray)) + for i := range in.ValueIntArray { + valueInts[i] = int(in.ValueIntArray[i]) + } + return valueFilter(valueInts, schema.DataTypeInt), nil + }, + // number array + func(in *models.WhereFilter) (*filters.Value, error) { + if len(in.ValueNumberArray) == 0 { + return nil, nil + } + + return valueFilter(in.ValueNumberArray, schema.DataTypeNumber), nil + }, + // text array + func(in *models.WhereFilter) (*filters.Value, error) { + if len(in.ValueTextArray) == 0 { + return nil, nil + } + + return valueFilter(in.ValueTextArray, schema.DataTypeText), nil + }, + // date (as string) array + func(in *models.WhereFilter) (*filters.Value, error) { + if len(in.ValueDateArray) == 0 { + return nil, nil + } + + return valueFilter(in.ValueDateArray, schema.DataTypeDate), nil + }, + // boolean + func(in *models.WhereFilter) (*filters.Value, error) { + if len(in.ValueBooleanArray) == 0 { + return nil, nil + } + + return valueFilter(in.ValueBooleanArray, schema.DataTypeBoolean), nil + }, + + // geo range + func(in *models.WhereFilter) (*filters.Value, error) { + if in.ValueGeoRange == nil { + return nil, nil + } + + if in.ValueGeoRange.Distance == nil { + return nil, fmt.Errorf("valueGeoRange: field 'distance' must be set") + } + + if in.ValueGeoRange.Distance.Max < 0 { + return nil, fmt.Errorf("valueGeoRange: field 'distance.max' must be a positive number") + } + + if in.ValueGeoRange.GeoCoordinates == nil { + return nil, fmt.Errorf("valueGeoRange: field 'geoCoordinates' must be set") + } + + return valueFilter(filters.GeoRange{ + Distance: float32(in.ValueGeoRange.Distance.Max), + GeoCoordinates: 
&models.GeoCoordinates{ + Latitude: in.ValueGeoRange.GeoCoordinates.Latitude, + Longitude: in.ValueGeoRange.GeoCoordinates.Longitude, + }, + }, schema.DataTypeGeoCoordinates), nil + }, + // deprecated string + func(in *models.WhereFilter) (*filters.Value, error) { + if in.ValueString == nil { + return nil, nil + } + + return valueFilter(*in.ValueString, schema.DataTypeString), nil + }, + // deprecated string array + func(in *models.WhereFilter) (*filters.Value, error) { + if len(in.ValueStringArray) == 0 { + return nil, nil + } + + return valueFilter(in.ValueStringArray, schema.DataTypeString), nil + }, +} + +func valueFilter(value interface{}, dt schema.DataType) *filters.Value { + return &filters.Value{ + Type: dt, + Value: value, + } +} + +// Small utility function used in printing error messages. +func jsonify(stuff interface{}) string { + j, _ := json.Marshal(stuff) + return string(j) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/grpc.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/grpc.go new file mode 100644 index 0000000000000000000000000000000000000000..944beddc6eacb802085cb9541ef879e6449887c5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/grpc.go @@ -0,0 +1,33 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + grpcHandler "github.com/weaviate/weaviate/adapters/handlers/grpc" + "github.com/weaviate/weaviate/adapters/handlers/grpc/v1/batch" + "github.com/weaviate/weaviate/adapters/handlers/rest/state" + enterrors "github.com/weaviate/weaviate/entities/errors" + "google.golang.org/grpc" +) + +func createGrpcServer(state *state.State, shutdown *batch.Shutdown, options ...grpc.ServerOption) *grpc.Server { + return grpcHandler.CreateGRPCServer(state, shutdown, options...) +} + +func startGrpcServer(server *grpc.Server, state *state.State) { + enterrors.GoWrapper(func() { + if err := grpcHandler.StartAndListen(server, state); err != nil { + state.Logger.WithField("action", "grpc_startup").WithError(err). + Fatal("failed to start grpc server") + } + }, state.Logger) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_aliases.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_aliases.go new file mode 100644 index 0000000000000000000000000000000000000000..ea4aa13b6035b529437ab3d3f957ac0a4a94d659 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_aliases.go @@ -0,0 +1,185 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "errors" + + "github.com/go-openapi/runtime/middleware" + "github.com/sirupsen/logrus" + + restCtx "github.com/weaviate/weaviate/adapters/handlers/rest/context" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/schema" + "github.com/weaviate/weaviate/entities/models" + authzerrors "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + "github.com/weaviate/weaviate/usecases/monitoring" + uco "github.com/weaviate/weaviate/usecases/objects" + schemaUC "github.com/weaviate/weaviate/usecases/schema" +) + +type aliasesHandlers struct { + manager *schemaUC.Manager + metricRequestsTotal restApiRequestsTotal +} + +func (s *aliasesHandlers) getAliases(params schema.AliasesGetParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + className := "" + if params.Class != nil { + className = *params.Class + } + aliases, err := s.manager.GetAliases(ctx, principal, "", className) + if err != nil { + s.metricRequestsTotal.logError(className, err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewAliasesGetForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewAliasesGetForbidden(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + } + + aliasesResponse := &models.AliasResponse{Aliases: aliases} + + s.metricRequestsTotal.logOk(className) + return schema.NewAliasesGetOK().WithPayload(aliasesResponse) +} + +func (s *aliasesHandlers) getAlias(params schema.AliasesGetAliasParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + alias, err := s.manager.GetAlias(ctx, principal, params.AliasName) + if err != nil { + s.metricRequestsTotal.logError("", err) + if errors.Is(err, schemaUC.ErrNotFound) { + return schema.NewAliasesGetAliasNotFound() + } + + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewAliasesGetAliasForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewAliasesGetAliasForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + s.metricRequestsTotal.logOk("") + return schema.NewAliasesGetAliasOK().WithPayload(alias) +} + +func (s *aliasesHandlers) addAlias(params schema.AliasesCreateParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + _, _, err := s.manager.AddAlias(ctx, principal, params.Body) + if err != nil { + s.metricRequestsTotal.logError(params.Body.Class, err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewAliasesCreateForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewAliasesCreateUnprocessableEntity(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + } + + s.metricRequestsTotal.logOk(params.Body.Class) + return schema.NewAliasesCreateOK().WithPayload(params.Body) +} + +func (s *aliasesHandlers) updateAlias(params schema.AliasesUpdateParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + alias, err := s.manager.UpdateAlias(ctx, principal, params.AliasName, params.Body.Class) + if err != nil { + s.metricRequestsTotal.logError(params.Body.Class, err) + if errors.Is(err, schemaUC.ErrNotFound) { + return schema.NewAliasesUpdateNotFound() + } + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewAliasesUpdateForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewAliasesUpdateUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + s.metricRequestsTotal.logOk(params.Body.Class) + return schema.NewAliasesUpdateOK().WithPayload(alias) +} + +func (s *aliasesHandlers) deleteAlias(params schema.AliasesDeleteParams, principal *models.Principal) middleware.Responder { + err := s.manager.DeleteAlias(params.HTTPRequest.Context(), principal, params.AliasName) + if err != nil { + s.metricRequestsTotal.logError(params.AliasName, err) + if errors.Is(err, schemaUC.ErrNotFound) { + return schema.NewAliasesDeleteNotFound() + } + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewAliasesDeleteForbidden(). 
+ WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewAliasesCreateUnprocessableEntity().WithPayload(errPayloadFromSingleErr(err)) + } + } + + s.metricRequestsTotal.logOk(params.AliasName) + return schema.NewAliasesDeleteNoContent() +} + +func setupAliasesHandlers(api *operations.WeaviateAPI, + manager *schemaUC.Manager, + metrics *monitoring.PrometheusMetrics, + logger logrus.FieldLogger, +) { + h := &aliasesHandlers{manager, newAliasesRequestsTotal(metrics, logger)} + + api.SchemaAliasesGetHandler = schema.AliasesGetHandlerFunc(h.getAliases) + api.SchemaAliasesGetAliasHandler = schema.AliasesGetAliasHandlerFunc(h.getAlias) + api.SchemaAliasesCreateHandler = schema.AliasesCreateHandlerFunc(h.addAlias) + api.SchemaAliasesUpdateHandler = schema.AliasesUpdateHandlerFunc(h.updateAlias) + api.SchemaAliasesDeleteHandler = schema.AliasesDeleteHandlerFunc(h.deleteAlias) +} + +type aliasesRequestsTotal struct { + *restApiRequestsTotalImpl +} + +func newAliasesRequestsTotal(metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger) restApiRequestsTotal { + return &aliasesRequestsTotal{ + restApiRequestsTotalImpl: &restApiRequestsTotalImpl{newRequestsTotalMetric(metrics, "rest"), "rest", "aliases", logger}, + } +} + +func (e *aliasesRequestsTotal) logError(className string, err error) { + switch { + case errors.As(err, &authzerrors.Forbidden{}): + e.logUserError(className) + case errors.As(err, &uco.ErrMultiTenancy{}): + e.logUserError(className) + default: + e.logUserError(className) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_authn.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_authn.go new file mode 100644 index 0000000000000000000000000000000000000000..7a3a34d3abdf45713468a31788c4a192340562b5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_authn.go @@ -0,0 +1,86 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ 
\ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "github.com/go-openapi/runtime/middleware" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/usecases/auth/authentication" + + cerrors "github.com/weaviate/weaviate/adapters/handlers/rest/errors" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/users" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + authzConv "github.com/weaviate/weaviate/usecases/auth/authorization/conv" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" +) + +type authNHandlers struct { + authzController authorization.Controller + rbacConfig rbacconf.Config + logger logrus.FieldLogger +} + +func setupAuthnHandlers(api *operations.WeaviateAPI, controller authorization.Controller, rbacConfig rbacconf.Config, logger logrus.FieldLogger, +) { + h := &authNHandlers{authzController: controller, logger: logger, rbacConfig: rbacConfig} + // user handlers + api.UsersGetOwnInfoHandler = users.GetOwnInfoHandlerFunc(h.getOwnInfo) +} + +func (h *authNHandlers) getOwnInfo(_ users.GetOwnInfoParams, principal *models.Principal) middleware.Responder { + if principal == nil { + return users.NewGetOwnInfoUnauthorized() + } + + var roles []*models.Role + rolenames := map[string]struct{}{} + if h.rbacConfig.Enabled { + existingRoles, err := h.authzController.GetRolesForUserOrGroup(principal.Username, authentication.AuthType(principal.UserType), false) + if err != nil { + return users.NewGetOwnInfoInternalServerError() + } + for _, group := range principal.Groups { + groupRoles, err := h.authzController.GetRolesForUserOrGroup(group, authentication.AuthType(principal.UserType), true) + if err != 
nil { + return users.NewGetOwnInfoInternalServerError() + } + for roleName, policies := range groupRoles { + existingRoles[roleName] = policies + } + } + for roleName, policies := range existingRoles { + perms, err := authzConv.PoliciesToPermission(policies...) + if err != nil { + return users.NewGetOwnInfoInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + roles = append(roles, &models.Role{ + Name: &roleName, + Permissions: perms, + }) + rolenames[roleName] = struct{}{} + } + } + + h.logger.WithFields(logrus.Fields{ + "action": "get_own_info", + "component": "authN", + "user": principal.Username, + }).Info("own info requested") + + return users.NewGetOwnInfoOK().WithPayload(&models.UserOwnInfo{ + Groups: principal.Groups, + Roles: roles, + Username: &principal.Username, + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_backup.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_backup.go new file mode 100644 index 0000000000000000000000000000000000000000..a2db566fd3f83bce9abe4a8857ff5e6d6ad8f3cf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_backup.go @@ -0,0 +1,362 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
//
//  CONTACT: hello@weaviate.io
//

package rest

import (
	"errors"

	"github.com/go-openapi/runtime/middleware"
	"github.com/sirupsen/logrus"

	"github.com/weaviate/weaviate/adapters/handlers/rest/operations"
	"github.com/weaviate/weaviate/adapters/handlers/rest/operations/backups"
	"github.com/weaviate/weaviate/entities/backup"
	"github.com/weaviate/weaviate/entities/models"
	authzerrors "github.com/weaviate/weaviate/usecases/auth/authorization/errors"
	ubak "github.com/weaviate/weaviate/usecases/backup"
	"github.com/weaviate/weaviate/usecases/monitoring"
)

// backupHandlers serves the backup create/status/restore/cancel/list REST
// endpoints; all real work is delegated to the backup scheduler.
type backupHandlers struct {
	manager             *ubak.Scheduler
	metricRequestsTotal restApiRequestsTotal
	logger              logrus.FieldLogger
}

// compressionFromBCfg transforms model backup config to a backup compression config
// NOTE(review): missing fields are defaulted by mutating cfg in place, so the
// caller's request body is modified as a side effect — confirm intentional.
func compressionFromBCfg(cfg *models.BackupConfig) ubak.Compression {
	if cfg != nil {
		if cfg.CPUPercentage == 0 {
			cfg.CPUPercentage = ubak.DefaultCPUPercentage
		}

		if cfg.ChunkSize == 0 {
			cfg.ChunkSize = ubak.DefaultChunkSize
		}

		if cfg.CompressionLevel == "" {
			cfg.CompressionLevel = models.BackupConfigCompressionLevelDefaultCompression
		}

		return ubak.Compression{
			CPUPercentage: int(cfg.CPUPercentage),
			ChunkSize:     int(cfg.ChunkSize),
			Level:         parseCompressionLevel(cfg.CompressionLevel),
		}
	}

	// nil config: all defaults
	return ubak.Compression{
		Level:         ubak.DefaultCompression,
		CPUPercentage: ubak.DefaultCPUPercentage,
		ChunkSize:     ubak.DefaultChunkSize,
	}
}

// compressionFromRCfg maps a restore config to compression settings. Only
// CPUPercentage is configurable on restore; level and chunk size are fixed
// to their defaults.
func compressionFromRCfg(cfg *models.RestoreConfig) ubak.Compression {
	if cfg != nil {
		if cfg.CPUPercentage == 0 {
			cfg.CPUPercentage = ubak.DefaultCPUPercentage
		}

		return ubak.Compression{
			CPUPercentage: int(cfg.CPUPercentage),
			Level:         ubak.DefaultCompression,
			ChunkSize:     ubak.DefaultChunkSize,
		}
	}

	return ubak.Compression{
		Level:         ubak.DefaultCompression,
		CPUPercentage: ubak.DefaultCPUPercentage,
		ChunkSize:     ubak.DefaultChunkSize,
	}
}

// parseCompressionLevel maps the swagger enum string to the internal
// compression level; unrecognized values fall back to the default level.
func parseCompressionLevel(l string) ubak.CompressionLevel {
	switch l {
	case models.BackupConfigCompressionLevelBestSpeed:
		return ubak.BestSpeed
	case models.BackupConfigCompressionLevelBestCompression:
		return ubak.BestCompression
	default:
		return ubak.DefaultCompression
	}
}

// createBackup schedules a new backup. Bucket/path overrides come from the
// optional request config; errors are mapped to 403/422/500.
func (s *backupHandlers) createBackup(params backups.BackupsCreateParams,
	principal *models.Principal,
) middleware.Responder {
	overrideBucket := ""
	overridePath := ""
	if params.Body.Config != nil {
		overrideBucket = params.Body.Config.Bucket
		overridePath = params.Body.Config.Path
	}
	meta, err := s.manager.Backup(params.HTTPRequest.Context(), principal, &ubak.BackupRequest{
		ID:          params.Body.ID,
		Backend:     params.Backend,
		Bucket:      overrideBucket,
		Path:        overridePath,
		Include:     params.Body.Include,
		Exclude:     params.Body.Exclude,
		Compression: compressionFromBCfg(params.Body.Config),
	})
	if err != nil {
		s.metricRequestsTotal.logError("", err)
		switch {
		case errors.As(err, &authzerrors.Forbidden{}):
			return backups.NewBackupsCreateForbidden().
				WithPayload(errPayloadFromSingleErr(err))
		case errors.As(err, &backup.ErrUnprocessable{}):
			return backups.NewBackupsCreateUnprocessableEntity().
				WithPayload(errPayloadFromSingleErr(err))
		default:
			return backups.NewBackupsCreateInternalServerError().
				WithPayload(errPayloadFromSingleErr(err))
		}
	}

	s.metricRequestsTotal.logOk("")
	return backups.NewBackupsCreateOK().WithPayload(meta)
}

// createBackupStatus reports the progress of a running/finished backup.
func (s *backupHandlers) createBackupStatus(params backups.BackupsCreateStatusParams,
	principal *models.Principal,
) middleware.Responder {
	overrideBucket := ""
	if params.Bucket != nil {
		overrideBucket = *params.Bucket
	}
	overridePath := ""
	if params.Path != nil {
		overridePath = *params.Path
	}
	status, err := s.manager.BackupStatus(params.HTTPRequest.Context(), principal, params.Backend, params.ID, overrideBucket, overridePath)
	if err != nil {
		s.metricRequestsTotal.logError("", err)
		switch {
		case errors.As(err, &authzerrors.Forbidden{}):
			return backups.NewBackupsCreateStatusForbidden().
				WithPayload(errPayloadFromSingleErr(err))
		case errors.As(err, &backup.ErrUnprocessable{}):
			return backups.NewBackupsCreateStatusUnprocessableEntity().
				WithPayload(errPayloadFromSingleErr(err))
		case errors.As(err, &backup.ErrNotFound{}):
			return backups.NewBackupsCreateStatusNotFound().
				WithPayload(errPayloadFromSingleErr(err))
		default:
			return backups.NewBackupsCreateStatusInternalServerError().
				WithPayload(errPayloadFromSingleErr(err))
		}
	}

	strStatus := string(status.Status)
	payload := models.BackupCreateStatusResponse{
		Status:  &strStatus,
		ID:      params.ID,
		Path:    status.Path,
		Backend: params.Backend,
		Error:   status.Err,
	}
	s.metricRequestsTotal.logOk("")
	return backups.NewBackupsCreateStatusOK().WithPayload(&payload)
}

// restoreBackup starts restoring the given backup, optionally restoring
// RBAC roles/users and remapping nodes; failures are logged with context.
func (s *backupHandlers) restoreBackup(params backups.BackupsRestoreParams,
	principal *models.Principal,
) middleware.Responder {
	bucket := ""
	path := ""
	roleOption := models.RestoreConfigRolesOptionsNoRestore
	userOption := models.RestoreConfigUsersOptionsNoRestore
	if params.Body.Config != nil {
		bucket = params.Body.Config.Bucket
		path = params.Body.Config.Path
		if params.Body.Config.RolesOptions != nil {
			roleOption = *params.Body.Config.RolesOptions
		}
		if params.Body.Config.UsersOptions != nil {
			userOption = *params.Body.Config.UsersOptions
		}
	}
	meta, err := s.manager.Restore(params.HTTPRequest.Context(), principal, &ubak.BackupRequest{
		ID:                params.ID,
		Backend:           params.Backend,
		Include:           params.Body.Include,
		Exclude:           params.Body.Exclude,
		NodeMapping:       params.Body.NodeMapping,
		Compression:       compressionFromRCfg(params.Body.Config),
		Bucket:            bucket,
		Path:              path,
		RbacRestoreOption: roleOption,
		UserRestoreOption: userOption,
	}, params.Body.OverwriteAlias)
	if err != nil {
		s.metricRequestsTotal.logError("", err)
		s.logger.WithError(err).WithField("id", params.ID).
			WithField("backend", params.Backend).
			WithField("bucket", bucket).WithField("path", path).
			Warn("failed to restore backup")
		switch {
		case errors.As(err, &authzerrors.Forbidden{}):
			return backups.NewBackupsRestoreForbidden().
				WithPayload(errPayloadFromSingleErr(err))
		case errors.As(err, &backup.ErrNotFound{}):
			return backups.NewBackupsRestoreNotFound().
				WithPayload(errPayloadFromSingleErr(err))
		case errors.As(err, &backup.ErrUnprocessable{}):
			return backups.NewBackupsRestoreUnprocessableEntity().
				WithPayload(errPayloadFromSingleErr(err))
		default:
			return backups.NewBackupsRestoreInternalServerError().
				WithPayload(errPayloadFromSingleErr(err))
		}
	}

	s.metricRequestsTotal.logOk("")
	return backups.NewBackupsRestoreOK().WithPayload(meta)
}

// restoreBackupStatus reports the progress of a restore operation.
// NOTE(review): the error branches return BackupsRestore* responders, not
// BackupsRestoreStatus* ones — confirm against the generated operations.
func (s *backupHandlers) restoreBackupStatus(params backups.BackupsRestoreStatusParams,
	principal *models.Principal,
) middleware.Responder {
	var overrideBucket string
	if params.Bucket != nil {
		overrideBucket = *params.Bucket
	}
	var overridePath string
	if params.Path != nil {
		overridePath = *params.Path
	}
	status, err := s.manager.RestorationStatus(
		params.HTTPRequest.Context(), principal, params.Backend, params.ID, overrideBucket, overridePath)
	if err != nil {
		s.metricRequestsTotal.logError("", err)
		switch {
		case errors.As(err, &authzerrors.Forbidden{}):
			return backups.NewBackupsRestoreForbidden().
				WithPayload(errPayloadFromSingleErr(err))
		case errors.As(err, &backup.ErrNotFound{}):
			return backups.NewBackupsRestoreNotFound().
				WithPayload(errPayloadFromSingleErr(err))
		case errors.As(err, &backup.ErrUnprocessable{}):
			return backups.NewBackupsRestoreUnprocessableEntity().
				WithPayload(errPayloadFromSingleErr(err))
		default:
			return backups.NewBackupsRestoreInternalServerError().
				WithPayload(errPayloadFromSingleErr(err))
		}
	}
	strStatus := string(status.Status)
	payload := models.BackupRestoreStatusResponse{
		Status:  &strStatus,
		ID:      params.ID,
		Path:    status.Path,
		Backend: params.Backend,
		Error:   status.Err,
	}
	s.metricRequestsTotal.logOk("")
	return backups.NewBackupsRestoreStatusOK().WithPayload(&payload)
}

// cancel aborts an in-flight backup; returns 204 on success.
func (s *backupHandlers) cancel(params backups.BackupsCancelParams,
	principal *models.Principal,
) middleware.Responder {
	overrideBucket := ""
	if params.Bucket != nil {
		overrideBucket = *params.Bucket
	}
	overridePath := ""
	if params.Path != nil {
		overridePath = *params.Path
	}
	err := s.manager.Cancel(params.HTTPRequest.Context(), principal, params.Backend, params.ID, overrideBucket, overridePath)
	if err != nil {
		s.metricRequestsTotal.logError("", err)
		switch {
		case errors.As(err, &authzerrors.Forbidden{}):
			return backups.NewBackupsCancelForbidden().
				WithPayload(errPayloadFromSingleErr(err))
		case errors.As(err, &backup.ErrUnprocessable{}):
			return backups.NewBackupsCancelUnprocessableEntity().
				WithPayload(errPayloadFromSingleErr(err))
		default:
			return backups.NewBackupsCancelInternalServerError().
				WithPayload(errPayloadFromSingleErr(err))
		}
	}

	s.metricRequestsTotal.logOk("")
	return backups.NewBackupsCancelNoContent()
}

// list enumerates existing backups on a backend.
// NOTE(review): the error branches reuse BackupsRestore* responders although
// this is the list endpoint — confirm the generated API lacks BackupsList*
// error responders before changing.
func (s *backupHandlers) list(params backups.BackupsListParams,
	principal *models.Principal,
) middleware.Responder {
	payload, err := s.manager.List(
		params.HTTPRequest.Context(), principal, params.Backend)
	if err != nil {
		s.metricRequestsTotal.logError("", err)
		switch {
		case errors.As(err, &authzerrors.Forbidden{}):
			return backups.NewBackupsRestoreForbidden().
				WithPayload(errPayloadFromSingleErr(err))

		case errors.As(err, &backup.ErrUnprocessable{}):
			return backups.NewBackupsRestoreUnprocessableEntity().
				WithPayload(errPayloadFromSingleErr(err))
		default:
			return backups.NewBackupsRestoreInternalServerError().
				WithPayload(errPayloadFromSingleErr(err))
		}
	}

	s.metricRequestsTotal.logOk("")
	return backups.NewBackupsListOK().WithPayload(*payload)
}

// setupBackupHandlers wires all backup endpoints into the generated API.
func setupBackupHandlers(api *operations.WeaviateAPI,
	scheduler *ubak.Scheduler, metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger,
) {
	h := &backupHandlers{scheduler, newBackupRequestsTotal(metrics, logger), logger}
	api.BackupsBackupsCreateHandler = backups.
		BackupsCreateHandlerFunc(h.createBackup)
	api.BackupsBackupsCreateStatusHandler = backups.
		BackupsCreateStatusHandlerFunc(h.createBackupStatus)
	api.BackupsBackupsRestoreHandler = backups.
		BackupsRestoreHandlerFunc(h.restoreBackup)
	api.BackupsBackupsRestoreStatusHandler = backups.
		BackupsRestoreStatusHandlerFunc(h.restoreBackupStatus)
	api.BackupsBackupsCancelHandler = backups.BackupsCancelHandlerFunc(h.cancel)
	api.BackupsBackupsListHandler = backups.BackupsListHandlerFunc(h.list)
}

// backupRequestsTotal labels the shared requests-total metric for "backup".
type backupRequestsTotal struct {
	*restApiRequestsTotalImpl
}

func newBackupRequestsTotal(metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger) restApiRequestsTotal {
	return &backupRequestsTotal{
		restApiRequestsTotalImpl: &restApiRequestsTotalImpl{newRequestsTotalMetric(metrics, "rest"), "rest", "backup", logger},
	}
}

// logError counts caller-fault errors (forbidden, unprocessable, not-found)
// as user errors; everything else is a server error.
func (e *backupRequestsTotal) logError(className string, err error) {
	switch {
	case errors.As(err, &authzerrors.Forbidden{}):
		e.logUserError(className)
	case errors.As(err, &backup.ErrUnprocessable{}) || errors.As(err, &backup.ErrNotFound{}):
		e.logUserError(className)
	default:
		e.logServerError(className, err)
	}
}

// ---- diff boundary: new file adapters/handlers/rest/handlers_backup_test.go ----
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_backup_test.go @@ -0,0 +1,128 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/models" + ubak "github.com/weaviate/weaviate/usecases/backup" +) + +func TestCompressionBackupCfg(t *testing.T) { + tcs := map[string]struct { + cfg *models.BackupConfig + expectedCompression ubak.CompressionLevel + expectedCPU int + expectedChunkSize int + expectedBucket string + expectedPath string + }{ + "without config": { + cfg: nil, + expectedCompression: ubak.DefaultCompression, + expectedCPU: ubak.DefaultCPUPercentage, + expectedChunkSize: ubak.DefaultChunkSize, + }, + "with config": { + cfg: &models.BackupConfig{ + CPUPercentage: 25, + ChunkSize: 512, + CompressionLevel: models.BackupConfigCompressionLevelBestSpeed, + }, + expectedCompression: ubak.BestSpeed, + expectedCPU: 25, + expectedChunkSize: 512, + }, + "with partial config [CPU]": { + cfg: &models.BackupConfig{ + CPUPercentage: 25, + }, + expectedCompression: ubak.DefaultCompression, + expectedCPU: 25, + expectedChunkSize: ubak.DefaultChunkSize, + }, + "with partial config [ChunkSize]": { + cfg: &models.BackupConfig{ + ChunkSize: 125, + }, + expectedCompression: ubak.DefaultCompression, + expectedCPU: ubak.DefaultCPUPercentage, + expectedChunkSize: 125, + }, + "with partial config [Compression]": { + cfg: &models.BackupConfig{ + CompressionLevel: models.BackupConfigCompressionLevelBestSpeed, + }, + expectedCompression: ubak.BestSpeed, + expectedCPU: ubak.DefaultCPUPercentage, + expectedChunkSize: ubak.DefaultChunkSize, + }, + "with partial config [Bucket]": { + cfg: &models.BackupConfig{ + 
Bucket: "a bucket name", + }, + expectedCompression: ubak.DefaultCompression, + expectedCPU: ubak.DefaultCPUPercentage, + expectedChunkSize: ubak.DefaultChunkSize, + expectedBucket: "a bucket name", + }, + "with partial config [Path]": { + cfg: &models.BackupConfig{ + Path: "a path", + }, + expectedCompression: ubak.DefaultCompression, + expectedCPU: ubak.DefaultCPUPercentage, + expectedChunkSize: ubak.DefaultChunkSize, + expectedPath: "a path", + }, + } + + for n, tc := range tcs { + t.Run(n, func(t *testing.T) { + ccfg := compressionFromBCfg(tc.cfg) + assert.Equal(t, tc.expectedCompression, ccfg.Level) + assert.Equal(t, tc.expectedCPU, ccfg.CPUPercentage) + assert.Equal(t, tc.expectedChunkSize, ccfg.ChunkSize) + }) + } +} + +func TestCompressionRestoreCfg(t *testing.T) { + tcs := map[string]struct { + cfg *models.RestoreConfig + expectedCompression ubak.CompressionLevel + expectedCPU int + expectedChunkSize int + }{ + "without config": { + cfg: nil, + expectedCompression: ubak.DefaultCompression, + expectedCPU: ubak.DefaultCPUPercentage, + expectedChunkSize: ubak.DefaultChunkSize, + }, + "with config": { + cfg: &models.RestoreConfig{ + CPUPercentage: 25, + }, + expectedCPU: 25, + }, + } + + for n, tc := range tcs { + t.Run(n, func(t *testing.T) { + ccfg := compressionFromRCfg(tc.cfg) + assert.Equal(t, tc.expectedCPU, ccfg.CPUPercentage) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_batch_objects.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_batch_objects.go new file mode 100644 index 0000000000000000000000000000000000000000..df13de40eeb0eba74c3f04320c2e25c136ffbad3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_batch_objects.go @@ -0,0 +1,285 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 
- 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "errors" + + middleware "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + + restCtx "github.com/weaviate/weaviate/adapters/handlers/rest/context" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/batch" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/verbosity" + autherrs "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + "github.com/weaviate/weaviate/usecases/monitoring" + "github.com/weaviate/weaviate/usecases/objects" +) + +type batchObjectHandlers struct { + manager *objects.BatchManager + metricRequestsTotal restApiRequestsTotal +} + +func (h *batchObjectHandlers) addObjects(params batch.BatchObjectsCreateParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + repl, err := getReplicationProperties(params.ConsistencyLevel, nil) + if err != nil { + h.metricRequestsTotal.logError("", err) + return batch.NewBatchObjectsCreateBadRequest(). + WithPayload(errPayloadFromSingleErr(err)) + } + + objs, err := h.manager.AddObjects(ctx, principal, + params.Body.Objects, params.Body.Fields, repl) + if err != nil { + h.metricRequestsTotal.logError("", err) + switch { + case errors.As(err, &autherrs.Forbidden{}): + return batch.NewBatchObjectsCreateForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + case errors.As(err, &objects.ErrInvalidUserInput{}): + return batch.NewBatchObjectsCreateUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + case errors.As(err, &objects.ErrMultiTenancy{}): + return batch.NewBatchObjectsCreateUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return batch.NewBatchObjectsCreateInternalServerError(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + } + + h.metricRequestsTotal.logOk("") + return batch.NewBatchObjectsCreateOK(). + WithPayload(h.objectsResponse(objs)) +} + +func (h *batchObjectHandlers) objectsResponse(input objects.BatchObjects) []*models.ObjectsGetResponse { + response := make([]*models.ObjectsGetResponse, len(input)) + for i, object := range input { + var errorResponse *models.ErrorResponse + status := models.ObjectsGetResponseAO2ResultStatusSUCCESS + if object.Err != nil { + errorResponse = errPayloadFromSingleErr(object.Err) + status = models.ObjectsGetResponseAO2ResultStatusFAILED + } + + object.Object.ID = object.UUID + response[i] = &models.ObjectsGetResponse{ + Object: *object.Object, + Result: &models.ObjectsGetResponseAO2Result{ + Errors: errorResponse, + Status: &status, + }, + } + } + + return response +} + +func (h *batchObjectHandlers) addReferences(params batch.BatchReferencesCreateParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + repl, err := getReplicationProperties(params.ConsistencyLevel, nil) + if err != nil { + h.metricRequestsTotal.logError("", err) + return batch.NewBatchReferencesCreateBadRequest(). + WithPayload(errPayloadFromSingleErr(err)) + } + + references, err := h.manager.AddReferences(ctx, principal, params.Body, repl) + if err != nil { + h.metricRequestsTotal.logError("", err) + switch { + case errors.As(err, &autherrs.Forbidden{}): + return batch.NewBatchReferencesCreateForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + case errors.As(err, &objects.ErrInvalidUserInput{}): + return batch.NewBatchReferencesCreateUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + case errors.As(err, &objects.ErrMultiTenancy{}): + return batch.NewBatchReferencesCreateUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return batch.NewBatchReferencesCreateInternalServerError(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + } + + h.metricRequestsTotal.logOk("") + return batch.NewBatchReferencesCreateOK(). + WithPayload(h.referencesResponse(references)) +} + +func (h *batchObjectHandlers) referencesResponse(input objects.BatchReferences) []*models.BatchReferenceResponse { + response := make([]*models.BatchReferenceResponse, len(input)) + for i, ref := range input { + var errorResponse *models.ErrorResponse + var reference models.BatchReference + + status := models.BatchReferenceResponseAO1ResultStatusSUCCESS + if ref.Err != nil { + errorResponse = errPayloadFromSingleErr(ref.Err) + status = models.BatchReferenceResponseAO1ResultStatusFAILED + } else { + reference.From = strfmt.URI(ref.From.String()) + reference.To = strfmt.URI(ref.To.String()) + } + + response[i] = &models.BatchReferenceResponse{ + BatchReference: reference, + Result: &models.BatchReferenceResponseAO1Result{ + Errors: errorResponse, + Status: &status, + }, + } + } + + return response +} + +func (h *batchObjectHandlers) deleteObjects(params batch.BatchObjectsDeleteParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + repl, err := getReplicationProperties(params.ConsistencyLevel, nil) + if err != nil { + h.metricRequestsTotal.logError("", err) + return batch.NewBatchObjectsDeleteBadRequest(). + WithPayload(errPayloadFromSingleErr(err)) + } + + tenant := getTenant(params.Tenant) + + res, err := h.manager.DeleteObjects(ctx, principal, + params.Body.Match, params.Body.DeletionTimeUnixMilli, params.Body.DryRun, params.Body.Output, repl, tenant) + if err != nil { + h.metricRequestsTotal.logError("", err) + if errors.As(err, &objects.ErrInvalidUserInput{}) { + return batch.NewBatchObjectsDeleteUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } else if errors.As(err, &objects.ErrMultiTenancy{}) { + return batch.NewBatchObjectsDeleteUnprocessableEntity(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } else if errors.As(err, &autherrs.Forbidden{}) { + return batch.NewBatchObjectsDeleteForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + } else { + return batch.NewBatchObjectsDeleteInternalServerError(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + h.metricRequestsTotal.logOk("") + return batch.NewBatchObjectsDeleteOK(). + WithPayload(h.objectsDeleteResponse(res)) +} + +func (h *batchObjectHandlers) objectsDeleteResponse(input *objects.BatchDeleteResponse) *models.BatchDeleteResponse { + var successful, failed int64 + output := input.Output + var objects []*models.BatchDeleteResponseResultsObjectsItems0 + for _, obj := range input.Result.Objects { + var errorResponse *models.ErrorResponse + + status := models.BatchDeleteResponseResultsObjectsItems0StatusSUCCESS + if input.DryRun { + status = models.BatchDeleteResponseResultsObjectsItems0StatusDRYRUN + } else if obj.Err != nil { + status = models.BatchDeleteResponseResultsObjectsItems0StatusFAILED + errorResponse = errPayloadFromSingleErr(obj.Err) + failed += 1 + } else { + successful += 1 + } + + if output == verbosity.OutputMinimal && + (status == models.BatchDeleteResponseResultsObjectsItems0StatusSUCCESS || + status == models.BatchDeleteResponseResultsObjectsItems0StatusDRYRUN) { + // only add SUCCESS and DRYRUN results if output is "verbose" + continue + } + + objects = append(objects, &models.BatchDeleteResponseResultsObjectsItems0{ + ID: obj.UUID, + Status: &status, + Errors: errorResponse, + }) + } + + deletionTimeUnixMilli := input.DeletionTime.UnixMilli() + + response := &models.BatchDeleteResponse{ + Match: &models.BatchDeleteResponseMatch{ + Class: input.Match.Class, + Where: input.Match.Where, + }, + DeletionTimeUnixMilli: &deletionTimeUnixMilli, + DryRun: &input.DryRun, + Output: &output, + Results: &models.BatchDeleteResponseResults{ + Matches: input.Result.Matches, + Limit: input.Result.Limit, + Successful: successful, + Failed: 
failed, + Objects: objects, + }, + } + return response +} + +func setupObjectBatchHandlers(api *operations.WeaviateAPI, manager *objects.BatchManager, metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger) { + h := &batchObjectHandlers{manager, newBatchRequestsTotal(metrics, logger)} + + api.BatchBatchObjectsCreateHandler = batch. + BatchObjectsCreateHandlerFunc(h.addObjects) + api.BatchBatchReferencesCreateHandler = batch. + BatchReferencesCreateHandlerFunc(h.addReferences) + api.BatchBatchObjectsDeleteHandler = batch. + BatchObjectsDeleteHandlerFunc(h.deleteObjects) +} + +type batchRequestsTotal struct { + *restApiRequestsTotalImpl +} + +func newBatchRequestsTotal(metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger) restApiRequestsTotal { + return &batchRequestsTotal{ + restApiRequestsTotalImpl: &restApiRequestsTotalImpl{newRequestsTotalMetric(metrics, "rest"), "rest", "batch", logger}, + } +} + +func (e *batchRequestsTotal) logError(className string, err error) { + switch { + case errors.As(err, &errReplication{}): + e.logUserError(className) + case errors.As(err, &autherrs.Forbidden{}), errors.As(err, &objects.ErrInvalidUserInput{}): + e.logUserError(className) + case errors.As(err, &objects.ErrMultiTenancy{}): + e.logUserError(className) + default: + if errors.As(err, &objects.ErrMultiTenancy{}) || + errors.As(err, &objects.ErrInvalidUserInput{}) || + errors.As(err, &autherrs.Forbidden{}) { + e.logUserError(className) + } else { + e.logServerError(className, err) + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_classification.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_classification.go new file mode 100644 index 0000000000000000000000000000000000000000..ef274dedd7d6e3b6052fbc7b63214cef8c13b061 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_classification.go @@ -0,0 +1,91 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// 
//  \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//   \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//    \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package rest

import (
	middleware "github.com/go-openapi/runtime/middleware"
	"github.com/go-openapi/strfmt"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"

	"github.com/weaviate/weaviate/adapters/handlers/rest/operations"
	"github.com/weaviate/weaviate/adapters/handlers/rest/operations/classifications"
	"github.com/weaviate/weaviate/entities/models"
	autherrs "github.com/weaviate/weaviate/usecases/auth/authorization/errors"
	"github.com/weaviate/weaviate/usecases/classification"
	"github.com/weaviate/weaviate/usecases/monitoring"
)

// setupClassificationHandlers wires the classification GET (poll by id) and
// POST (schedule) endpoints to the classifier use case.
func setupClassificationHandlers(api *operations.WeaviateAPI,
	classifier *classification.Classifier, metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger,
) {
	metricRequestsTotal := newClassificationRequestsTotal(metrics, logger)
	api.ClassificationsClassificationsGetHandler = classifications.ClassificationsGetHandlerFunc(
		func(params classifications.ClassificationsGetParams, principal *models.Principal) middleware.Responder {
			res, err := classifier.Get(params.HTTPRequest.Context(), principal, strfmt.UUID(params.ID))
			if err != nil {
				metricRequestsTotal.logError("", err)
				var forbidden autherrs.Forbidden
				switch {
				case errors.As(err, &forbidden):
					return classifications.NewClassificationsGetForbidden().
						WithPayload(errPayloadFromSingleErr(err))
				default:
					// NOTE(review): this GET handler falls back to the POST
					// BadRequest responder — verify whether a
					// ClassificationsGet* error responder exists in the
					// generated operations.
					return classifications.NewClassificationsPostBadRequest().WithPayload(errPayloadFromSingleErr(err))
				}
			}

			// nil result with nil error means the id is unknown
			if res == nil {
				metricRequestsTotal.logUserError("")
				return classifications.NewClassificationsGetNotFound()
			}

			metricRequestsTotal.logOk("")
			return classifications.NewClassificationsGetOK().WithPayload(res)
		},
	)

	api.ClassificationsClassificationsPostHandler = classifications.ClassificationsPostHandlerFunc(
		func(params classifications.ClassificationsPostParams, principal *models.Principal) middleware.Responder {
			res, err := classifier.Schedule(params.HTTPRequest.Context(), principal, *params.Params)
			if err != nil {
				// NOTE(review): every scheduling failure is counted as a
				// user error here (unlike the GET handler, which uses
				// logError) — confirm this asymmetry is intentional.
				metricRequestsTotal.logUserError("")

				var forbidden autherrs.Forbidden
				switch {
				case errors.As(err, &forbidden):
					return classifications.NewClassificationsPostForbidden().
						WithPayload(errPayloadFromSingleErr(err))
				default:
					return classifications.NewClassificationsPostBadRequest().WithPayload(errPayloadFromSingleErr(err))
				}
			}

			metricRequestsTotal.logOk("")
			return classifications.NewClassificationsPostCreated().WithPayload(res)
		},
	)
}

// classificationRequestsTotal labels the shared requests-total metric for
// the "classification" API group.
type classificationRequestsTotal struct {
	*restApiRequestsTotalImpl
}

func newClassificationRequestsTotal(metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger) restApiRequestsTotal {
	return &classificationRequestsTotal{
		restApiRequestsTotalImpl: &restApiRequestsTotalImpl{newRequestsTotalMetric(metrics, "rest"), "rest", "classification", logger},
	}
}

// logError treats every classified error as a server error; user-error
// accounting is done inline by the handlers above.
func (e *classificationRequestsTotal) logError(className string, err error) {
	e.logServerError(className, err)
}

// ---- diff boundary: new file adapters/handlers/rest/handlers_debug.go ----
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_debug.go @@ -0,0 +1,724 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "slices" + "strings" + "time" + + "github.com/weaviate/weaviate/adapters/handlers/rest/state" + "github.com/weaviate/weaviate/adapters/repos/db" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw" + "github.com/weaviate/weaviate/entities/config" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +func setupDebugHandlers(appState *state.State) { + logger := appState.Logger.WithField("handler", "debug") + + http.HandleFunc("/debug/index/rebuild/inverted", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + colName := r.URL.Query().Get("collection") + if colName == "" { + http.Error(w, "collection is required", http.StatusBadRequest) + return + } + propertyNamesStr := r.URL.Query().Get("propertyNames") + if propertyNamesStr == "" { + http.Error(w, "propertyNames is required", http.StatusBadRequest) + return + } + propertyNames := strings.Split(propertyNamesStr, ",") + if len(propertyNames) == 0 { + http.Error(w, "propertyNames len > 0 is required", http.StatusBadRequest) + return + } + timeoutStr := r.URL.Query().Get("timeout") + timeoutDuration := time.Hour + var err error + if timeoutStr != "" { + timeoutDuration, err = time.ParseDuration(timeoutStr) + if err != nil { + http.Error(w, "timeout duration has invalid format", http.StatusBadRequest) + return + } + } + ctx, cancel := context.WithTimeout(context.Background(), timeoutDuration) + defer cancel() + + err = appState.Migrator.InvertedReindex(ctx, map[string]any{ + 
"ShardInvertedReindexTask_SpecifiedIndex": map[string][]string{colName: propertyNames}, + }) + if err != nil { + logger.WithError(err).Error("failed to rebuild inverted index") + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusAccepted) + })) + + http.HandleFunc("/debug/index/rebuild/inverted/cancelReindexContext", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + appState.ReindexCtxCancel(fmt.Errorf("cancelReindexContext endpoint")) + w.WriteHeader(http.StatusAccepted) + })) + + http.HandleFunc("/debug/index/rebuild/inverted/suspend", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + changeFile("paused.mig", false, nil, logger, appState, r, w) + })) + + http.HandleFunc("/debug/index/rebuild/inverted/resume", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + changeFile("paused.mig", true, nil, logger, appState, r, w) + })) + + http.HandleFunc("/debug/index/rebuild/inverted/rollback", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + changeFile("rollback.mig", false, nil, logger, appState, r, w) + })) + + http.HandleFunc("/debug/index/rebuild/inverted/unrollback", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + changeFile("rollback.mig", true, nil, logger, appState, r, w) + })) + + http.HandleFunc("/debug/index/rebuild/inverted/start", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + changeFile("start.mig", false, nil, logger, appState, r, w) + })) + + http.HandleFunc("/debug/index/rebuild/inverted/unstart", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + changeFile("start.mig", true, nil, logger, appState, r, w) + })) + + http.HandleFunc("/debug/index/rebuild/inverted/reset", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + changeFile("reset.mig", false, nil, logger, appState, r, w) + })) + + http.HandleFunc("/debug/index/rebuild/inverted/unreset", http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + changeFile("reset.mig", true, nil, logger, appState, r, w) + })) + + http.HandleFunc("/debug/index/rebuild/inverted/setProperties", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + propertiesToMigrateString := strings.TrimSpace(r.URL.Query().Get("properties")) + + if propertiesToMigrateString == "" { + http.Error(w, "properties is required", http.StatusBadRequest) + return + } + propertiesToMigrate := strings.Split(propertiesToMigrateString, ",") + props := []byte(strings.Join(propertiesToMigrate, ",")) + + changeFile("properties.mig", false, props, logger, appState, r, w) + })) + + http.HandleFunc("/debug/index/rebuild/inverted/reload", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + colName := r.URL.Query().Get("collection") + + if colName == "" { + http.Error(w, "collection name is required", http.StatusBadRequest) + return + } + + shardsToMigrateString := strings.TrimSpace(r.URL.Query().Get("shards")) + + shardsToMigrate := []string{} + if shardsToMigrateString != "" { + shardsToMigrate = strings.Split(shardsToMigrateString, ",") + } + + className := schema.ClassName(colName) + idx := appState.DB.GetIndex(className) + + if idx == nil { + logger.WithField("collection", colName).Error("collection not found or not ready") + http.Error(w, "collection not found or not ready", http.StatusNotFound) + return + } + + output := make(map[string]map[string]string) + // shards will not be force loaded, as we are only getting the name + err := idx.ForEachShard( + func(shardName string, shard db.ShardLike) error { + if len(shardsToMigrate) == 0 || slices.Contains(shardsToMigrate, shardName) { + err := idx.IncomingReinitShard( + context.Background(), + shardName, + ) + if err != nil { + logger.WithField("shard", shardName).Error("failed to reinit shard " + err.Error()) + output[shardName] = map[string]string{ + "shard": shardName, + "shardStatus": shard.GetStatus().String(), + "status": "error", + 
"message": "failed to reinit shard", + "error": err.Error(), + } + return nil + } + output[shardName] = map[string]string{ + "shard": shardName, + "shardStatus": shard.GetStatus().String(), + "status": "reinit", + "message": "reinit shard started", + } + } else { + output[shardName] = map[string]string{ + "shard": shardName, + "shardStatus": shard.GetStatus().String(), + "status": "skipped", + "message": fmt.Sprintf("shard %s not selected", shardName), + } + } + return nil + }, + ) + + response := map[string]interface{}{ + "shards": output, + } + + if err != nil { + logger.WithField("collection", colName).Error("failed to get shard names") + http.Error(w, "failed to get shard names", http.StatusInternalServerError) + response["error"] = "failed to get shard names: " + err.Error() + } + + jsonBytes, err := json.Marshal(response) + if err != nil { + logger.WithError(err).Error("marshal failed on stats") + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(jsonBytes) + })) + + http.HandleFunc("/debug/index/rebuild/inverted/status", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + response := map[string]interface{}{ + "BlockMax WAND": "unknown", + "status": "unknown", + } + output := make(map[string]map[string]interface{}) + func() { + rootPath := appState.DB.GetConfig().RootPath + colName := r.URL.Query().Get("collection") + className := schema.ClassName(colName) + classNameString, _, idx, err := parseIndexAndShards(appState, r) + if err != nil { + logger.WithError(err).Error("failed to parse index and shards") + response["status"] = "error" + response["error"] = err.Error() + return + } + + response["BlockMax WAND"] = "not_ready" + blockMaxEnabled := idx.GetInvertedIndexConfig().UsingBlockMaxWAND + if blockMaxEnabled { + response["BlockMax WAND"] = "enabled" + } + // shard map: shardName -> shardPath + paths := make(map[string]string) + + // shards will not be force loaded, as 
we are only getting the name + err = idx.ForEachShard( + func(shardName string, shard db.ShardLike) error { + shardPath := rootPath + "/" + classNameString + "/" + shardName + "/lsm/" + paths[shardName] = shardPath + output[shardName] = map[string]interface{}{ + "shardStatus": shard.GetStatus().String(), + "status": "unknown", + } + return nil + }, + ) + if err != nil { + logger.WithField("collection", colName).Error("failed to get shard names") + response["status"] = "error" + response["error"] = err.Error() + return + } + + // tenant map: tenantName -> *models.TenantResponse + tenantMap := make(map[string]*models.Tenant) + + info := appState.SchemaManager.SchemaReader.ClassInfo(className.String()) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if info.MultiTenancy.Enabled { + + tenantResponses, err := appState.SchemaManager.GetConsistentTenants(ctx, nil, colName, true, []string{}) + if err != nil { + logger.WithField("collection", colName).Error("failed to get tenant responses") + response["status"] = "error" + response["error"] = err.Error() + return + } + + for _, tenant := range tenantResponses { + tenantMap[tenant.Name] = tenant + } + } else { + for name := range paths { + tenantMap[name] = &models.Tenant{ + Name: name, + } + } + } + + if len(tenantMap) == 0 { + logger.WithField("collection", colName).Error("no tenants found") + response["status"] = "error" + response["error"] = "no tenants found" + return + } + + for i, tenant := range tenantMap { + path := paths[tenant.Name] + + if path == "" { + output[i]["status"] = "shard_not_loaded" + output[i]["message"] = "shard not loaded" + output[i]["action"] = "load shard or activate tenant" + continue + } + // check if the shard directory exists + _, err := os.Stat(path) + if err != nil { + output[i]["status"] = "shard_not_loaded" + output[i]["message"] = "shard directory does not exist" + output[i]["action"] = "load shard or activate tenant" + continue + } + + // 
check if a .migrations/searchable_map_to_blockmax exists + _, err = os.Stat(path + ".migrations/searchable_map_to_blockmax") + if err != nil { + output[i]["status"] = "not_started" + output[i]["message"] = "no searchable_map_to_blockmax folder found" + output[i]["action"] = "enable relevant REINDEX_MAP_TO_BLOCKMAX_* env vars" + continue + } + + keyParser := &db.UuidKeyParser{} + rt := db.NewFileMapToBlockmaxReindexTracker(path, keyParser) + + status, message, action := rt.GetStatusStrings() + + if appState.ServerConfig.Config.ReindexMapToBlockmaxConfig.ConditionalStart && !rt.HasStartCondition() { + message = "reindexing not started, no start condition file found" + status = "not_started" + action = "call /start?collection=<> endpoint to start reindexing" + } + + output[i]["status"] = status + output[i]["message"] = message + output[i]["action"] = action + + properties, _ := rt.GetProps() + if properties != nil { + output[i]["properties"] = properties + } + output[i]["times"] = rt.GetTimes() + + migratedCount, snapshots, _ := rt.GetMigratedCount() + + output[i]["migratedCount"] = fmt.Sprintf("%d", migratedCount) + output[i]["snapshotCount"] = fmt.Sprintf("%d", len(snapshots)) + output[i]["snapshots"] = snapshots + + } + response["status"] = "success" + }() + + response["shards"] = output + jsonBytes, err := json.Marshal(response) + if err != nil { + logger.WithError(err).Error("marshal failed on stats") + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(jsonBytes) + })) + + http.HandleFunc("/debug/index/rebuild/inverted/overrides", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + response := map[string]interface{}{} + + func() { + classNameString, shardsToMigrate, idx, err := parseIndexAndShards(appState, r) + if err != nil { + logger.WithError(err).Error("failed to parse index and shards") + response["error"] = err.Error() + return + } + + rootPath := 
appState.DB.GetConfig().RootPath + err = idx.ForEachShard( + func(shardName string, shard db.ShardLike) error { + if len(shardsToMigrate) == 0 || slices.Contains(shardsToMigrate, shardName) { + shardPath := rootPath + "/" + classNameString + "/" + shardName + "/lsm/" + _, err := os.Stat(shardPath + ".migrations/searchable_map_to_blockmax") + if err != nil { + return fmt.Errorf("shard not found or not ready") + } + filename := "overrides.mig" + _, err = os.Stat(shardPath + ".migrations/searchable_map_to_blockmax/" + filename) + if err != nil { + response[shardName] = map[string]string{ + "status": "not found", + "overrides": "no overrides.mig file found", + } + return nil + } + // read the overrides.mig file + file, err := os.ReadFile(shardPath + ".migrations/searchable_map_to_blockmax/" + filename) + if err != nil { + return fmt.Errorf("failed to read %s in shard %s: %w", filename, shardName, err) + } + // parse the file content + lines := strings.Split(string(file), "\n") + overrides := make(map[string]string) + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + continue // skip empty lines and comments + } + parts := strings.SplitN(line, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid override format in %s: %s", filename, line) + } + key := strings.TrimSpace(parts[0]) + value := strings.TrimSpace(parts[1]) + overrides[key] = value + } + + response[shardName] = map[string]interface{}{ + "status": "success", + "overrides": overrides, + } + } else { + response[shardName] = map[string]string{ + "status": "skipped", + "message": fmt.Sprintf("shard %s not selected", shardName), + } + } + return nil + }, + ) + if err != nil { + logger.WithField("collection", classNameString).WithField("shards", shardsToMigrate).WithError(err).Error("failed to iterate over shards") + response["error"] = err.Error() + return + } + }() + + jsonBytes, err := json.Marshal(response) + if err != nil { + 
logger.WithError(err).Error("marshal failed on stats") + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(jsonBytes) + })) + + http.HandleFunc("/debug/index/rebuild/inverted/set_overrides", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + response := map[string]interface{}{} + + clear := config.Enabled(r.URL.Query().Get("clear")) + func() { + classNameString, shardsToMigrate, idx, err := parseIndexAndShards(appState, r) + if err != nil { + logger.WithError(err).Error("failed to parse index and shards") + response["error"] = err.Error() + return + } + + rootPath := appState.DB.GetConfig().RootPath + err = idx.ForEachShard( + func(shardName string, shard db.ShardLike) error { + if len(shardsToMigrate) == 0 || slices.Contains(shardsToMigrate, shardName) { + shardPath := rootPath + "/" + classNameString + "/" + shardName + "/lsm/" + _, err := os.Stat(shardPath + ".migrations/searchable_map_to_blockmax") + if err != nil { + return fmt.Errorf("shard not found or not ready") + } + filename := "overrides.mig" + // open file for writing + filePath := shardPath + ".migrations/searchable_map_to_blockmax/" + filename + if clear { + err := os.Remove(filePath) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to clear %s in shard %s: %w", filename, shardName, err) + } + } + file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o644) + if err != nil { + return fmt.Errorf("failed to open %s in shard %s: %w", filename, shardName, err) + } + defer file.Close() + // get overrides from query params + overridesURL := r.URL.Query() + overrides := make(map[string][]string) + for key, values := range overridesURL { + if key == "clear" || key == "collection" || key == "shards" { + continue + } + for _, value := range values { + overrides[key] = append(overrides[key], value) + _, err := file.WriteString(fmt.Sprintf("%s=%s\n", key, value)) + if err != nil { + 
return fmt.Errorf("failed to write to %s in shard %s: %w", filename, shardName, err) + } + } + } + + response[shardName] = map[string]interface{}{ + "status": "success", + "wrote": overrides, + } + } else { + response[shardName] = map[string]string{ + "status": "skipped", + "message": fmt.Sprintf("shard %s not selected", shardName), + } + } + return nil + }, + ) + if err != nil { + logger.WithField("collection", classNameString).WithField("shards", shardsToMigrate).WithError(err).Error("failed to iterate over shards") + response["error"] = err.Error() + return + } + }() + + jsonBytes, err := json.Marshal(response) + if err != nil { + logger.WithError(err).Error("marshal failed on stats") + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(jsonBytes) + })) + + // newLogLevel can be one of: panic, fatal, error, warn, info, debug, trace (defaults to info) + http.HandleFunc("/debug/config/logger/level", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + newLogLevel := r.URL.Query().Get("newLogLevel") + if newLogLevel == "" { + http.Error(w, "newLogLevel is required", http.StatusBadRequest) + return + } + level, err := logLevelFromString(newLogLevel) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + appState.Logger.SetLevel(level) + w.WriteHeader(http.StatusOK) + })) + + http.HandleFunc("/debug/index/rebuild/vector", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !config.Enabled(os.Getenv("ASYNC_INDEXING")) { + http.Error(w, "async indexing is not enabled", http.StatusNotImplemented) + return + } + + colName := r.URL.Query().Get("collection") + shardName := r.URL.Query().Get("shard") + targetVector := r.URL.Query().Get("vector") + + if colName == "" || shardName == "" { + http.Error(w, "collection and shard are required", http.StatusBadRequest) + return + } + + idx := appState.DB.GetIndex(schema.ClassName(colName)) + if 
idx == nil { + logger.WithField("collection", colName).Error("collection not found") + http.Error(w, "collection not found", http.StatusNotFound) + return + } + + err := idx.DebugResetVectorIndex(context.Background(), shardName, targetVector) + if err != nil { + logger. + WithField("shard", shardName). + WithField("targetVector", targetVector). + WithError(err). + Error("failed to reset vector index") + if errTxt := err.Error(); strings.Contains(errTxt, "not found") { + http.Error(w, "shard not found", http.StatusNotFound) + } + + http.Error(w, "failed to reset vector index", http.StatusInternalServerError) + return + } + + logger.WithField("shard", shardName).Info("reindexing started") + + w.WriteHeader(http.StatusAccepted) + })) + + http.HandleFunc("/debug/index/repair/vector", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !config.Enabled(os.Getenv("ASYNC_INDEXING")) { + http.Error(w, "async indexing is not enabled", http.StatusNotImplemented) + return + } + + colName := r.URL.Query().Get("collection") + shardName := r.URL.Query().Get("shard") + targetVector := r.URL.Query().Get("vector") + + if colName == "" || shardName == "" { + http.Error(w, "collection and shard are required", http.StatusBadRequest) + return + } + + idx := appState.DB.GetIndex(schema.ClassName(colName)) + if idx == nil { + logger.WithField("collection", colName).Error("collection not found") + http.Error(w, "collection not found", http.StatusNotFound) + return + } + + err := idx.DebugRepairIndex(context.Background(), shardName, targetVector) + if err != nil { + logger. + WithField("shard", shardName). + WithField("targetVector", targetVector). + WithError(err). + Error("failed to repair vector index") + if errTxt := err.Error(); strings.Contains(errTxt, "not found") { + http.Error(w, "shard not found", http.StatusNotFound) + } + + http.Error(w, "failed to repair vector index", http.StatusInternalServerError) + return + } + + logger. + WithField("shard", shardName). 
+ WithField("targetVector", targetVector). + Info("repair started") + + w.WriteHeader(http.StatusAccepted) + })) + + http.HandleFunc("/debug/stats/collection/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + path := strings.TrimSpace(strings.TrimPrefix(r.URL.Path, "/debug/stats/collection/")) + parts := strings.Split(path, "/") + if len(parts) < 3 || len(parts) > 5 || parts[1] != "shards" { + logger.WithField("parts", parts).Info("invalid path") + http.Error(w, "invalid path", http.StatusNotFound) + return + } + + colName, shardName := parts[0], parts[2] + var targetVector string + if len(parts) == 4 { + targetVector = parts[3] + } + + idx := appState.DB.GetIndex(schema.ClassName(colName)) + if idx == nil { + logger.WithField("collection", colName).Error("collection not found") + http.Error(w, "collection not found", http.StatusNotFound) + return + } + + shard, release, err := idx.GetShard(context.Background(), shardName) + if err != nil { + logger.WithField("shard", shardName).Error(err) + http.Error(w, err.Error(), http.StatusNotFound) + return + } + if shard == nil { + logger.WithField("shard", shardName).Error("shard not found") + http.Error(w, "shard not found", http.StatusNotFound) + return + } + defer release() + + vidx, ok := shard.GetVectorIndex(targetVector) + if !ok { + logger.WithField("shard", shardName).Error("vector index not found") + http.Error(w, "vector index not found", http.StatusNotFound) + return + } + + h, ok := vidx.(hnswStats) + if !ok { + w.WriteHeader(http.StatusBadRequest) + return + } + + stats, err := h.Stats() + if err != nil { + logger.Error(err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + jsonBytes, err := json.Marshal(stats) + if err != nil { + logger.WithError(err).Error("marshal failed on stats") + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + logger.Info("Stats on HNSW started") + + w.WriteHeader(http.StatusOK) + w.Write(jsonBytes) + })) + + // Call via something 
like: curl -X GET localhost:6060/debug/config/maintenance_mode (can replace GET w/ POST or DELETE) + // The port is Weaviate's configured Go profiling port (defaults to 6060) + http.HandleFunc("/debug/config/maintenance_mode", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var bytesToWrite []byte = nil + switch r.Method { + case http.MethodGet: + jsonBytes, err := json.Marshal(MaintenanceMode{Enabled: appState.Cluster.MaintenanceModeEnabledForLocalhost()}) + if err != nil { + logger.WithError(err).Error("marshal failed on stats") + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + bytesToWrite = jsonBytes + case http.MethodPost: + appState.Cluster.SetMaintenanceModeForLocalhost(true) + case http.MethodDelete: + appState.Cluster.SetMaintenanceModeForLocalhost(false) + default: + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + } + w.WriteHeader(http.StatusOK) + if bytesToWrite != nil { + w.Write(bytesToWrite) + } + })) +} + +type MaintenanceMode struct { + Enabled bool `json:"enabled"` +} + +type hnswStats interface { + Stats() (*hnsw.HnswStats, error) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_debug_bmw_aux.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_debug_bmw_aux.go new file mode 100644 index 0000000000000000000000000000000000000000..f2ee48d370e2abd1d0b7f545b810ef710cb07da6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_debug_bmw_aux.go @@ -0,0 +1,154 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "encoding/json" + "fmt" + "net/http" + "os" + "path/filepath" + "slices" + "strings" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/handlers/rest/state" + "github.com/weaviate/weaviate/adapters/repos/db" + "github.com/weaviate/weaviate/entities/schema" +) + +func parseIndexAndShards(appState *state.State, r *http.Request) (string, []string, *db.Index, error) { + colName := strings.TrimSpace(r.URL.Query().Get("collection")) + if colName == "" { + return "", nil, nil, fmt.Errorf("collection is required") + } + + shardsToMigrateString := strings.TrimSpace(r.URL.Query().Get("shards")) + + shardsToMigrate := []string{} + if shardsToMigrateString != "" { + shardsToMigrate = strings.Split(shardsToMigrateString, ",") + } + + className := schema.ClassName(colName) + classNameString := strings.ToLower(className.String()) + idx := appState.DB.GetIndex(className) + + if idx == nil { + return "", nil, nil, fmt.Errorf("collection not found or not ready") + } + + return classNameString, shardsToMigrate, idx, nil +} + +func changeFile(filename string, delete bool, content []byte, logger *logrus.Entry, appState *state.State, r *http.Request, w http.ResponseWriter) { + response := map[string]interface{}{} + + func() { + classNameString, shardsToMigrate, idx, err := parseIndexAndShards(appState, r) + if err != nil { + logger.WithError(err).Error("failed to parse index and shards") + response["error"] = err.Error() + return + } + + rootPath := appState.DB.GetConfig().RootPath + err = idx.ForEachShard( + func(shardName string, shard db.ShardLike) error { + alreadyDid := false + if len(shardsToMigrate) == 0 || slices.Contains(shardsToMigrate, shardName) { + shardPath := filepath.Join(rootPath, classNameString, shardName, "lsm", ".migrations", "searchable_map_to_blockmax") + filenameShard := filepath.Join(shardPath, filename) + _, err := os.Stat(shardPath) + if err != nil { + return 
fmt.Errorf("shard not found or not ready") + } + if delete { + err = os.Remove(filenameShard) + if os.IsNotExist(err) { + alreadyDid = true + } else if err != nil { + return fmt.Errorf("failed to delete %s: %w", filenameShard, err) + } + } else { + // check if the file already exists + _, err = os.Stat(filenameShard) + if err == nil { + alreadyDid = true + } else { + file, err := os.Create(filenameShard) + if os.IsExist(err) { + alreadyDid = true + } else if err != nil { + return fmt.Errorf("failed to create %s: %w", filenameShard, err) + } + file.Close() + } + + if content != nil { + file, err := os.Create(filenameShard) + if err != nil { + return fmt.Errorf("failed to create %s: %w", filenameShard, err) + } + _, err = file.Write(content) + if err != nil { + return fmt.Errorf("failed to write to %s: %w", filenameShard, err) + } + file.Close() + } + } + response[shardName] = map[string]string{ + "status": "success", + "message": fmt.Sprintf("file %s %s in shard %s", filenameShard, + func() string { + if delete { + if alreadyDid { + return "already deleted" + } + return "deleted" + } else { + if alreadyDid { + return "already created" + } else if content != nil { + return "updated" + } + } + return "created" + }(), shardName), + } + } else { + response[shardName] = map[string]string{ + "status": "skipped", + "message": fmt.Sprintf("shard %s not selected", shardName), + } + } + + return nil + }, + ) + if err != nil { + logger.WithField("collection", classNameString).WithField("shards", shardsToMigrate).WithError(err).Error("failed to iterate over shards") + response["error"] = err.Error() + return + } + }() + + jsonBytes, err := json.Marshal(response) + if err != nil { + logger.WithError(err).Error("marshal failed on stats") + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(jsonBytes) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_distributed_tasks.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_distributed_tasks.go new file mode 100644 index 0000000000000000000000000000000000000000..1229987b070a391c59ba921fe57c4e6bd4a87268 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_distributed_tasks.go @@ -0,0 +1,52 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "errors" + + "github.com/go-openapi/runtime/middleware" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/distributed_tasks" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/nodes" + "github.com/weaviate/weaviate/cluster/distributedtask" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + autherrs "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + + distributedtaskUC "github.com/weaviate/weaviate/usecases/distributedtask" +) + +func setupDistributedTasksHandlers(api *operations.WeaviateAPI, authorizer authorization.Authorizer, tasksLister distributedtask.TasksLister) { + h := distributedTasksHandlers{ + handler: distributedtaskUC.NewHandler(authorizer, tasksLister), + } + + api.DistributedTasksDistributedTasksGetHandler = distributed_tasks.DistributedTasksGetHandlerFunc(h.getTasks) +} + +type distributedTasksHandlers struct { + handler *distributedtaskUC.Handler +} + +func (h *distributedTasksHandlers) getTasks(params distributed_tasks.DistributedTasksGetParams, principal *models.Principal) middleware.Responder { + tasks, err := h.handler.ListTasks(params.HTTPRequest.Context(), principal) + if err != nil { + if errors.As(err, &autherrs.Forbidden{}) { + return 
distributed_tasks.NewDistributedTasksGetForbidden() + } + return nodes.NewNodesGetClassInternalServerError(). + WithPayload(errPayloadFromSingleErr(err)) + } + + return distributed_tasks.NewDistributedTasksGetOK().WithPayload(tasks) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_graphql.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_graphql.go new file mode 100644 index 0000000000000000000000000000000000000000..aa3cccb731ff46a057784b1f0c0736e1d2571732 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_graphql.go @@ -0,0 +1,424 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + "sync" + + middleware "github.com/go-openapi/runtime/middleware" + "github.com/sirupsen/logrus" + tailorincgraphql "github.com/tailor-inc/graphql" + "github.com/tailor-inc/graphql/gqlerrors" + + libgraphql "github.com/weaviate/weaviate/adapters/handlers/graphql" + restCtx "github.com/weaviate/weaviate/adapters/handlers/rest/context" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/graphql" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + authzerrors "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + "github.com/weaviate/weaviate/usecases/monitoring" + "github.com/weaviate/weaviate/usecases/schema" +) + +const error422 string = "The request is well-formed but was unable to be followed due to semantic errors." 
+ +type gqlUnbatchedRequestResponse struct { + RequestIndex int + Response *models.GraphQLResponse +} + +type graphQLProvider interface { + GetGraphQL() libgraphql.GraphQL +} + +func setupGraphQLHandlers( + api *operations.WeaviateAPI, + gqlProvider graphQLProvider, + m *schema.Manager, + disabled bool, + metrics *monitoring.PrometheusMetrics, + logger logrus.FieldLogger, +) { + metricRequestsTotal := newGraphqlRequestsTotal(metrics, logger) + api.GraphqlGraphqlPostHandler = graphql.GraphqlPostHandlerFunc(func(params graphql.GraphqlPostParams, principal *models.Principal) middleware.Responder { + // All requests to the graphQL API need at least permissions to read the schema. Request might have further + // authorization requirements. + err := m.Authorizer.Authorize(params.HTTPRequest.Context(), principal, authorization.READ, authorization.CollectionsMetadata()...) + if err != nil { + metricRequestsTotal.logUserError() + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return graphql.NewGraphqlPostForbidden(). + WithPayload(errPayloadFromSingleErr( + fmt.Errorf("due to GraphQL introspection, this role must have the permission to `read_collections` on `*` (all) collections: %w", err), + )) + default: + return graphql.NewGraphqlPostUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + if disabled { + metricRequestsTotal.logUserError() + err := fmt.Errorf("graphql api is disabled") + return graphql.NewGraphqlPostUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } + + errorResponse := &models.ErrorResponse{} + + // Get all input from the body of the request, as it is a POST. 
+ query := params.Body.Query + operationName := params.Body.OperationName + + // If query is empty, the request is unprocessable + if query == "" { + metricRequestsTotal.logUserError() + errorResponse.Error = []*models.ErrorResponseErrorItems0{ + { + Message: "query cannot be empty", + }, + } + return graphql.NewGraphqlPostUnprocessableEntity().WithPayload(errorResponse) + } + + // Only set variables if exists in request + var variables map[string]interface{} + if params.Body.Variables != nil { + variables = params.Body.Variables.(map[string]interface{}) + } + + graphQL := gqlProvider.GetGraphQL() + if graphQL == nil { + metricRequestsTotal.logUserError() + errorResponse.Error = []*models.ErrorResponseErrorItems0{ + { + Message: "no graphql provider present, this is most likely because no schema is present. Import a schema first!", + }, + } + return graphql.NewGraphqlPostUnprocessableEntity().WithPayload(errorResponse) + } + + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + + result := graphQL.Resolve(ctx, query, + operationName, variables) + + // Marshal the JSON + resultJSON, jsonErr := json.Marshal(result) + if jsonErr != nil { + metricRequestsTotal.logUserError() + errorResponse.Error = []*models.ErrorResponseErrorItems0{ + { + Message: fmt.Sprintf("couldn't marshal json: %s", jsonErr), + }, + } + return graphql.NewGraphqlPostUnprocessableEntity().WithPayload(errorResponse) + } + + // Put the data in a response ready object + graphQLResponse := &models.GraphQLResponse{} + marshallErr := json.Unmarshal(resultJSON, graphQLResponse) + + // If json gave error, return nothing. 
+ if marshallErr != nil { + metricRequestsTotal.logUserError() + errorResponse.Error = []*models.ErrorResponseErrorItems0{ + { + Message: fmt.Sprintf("couldn't unmarshal json: %s\noriginal result was %#v", marshallErr, result), + }, + } + return graphql.NewGraphqlPostUnprocessableEntity().WithPayload(errorResponse) + } + + metricRequestsTotal.log(result) + // Return the response + return graphql.NewGraphqlPostOK().WithPayload(graphQLResponse) + }) + + api.GraphqlGraphqlBatchHandler = graphql.GraphqlBatchHandlerFunc(func(params graphql.GraphqlBatchParams, principal *models.Principal) middleware.Responder { + // this is barely used (if at all) - so require read access to all collections for data and metadata + err := m.Authorizer.Authorize(params.HTTPRequest.Context(), principal, authorization.READ, authorization.Collections()...) + if err != nil { + return graphql.NewGraphqlBatchForbidden().WithPayload(errPayloadFromSingleErr(err)) + } + + errorResponse := &models.ErrorResponse{} + amountOfBatchedRequests := len(params.Body) + if amountOfBatchedRequests == 0 { + metricRequestsTotal.logUserError() + return graphql.NewGraphqlBatchUnprocessableEntity().WithPayload(errorResponse) + } + requestResults := make(chan gqlUnbatchedRequestResponse, amountOfBatchedRequests) + + wg := new(sync.WaitGroup) + + ctx := params.HTTPRequest.Context() + ctx = context.WithValue(ctx, "principal", principal) + + graphQL := gqlProvider.GetGraphQL() + if graphQL == nil { + metricRequestsTotal.logUserError() + errRes := errPayloadFromSingleErr(fmt.Errorf("no graphql provider present, " + + "this is most likely because no schema is present. 
Import a schema first")) + return graphql.NewGraphqlBatchUnprocessableEntity().WithPayload(errRes) + } + + // Generate a goroutine for each separate request + for requestIndex, unbatchedRequest := range params.Body { + requestIndex, unbatchedRequest := requestIndex, unbatchedRequest + wg.Add(1) + enterrors.GoWrapper(func() { + handleUnbatchedGraphQLRequest(ctx, wg, graphQL, unbatchedRequest, requestIndex, &requestResults, metricRequestsTotal) + }, logger) + } + + wg.Wait() + + close(requestResults) + + batchedRequestResponse := make([]*models.GraphQLResponse, amountOfBatchedRequests) + + // Add the requests to the result array in the correct order + for unbatchedRequestResult := range requestResults { + batchedRequestResponse[unbatchedRequestResult.RequestIndex] = unbatchedRequestResult.Response + } + + return graphql.NewGraphqlBatchOK().WithPayload(batchedRequestResponse) + }) +} + +// Handle a single unbatched GraphQL request, return a tuple containing the index of the request in the batch and either the response or an error +func handleUnbatchedGraphQLRequest(ctx context.Context, wg *sync.WaitGroup, graphQL libgraphql.GraphQL, unbatchedRequest *models.GraphQLQuery, requestIndex int, requestResults *chan gqlUnbatchedRequestResponse, metricRequestsTotal *graphqlRequestsTotal) { + defer wg.Done() + + // Get all input from the body of the request + query := unbatchedRequest.Query + operationName := unbatchedRequest.OperationName + graphQLResponse := &models.GraphQLResponse{} + + // Return an unprocessable error if the query is empty + if query == "" { + metricRequestsTotal.logUserError() + // Regular error messages are returned as an error code in the request header, but that doesn't work for batched requests + errorCode := strconv.Itoa(graphql.GraphqlBatchUnprocessableEntityCode) + errorMessage := fmt.Sprintf("%s: %s", errorCode, error422) + errors := []*models.GraphQLError{{Message: errorMessage}} + graphQLResponse := models.GraphQLResponse{Data: nil, Errors: 
errors} + *requestResults <- gqlUnbatchedRequestResponse{ + requestIndex, + &graphQLResponse, + } + } else { + // Extract any variables from the request + var variables map[string]interface{} + if unbatchedRequest.Variables != nil { + var ok bool + variables, ok = unbatchedRequest.Variables.(map[string]interface{}) + if !ok { + errorCode := strconv.Itoa(graphql.GraphqlBatchUnprocessableEntityCode) + errorMessage := fmt.Sprintf("%s: %s", errorCode, fmt.Sprintf("expected map[string]interface{}, received %v", unbatchedRequest.Variables)) + + error := []*models.GraphQLError{{Message: errorMessage}} + graphQLResponse := models.GraphQLResponse{Data: nil, Errors: error} + *requestResults <- gqlUnbatchedRequestResponse{ + requestIndex, + &graphQLResponse, + } + return + } + } + + result := graphQL.Resolve(ctx, query, operationName, variables) + + // Marshal the JSON + resultJSON, jsonErr := json.Marshal(result) + + // Return an unprocessable error if marshalling the result to JSON failed + if jsonErr != nil { + metricRequestsTotal.logUserError() + // Regular error messages are returned as an error code in the request header, but that doesn't work for batched requests + errorCode := strconv.Itoa(graphql.GraphqlBatchUnprocessableEntityCode) + errorMessage := fmt.Sprintf("%s: %s", errorCode, error422) + errors := []*models.GraphQLError{{Message: errorMessage}} + graphQLResponse := models.GraphQLResponse{Data: nil, Errors: errors} + *requestResults <- gqlUnbatchedRequestResponse{ + requestIndex, + &graphQLResponse, + } + } else { + // Put the result data in a response ready object + marshallErr := json.Unmarshal(resultJSON, graphQLResponse) + + // Return an unprocessable error if unmarshalling the result to JSON failed + if marshallErr != nil { + metricRequestsTotal.logUserError() + // Regular error messages are returned as an error code in the request header, but that doesn't work for batched requests + errorCode := strconv.Itoa(graphql.GraphqlBatchUnprocessableEntityCode) + 
errorMessage := fmt.Sprintf("%s: %s", errorCode, error422) + errors := []*models.GraphQLError{{Message: errorMessage}} + graphQLResponse := models.GraphQLResponse{Data: nil, Errors: errors} + *requestResults <- gqlUnbatchedRequestResponse{ + requestIndex, + &graphQLResponse, + } + } else { + metricRequestsTotal.log(result) + // Return the GraphQL response + *requestResults <- gqlUnbatchedRequestResponse{ + requestIndex, + graphQLResponse, + } + } + } + } +} + +type graphqlRequestsTotal struct { + metrics *requestsTotalMetric + logger logrus.FieldLogger +} + +func newGraphqlRequestsTotal(metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger) *graphqlRequestsTotal { + return &graphqlRequestsTotal{newRequestsTotalMetric(metrics, "graphql"), logger} +} + +func (e *graphqlRequestsTotal) getQueryType(path []interface{}) string { + if len(path) > 0 { + return fmt.Sprintf("%v", path[0]) + } + return "" +} + +func (e *graphqlRequestsTotal) getClassName(path []interface{}) string { + if len(path) > 1 { + return fmt.Sprintf("%v", path[1]) + } + return "" +} + +func (e *graphqlRequestsTotal) getErrGraphQLUser(gqlError gqlerrors.FormattedError) (bool, *enterrors.ErrGraphQLUser) { + if gqlError.OriginalError() != nil { + var gqlOriginalErr *gqlerrors.Error + if errors.As(gqlError.OriginalError(), &gqlOriginalErr) { + if gqlOriginalErr.OriginalError != nil { + switch { + case errors.As(gqlOriginalErr.OriginalError, &enterrors.ErrGraphQLUser{}): + return e.getError(gqlOriginalErr.OriginalError) + default: + var gqlFormatted *gqlerrors.FormattedError + if errors.As(gqlOriginalErr.OriginalError, &gqlFormatted) { + if gqlFormatted.OriginalError() != nil { + return e.getError(gqlFormatted.OriginalError()) + } + } + } + } + } + } + return false, nil +} + +func (e *graphqlRequestsTotal) isSyntaxRelatedError(gqlError gqlerrors.FormattedError) bool { + for _, prefix := range []string{"Syntax Error ", "Cannot query field"} { + if strings.HasPrefix(gqlError.Message, prefix) { + 
return true + } + } + return false +} + +func (e *graphqlRequestsTotal) getError(err error) (bool, *enterrors.ErrGraphQLUser) { + var errGraphQLUser *enterrors.ErrGraphQLUser + switch { + case errors.As(err, &errGraphQLUser): + return true, errGraphQLUser + default: + return false, nil + } +} + +func (e *graphqlRequestsTotal) log(result *tailorincgraphql.Result) { + if len(result.Errors) > 0 { + for _, gqlErr := range result.Errors { + if isUserError, err := e.getErrGraphQLUser(gqlErr); isUserError { + if e.metrics != nil { + e.metrics.RequestsTotalInc(UserError, err.ClassName(), err.QueryType()) + } + } else if e.isSyntaxRelatedError(gqlErr) { + if e.metrics != nil { + e.metrics.RequestsTotalInc(UserError, "", "") + } + } else { + e.logServerError(gqlErr, e.getClassName(gqlErr.Path), e.getQueryType(gqlErr.Path)) + } + } + } else if result.Data != nil { + e.logOk(result.Data) + } +} + +func (e *graphqlRequestsTotal) logServerError(err error, className, queryType string) { + e.logger.WithFields(logrus.Fields{ + "action": "requests_total", + "api": "graphql", + "query_type": queryType, + "class_name": className, + }).WithError(err).Error("unexpected error") + if e.metrics != nil { + e.metrics.RequestsTotalInc(ServerError, className, queryType) + } +} + +func (e *graphqlRequestsTotal) logUserError() { + if e.metrics != nil { + e.metrics.RequestsTotalInc(UserError, "", "") + } +} + +func (e *graphqlRequestsTotal) logOk(data interface{}) { + if e.metrics != nil { + className, queryType := e.getClassNameAndQueryType(data) + e.metrics.RequestsTotalInc(Ok, className, queryType) + } +} + +func (e *graphqlRequestsTotal) getClassNameAndQueryType(data interface{}) (className, queryType string) { + dataMap, ok := data.(map[string]interface{}) + if ok { + for query, value := range dataMap { + queryType = query + if queryType == "Explore" { + // Explore queries are cross class queries, we won't get a className in this case + // there's no sense in further value investigation + 
return + } + if value != nil { + if valueMap, ok := value.(map[string]interface{}); ok { + for class := range valueMap { + className = class + return + } + } + } + } + } + return +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_misc.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_misc.go new file mode 100644 index 0000000000000000000000000000000000000000..a9e29255ec6a26d43f982900b4b184fb62b1b6aa --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_misc.go @@ -0,0 +1,145 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "fmt" + "net/url" + + middleware "github.com/go-openapi/runtime/middleware" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/meta" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/well_known" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +func setupMiscHandlers(api *operations.WeaviateAPI, serverConfig *config.WeaviateConfig, + modulesProvider ModulesProvider, metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger, +) { + metricRequestsTotal := newMiscRequestsTotal(metrics, logger) + api.MetaMetaGetHandler = meta.MetaGetHandlerFunc(func(params meta.MetaGetParams, principal *models.Principal) middleware.Responder { + var ( + metaInfos = map[string]interface{}{} + err error + ) + + if modulesProvider != nil { + metaInfos, err = modulesProvider.GetMeta() + if err != nil { + metricRequestsTotal.logError("", err) + return 
meta.NewMetaGetInternalServerError().WithPayload(errPayloadFromSingleErr(err)) + } + } + + res := &models.Meta{ + Hostname: serverConfig.GetHostAddress(), + Version: config.ServerVersion, + Modules: metaInfos, + GrpcMaxMessageSize: int64(serverConfig.Config.GRPC.MaxMsgSize), + } + metricRequestsTotal.logOk("") + return meta.NewMetaGetOK().WithPayload(res) + }) + + api.WellKnownGetWellKnownOpenidConfigurationHandler = well_known.GetWellKnownOpenidConfigurationHandlerFunc( + func(params well_known.GetWellKnownOpenidConfigurationParams, principal *models.Principal) middleware.Responder { + if !serverConfig.Config.Authentication.OIDC.Enabled { + metricRequestsTotal.logUserError("") + return well_known.NewGetWellKnownOpenidConfigurationNotFound() + } + + target, err := url.JoinPath(serverConfig.Config.Authentication.OIDC.Issuer.Get(), "/.well-known/openid-configuration") + if err != nil { + metricRequestsTotal.logError("", err) + return well_known.NewGetWellKnownOpenidConfigurationInternalServerError().WithPayload(errPayloadFromSingleErr(err)) + } + clientID := serverConfig.Config.Authentication.OIDC.ClientID + scopes := serverConfig.Config.Authentication.OIDC.Scopes + body := &well_known.GetWellKnownOpenidConfigurationOKBody{ + Href: target, + ClientID: clientID.Get(), + Scopes: scopes.Get(), + } + + metricRequestsTotal.logOk("") + return well_known.NewGetWellKnownOpenidConfigurationOK().WithPayload(body) + }) + + api.WeaviateRootHandler = operations.WeaviateRootHandlerFunc( + func(params operations.WeaviateRootParams, principal *models.Principal) middleware.Responder { + origin := serverConfig.Config.Origin + body := &operations.WeaviateRootOKBody{ + Links: []*models.Link{ + { + Name: "Meta information about this instance/cluster", + Href: fmt.Sprintf("%s/v1/meta", origin), + }, + { + Name: "view complete schema", + Href: fmt.Sprintf("%s/v1/schema", origin), + DocumentationHref: "https://weaviate.io/developers/weaviate/api/rest#tag/schema/get/schema", + }, + { + Name: 
"CRUD schema", + Href: fmt.Sprintf("%s/v1/schema{/:className}", origin), + DocumentationHref: "https://weaviate.io/developers/weaviate/api/rest#tag/schema/put/schema/{className}", + }, + { + Name: "CRUD objects", + Href: fmt.Sprintf("%s/v1/objects{/:id}", origin), + DocumentationHref: "https://weaviate.io/developers/weaviate/api/rest#tag/objects/", + }, + { + Name: "trigger and view status of classifications", + Href: fmt.Sprintf("%s/v1/classifications{/:id}", origin), + DocumentationHref: "https://weaviate.io/developers/weaviate/api/rest#tag/classifications", + }, + { + Name: "check if Weaviate is live (returns 200 on GET when live)", + Href: fmt.Sprintf("%s/v1/.well-known/live", origin), + DocumentationHref: "https://weaviate.io/developers/weaviate/api/rest#tag/well-known/get/.well-known/live", + }, + { + Name: "check if Weaviate is ready (returns 200 on GET when ready)", + Href: fmt.Sprintf("%s/v1/.well-known/ready", origin), + DocumentationHref: "https://weaviate.io/developers/weaviate/api/rest#tag/well-known/get/.well-known/ready", + }, + { + Name: "view link to openid configuration (returns 404 on GET if no openid is configured)", + Href: fmt.Sprintf("%s/v1/.well-known/openid-configuration", origin), + DocumentationHref: "https://weaviate.io/developers/weaviate/api/rest#tag/well-known/get/.well-known/openid-configuration", + }, + }, + } + + metricRequestsTotal.logOk("") + return operations.NewWeaviateRootOK().WithPayload(body) + }) +} + +type miscRequestsTotal struct { + *restApiRequestsTotalImpl +} + +func newMiscRequestsTotal(metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger) restApiRequestsTotal { + return &miscRequestsTotal{ + restApiRequestsTotalImpl: &restApiRequestsTotalImpl{newRequestsTotalMetric(metrics, "rest"), "rest", "misc", logger}, + } +} + +func (e *miscRequestsTotal) logError(className string, err error) { + e.logServerError(className, err) +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_nodes.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_nodes.go new file mode 100644 index 0000000000000000000000000000000000000000..b2433b07f48c0d8607b6c493109674b40e288a3b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_nodes.go @@ -0,0 +1,160 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "errors" + + "github.com/go-openapi/runtime/middleware" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/cluster" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/nodes" + "github.com/weaviate/weaviate/adapters/handlers/rest/state" + "github.com/weaviate/weaviate/adapters/repos/db" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/verbosity" + autherrs "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + "github.com/weaviate/weaviate/usecases/monitoring" + nodesUC "github.com/weaviate/weaviate/usecases/nodes" + schemaUC "github.com/weaviate/weaviate/usecases/schema" +) + +type nodesHandlers struct { + manager *nodesUC.Manager + metricRequestsTotal restApiRequestsTotal +} + +func (n *nodesHandlers) getNodesStatus(params nodes.NodesGetParams, principal *models.Principal) middleware.Responder { + output, err := verbosity.ParseOutput(params.Output) + if err != nil { + return nodes.NewNodesGetUnprocessableEntity().WithPayload(errPayloadFromSingleErr(err)) + } + + nodeStatuses, err := n.manager.GetNodeStatus(params.HTTPRequest.Context(), 
principal, "", "", output) + if err != nil { + return n.handleGetNodesError(err) + } + + status := &models.NodesStatusResponse{ + Nodes: nodeStatuses, + } + + n.metricRequestsTotal.logOk("") + return nodes.NewNodesGetOK().WithPayload(status) +} + +func (n *nodesHandlers) getNodesStatusByClass(params nodes.NodesGetClassParams, principal *models.Principal) middleware.Responder { + output, err := verbosity.ParseOutput(params.Output) + if err != nil { + return nodes.NewNodesGetUnprocessableEntity().WithPayload(errPayloadFromSingleErr(err)) + } + + shardName := "" + if params.ShardName != nil { + shardName = *params.ShardName + } + + nodeStatuses, err := n.manager.GetNodeStatus(params.HTTPRequest.Context(), principal, params.ClassName, shardName, output) + if err != nil { + return n.handleGetNodesError(err) + } + + status := &models.NodesStatusResponse{ + Nodes: nodeStatuses, + } + + n.metricRequestsTotal.logOk("") + return nodes.NewNodesGetOK().WithPayload(status) +} + +func (n *nodesHandlers) getNodesStatistics(params cluster.ClusterGetStatisticsParams, principal *models.Principal) middleware.Responder { + nodeStatistics, err := n.manager.GetNodeStatistics(params.HTTPRequest.Context(), principal) + if err != nil { + return n.handleGetNodesError(err) + } + + synchronized := map[string]struct{}{} + for _, stats := range nodeStatistics { + if stats.Status == nil || *stats.Status != models.StatisticsStatusHEALTHY { + synchronized = nil + break + } + if stats.Raft != nil { + synchronized[stats.Raft.AppliedIndex] = struct{}{} + } + } + + statistics := &models.ClusterStatisticsResponse{ + Statistics: nodeStatistics, + Synchronized: len(synchronized) == 1, + } + + n.metricRequestsTotal.logOk("") + return cluster.NewClusterGetStatisticsOK().WithPayload(statistics) +} + +func (n *nodesHandlers) handleGetNodesError(err error) middleware.Responder { + n.metricRequestsTotal.logError("", err) + if errors.As(err, &enterrors.ErrNotFound{}) { + return nodes.NewNodesGetClassNotFound(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + if errors.As(err, &autherrs.Forbidden{}) { + return nodes.NewNodesGetClassForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + } + if errors.As(err, &enterrors.ErrUnprocessable{}) { + return nodes.NewNodesGetClassUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } + return nodes.NewNodesGetClassInternalServerError(). + WithPayload(errPayloadFromSingleErr(err)) +} + +func setupNodesHandlers(api *operations.WeaviateAPI, + schemaManger *schemaUC.Manager, repo *db.DB, appState *state.State, +) { + nodesManager := nodesUC.NewManager(appState.Logger, appState.Authorizer, + repo, schemaManger, appState.ServerConfig.Config.Authorization.Rbac, appState.ServerConfig.Config.MinimumInternalTimeout) + + h := &nodesHandlers{nodesManager, newNodesRequestsTotal(appState.Metrics, appState.Logger)} + api.NodesNodesGetHandler = nodes. + NodesGetHandlerFunc(h.getNodesStatus) + api.NodesNodesGetClassHandler = nodes. + NodesGetClassHandlerFunc(h.getNodesStatusByClass) + api.ClusterClusterGetStatisticsHandler = cluster. 
+ ClusterGetStatisticsHandlerFunc(h.getNodesStatistics) +} + +type nodesRequestsTotal struct { + *restApiRequestsTotalImpl +} + +func newNodesRequestsTotal(metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger) restApiRequestsTotal { + return &nodesRequestsTotal{ + restApiRequestsTotalImpl: &restApiRequestsTotalImpl{newRequestsTotalMetric(metrics, "rest"), "rest", "nodes", logger}, + } +} + +func (e *nodesRequestsTotal) logError(className string, err error) { + switch { + case errors.As(err, &enterrors.ErrNotFound{}), errors.As(err, &enterrors.ErrUnprocessable{}): + e.logUserError(className) + case errors.As(err, &autherrs.Forbidden{}): + e.logUserError(className) + default: + e.logServerError(className, err) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_objects.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_objects.go new file mode 100644 index 0000000000000000000000000000000000000000..ba9d8363d55bb30dfe41afefc7a408a3380fd319 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_objects.go @@ -0,0 +1,960 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "context" + "errors" + "fmt" + "strings" + + middleware "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + restCtx "github.com/weaviate/weaviate/adapters/handlers/rest/context" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/objects" + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema/crossref" + authzerrors "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/monitoring" + uco "github.com/weaviate/weaviate/usecases/objects" +) + +type objectHandlers struct { + manager objectsManager + logger logrus.FieldLogger + config config.Config + modulesProvider ModulesProvider + metricRequestsTotal restApiRequestsTotal +} + +type ModulesProvider interface { + RestApiAdditionalProperties(includeProp string, class *models.Class) map[string]interface{} + GetMeta() (map[string]interface{}, error) + HasMultipleVectorizers() bool +} + +type objectsManager interface { + AddObject(context.Context, *models.Principal, *models.Object, + *additional.ReplicationProperties) (*models.Object, error) + ValidateObject(context.Context, *models.Principal, + *models.Object, *additional.ReplicationProperties) error + GetObject(context.Context, *models.Principal, string, strfmt.UUID, + additional.Properties, *additional.ReplicationProperties, string) (*models.Object, error) + DeleteObject(context.Context, *models.Principal, string, + strfmt.UUID, *additional.ReplicationProperties, string) error + UpdateObject(context.Context, *models.Principal, string, strfmt.UUID, + *models.Object, *additional.ReplicationProperties) (*models.Object, 
error) + HeadObject(ctx context.Context, principal *models.Principal, class string, id strfmt.UUID, + repl *additional.ReplicationProperties, tenant string) (bool, *uco.Error) + GetObjects(context.Context, *models.Principal, *int64, *int64, + *string, *string, *string, additional.Properties, string) ([]*models.Object, error) + Query(ctx context.Context, principal *models.Principal, + params *uco.QueryParams) ([]*models.Object, *uco.Error) + MergeObject(context.Context, *models.Principal, *models.Object, + *additional.ReplicationProperties) *uco.Error + AddObjectReference(context.Context, *models.Principal, *uco.AddReferenceInput, + *additional.ReplicationProperties, string) *uco.Error + UpdateObjectReferences(context.Context, *models.Principal, + *uco.PutReferenceInput, *additional.ReplicationProperties, string) *uco.Error + DeleteObjectReference(context.Context, *models.Principal, *uco.DeleteReferenceInput, + *additional.ReplicationProperties, string) *uco.Error + GetObjectsClass(ctx context.Context, principal *models.Principal, id strfmt.UUID) (*models.Class, error) + GetObjectClassFromName(ctx context.Context, principal *models.Principal, className string) (*models.Class, error) +} + +func (h *objectHandlers) addObject(params objects.ObjectsCreateParams, + principal *models.Principal, +) middleware.Responder { + repl, err := getReplicationProperties(params.ConsistencyLevel, nil) + if err != nil { + h.metricRequestsTotal.logError("", err) + return objects.NewObjectsCreateBadRequest(). + WithPayload(errPayloadFromSingleErr(err)) + } + className := getClassName(params.Body) + + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + object, err := h.manager.AddObject(ctx, principal, params.Body, repl) + if err != nil { + h.metricRequestsTotal.logError(className, err) + if errors.As(err, &uco.ErrInvalidUserInput{}) { + return objects.NewObjectsCreateUnprocessableEntity(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } else if errors.As(err, &uco.ErrMultiTenancy{}) { + return objects.NewObjectsCreateUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } else if errors.As(err, &authzerrors.Forbidden{}) { + return objects.NewObjectsCreateForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + } else { + return objects.NewObjectsCreateInternalServerError(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + propertiesMap, ok := object.Properties.(map[string]interface{}) + if ok { + object.Properties = h.extendPropertiesWithAPILinks(propertiesMap) + } + + h.metricRequestsTotal.logOk(className) + return objects.NewObjectsCreateOK().WithPayload(object) +} + +func (h *objectHandlers) validateObject(params objects.ObjectsValidateParams, + principal *models.Principal, +) middleware.Responder { + className := getClassName(params.Body) + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + err := h.manager.ValidateObject(ctx, principal, params.Body, nil) + if err != nil { + h.metricRequestsTotal.logError(className, err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return objects.NewObjectsValidateForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + case errors.As(err, &uco.ErrInvalidUserInput{}): + return objects.NewObjectsValidateUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + case errors.As(err, &uco.ErrMultiTenancy{}): + return objects.NewObjectsValidateUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return objects.NewObjectsValidateInternalServerError(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + } + + h.metricRequestsTotal.logOk(className) + return objects.NewObjectsValidateOK() +} + +// getObject gets object of a specific class +func (h *objectHandlers) getObject(params objects.ObjectsClassGetParams, + principal *models.Principal, +) middleware.Responder { + var additional additional.Properties + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + + // The process to extract additional params depends on knowing the schema + // which in turn requires a preflight load of the object. We can save this + // second db request if we know that the user did not specify any additional + // params. This could potentially be optimized further by checking if only + // non-module specific params are contained and decide then, but we do not + // know if this path is critical enough for this level of optimization. + if params.Include != nil { + var class *models.Class + var err error + if params.ClassName == "" { // deprecated request without classname + class, err = h.manager.GetObjectsClass(ctx, principal, params.ID) + } else { + class, err = h.manager.GetObjectClassFromName(ctx, principal, params.ClassName) + } + if err != nil { + h.metricRequestsTotal.logUserError(params.ClassName) + return objects.NewObjectsClassGetBadRequest(). + WithPayload(errPayloadFromSingleErr(err)) + } + + additional, err = parseIncludeParam(params.Include, h.modulesProvider, true, class) + if err != nil { + h.metricRequestsTotal.logError(params.ClassName, err) + return objects.NewObjectsClassGetBadRequest(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + replProps, err := getReplicationProperties(params.ConsistencyLevel, params.NodeName) + if err != nil { + h.metricRequestsTotal.logError(params.ClassName, err) + return objects.NewObjectsClassGetBadRequest(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + + tenant := getTenant(params.Tenant) + + object, err := h.manager.GetObject(ctx, principal, + params.ClassName, params.ID, additional, replProps, tenant) + if err != nil { + h.metricRequestsTotal.logError(getClassName(object), err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return objects.NewObjectsClassGetForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + case errors.As(err, &uco.ErrNotFound{}): + return objects.NewObjectsClassGetNotFound() + case errors.As(err, &uco.ErrMultiTenancy{}): + return objects.NewObjectsClassGetUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return objects.NewObjectsClassGetInternalServerError(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + propertiesMap, ok := object.Properties.(map[string]interface{}) + if ok { + object.Properties = h.extendPropertiesWithAPILinks(propertiesMap) + } + + h.metricRequestsTotal.logOk(getClassName(object)) + return objects.NewObjectsClassGetOK().WithPayload(object) +} + +func (h *objectHandlers) getObjects(params objects.ObjectsListParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + if params.Class != nil && *params.Class != "" { + return h.query(params, principal) + } + additional, err := parseIncludeParam(params.Include, h.modulesProvider, h.shouldIncludeGetObjectsModuleParams(), nil) + if err != nil { + h.metricRequestsTotal.logError("", err) + return objects.NewObjectsListBadRequest(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + + var deprecationsRes []*models.Deprecation + + list, err := h.manager.GetObjects(ctx, principal, + params.Offset, params.Limit, params.Sort, params.Order, params.After, additional, + getTenant(params.Tenant)) + if err != nil { + h.metricRequestsTotal.logError("", err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return objects.NewObjectsListForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + case errors.As(err, &uco.ErrMultiTenancy{}): + return objects.NewObjectsListUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return objects.NewObjectsListInternalServerError(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + for i, object := range list { + propertiesMap, ok := object.Properties.(map[string]interface{}) + if ok { + list[i].Properties = h.extendPropertiesWithAPILinks(propertiesMap) + } + } + + h.metricRequestsTotal.logOk("") + return objects.NewObjectsListOK(). + WithPayload(&models.ObjectsListResponse{ + Objects: list, + TotalResults: int64(len(list)), + Deprecations: deprecationsRes, + }) +} + +func (h *objectHandlers) query(params objects.ObjectsListParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + additional, err := parseIncludeParam(params.Include, h.modulesProvider, h.shouldIncludeGetObjectsModuleParams(), nil) + if err != nil { + h.metricRequestsTotal.logError(*params.Class, err) + return objects.NewObjectsListBadRequest(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + req := uco.QueryParams{ + Class: *params.Class, + Offset: params.Offset, + Limit: params.Limit, + After: params.After, + Sort: params.Sort, + Order: params.Order, + Tenant: params.Tenant, + Additional: additional, + } + resultSet, rerr := h.manager.Query(ctx, principal, &req) + if rerr != nil { + h.metricRequestsTotal.logError(req.Class, rerr) + switch rerr.Code { + case uco.StatusForbidden: + return objects.NewObjectsListForbidden(). + WithPayload(errPayloadFromSingleErr(rerr)) + case uco.StatusNotFound: + return objects.NewObjectsListNotFound() + case uco.StatusBadRequest: + return objects.NewObjectsListUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(rerr)) + case uco.StatusUnprocessableEntity: + return objects.NewObjectsListUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(rerr)) + default: + return objects.NewObjectsListInternalServerError(). + WithPayload(errPayloadFromSingleErr(rerr)) + } + } + + for i, object := range resultSet { + propertiesMap, ok := object.Properties.(map[string]interface{}) + if ok { + resultSet[i].Properties = h.extendPropertiesWithAPILinks(propertiesMap) + } + } + + h.metricRequestsTotal.logOk(req.Class) + return objects.NewObjectsListOK(). + WithPayload(&models.ObjectsListResponse{ + Objects: resultSet, + TotalResults: int64(len(resultSet)), + Deprecations: []*models.Deprecation{}, + }) +} + +// deleteObject delete a single object of giving class +func (h *objectHandlers) deleteObject(params objects.ObjectsClassDeleteParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + repl, err := getReplicationProperties(params.ConsistencyLevel, nil) + if err != nil { + h.metricRequestsTotal.logError(params.ClassName, err) + return objects.NewObjectsCreateBadRequest(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + + tenant := getTenant(params.Tenant) + + err = h.manager.DeleteObject(ctx, + principal, params.ClassName, params.ID, repl, tenant) + if err != nil { + h.metricRequestsTotal.logError(params.ClassName, err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return objects.NewObjectsClassDeleteForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + case errors.As(err, &uco.ErrNotFound{}): + return objects.NewObjectsClassDeleteNotFound() + case errors.As(err, &uco.ErrMultiTenancy{}): + return objects.NewObjectsClassDeleteUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return objects.NewObjectsClassDeleteInternalServerError(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + h.metricRequestsTotal.logOk(params.ClassName) + return objects.NewObjectsClassDeleteNoContent() +} + +func (h *objectHandlers) updateObject(params objects.ObjectsClassPutParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + className := getClassName(params.Body) + repl, err := getReplicationProperties(params.ConsistencyLevel, nil) + if err != nil { + h.metricRequestsTotal.logError(className, err) + return objects.NewObjectsCreateBadRequest(). + WithPayload(errPayloadFromSingleErr(err)) + } + + object, err := h.manager.UpdateObject(ctx, + principal, params.ClassName, params.ID, params.Body, repl) + if err != nil { + h.metricRequestsTotal.logError(className, err) + if errors.As(err, &uco.ErrInvalidUserInput{}) { + return objects.NewObjectsClassPutUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } else if errors.As(err, &uco.ErrMultiTenancy{}) { + return objects.NewObjectsClassPutUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } else if errors.As(err, &authzerrors.Forbidden{}) { + return objects.NewObjectsClassPutForbidden(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } else { + return objects.NewObjectsClassPutInternalServerError(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + propertiesMap, ok := object.Properties.(map[string]interface{}) + if ok { + object.Properties = h.extendPropertiesWithAPILinks(propertiesMap) + } + + h.metricRequestsTotal.logOk(className) + return objects.NewObjectsClassPutOK().WithPayload(object) +} + +func (h *objectHandlers) headObject(params objects.ObjectsClassHeadParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + repl, err := getReplicationProperties(params.ConsistencyLevel, nil) + if err != nil { + h.metricRequestsTotal.logError(params.ClassName, err) + return objects.NewObjectsCreateBadRequest(). + WithPayload(errPayloadFromSingleErr(err)) + } + + tenant := getTenant(params.Tenant) + + exists, objErr := h.manager.HeadObject(ctx, + principal, params.ClassName, params.ID, repl, tenant) + if objErr != nil { + h.metricRequestsTotal.logError(params.ClassName, objErr) + switch { + case objErr.Forbidden(): + return objects.NewObjectsClassHeadForbidden(). + WithPayload(errPayloadFromSingleErr(objErr)) + case objErr.UnprocessableEntity(): + return objects.NewObjectsClassHeadUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(objErr)) + default: + return objects.NewObjectsClassHeadInternalServerError(). 
+ WithPayload(errPayloadFromSingleErr(objErr)) + } + } + + h.metricRequestsTotal.logOk(params.ClassName) + if !exists { + return objects.NewObjectsClassHeadNotFound() + } + return objects.NewObjectsClassHeadNoContent() +} + +func (h *objectHandlers) patchObject(params objects.ObjectsClassPatchParams, principal *models.Principal) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + updates := params.Body + if updates == nil { + return objects.NewObjectsClassPatchBadRequest() + } + updates.ID = params.ID + updates.Class = params.ClassName + + repl, err := getReplicationProperties(params.ConsistencyLevel, nil) + if err != nil { + h.metricRequestsTotal.logError(getClassName(updates), err) + return objects.NewObjectsCreateBadRequest(). + WithPayload(errPayloadFromSingleErr(err)) + } + + objErr := h.manager.MergeObject(ctx, principal, updates, repl) + if objErr != nil { + h.metricRequestsTotal.logError(getClassName(updates), objErr) + switch { + case objErr.NotFound(): + return objects.NewObjectsClassPatchNotFound() + case objErr.Forbidden(): + return objects.NewObjectsClassPatchForbidden(). + WithPayload(errPayloadFromSingleErr(objErr)) + case objErr.BadRequest(): + return objects.NewObjectsClassPatchUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(objErr)) + case objErr.UnprocessableEntity(): + return objects.NewObjectsClassPatchUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(objErr)) + default: + return objects.NewObjectsClassPatchInternalServerError(). 
+ WithPayload(errPayloadFromSingleErr(objErr)) + } + } + + h.metricRequestsTotal.logOk(getClassName(updates)) + return objects.NewObjectsClassPatchNoContent() +} + +func (h *objectHandlers) addObjectReference( + params objects.ObjectsClassReferencesCreateParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + input := uco.AddReferenceInput{ + Class: params.ClassName, + ID: params.ID, + Property: params.PropertyName, + Ref: *params.Body, + } + + repl, err := getReplicationProperties(params.ConsistencyLevel, nil) + if err != nil { + h.metricRequestsTotal.logError(params.ClassName, err) + return objects.NewObjectsCreateBadRequest(). + WithPayload(errPayloadFromSingleErr(err)) + } + tenant := getTenant(params.Tenant) + + objErr := h.manager.AddObjectReference(ctx, principal, &input, repl, tenant) + if objErr != nil { + h.metricRequestsTotal.logError(params.ClassName, objErr) + switch { + case objErr.Forbidden(): + return objects.NewObjectsClassReferencesCreateForbidden(). + WithPayload(errPayloadFromSingleErr(objErr)) + case objErr.NotFound(): + return objects.NewObjectsClassReferencesCreateNotFound() + case objErr.BadRequest(): + return objects.NewObjectsClassReferencesCreateUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(objErr)) + case objErr.UnprocessableEntity(): + return objects.NewObjectsClassReferencesCreateUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(objErr)) + default: + return objects.NewObjectsClassReferencesCreateInternalServerError(). 
+ WithPayload(errPayloadFromSingleErr(objErr)) + } + } + + h.metricRequestsTotal.logOk(params.ClassName) + return objects.NewObjectsClassReferencesCreateOK() +} + +func (h *objectHandlers) putObjectReferences(params objects.ObjectsClassReferencesPutParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + input := uco.PutReferenceInput{ + Class: params.ClassName, + ID: params.ID, + Property: params.PropertyName, + Refs: params.Body, + } + + repl, err := getReplicationProperties(params.ConsistencyLevel, nil) + if err != nil { + h.metricRequestsTotal.logError(params.ClassName, err) + return objects.NewObjectsCreateBadRequest(). + WithPayload(errPayloadFromSingleErr(err)) + } + + tenant := getTenant(params.Tenant) + + objErr := h.manager.UpdateObjectReferences(ctx, principal, &input, repl, tenant) + if objErr != nil { + h.metricRequestsTotal.logError(params.ClassName, objErr) + switch { + case objErr.Forbidden(): + return objects.NewObjectsClassReferencesPutForbidden(). + WithPayload(errPayloadFromSingleErr(objErr)) + case objErr.NotFound(): + return objects.NewObjectsClassReferencesPutNotFound() + case objErr.BadRequest(): + return objects.NewObjectsClassReferencesPutUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(objErr)) + case objErr.UnprocessableEntity(): + return objects.NewObjectsClassReferencesPutUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(objErr)) + default: + return objects.NewObjectsClassReferencesPutInternalServerError(). 
+ WithPayload(errPayloadFromSingleErr(objErr)) + } + } + + h.metricRequestsTotal.logOk(params.ClassName) + return objects.NewObjectsClassReferencesPutOK() +} + +func (h *objectHandlers) deleteObjectReference(params objects.ObjectsClassReferencesDeleteParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + input := uco.DeleteReferenceInput{ + Class: params.ClassName, + ID: params.ID, + Property: params.PropertyName, + Reference: *params.Body, + } + + repl, err := getReplicationProperties(params.ConsistencyLevel, nil) + if err != nil { + h.metricRequestsTotal.logError(params.ClassName, err) + return objects.NewObjectsCreateBadRequest(). + WithPayload(errPayloadFromSingleErr(err)) + } + tenant := getTenant(params.Tenant) + + objErr := h.manager.DeleteObjectReference(ctx, principal, &input, repl, tenant) + if objErr != nil { + h.metricRequestsTotal.logError(params.ClassName, objErr) + switch objErr.Code { + case uco.StatusForbidden: + return objects.NewObjectsClassReferencesDeleteForbidden(). + WithPayload(errPayloadFromSingleErr(objErr)) + case uco.StatusNotFound: + return objects.NewObjectsClassReferencesDeleteNotFound() + case uco.StatusBadRequest: + return objects.NewObjectsClassReferencesDeleteUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(objErr)) + case uco.StatusUnprocessableEntity: + return objects.NewObjectsClassReferencesDeleteUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(objErr)) + default: + return objects.NewObjectsClassReferencesDeleteInternalServerError(). 
+ WithPayload(errPayloadFromSingleErr(objErr)) + } + } + + h.metricRequestsTotal.logOk(params.ClassName) + return objects.NewObjectsClassReferencesDeleteNoContent() +} + +func setupObjectHandlers(api *operations.WeaviateAPI, + manager *uco.Manager, config config.Config, logger logrus.FieldLogger, + modulesProvider ModulesProvider, metrics *monitoring.PrometheusMetrics, +) { + h := &objectHandlers{manager, logger, config, modulesProvider, newObjectsRequestsTotal(metrics, logger)} + api.ObjectsObjectsCreateHandler = objects. + ObjectsCreateHandlerFunc(h.addObject) + api.ObjectsObjectsValidateHandler = objects. + ObjectsValidateHandlerFunc(h.validateObject) + api.ObjectsObjectsClassGetHandler = objects. + ObjectsClassGetHandlerFunc(h.getObject) + api.ObjectsObjectsClassHeadHandler = objects. + ObjectsClassHeadHandlerFunc(h.headObject) + api.ObjectsObjectsClassDeleteHandler = objects. + ObjectsClassDeleteHandlerFunc(h.deleteObject) + api.ObjectsObjectsListHandler = objects. + ObjectsListHandlerFunc(h.getObjects) + api.ObjectsObjectsClassPutHandler = objects. + ObjectsClassPutHandlerFunc(h.updateObject) + api.ObjectsObjectsClassPatchHandler = objects. + ObjectsClassPatchHandlerFunc(h.patchObject) + api.ObjectsObjectsClassReferencesCreateHandler = objects. + ObjectsClassReferencesCreateHandlerFunc(h.addObjectReference) + api.ObjectsObjectsClassReferencesDeleteHandler = objects. + ObjectsClassReferencesDeleteHandlerFunc(h.deleteObjectReference) + api.ObjectsObjectsClassReferencesPutHandler = objects. + ObjectsClassReferencesPutHandlerFunc(h.putObjectReferences) + // deprecated handlers + api.ObjectsObjectsGetHandler = objects. + ObjectsGetHandlerFunc(h.getObjectDeprecated) + api.ObjectsObjectsDeleteHandler = objects. + ObjectsDeleteHandlerFunc(h.deleteObjectDeprecated) + api.ObjectsObjectsHeadHandler = objects. + ObjectsHeadHandlerFunc(h.headObjectDeprecated) + api.ObjectsObjectsUpdateHandler = objects. 
+ ObjectsUpdateHandlerFunc(h.updateObjectDeprecated) + api.ObjectsObjectsPatchHandler = objects. + ObjectsPatchHandlerFunc(h.patchObjectDeprecated) + api.ObjectsObjectsReferencesCreateHandler = objects. + ObjectsReferencesCreateHandlerFunc(h.addObjectReferenceDeprecated) + api.ObjectsObjectsReferencesUpdateHandler = objects. + ObjectsReferencesUpdateHandlerFunc(h.updateObjectReferencesDeprecated) + api.ObjectsObjectsReferencesDeleteHandler = objects. + ObjectsReferencesDeleteHandlerFunc(h.deleteObjectReferenceDeprecated) +} + +func (h *objectHandlers) getObjectDeprecated(params objects.ObjectsGetParams, + principal *models.Principal, +) middleware.Responder { + h.logger.Warn("deprecated endpoint: ", "GET "+params.HTTPRequest.URL.Path) + ps := objects.ObjectsClassGetParams{ + HTTPRequest: params.HTTPRequest, + ID: params.ID, + Include: params.Include, + } + return h.getObject(ps, principal) +} + +func (h *objectHandlers) headObjectDeprecated(params objects.ObjectsHeadParams, + principal *models.Principal, +) middleware.Responder { + h.logger.Warn("deprecated endpoint: ", "HEAD "+params.HTTPRequest.URL.Path) + r := objects.ObjectsClassHeadParams{ + HTTPRequest: params.HTTPRequest, + ID: params.ID, + } + return h.headObject(r, principal) +} + +func (h *objectHandlers) patchObjectDeprecated(params objects.ObjectsPatchParams, principal *models.Principal) middleware.Responder { + h.logger.Warn("deprecated endpoint: ", "PATCH "+params.HTTPRequest.URL.Path) + args := objects.ObjectsClassPatchParams{ + HTTPRequest: params.HTTPRequest, + ID: params.ID, + Body: params.Body, + } + if params.Body != nil { + args.ClassName = params.Body.Class + } + return h.patchObject(args, principal) +} + +func (h *objectHandlers) updateObjectDeprecated(params objects.ObjectsUpdateParams, + principal *models.Principal, +) middleware.Responder { + h.logger.Warn("deprecated endpoint: ", "PUT "+params.HTTPRequest.URL.Path) + ps := objects.ObjectsClassPutParams{ + HTTPRequest: params.HTTPRequest, 
+ ClassName: params.Body.Class, + Body: params.Body, + ID: params.ID, + } + return h.updateObject(ps, principal) +} + +func (h *objectHandlers) deleteObjectDeprecated(params objects.ObjectsDeleteParams, + principal *models.Principal, +) middleware.Responder { + h.logger.Warn("deprecated endpoint: ", "DELETE "+params.HTTPRequest.URL.Path) + ps := objects.ObjectsClassDeleteParams{ + HTTPRequest: params.HTTPRequest, + ID: params.ID, + } + return h.deleteObject(ps, principal) +} + +func (h *objectHandlers) addObjectReferenceDeprecated(params objects.ObjectsReferencesCreateParams, + principal *models.Principal, +) middleware.Responder { + h.logger.Warn("deprecated endpoint: ", "POST "+params.HTTPRequest.URL.Path) + req := objects.ObjectsClassReferencesCreateParams{ + HTTPRequest: params.HTTPRequest, + Body: params.Body, + ID: params.ID, + PropertyName: params.PropertyName, + } + return h.addObjectReference(req, principal) +} + +func (h *objectHandlers) updateObjectReferencesDeprecated(params objects.ObjectsReferencesUpdateParams, + principal *models.Principal, +) middleware.Responder { + h.logger.Warn("deprecated endpoint: ", "PUT "+params.HTTPRequest.URL.Path) + req := objects.ObjectsClassReferencesPutParams{ + HTTPRequest: params.HTTPRequest, + ID: params.ID, + PropertyName: params.PropertyName, + Body: params.Body, + } + return h.putObjectReferences(req, principal) +} + +func (h *objectHandlers) deleteObjectReferenceDeprecated(params objects.ObjectsReferencesDeleteParams, + principal *models.Principal, +) middleware.Responder { + h.logger.Warn("deprecated endpoint: ", "DELETE "+params.HTTPRequest.URL.Path) + req := objects.ObjectsClassReferencesDeleteParams{ + HTTPRequest: params.HTTPRequest, + Body: params.Body, + ID: params.ID, + PropertyName: params.PropertyName, + } + return h.deleteObjectReference(req, principal) +} + +func (h *objectHandlers) extendPropertiesWithAPILinks(schema map[string]interface{}) map[string]interface{} { + if schema == nil { + return 
schema + } + + for key, value := range schema { + asMultiRef, ok := value.(models.MultipleRef) + if !ok { + continue + } + + schema[key] = h.extendReferencesWithAPILinks(asMultiRef) + } + return schema +} + +func (h *objectHandlers) extendReferencesWithAPILinks(refs models.MultipleRef) models.MultipleRef { + for i, ref := range refs { + refs[i] = h.extendReferenceWithAPILink(ref) + } + + return refs +} + +func (h *objectHandlers) extendReferenceWithAPILink(ref *models.SingleRef) *models.SingleRef { + parsed, err := crossref.Parse(ref.Beacon.String()) + if err != nil { + // ignore return unchanged + return ref + } + href := fmt.Sprintf("%s/v1/objects/%s/%s", h.config.Origin, parsed.Class, parsed.TargetID) + if parsed.Class == "" { + href = fmt.Sprintf("%s/v1/objects/%s", h.config.Origin, parsed.TargetID) + } + ref.Href = strfmt.URI(href) + return ref +} + +func (h *objectHandlers) shouldIncludeGetObjectsModuleParams() bool { + if h.modulesProvider == nil || !h.modulesProvider.HasMultipleVectorizers() { + return true + } + return false +} + +type objectsRequestsTotal struct { + *restApiRequestsTotalImpl +} + +func newObjectsRequestsTotal(metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger) restApiRequestsTotal { + return &objectsRequestsTotal{ + restApiRequestsTotalImpl: &restApiRequestsTotalImpl{newRequestsTotalMetric(metrics, "rest"), "rest", "objects", logger}, + } +} + +func (e *objectsRequestsTotal) logError(className string, err error) { + var customError *uco.Error + switch { + + case errors.As(err, &uco.ErrMultiTenancy{}): + e.logUserError(className) + case errors.As(err, &errReplication{}), errors.As(err, &errUnregonizedProperty{}): + e.logUserError(className) + case errors.As(err, &authzerrors.Forbidden{}): + e.logUserError(className) + case errors.As(err, &uco.ErrInvalidUserInput{}), errors.As(err, &uco.ErrNotFound{}): + e.logUserError(className) + case errors.As(err, &customError): + switch customError.Code { + case 
uco.StatusInternalServerError: + e.logServerError(className, err) + default: + e.logUserError(className) + } + default: + if errors.As(err, &uco.ErrInvalidUserInput{}) || + errors.As(err, &uco.ErrMultiTenancy{}) || + errors.As(err, &authzerrors.Forbidden{}) { + e.logUserError(className) + } else { + e.logServerError(className, err) + } + } +} + +func parseIncludeParam(in *string, modulesProvider ModulesProvider, includeModuleParams bool, + class *models.Class, +) (additional.Properties, error) { + out := additional.Properties{} + if in == nil { + return out, nil + } + + parts := strings.Split(*in, ",") + + for _, prop := range parts { + if prop == "classification" { + out.Classification = true + out.RefMeta = true + continue + } + if prop == "vector" { + out.Vector = true + continue + } + if includeModuleParams && modulesProvider != nil { + moduleParams := modulesProvider.RestApiAdditionalProperties(prop, class) + if len(moduleParams) > 0 { + out.ModuleParams = getModuleParams(out.ModuleParams) + for param, value := range moduleParams { + out.ModuleParams[param] = value + } + continue + } + } + return out, newErrUnregonizedProperty(fmt.Errorf("unrecognized property '%s' in ?include list", prop)) + } + + return out, nil +} + +func getModuleParams(moduleParams map[string]interface{}) map[string]interface{} { + if moduleParams == nil { + return map[string]interface{}{} + } + return moduleParams +} + +func getReplicationProperties(consistencyLvl, nodeName *string) (*additional.ReplicationProperties, error) { + if nodeName == nil && consistencyLvl == nil { + return nil, nil + } + + repl := additional.ReplicationProperties{} + if nodeName != nil { + repl.NodeName = *nodeName + } + + cl, err := getConsistencyLevel(consistencyLvl) + if err != nil { + return nil, newErrReplication(err) + } + repl.ConsistencyLevel = cl + + if repl.ConsistencyLevel != "" && repl.NodeName != "" { + return nil, newErrReplication(fmt.Errorf("consistency_level and node_name are mutually 
exclusive")) + } + + return &repl, nil +} + +func getConsistencyLevel(lvl *string) (string, error) { + if lvl != nil { + switch types.ConsistencyLevel(*lvl) { + case types.ConsistencyLevelOne, types.ConsistencyLevelQuorum, types.ConsistencyLevelAll: + return *lvl, nil + default: + return "", fmt.Errorf("unrecognized consistency level '%v', "+ + "try one of the following: ['ONE', 'QUORUM', 'ALL']", *lvl) + } + } + + return "", nil +} + +func getTenant(maybeKey *string) string { + if maybeKey != nil { + return *maybeKey + } + return "" +} + +func getClassName(obj *models.Object) string { + if obj != nil { + return obj.Class + } + return "" +} + +type errReplication struct { + err error +} + +func newErrReplication(err error) errReplication { + return errReplication{err} +} + +func (e errReplication) Error() string { + return fmt.Sprintf("%v", e.err) +} + +type errUnregonizedProperty struct { + err error +} + +func newErrUnregonizedProperty(err error) errUnregonizedProperty { + return errUnregonizedProperty{err} +} + +func (e errUnregonizedProperty) Error() string { + return fmt.Sprintf("%v", e.err) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_objects_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_objects_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b1f56377293db725ce4d0f1a2049a2a53a667ca5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_objects_test.go @@ -0,0 +1,1166 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "context" + stderrors "errors" + "net/http/httptest" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/objects" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + "github.com/weaviate/weaviate/usecases/config" + uco "github.com/weaviate/weaviate/usecases/objects" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEnrichObjectsWithLinks(t *testing.T) { + t.Run("add object", func(t *testing.T) { + type test struct { + name string + object *models.Object + expectedResult *models.Object + } + + tests := []test{ + { + name: "without props - nothing changes", + object: &models.Object{Class: "Foo", Properties: nil}, + expectedResult: &models.Object{Class: "Foo", Properties: nil}, + }, + { + name: "without ref props - nothing changes", + object: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + expectedResult: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + }, + { + name: "with a ref prop - no origin configured", + object: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + expectedResult: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + Href: "/v1/objects/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + 
}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeManager := &fakeManager{ + addObjectReturn: test.object, + } + h := &objectHandlers{manager: fakeManager, metricRequestsTotal: &fakeMetricRequestsTotal{}} + res := h.addObject(objects.ObjectsCreateParams{ + HTTPRequest: httptest.NewRequest("POST", "/v1/objects", nil), + Body: test.object, + }, nil) + parsed, ok := res.(*objects.ObjectsCreateOK) + require.True(t, ok) + assert.Equal(t, test.expectedResult, parsed.Payload) + }) + } + }) + + // This test "with an origin configured" is not repeated for every handler, + // as testing this feature once was deemed sufficient + t.Run("add object - with an origin configured", func(t *testing.T) { + type test struct { + name string + object *models.Object + expectedResult *models.Object + } + + tests := []test{ + { + name: "without props - nothing changes", + object: &models.Object{Class: "Foo", Properties: nil}, + expectedResult: &models.Object{Class: "Foo", Properties: nil}, + }, + { + name: "without ref props - nothing changes", + object: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + expectedResult: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + }, + { + name: "with a ref prop - no origin configured", + object: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + expectedResult: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + Href: 
"https://awesomehost.com/v1/objects/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeManager := &fakeManager{ + addObjectReturn: test.object, + } + config := config.Config{Origin: "https://awesomehost.com"} + h := &objectHandlers{manager: fakeManager, config: config, metricRequestsTotal: &fakeMetricRequestsTotal{}} + res := h.addObject(objects.ObjectsCreateParams{ + HTTPRequest: httptest.NewRequest("POST", "/v1/objects", nil), + Body: test.object, + }, nil) + parsed, ok := res.(*objects.ObjectsCreateOK) + require.True(t, ok) + assert.Equal(t, test.expectedResult, parsed.Payload) + }) + } + }) + + t.Run("get object deprecated", func(t *testing.T) { + type test struct { + name string + object *models.Object + expectedResult *models.Object + } + + tests := []test{ + { + name: "without props - nothing changes", + object: &models.Object{Class: "Foo", Properties: nil}, + expectedResult: &models.Object{Class: "Foo", Properties: nil}, + }, + { + name: "without ref props - nothing changes", + object: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + expectedResult: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + }, + { + name: "with a ref prop - no origin configured", + object: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + expectedResult: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + Href: 
"/v1/objects/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeManager := &fakeManager{ + getObjectReturn: test.object, + } + h := &objectHandlers{manager: fakeManager, logger: &logrus.Logger{}, metricRequestsTotal: &fakeMetricRequestsTotal{}} + res := h.getObjectDeprecated(objects.ObjectsGetParams{HTTPRequest: httptest.NewRequest("GET", "/v1/objects", nil)}, nil) + parsed, ok := res.(*objects.ObjectsClassGetOK) + require.True(t, ok) + assert.Equal(t, test.expectedResult, parsed.Payload) + }) + } + }) + + t.Run("get objects", func(t *testing.T) { + type test struct { + name string + object []*models.Object + expectedResult []*models.Object + } + + tests := []test{ + { + name: "without props - nothing changes", + object: []*models.Object{{Class: "Foo", Properties: nil}}, + expectedResult: []*models.Object{{Class: "Foo", Properties: nil}}, + }, + { + name: "without ref props - nothing changes", + object: []*models.Object{ + {Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + {Class: "Bar", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 234, + }}, + }, + expectedResult: []*models.Object{ + {Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + {Class: "Bar", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 234, + }}, + }, + }, + { + name: "with a ref prop - no origin configured", + object: []*models.Object{ + {Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + {Class: "Bar", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 234, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + }, + expectedResult: []*models.Object{ + {Class: 
"Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + {Class: "Bar", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 234, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + Href: "/v1/objects/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeManager := &fakeManager{ + queryResult: test.object, + } + h := &objectHandlers{manager: fakeManager, metricRequestsTotal: &fakeMetricRequestsTotal{}} + res := h.getObjects(objects.ObjectsListParams{HTTPRequest: httptest.NewRequest("GET", "/v1/objects", nil)}, nil) + parsed, ok := res.(*objects.ObjectsListOK) + require.True(t, ok) + assert.Equal(t, test.expectedResult, parsed.Payload.Objects) + }) + } + }) + + t.Run("update object deprecated", func(t *testing.T) { + type test struct { + name string + object *models.Object + expectedResult *models.Object + } + + tests := []test{ + { + name: "without props - nothing changes", + object: &models.Object{Class: "Foo", Properties: nil}, + expectedResult: &models.Object{Class: "Foo", Properties: nil}, + }, + { + name: "without ref props - nothing changes", + object: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + expectedResult: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + }, + { + name: "with a ref prop - no origin configured", + object: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + expectedResult: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": 
"hello world", + "numericalField": 134, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + Href: "/v1/objects/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeManager := &fakeManager{ + updateObjectReturn: test.object, + } + h := &objectHandlers{manager: fakeManager, logger: &logrus.Logger{}, metricRequestsTotal: &fakeMetricRequestsTotal{}} + res := h.updateObjectDeprecated(objects.ObjectsUpdateParams{ + HTTPRequest: httptest.NewRequest("POST", "/v1/objects", nil), + Body: test.object, + }, nil) + parsed, ok := res.(*objects.ObjectsClassPutOK) + require.True(t, ok) + assert.Equal(t, test.expectedResult, parsed.Payload) + }) + } + }) + + t.Run("add object", func(t *testing.T) { + type test struct { + name string + object *models.Object + expectedResult *models.Object + } + + tests := []test{ + { + name: "without props - noaction changes", + object: &models.Object{Class: "Foo", Properties: nil}, + expectedResult: &models.Object{Class: "Foo", Properties: nil}, + }, + { + name: "without ref props - noaction changes", + object: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + expectedResult: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + }, + { + name: "with a ref prop - no origin configured", + object: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + expectedResult: &models.Object{Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: 
"weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + Href: "/v1/objects/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeManager := &fakeManager{ + addObjectReturn: test.object, + } + h := &objectHandlers{manager: fakeManager, metricRequestsTotal: &fakeMetricRequestsTotal{}} + res := h.addObject(objects.ObjectsCreateParams{ + HTTPRequest: httptest.NewRequest("POST", "/v1/objects", nil), + Body: test.object, + }, nil) + parsed, ok := res.(*objects.ObjectsCreateOK) + require.True(t, ok) + assert.Equal(t, test.expectedResult, parsed.Payload) + }) + } + }) + + t.Run("get objects", func(t *testing.T) { + type test struct { + name string + object []*models.Object + expectedResult []*models.Object + } + + tests := []test{ + { + name: "without props - noaction changes", + object: []*models.Object{{Class: "Foo", Properties: nil}}, + expectedResult: []*models.Object{{Class: "Foo", Properties: nil}}, + }, + { + name: "without ref props - noaction changes", + object: []*models.Object{ + {Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + {Class: "Bar", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 234, + }}, + }, + expectedResult: []*models.Object{ + {Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + {Class: "Bar", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 234, + }}, + }, + }, + { + name: "with a ref prop - no origin configured", + object: []*models.Object{ + {Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + {Class: "Bar", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 234, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + 
}, + }, + }}, + }, + expectedResult: []*models.Object{ + {Class: "Foo", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + {Class: "Bar", Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 234, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + Href: "/v1/objects/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeManager := &fakeManager{ + queryResult: test.object, + } + h := &objectHandlers{manager: fakeManager, metricRequestsTotal: &fakeMetricRequestsTotal{}} + res := h.getObjects(objects.ObjectsListParams{HTTPRequest: httptest.NewRequest("GET", "/v1/objects", nil)}, nil) + parsed, ok := res.(*objects.ObjectsListOK) + require.True(t, ok) + assert.Equal(t, test.expectedResult, parsed.Payload.Objects) + }) + } + }) + + // New endpoints which uniquely identify objects of a class + t.Run("UpdateObject", func(t *testing.T) { + cls := "MyClass" + type test struct { + name string + object *models.Object + expectedResult *models.Object + err error + } + + tests := []test{ + { + name: "without props - noaction changes", + object: &models.Object{Class: cls, Properties: nil}, + expectedResult: &models.Object{Class: cls, Properties: nil}, + }, + { + name: "without ref props - noaction changes", + object: &models.Object{Class: cls, Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + expectedResult: &models.Object{Class: cls, Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + }, + { + name: "with a ref prop - no origin configured", + object: &models.Object{Class: cls, Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: 
"weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + expectedResult: &models.Object{Class: cls, Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + Href: "/v1/objects/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + }, + { + name: "forbidden", + err: errors.NewForbidden(&models.Principal{}, "get", "Myclass/123"), + }, + { + name: "validation", + err: uco.ErrInvalidUserInput{}, + }, + { + name: "not found", + err: uco.ErrNotFound{}, + }, + { + name: "unknown error", + err: stderrors.New("any error"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeManager := &fakeManager{ + updateObjectReturn: test.object, + updateObjectErr: test.err, + } + h := &objectHandlers{manager: fakeManager, metricRequestsTotal: &fakeMetricRequestsTotal{}} + res := h.updateObject(objects.ObjectsClassPutParams{ + HTTPRequest: httptest.NewRequest("POST", "/v1/objects/123", nil), + Body: test.object, + ID: "123", + ClassName: cls, + }, nil) + parsed, ok := res.(*objects.ObjectsClassPutOK) + if test.err != nil { + require.False(t, ok) + return + } + require.True(t, ok) + assert.Equal(t, test.expectedResult, parsed.Payload) + }) + } + }) + + t.Run("PatchObject", func(t *testing.T) { + var ( + fakeManager = &fakeManager{} + fakeMetricRequestsTotal = &fakeMetricRequestsTotal{} + h = &objectHandlers{ + manager: fakeManager, + logger: &logrus.Logger{}, + metricRequestsTotal: fakeMetricRequestsTotal, + } + req = objects.ObjectsClassPatchParams{ + HTTPRequest: httptest.NewRequest("PATCH", "/v1/objects/MyClass/123", nil), + ClassName: "MyClass", + ID: "123", + Body: &models.Object{Properties: map[string]interface{}{"name": "hello world"}}, + } + ) + res := h.patchObject(req, nil) + if _, ok := res.(*objects.ObjectsClassPatchNoContent); !ok { + t.Errorf("unexpected result 
%v", res) + } + fakeManager.patchObjectReturn = &uco.Error{Code: uco.StatusBadRequest} + res = h.patchObject(req, nil) + if _, ok := res.(*objects.ObjectsClassPatchUnprocessableEntity); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassPatchUnprocessableEntity{}, res) + } + fakeManager.patchObjectReturn = &uco.Error{Code: uco.StatusNotFound} + res = h.patchObject(req, nil) + if _, ok := res.(*objects.ObjectsClassPatchNotFound); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassPatchNotFound{}, res) + } + fakeManager.patchObjectReturn = &uco.Error{Code: uco.StatusForbidden} + res = h.patchObject(req, nil) + if _, ok := res.(*objects.ObjectsClassPatchForbidden); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassPatchForbidden{}, res) + } + fakeManager.patchObjectReturn = &uco.Error{Code: uco.StatusInternalServerError} + res = h.patchObject(req, nil) + if _, ok := res.(*objects.ObjectsClassPatchInternalServerError); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassPatchInternalServerError{}, res) + } + + // test deprecated function + fakeManager.patchObjectReturn = nil + res = h.patchObjectDeprecated(objects.ObjectsPatchParams{ + HTTPRequest: httptest.NewRequest("PATCH", "/v1/objects/123", nil), + ID: "123", + Body: &models.Object{ + Class: "MyClass", + Properties: map[string]interface{}{"name": "hello world"}, + }, + }, nil) + if _, ok := res.(*objects.ObjectsClassPatchNoContent); !ok { + t.Errorf("unexpected result %v", res) + } + + // test deprecated function + fakeManager.patchObjectReturn = nil + res = h.patchObjectDeprecated(objects.ObjectsPatchParams{ + HTTPRequest: httptest.NewRequest("PATCH", "/v1/objects/123", nil), + ID: "123", + Body: nil, + }, nil) + if _, ok := res.(*objects.ObjectsClassPatchBadRequest); !ok { + t.Errorf("unexpected result %v", res) + } + }) + + t.Run("GetObject", func(t *testing.T) { + cls := "MyClass" + type test struct { + name string + object *models.Object + err error + expectedResult 
*models.Object + } + + tests := []test{ + { + name: "without props - noaction changes", + object: &models.Object{Class: cls, Properties: nil}, + expectedResult: &models.Object{Class: cls, Properties: nil}, + }, + { + name: "without ref props - noaction changes", + object: &models.Object{Class: cls, Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + expectedResult: &models.Object{Class: cls, Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + }}, + }, + { + name: "with a ref prop - no origin configured", + object: &models.Object{Class: cls, Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + expectedResult: &models.Object{Class: cls, Properties: map[string]interface{}{ + "name": "hello world", + "numericalField": 134, + "someRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/85f78e29-5937-4390-a121-5379f262b4e5", + Href: "/v1/objects/85f78e29-5937-4390-a121-5379f262b4e5", + }, + }, + }}, + }, + { + name: "error forbidden", + err: errors.NewForbidden(&models.Principal{}, "get", "Myclass/123"), + }, + { + name: "use case err not found", + err: uco.ErrNotFound{}, + }, + { + name: "any other error", + err: stderrors.New("unknown error"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeManager := &fakeManager{ + getObjectReturn: test.object, + getObjectErr: test.err, + } + h := &objectHandlers{manager: fakeManager, metricRequestsTotal: &fakeMetricRequestsTotal{}} + req := objects.ObjectsClassGetParams{ + HTTPRequest: httptest.NewRequest("GET", "/v1/objects/MyClass/123", nil), + ClassName: cls, + ID: "123", + } + res := h.getObject(req, nil) + parsed, ok := res.(*objects.ObjectsClassGetOK) + if test.err != nil { + require.False(t, ok) + return + } + 
require.True(t, ok) + assert.Equal(t, test.expectedResult, parsed.Payload) + }) + } + }) + + t.Run("DeleteObject", func(t *testing.T) { + cls := "MyClass" + type test struct { + name string + err error + } + + tests := []test{ + { + name: "without props - noaction changes", + }, + { + name: "error forbidden", + err: errors.NewForbidden(&models.Principal{}, "get", "Myclass/123"), + }, + { + name: "use case err not found", + err: uco.ErrNotFound{}, + }, + { + name: "unknown error", + err: stderrors.New("any error"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeManager := &fakeManager{ + deleteObjectReturn: test.err, + } + h := &objectHandlers{manager: fakeManager, metricRequestsTotal: &fakeMetricRequestsTotal{}} + req := objects.ObjectsClassDeleteParams{ + HTTPRequest: httptest.NewRequest("GET", "/v1/objects/MyClass/123", nil), + ClassName: cls, + ID: "123", + } + res := h.deleteObject(req, nil) + _, ok := res.(*objects.ObjectsClassDeleteNoContent) + if test.err != nil { + require.False(t, ok) + return + } + require.True(t, ok) + }) + } + }) + + t.Run("HeadObject", func(t *testing.T) { + m := &fakeManager{ + headObjectReturn: true, + } + h := &objectHandlers{manager: m, logger: &logrus.Logger{}, metricRequestsTotal: &fakeMetricRequestsTotal{}} + req := objects.ObjectsClassHeadParams{ + HTTPRequest: httptest.NewRequest("HEAD", "/v1/objects/MyClass/123", nil), + ClassName: "MyClass", + ID: "123", + } + res := h.headObject(req, nil) + if _, ok := res.(*objects.ObjectsClassHeadNoContent); !ok { + t.Errorf("unexpected result %v", res) + } + + m.headObjectErr = &uco.Error{Code: uco.StatusForbidden} + res = h.headObject(req, nil) + if _, ok := res.(*objects.ObjectsClassHeadForbidden); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassHeadForbidden{}, res) + } + m.headObjectErr = &uco.Error{Code: uco.StatusInternalServerError} + res = h.headObject(req, nil) + if _, ok := res.(*objects.ObjectsClassHeadInternalServerError); 
!ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassHeadInternalServerError{}, res) + } + m.headObjectErr = nil + m.headObjectReturn = false + res = h.headObject(req, nil) + if _, ok := res.(*objects.ObjectsClassHeadNotFound); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassHeadNotFound{}, res) + } + // same test as before but using old request + oldRequest := objects.ObjectsHeadParams{HTTPRequest: req.HTTPRequest} + res = h.headObjectDeprecated(oldRequest, nil) + if _, ok := res.(*objects.ObjectsClassHeadNotFound); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassHeadNotFound{}, res) + } + }) + + t.Run("PostReference", func(t *testing.T) { + m := &fakeManager{} + h := &objectHandlers{manager: m, logger: &logrus.Logger{}, metricRequestsTotal: &fakeMetricRequestsTotal{}} + req := objects.ObjectsClassReferencesCreateParams{ + HTTPRequest: httptest.NewRequest("HEAD", "/v1/objects/MyClass/123/references/prop", nil), + ClassName: "MyClass", + ID: "123", + Body: new(models.SingleRef), + PropertyName: "prop", + } + res := h.addObjectReference(req, nil) + if _, ok := res.(*objects.ObjectsClassReferencesCreateOK); !ok { + t.Errorf("unexpected result %v", res) + } + + m.addRefErr = &uco.Error{Code: uco.StatusForbidden} + res = h.addObjectReference(req, nil) + if _, ok := res.(*objects.ObjectsClassReferencesCreateForbidden); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassReferencesCreateForbidden{}, res) + } + // source object not found + m.addRefErr = &uco.Error{Code: uco.StatusNotFound} + res = h.addObjectReference(req, nil) + if _, ok := res.(*objects.ObjectsClassReferencesCreateNotFound); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassReferencesCreateNotFound{}, res) + } + + m.addRefErr = &uco.Error{Code: uco.StatusInternalServerError} + res = h.addObjectReference(req, nil) + if _, ok := res.(*objects.ObjectsClassReferencesCreateInternalServerError); !ok { + t.Errorf("expected: %T got: %T", 
objects.ObjectsClassReferencesCreateInternalServerError{}, res) + } + m.addRefErr = &uco.Error{Code: uco.StatusBadRequest} + res = h.addObjectReference(req, nil) + if _, ok := res.(*objects.ObjectsClassReferencesCreateUnprocessableEntity); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassReferencesCreateUnprocessableEntity{}, res) + } + // same test as before but using old request + oldRequest := objects.ObjectsReferencesCreateParams{ + HTTPRequest: req.HTTPRequest, + Body: req.Body, + ID: req.ID, + PropertyName: req.ClassName, + } + res = h.addObjectReferenceDeprecated(oldRequest, nil) + if _, ok := res.(*objects.ObjectsClassReferencesCreateUnprocessableEntity); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassReferencesCreateUnprocessableEntity{}, res) + } + }) + + t.Run("PutReferences", func(t *testing.T) { + m := &fakeManager{} + h := &objectHandlers{manager: m, logger: &logrus.Logger{}, metricRequestsTotal: &fakeMetricRequestsTotal{}} + req := objects.ObjectsClassReferencesPutParams{ + HTTPRequest: httptest.NewRequest("HEAD", "/v1/objects/MyClass/123/references/prop", nil), + ClassName: "MyClass", + ID: "123", + Body: models.MultipleRef{}, + PropertyName: "prop", + } + res := h.putObjectReferences(req, nil) + if _, ok := res.(*objects.ObjectsClassReferencesPutOK); !ok { + t.Errorf("unexpected result %v", res) + } + + m.putRefErr = &uco.Error{Code: uco.StatusForbidden} + res = h.putObjectReferences(req, nil) + if _, ok := res.(*objects.ObjectsClassReferencesPutForbidden); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassReferencesPutForbidden{}, res) + } + m.putRefErr = &uco.Error{Code: uco.StatusInternalServerError} + res = h.putObjectReferences(req, nil) + if _, ok := res.(*objects.ObjectsClassReferencesPutInternalServerError); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassReferencesPutInternalServerError{}, res) + } + m.putRefErr = &uco.Error{Code: uco.StatusBadRequest} + res = h.putObjectReferences(req, 
nil) + if _, ok := res.(*objects.ObjectsClassReferencesPutUnprocessableEntity); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassReferencesPutUnprocessableEntity{}, res) + } + // same test as before but using old request + oldRequest := objects.ObjectsReferencesUpdateParams{ + HTTPRequest: req.HTTPRequest, + Body: req.Body, + ID: req.ID, + PropertyName: req.ClassName, + } + res = h.updateObjectReferencesDeprecated(oldRequest, nil) + if _, ok := res.(*objects.ObjectsClassReferencesPutUnprocessableEntity); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassReferencesPutUnprocessableEntity{}, res) + } + }) + + t.Run("DeleteReference", func(t *testing.T) { + m := &fakeManager{} + h := &objectHandlers{manager: m, logger: &logrus.Logger{}, metricRequestsTotal: &fakeMetricRequestsTotal{}} + req := objects.ObjectsClassReferencesDeleteParams{ + HTTPRequest: httptest.NewRequest("HEAD", "/v1/objects/MyClass/123/references/prop", nil), + ClassName: "MyClass", + ID: "123", + Body: new(models.SingleRef), + PropertyName: "prop", + } + res := h.deleteObjectReference(req, nil) + if _, ok := res.(*objects.ObjectsClassReferencesDeleteNoContent); !ok { + t.Errorf("unexpected result %v", res) + } + + m.deleteRefErr = &uco.Error{Code: uco.StatusForbidden} + res = h.deleteObjectReference(req, nil) + if _, ok := res.(*objects.ObjectsClassReferencesDeleteForbidden); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassReferencesDeleteForbidden{}, res) + } + // source object not found + m.deleteRefErr = &uco.Error{Code: uco.StatusNotFound} + res = h.deleteObjectReference(req, nil) + if _, ok := res.(*objects.ObjectsClassReferencesDeleteNotFound); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassReferencesDeleteNotFound{}, res) + } + + m.deleteRefErr = &uco.Error{Code: uco.StatusInternalServerError} + res = h.deleteObjectReference(req, nil) + if _, ok := res.(*objects.ObjectsClassReferencesDeleteInternalServerError); !ok { + t.Errorf("expected: %T 
got: %T", objects.ObjectsClassReferencesDeleteInternalServerError{}, res) + } + m.deleteRefErr = &uco.Error{Code: uco.StatusBadRequest} + res = h.deleteObjectReference(req, nil) + if _, ok := res.(*objects.ObjectsClassReferencesDeleteUnprocessableEntity); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassReferencesDeleteUnprocessableEntity{}, res) + } + // same test as before but using old request + oldRequest := objects.ObjectsReferencesDeleteParams{ + HTTPRequest: req.HTTPRequest, + Body: req.Body, + ID: req.ID, + PropertyName: req.ClassName, + } + res = h.deleteObjectReferenceDeprecated(oldRequest, nil) + if _, ok := res.(*objects.ObjectsClassReferencesDeleteUnprocessableEntity); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsClassReferencesDeleteUnprocessableEntity{}, res) + } + }) + + t.Run("Query", func(t *testing.T) { + var ( + cls = "MyClass" + m = &fakeManager{ + queryErr: nil, + queryResult: []*models.Object{{ + Properties: map[string]interface{}{"name": "John"}, + }}, + } + fakeMetricRequestsTotal = &fakeMetricRequestsTotal{} + h = &objectHandlers{ + manager: m, + logger: &logrus.Logger{}, + metricRequestsTotal: fakeMetricRequestsTotal, + } + req = objects.ObjectsListParams{ + HTTPRequest: httptest.NewRequest("HEAD", "/v1/objects/", nil), + Class: &cls, + } + ) + + res := h.query(req, nil) + if _, ok := res.(*objects.ObjectsListOK); !ok { + t.Errorf("unexpected result %v", res) + } + + m.queryErr = &uco.Error{Code: uco.StatusForbidden} + res = h.query(req, nil) + if _, ok := res.(*objects.ObjectsListForbidden); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsListForbidden{}, res) + } + m.queryErr = &uco.Error{Code: uco.StatusNotFound} + res = h.query(req, nil) + if _, ok := res.(*objects.ObjectsListNotFound); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsListNotFound{}, res) + } + m.queryErr = &uco.Error{Code: uco.StatusBadRequest} + res = h.query(req, nil) + if _, ok := res.(*objects.ObjectsListUnprocessableEntity); 
!ok { + t.Errorf("expected: %T got: %T", objects.ObjectsListUnprocessableEntity{}, res) + } + m.queryErr = &uco.Error{Code: uco.StatusInternalServerError} + res = h.query(req, nil) + if _, ok := res.(*objects.ObjectsListInternalServerError); !ok { + t.Errorf("expected: %T got: %T", objects.ObjectsListInternalServerError{}, res) + } + }) +} + +type fakeManager struct { + getObjectReturn *models.Object + getObjectErr error + + addObjectReturn *models.Object + queryResult []*models.Object + queryErr *uco.Error + updateObjectReturn *models.Object + updateObjectErr error + deleteObjectReturn error + patchObjectReturn *uco.Error + headObjectReturn bool + headObjectErr *uco.Error + addRefErr *uco.Error + putRefErr *uco.Error + deleteRefErr *uco.Error +} + +func (f *fakeManager) HeadObject(context.Context, *models.Principal, + string, strfmt.UUID, *additional.ReplicationProperties, string, +) (bool, *uco.Error) { + return f.headObjectReturn, f.headObjectErr +} + +func (f *fakeManager) AddObject(_ context.Context, _ *models.Principal, + object *models.Object, _ *additional.ReplicationProperties, +) (*models.Object, error) { + return object, nil +} + +func (f *fakeManager) ValidateObject(_ context.Context, _ *models.Principal, + _ *models.Object, _ *additional.ReplicationProperties, +) error { + panic("not implemented") // TODO: Implement +} + +func (f *fakeManager) GetObject(_ context.Context, _ *models.Principal, class string, + _ strfmt.UUID, _ additional.Properties, _ *additional.ReplicationProperties, _ string, +) (*models.Object, error) { + return f.getObjectReturn, f.getObjectErr +} + +func (f *fakeManager) GetObjectsClass(ctx context.Context, + principal *models.Principal, id strfmt.UUID, +) (*models.Class, error) { + class := &models.Class{ + Class: f.getObjectReturn.Class, + Vectorizer: "text2vec-contextionary", + } + return class, nil +} + +func (f *fakeManager) GetObjectClassFromName(ctx context.Context, principal *models.Principal, + className string, +) 
(*models.Class, error) { + class := &models.Class{ + Class: f.getObjectReturn.Class, + Vectorizer: "text2vec-contextionary", + } + return class, nil +} + +func (f *fakeManager) GetObjects(ctx context.Context, principal *models.Principal, offset *int64, limit *int64, sort *string, order *string, after *string, addl additional.Properties, tenant string) ([]*models.Object, error) { + return f.queryResult, nil +} + +func (f *fakeManager) Query(_ context.Context, + _ *models.Principal, _ *uco.QueryParams, +) ([]*models.Object, *uco.Error) { + return f.queryResult, f.queryErr +} + +func (f *fakeManager) UpdateObject(_ context.Context, _ *models.Principal, _ string, + _ strfmt.UUID, updates *models.Object, _ *additional.ReplicationProperties, +) (*models.Object, error) { + return updates, f.updateObjectErr +} + +func (f *fakeManager) MergeObject(_ context.Context, _ *models.Principal, + _ *models.Object, _ *additional.ReplicationProperties, +) *uco.Error { + return f.patchObjectReturn +} + +func (f *fakeManager) DeleteObject(_ context.Context, _ *models.Principal, + class string, _ strfmt.UUID, _ *additional.ReplicationProperties, _ string, +) error { + return f.deleteObjectReturn +} + +func (f *fakeManager) AddObjectReference(context.Context, *models.Principal, + *uco.AddReferenceInput, *additional.ReplicationProperties, string, +) *uco.Error { + return f.addRefErr +} + +func (f *fakeManager) UpdateObjectReferences(context.Context, *models.Principal, + *uco.PutReferenceInput, *additional.ReplicationProperties, string, +) *uco.Error { + return f.putRefErr +} + +func (f *fakeManager) DeleteObjectReference(context.Context, *models.Principal, + *uco.DeleteReferenceInput, *additional.ReplicationProperties, string, +) *uco.Error { + return f.deleteRefErr +} + +type fakeMetricRequestsTotal struct{} + +func (f *fakeMetricRequestsTotal) logError(className string, err error) {} +func (f *fakeMetricRequestsTotal) logOk(className string) {} +func (f *fakeMetricRequestsTotal) 
logUserError(className string) {} +func (f *fakeMetricRequestsTotal) logServerError(className string, err error) {} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_schema.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_schema.go new file mode 100644 index 0000000000000000000000000000000000000000..731bfb62a64b79204c61542c3960dce6c2338d12 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/handlers_schema.go @@ -0,0 +1,411 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "errors" + "fmt" + + "github.com/go-openapi/runtime/middleware" + "github.com/sirupsen/logrus" + + restCtx "github.com/weaviate/weaviate/adapters/handlers/rest/context" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/schema" + "github.com/weaviate/weaviate/entities/models" + authzerrors "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + "github.com/weaviate/weaviate/usecases/monitoring" + uco "github.com/weaviate/weaviate/usecases/objects" + schemaUC "github.com/weaviate/weaviate/usecases/schema" +) + +type schemaHandlers struct { + manager *schemaUC.Manager + metricRequestsTotal restApiRequestsTotal +} + +func (s *schemaHandlers) addClass(params schema.SchemaObjectsCreateParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + + _, _, err := s.manager.AddClass(ctx, principal, params.ObjectClass) + if err != nil { + s.metricRequestsTotal.logError(params.ObjectClass.Class, err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return 
schema.NewSchemaObjectsCreateForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewSchemaObjectsCreateUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + s.metricRequestsTotal.logOk(params.ObjectClass.Class) + return schema.NewSchemaObjectsCreateOK().WithPayload(params.ObjectClass) +} + +func (s *schemaHandlers) updateClass(params schema.SchemaObjectsUpdateParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + err := s.manager.UpdateClass(ctx, principal, params.ClassName, + params.ObjectClass) + if err != nil { + s.metricRequestsTotal.logError(params.ClassName, err) + if errors.Is(err, schemaUC.ErrNotFound) { + return schema.NewSchemaObjectsUpdateNotFound() + } + + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewSchemaObjectsUpdateForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewSchemaObjectsUpdateUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + s.metricRequestsTotal.logOk(params.ClassName) + return schema.NewSchemaObjectsUpdateOK().WithPayload(params.ObjectClass) +} + +func (s *schemaHandlers) getClass(params schema.SchemaObjectsGetParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + class, _, err := s.manager.GetConsistentClass(ctx, principal, params.ClassName, *params.Consistency) + if err != nil { + s.metricRequestsTotal.logError(params.ClassName, err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewSchemaObjectsGetForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewSchemaObjectsGetInternalServerError(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + } + + if class == nil { + s.metricRequestsTotal.logUserError(params.ClassName) + return schema.NewSchemaObjectsGetNotFound() + } + + s.metricRequestsTotal.logOk(params.ClassName) + return schema.NewSchemaObjectsGetOK().WithPayload(class) +} + +func (s *schemaHandlers) deleteClass(params schema.SchemaObjectsDeleteParams, principal *models.Principal) middleware.Responder { + err := s.manager.DeleteClass(params.HTTPRequest.Context(), principal, params.ClassName) + if err != nil { + s.metricRequestsTotal.logError(params.ClassName, err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewSchemaObjectsDeleteForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewSchemaObjectsDeleteBadRequest().WithPayload(errPayloadFromSingleErr(err)) + } + } + + s.metricRequestsTotal.logOk(params.ClassName) + return schema.NewSchemaObjectsDeleteOK() +} + +func (s *schemaHandlers) addClassProperty(params schema.SchemaObjectsPropertiesAddParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + _, _, err := s.manager.AddClassProperty(ctx, principal, s.manager.ReadOnlyClass(params.ClassName), params.ClassName, false, params.Body) + if err != nil { + s.metricRequestsTotal.logError(params.ClassName, err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewSchemaObjectsPropertiesAddForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewSchemaObjectsPropertiesAddUnprocessableEntity(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + } + + s.metricRequestsTotal.logOk(params.ClassName) + return schema.NewSchemaObjectsPropertiesAddOK().WithPayload(params.Body) +} + +func (s *schemaHandlers) getSchema(params schema.SchemaDumpParams, principal *models.Principal) middleware.Responder { + dbSchema, err := s.manager.GetConsistentSchema(params.HTTPRequest.Context(), principal, *params.Consistency) + if err != nil { + s.metricRequestsTotal.logError("", err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewSchemaDumpForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewSchemaDumpForbidden().WithPayload(errPayloadFromSingleErr(err)) + } + } + + payload := dbSchema.Objects + + s.metricRequestsTotal.logOk("") + return schema.NewSchemaDumpOK().WithPayload(payload) +} + +func (s *schemaHandlers) getShardsStatus(params schema.SchemaObjectsShardsGetParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + var tenant string + if params.Tenant == nil { + tenant = "" + } else { + tenant = *params.Tenant + } + + status, err := s.manager.ShardsStatus(ctx, principal, params.ClassName, tenant) + if err != nil { + s.metricRequestsTotal.logError("", err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewSchemaObjectsShardsGetForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewSchemaObjectsShardsGetNotFound(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + } + + payload := status + + s.metricRequestsTotal.logOk("") + return schema.NewSchemaObjectsShardsGetOK().WithPayload(payload) +} + +func (s *schemaHandlers) updateShardStatus(params schema.SchemaObjectsShardsUpdateParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + _, err := s.manager.UpdateShardStatus( + ctx, principal, params.ClassName, params.ShardName, params.Body.Status) + if err != nil { + s.metricRequestsTotal.logError("", err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewSchemaObjectsShardsGetForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewSchemaObjectsShardsUpdateUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + payload := params.Body + + s.metricRequestsTotal.logOk("") + return schema.NewSchemaObjectsShardsUpdateOK().WithPayload(payload) +} + +func (s *schemaHandlers) createTenants(params schema.TenantsCreateParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + _, err := s.manager.AddTenants( + ctx, principal, params.ClassName, params.Body) + if err != nil { + s.metricRequestsTotal.logError(params.ClassName, err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewTenantsCreateForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewTenantsCreateUnprocessableEntity(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + } + + s.metricRequestsTotal.logOk(params.ClassName) + return schema.NewTenantsCreateOK().WithPayload(params.Body) +} + +func (s *schemaHandlers) updateTenants(params schema.TenantsUpdateParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + updatedTenants, err := s.manager.UpdateTenants( + ctx, principal, params.ClassName, params.Body) + if err != nil { + s.metricRequestsTotal.logError(params.ClassName, err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewTenantsUpdateForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewTenantsUpdateUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + s.metricRequestsTotal.logOk(params.ClassName) + return schema.NewTenantsUpdateOK().WithPayload(updatedTenants) +} + +func (s *schemaHandlers) deleteTenants(params schema.TenantsDeleteParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + err := s.manager.DeleteTenants( + ctx, principal, params.ClassName, params.Tenants) + if err != nil { + s.metricRequestsTotal.logError(params.ClassName, err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewTenantsDeleteForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewTenantsDeleteUnprocessableEntity(). 
+ WithPayload(errPayloadFromSingleErr(err)) + } + } + + s.metricRequestsTotal.logOk(params.ClassName) + return schema.NewTenantsDeleteOK() +} + +func (s *schemaHandlers) getTenants(params schema.TenantsGetParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + tenants, err := s.manager.GetConsistentTenants(ctx, principal, params.ClassName, *params.Consistency, nil) + if err != nil { + s.metricRequestsTotal.logError(params.ClassName, err) + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewTenantsGetForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewTenantsGetUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + s.metricRequestsTotal.logOk(params.ClassName) + return schema.NewTenantsGetOK().WithPayload(tenants) +} + +func (s *schemaHandlers) getTenant( + params schema.TenantsGetOneParams, + principal *models.Principal, +) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + tenant, err := s.manager.GetConsistentTenant(ctx, principal, params.ClassName, *params.Consistency, params.TenantName) + if err != nil { + s.metricRequestsTotal.logError(params.ClassName, err) + if errors.Is(err, schemaUC.ErrNotFound) { + return schema.NewTenantsGetOneNotFound() + } + if errors.Is(err, schemaUC.ErrUnexpectedMultiple) { + return schema.NewTenantsGetOneInternalServerError(). + WithPayload(errPayloadFromSingleErr(err)) + } + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewTenantsGetOneForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewTenantsGetOneUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + if tenant == nil { + s.metricRequestsTotal.logUserError(params.ClassName) + return schema.NewTenantsGetOneUnprocessableEntity(). 
+ WithPayload(errPayloadFromSingleErr(fmt.Errorf("tenant '%s' not found when it should have been", params.TenantName))) + } + s.metricRequestsTotal.logOk(params.ClassName) + return schema.NewTenantsGetOneOK().WithPayload(tenant) +} + +func (s *schemaHandlers) tenantExists(params schema.TenantExistsParams, principal *models.Principal) middleware.Responder { + ctx := restCtx.AddPrincipalToContext(params.HTTPRequest.Context(), principal) + if err := s.manager.ConsistentTenantExists(ctx, principal, params.ClassName, *params.Consistency, params.TenantName); err != nil { + s.metricRequestsTotal.logError(params.ClassName, err) + if errors.Is(err, schemaUC.ErrNotFound) { + return schema.NewTenantExistsNotFound() + } + switch { + case errors.As(err, &authzerrors.Forbidden{}): + return schema.NewTenantExistsForbidden(). + WithPayload(errPayloadFromSingleErr(err)) + default: + return schema.NewTenantExistsUnprocessableEntity(). + WithPayload(errPayloadFromSingleErr(err)) + } + } + + return schema.NewTenantExistsOK() +} + +func setupSchemaHandlers(api *operations.WeaviateAPI, manager *schemaUC.Manager, metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger) { + h := &schemaHandlers{manager, newSchemaRequestsTotal(metrics, logger)} + + api.SchemaSchemaObjectsCreateHandler = schema. + SchemaObjectsCreateHandlerFunc(h.addClass) + api.SchemaSchemaObjectsDeleteHandler = schema. + SchemaObjectsDeleteHandlerFunc(h.deleteClass) + api.SchemaSchemaObjectsPropertiesAddHandler = schema. + SchemaObjectsPropertiesAddHandlerFunc(h.addClassProperty) + + api.SchemaSchemaObjectsUpdateHandler = schema. + SchemaObjectsUpdateHandlerFunc(h.updateClass) + + api.SchemaSchemaObjectsGetHandler = schema. + SchemaObjectsGetHandlerFunc(h.getClass) + api.SchemaSchemaDumpHandler = schema. + SchemaDumpHandlerFunc(h.getSchema) + + api.SchemaSchemaObjectsShardsGetHandler = schema. + SchemaObjectsShardsGetHandlerFunc(h.getShardsStatus) + api.SchemaSchemaObjectsShardsUpdateHandler = schema. 
+ SchemaObjectsShardsUpdateHandlerFunc(h.updateShardStatus) + + api.SchemaTenantsCreateHandler = schema.TenantsCreateHandlerFunc(h.createTenants) + api.SchemaTenantsUpdateHandler = schema.TenantsUpdateHandlerFunc(h.updateTenants) + api.SchemaTenantsDeleteHandler = schema.TenantsDeleteHandlerFunc(h.deleteTenants) + api.SchemaTenantsGetHandler = schema.TenantsGetHandlerFunc(h.getTenants) + api.SchemaTenantExistsHandler = schema.TenantExistsHandlerFunc(h.tenantExists) + api.SchemaTenantsGetOneHandler = schema.TenantsGetOneHandlerFunc(h.getTenant) +} + +type schemaRequestsTotal struct { + *restApiRequestsTotalImpl +} + +func newSchemaRequestsTotal(metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger) restApiRequestsTotal { + return &schemaRequestsTotal{ + restApiRequestsTotalImpl: &restApiRequestsTotalImpl{newRequestsTotalMetric(metrics, "rest"), "rest", "schema", logger}, + } +} + +func (e *schemaRequestsTotal) logError(className string, err error) { + switch { + case errors.As(err, &authzerrors.Forbidden{}): + e.logUserError(className) + case errors.As(err, &uco.ErrMultiTenancy{}): + e.logUserError(className) + default: + e.logUserError(className) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/helpers.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/helpers.go new file mode 100644 index 0000000000000000000000000000000000000000..6a0ece0f124dac8334b9b0f90d5b5b634e5f59a8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/helpers.go @@ -0,0 +1,39 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "fmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// createErrorResponseObject is a common function to create an error response +func createErrorResponseObject(messages ...string) *models.ErrorResponse { + // Initialize return value + er := &models.ErrorResponse{} + + // appends all error messages to the error + for _, message := range messages { + er.Error = append(er.Error, &models.ErrorResponseErrorItems0{ + Message: message, + }) + } + + return er +} + +func errPayloadFromSingleErr(err error) *models.ErrorResponse { + return &models.ErrorResponse{Error: []*models.ErrorResponseErrorItems0{{ + Message: fmt.Sprintf("%s", err), + }}} +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/logger.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/logger.go new file mode 100644 index 0000000000000000000000000000000000000000..f4ce61f82d5f2cd39a2571c27631e49c76339fc7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/logger.go @@ -0,0 +1,91 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "errors" + "strings" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/usecases/build" +) + +type WeaviateJSONFormatter struct { + *logrus.JSONFormatter + gitHash, imageTag, serverVersion, goVersion string +} + +func NewWeaviateJSONFormatter() logrus.Formatter { + return &WeaviateJSONFormatter{ + &logrus.JSONFormatter{}, + build.Revision, + build.Branch, + build.Version, + build.GoVersion, + } +} + +func (wf *WeaviateJSONFormatter) Format(e *logrus.Entry) ([]byte, error) { + e.Data["build_git_commit"] = wf.gitHash + e.Data["build_image_tag"] = wf.imageTag + e.Data["build_wv_version"] = wf.serverVersion + e.Data["build_go_version"] = wf.goVersion + return wf.JSONFormatter.Format(e) +} + +type WeaviateTextFormatter struct { + *logrus.TextFormatter + gitHash, imageTag, serverVersion, goVersion string +} + +func NewWeaviateTextFormatter() logrus.Formatter { + return &WeaviateTextFormatter{ + &logrus.TextFormatter{}, + build.Revision, + build.Branch, + build.Version, + build.GoVersion, + } +} + +func (wf *WeaviateTextFormatter) Format(e *logrus.Entry) ([]byte, error) { + e.Data["build_git_commit"] = wf.gitHash + e.Data["build_image_tag"] = wf.imageTag + e.Data["build_wv_version"] = wf.serverVersion + e.Data["build_go_version"] = wf.goVersion + return wf.TextFormatter.Format(e) +} + +var errlogLevelNotRecognized = errors.New("log level not recognized") + +// logLevelFromString converts a string to a logrus log level, returns a logLevelNotRecognized +// error if the string is not recognized. level is case insensitive. 
+func logLevelFromString(level string) (logrus.Level, error) { + switch strings.ToLower(level) { + case "panic": + return logrus.PanicLevel, nil + case "fatal": + return logrus.FatalLevel, nil + case "error": + return logrus.ErrorLevel, nil + case "warn", "warning": + return logrus.WarnLevel, nil + case "info": + return logrus.InfoLevel, nil + case "debug": + return logrus.DebugLevel, nil + case "trace": + return logrus.TraceLevel, nil + default: + return 0, errlogLevelNotRecognized + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/middlewares.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/middlewares.go new file mode 100644 index 0000000000000000000000000000000000000000..7a4cebf54905d9970b2ea5349faa0314876e67b0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/middlewares.go @@ -0,0 +1,275 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "context" + "fmt" + "net/http" + "regexp" + "strings" + "time" + + sentryhttp "github.com/getsentry/sentry-go/http" + "github.com/go-openapi/runtime/middleware" + "github.com/prometheus/client_golang/prometheus" + "github.com/rs/cors" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/adapters/handlers/rest/raft" + "github.com/weaviate/weaviate/adapters/handlers/rest/state" + "github.com/weaviate/weaviate/adapters/handlers/rest/swagger_middleware" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/modules" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +// The middleware configuration is for the handler executors. These do not apply to the swagger.json document. 
+// The middleware executes after routing but before authentication, binding and validation +// +// we are setting the middlewares from within configureAPI, as we need access +// to some resources which are not exposed +func makeSetupMiddlewares(appState *state.State) func(http.Handler) http.Handler { + return func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/v1/.well-known/openid-configuration" || r.URL.String() == "/v1" { + handler.ServeHTTP(w, r) + return + } + appState.AnonymousAccess.Middleware(handler).ServeHTTP(w, r) + }) + } +} + +func addHandleRoot(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/" { + w.Header().Add("Location", "/v1") + w.WriteHeader(http.StatusMovedPermanently) + w.Write([]byte(`{"links":{"href":"/v1","name":"api v1","documentationHref":` + + `"https://weaviate.io/developers/weaviate/current/"}}`)) + return + } + + next.ServeHTTP(w, r) + }) +} + +// clusterv1Regexp is used to intercept requests and redirect them to a dedicated http server independent of swagger +var clusterv1Regexp = regexp.MustCompile("/v1/cluster/*") + +// addClusterHandlerMiddleware will inject a middleware that will catch all requests matching clusterv1Regexp. +// If the request match, it will route it to a dedicated http.Handler and skip the next middleware. +// If the request doesn't match, it will continue to the next handler. 
+func addClusterHandlerMiddleware(next http.Handler, appState *state.State) http.Handler { + // Instantiate the router outside the returned lambda to avoid re-allocating everytime a new request comes in + raftRouter := raft.ClusterRouter(appState.SchemaManager.Handler) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case r.URL.Path == "/v1/cluster/statistics": + next.ServeHTTP(w, r) + case clusterv1Regexp.MatchString(r.URL.Path): + raftRouter.ServeHTTP(w, r) + default: + next.ServeHTTP(w, r) + } + }) +} + +func makeAddModuleHandlers(modules *modules.Provider) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + mux := http.NewServeMux() + + for _, mod := range modules.GetAllWithHTTPHandlers() { + prefix := fmt.Sprintf("/v1/modules/%s", mod.Name()) + mux.Handle(fmt.Sprintf("%s/", prefix), + http.StripPrefix(prefix, mod.RootHandler())) + } + + prefix := "/v1/modules" + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if url := r.URL.String(); len(url) > len(prefix) && url[:len(prefix)] == prefix { + mux.ServeHTTP(w, r) + return + } + + next.ServeHTTP(w, r) + }) + } +} + +// The middleware configuration happens before anything, this middleware also applies to serving the swagger.json document. +// So this is a good place to plug in a panic handling middleware, logging and metrics +// Contains "x-api-key", "x-api-token" for legacy reasons, older interfaces might need these headers. 
+func makeSetupGlobalMiddleware(appState *state.State, context *middleware.Context) func(http.Handler) http.Handler { + return func(handler http.Handler) http.Handler { + handleCORS := cors.New(cors.Options{ + OptionsPassthrough: true, + AllowedMethods: strings.Split(appState.ServerConfig.Config.CORS.AllowMethods, ","), + AllowedHeaders: strings.Split(appState.ServerConfig.Config.CORS.AllowHeaders, ","), + AllowedOrigins: strings.Split(appState.ServerConfig.Config.CORS.AllowOrigin, ","), + }).Handler + handler = handleCORS(handler) + handler = swagger_middleware.AddMiddleware([]byte(SwaggerJSON), handler) + handler = makeAddLogging(appState.Logger)(handler) + if appState.ServerConfig.Config.Monitoring.Enabled { + handler = makeAddMonitoring(appState.Metrics)(handler) + } + handler = addPreflight(handler, appState.ServerConfig.Config.CORS) + handler = addLiveAndReadyness(appState, handler) + handler = addHandleRoot(handler) + handler = makeAddModuleHandlers(appState.Modules)(handler) + handler = addInjectHeadersIntoContext(handler) + handler = makeCatchPanics(appState.Logger, newPanicsRequestsTotal(appState.Metrics, appState.Logger))(handler) + handler = addSourceIpToContext(handler) + if appState.ServerConfig.Config.Monitoring.Enabled { + handler = monitoring.InstrumentHTTP( + handler, + staticRoute(context), + appState.HTTPServerMetrics.InflightRequests, + appState.HTTPServerMetrics.RequestDuration, + appState.HTTPServerMetrics.RequestBodySize, + appState.HTTPServerMetrics.ResponseBodySize, + ) + } + // Must be the last middleware as it might skip the next handler + handler = addClusterHandlerMiddleware(handler, appState) + if appState.ServerConfig.Config.Sentry.Enabled { + handler = addSentryHandler(handler) + } + + return handler + } +} + +// staticRoute is used to convert routes in our main http server into static routes +// by removing all the dynamic variables in the route. Useful for instrumentation +// where "route cardinality" matters. 
+ +// Example: +// `/schema/Movies/properties` -> `/schema/{className}` +func staticRoute(context *middleware.Context) monitoring.StaticRouteLabel { + return func(r *http.Request) (*http.Request, string) { + route := r.URL.String() + req := r + + matched, rr, ok := context.RouteInfo(r) + if ok { + // convert dynamic route to static route. + // `/api/v1/schema/Question/tenant1` -> `/api/v1/schema/{class}/{tenant}` + route = matched.PathPattern + req = rr + } + return req, route + } +} + +func addSentryHandler(next http.Handler) http.Handler { + return sentryhttp.New(sentryhttp.Options{}).Handle(next) +} + +func makeAddLogging(logger logrus.FieldLogger) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + logger. + WithField("action", "restapi_request"). + WithField("method", r.Method). + WithField("url", r.URL). + Debug("received HTTP request") + next.ServeHTTP(w, r) + }) + } +} + +func makeAddMonitoring(metrics *monitoring.PrometheusMetrics) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + before := time.Now() + method := r.Method + path := r.URL.Path + next.ServeHTTP(w, r) + + if strings.HasPrefix(path, "/v1/batch/objects") && method == http.MethodPost { + metrics.BatchTime.With(prometheus.Labels{ + "operation": "total_api_level", + "class_name": "n/a", + "shard_name": "n/a", + }). 
+ Observe(float64(time.Since(before) / time.Millisecond)) + + metrics.BatchSizeBytes.WithLabelValues("rest").Observe(float64(r.ContentLength)) + } + }) + } +} + +func addPreflight(next http.Handler, cfg config.CORS) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", cfg.AllowOrigin) + w.Header().Set("Access-Control-Allow-Methods", cfg.AllowMethods) + w.Header().Set("Access-Control-Allow-Headers", cfg.AllowHeaders) + + if r.Method == "OPTIONS" { + return + } + + next.ServeHTTP(w, r) + }) +} + +func addInjectHeadersIntoContext(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + changed := false + for k, v := range r.Header { + if strings.HasPrefix(k, "X-") || k == "Authorization" { + ctx = context.WithValue(ctx, k, v) + changed = true + } + } + + if changed { + next.ServeHTTP(w, r.Clone(ctx)) + } else { + next.ServeHTTP(w, r) + } + }) +} + +func addLiveAndReadyness(state *state.State, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/v1/.well-known/live" { + w.WriteHeader(http.StatusOK) + return + } + + if r.URL.String() == "/v1/.well-known/ready" { + code := http.StatusOK + // if this node is in maintenance mode, we want to return live but not ready + // so that kubernetes will allow this pod to run but not send traffic to it + if state.Cluster.MaintenanceModeEnabledForLocalhost() { + code = http.StatusServiceUnavailable + } else if !state.ClusterService.Ready() || state.Cluster.ClusterHealthScore() != 0 { + code = http.StatusServiceUnavailable + } else if state.Modules != nil { + _, err := state.Modules.GetMeta() + if err != nil { + code = http.StatusServiceUnavailable + } + } + w.WriteHeader(code) + return + } + + next.ServeHTTP(w, r) + }) +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/middlewares_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/middlewares_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f57f47ab82bacb91035cf9063e0d3c47b5b6ab42 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/middlewares_test.go @@ -0,0 +1,76 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "net/http" + "testing" + + "github.com/go-openapi/loads" + "github.com/go-openapi/runtime/middleware" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" +) + +func Test_staticRoute(t *testing.T) { + spec, err := loads.Embedded(SwaggerJSON, FlatSwaggerJSON) + require.NoError(t, err) + + api := operations.NewWeaviateAPI(spec) + api.Init() + + router := middleware.DefaultRouter(spec, api) + ctx := middleware.NewRoutableContext(spec, api, router) + + cases := []struct { + name string + req *http.Request + expected string + }{ + { + name: "unmatched route", + req: newRequest(t, "/foo"), // un-matched route + expected: "/foo", + }, + { + name: "matched route", + req: newRequest(t, "/v1/schema"), // matched route + expected: "/v1/schema", + }, + { + name: "matched route with dynamic path", + req: newRequest(t, "/v1/schema/Movies/"), // matched route. + expected: "/v1/schema/{className}", // yay! + }, + { + name: "matched route with dynamic path 2", + req: newRequest(t, "/v1/schema/Movies/shards"), // matched route. + expected: "/v1/schema/{className}/shards", // yay! 
+ }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + _, got := staticRoute(ctx)(tc.req) + assert.Equal(t, tc.expected, got) + }) + } +} + +func newRequest(t *testing.T, path string) *http.Request { + t.Helper() + + r, err := http.NewRequest("GET", path, nil) + require.NoError(t, err) + return r +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/add_permissions.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/add_permissions.go new file mode 100644 index 0000000000000000000000000000000000000000..3b2e645b3723492eeb89792f4aa7887502837bcf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/add_permissions.go @@ -0,0 +1,191 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// AddPermissionsHandlerFunc turns a function with the right signature into a add permissions handler +type AddPermissionsHandlerFunc func(AddPermissionsParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn AddPermissionsHandlerFunc) Handle(params AddPermissionsParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// AddPermissionsHandler interface for that can handle valid add permissions params +type AddPermissionsHandler interface { + Handle(AddPermissionsParams, *models.Principal) middleware.Responder +} + +// NewAddPermissions creates a new http.Handler for the add permissions operation +func NewAddPermissions(ctx *middleware.Context, handler AddPermissionsHandler) *AddPermissions { + return &AddPermissions{Context: ctx, Handler: handler} +} + +/* + AddPermissions swagger:route POST /authz/roles/{id}/add-permissions authz addPermissions + +Add permission to a given role. 
+*/ +type AddPermissions struct { + Context *middleware.Context + Handler AddPermissionsHandler +} + +func (o *AddPermissions) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewAddPermissionsParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// AddPermissionsBody add permissions body +// +// swagger:model AddPermissionsBody +type AddPermissionsBody struct { + + // permissions to be added to the role + // Required: true + Permissions []*models.Permission `json:"permissions" yaml:"permissions"` +} + +// Validate validates this add permissions body +func (o *AddPermissionsBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validatePermissions(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *AddPermissionsBody) validatePermissions(formats strfmt.Registry) error { + + if err := validate.Required("body"+"."+"permissions", "body", o.Permissions); err != nil { + return err + } + + for i := 0; i < len(o.Permissions); i++ { + if swag.IsZero(o.Permissions[i]) { // not required + continue + } + + if o.Permissions[i] != nil { + if err := o.Permissions[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." 
+ "permissions" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "permissions" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this add permissions body based on the context it is used +func (o *AddPermissionsBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidatePermissions(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *AddPermissionsBody) contextValidatePermissions(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(o.Permissions); i++ { + + if o.Permissions[i] != nil { + if err := o.Permissions[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "permissions" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "permissions" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (o *AddPermissionsBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *AddPermissionsBody) UnmarshalBinary(b []byte) error { + var res AddPermissionsBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/add_permissions_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/add_permissions_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..35c81c923dbdf5cff61644f57bc1364b3b77e60d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/add_permissions_parameters.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewAddPermissionsParams creates a new AddPermissionsParams object +// +// There are no default values defined in the spec. 
+func NewAddPermissionsParams() AddPermissionsParams { + + return AddPermissionsParams{} +} + +// AddPermissionsParams contains all the bound params for the add permissions operation +// typically these are obtained from a http.Request +// +// swagger:parameters addPermissions +type AddPermissionsParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body AddPermissionsBody + /*role name + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewAddPermissionsParams() beforehand. +func (o *AddPermissionsParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body AddPermissionsBody + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *AddPermissionsParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/add_permissions_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/add_permissions_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..abd40ee36f72813014776e104937ea1c0b74f69a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/add_permissions_responses.go @@ -0,0 +1,280 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// AddPermissionsOKCode is the HTTP code returned for type AddPermissionsOK +const AddPermissionsOKCode int = 200 + +/* +AddPermissionsOK Permissions added successfully + +swagger:response addPermissionsOK +*/ +type AddPermissionsOK struct { +} + +// NewAddPermissionsOK creates AddPermissionsOK with default headers values +func NewAddPermissionsOK() *AddPermissionsOK { + + return &AddPermissionsOK{} +} + +// WriteResponse to the client +func (o *AddPermissionsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// AddPermissionsBadRequestCode is the HTTP code returned for type AddPermissionsBadRequest +const AddPermissionsBadRequestCode int = 400 + +/* +AddPermissionsBadRequest Malformed request. 
+ +swagger:response addPermissionsBadRequest +*/ +type AddPermissionsBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAddPermissionsBadRequest creates AddPermissionsBadRequest with default headers values +func NewAddPermissionsBadRequest() *AddPermissionsBadRequest { + + return &AddPermissionsBadRequest{} +} + +// WithPayload adds the payload to the add permissions bad request response +func (o *AddPermissionsBadRequest) WithPayload(payload *models.ErrorResponse) *AddPermissionsBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the add permissions bad request response +func (o *AddPermissionsBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AddPermissionsBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AddPermissionsUnauthorizedCode is the HTTP code returned for type AddPermissionsUnauthorized +const AddPermissionsUnauthorizedCode int = 401 + +/* +AddPermissionsUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response addPermissionsUnauthorized +*/ +type AddPermissionsUnauthorized struct { +} + +// NewAddPermissionsUnauthorized creates AddPermissionsUnauthorized with default headers values +func NewAddPermissionsUnauthorized() *AddPermissionsUnauthorized { + + return &AddPermissionsUnauthorized{} +} + +// WriteResponse to the client +func (o *AddPermissionsUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// AddPermissionsForbiddenCode is the HTTP code returned for type AddPermissionsForbidden +const AddPermissionsForbiddenCode int = 403 + +/* +AddPermissionsForbidden Forbidden + +swagger:response addPermissionsForbidden +*/ +type AddPermissionsForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAddPermissionsForbidden creates AddPermissionsForbidden with default headers values +func NewAddPermissionsForbidden() *AddPermissionsForbidden { + + return &AddPermissionsForbidden{} +} + +// WithPayload adds the payload to the add permissions forbidden response +func (o *AddPermissionsForbidden) WithPayload(payload *models.ErrorResponse) *AddPermissionsForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the add permissions forbidden response +func (o *AddPermissionsForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AddPermissionsForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AddPermissionsNotFoundCode is the HTTP code returned for type AddPermissionsNotFound +const AddPermissionsNotFoundCode int = 404 + +/* 
+AddPermissionsNotFound no role found + +swagger:response addPermissionsNotFound +*/ +type AddPermissionsNotFound struct { +} + +// NewAddPermissionsNotFound creates AddPermissionsNotFound with default headers values +func NewAddPermissionsNotFound() *AddPermissionsNotFound { + + return &AddPermissionsNotFound{} +} + +// WriteResponse to the client +func (o *AddPermissionsNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// AddPermissionsUnprocessableEntityCode is the HTTP code returned for type AddPermissionsUnprocessableEntity +const AddPermissionsUnprocessableEntityCode int = 422 + +/* +AddPermissionsUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? + +swagger:response addPermissionsUnprocessableEntity +*/ +type AddPermissionsUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAddPermissionsUnprocessableEntity creates AddPermissionsUnprocessableEntity with default headers values +func NewAddPermissionsUnprocessableEntity() *AddPermissionsUnprocessableEntity { + + return &AddPermissionsUnprocessableEntity{} +} + +// WithPayload adds the payload to the add permissions unprocessable entity response +func (o *AddPermissionsUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *AddPermissionsUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the add permissions unprocessable entity response +func (o *AddPermissionsUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AddPermissionsUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != 
nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AddPermissionsInternalServerErrorCode is the HTTP code returned for type AddPermissionsInternalServerError +const AddPermissionsInternalServerErrorCode int = 500 + +/* +AddPermissionsInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response addPermissionsInternalServerError +*/ +type AddPermissionsInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAddPermissionsInternalServerError creates AddPermissionsInternalServerError with default headers values +func NewAddPermissionsInternalServerError() *AddPermissionsInternalServerError { + + return &AddPermissionsInternalServerError{} +} + +// WithPayload adds the payload to the add permissions internal server error response +func (o *AddPermissionsInternalServerError) WithPayload(payload *models.ErrorResponse) *AddPermissionsInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the add permissions internal server error response +func (o *AddPermissionsInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AddPermissionsInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/add_permissions_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/add_permissions_urlbuilder.go new file mode 100644 index 
0000000000000000000000000000000000000000..ff1b4c56ffe62abeeb246598a989abb67c6c65c2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/add_permissions_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// AddPermissionsURL generates an URL for the add permissions operation +type AddPermissionsURL struct { + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *AddPermissionsURL) WithBasePath(bp string) *AddPermissionsURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *AddPermissionsURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *AddPermissionsURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/roles/{id}/add-permissions" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on AddPermissionsURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *AddPermissionsURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *AddPermissionsURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *AddPermissionsURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on AddPermissionsURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on AddPermissionsURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *AddPermissionsURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_group.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_group.go new file mode 100644 index 
0000000000000000000000000000000000000000..170faa3fe046675eb3c077f1562c6f6af63169fe --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_group.go @@ -0,0 +1,175 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// AssignRoleToGroupHandlerFunc turns a function with the right signature into a assign role to group handler +type AssignRoleToGroupHandlerFunc func(AssignRoleToGroupParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn AssignRoleToGroupHandlerFunc) Handle(params AssignRoleToGroupParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// AssignRoleToGroupHandler interface for that can handle valid assign role to group params +type AssignRoleToGroupHandler interface { + Handle(AssignRoleToGroupParams, *models.Principal) middleware.Responder +} + +// NewAssignRoleToGroup creates a new http.Handler for the assign role to group operation +func NewAssignRoleToGroup(ctx *middleware.Context, handler AssignRoleToGroupHandler) *AssignRoleToGroup { + return &AssignRoleToGroup{Context: ctx, Handler: handler} +} + +/* + AssignRoleToGroup swagger:route POST /authz/groups/{id}/assign authz assignRoleToGroup + +Assign a role to a group +*/ +type 
AssignRoleToGroup struct { + Context *middleware.Context + Handler AssignRoleToGroupHandler +} + +func (o *AssignRoleToGroup) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewAssignRoleToGroupParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// AssignRoleToGroupBody assign role to group body +// +// swagger:model AssignRoleToGroupBody +type AssignRoleToGroupBody struct { + + // group type + GroupType models.GroupType `json:"groupType,omitempty" yaml:"groupType,omitempty"` + + // the roles that assigned to group + Roles []string `json:"roles" yaml:"roles"` +} + +// Validate validates this assign role to group body +func (o *AssignRoleToGroupBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateGroupType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *AssignRoleToGroupBody) validateGroupType(formats strfmt.Registry) error { + if swag.IsZero(o.GroupType) { // not required + return nil + } + + if err := o.GroupType.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "groupType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." 
+ "groupType") + } + return err + } + + return nil +} + +// ContextValidate validate this assign role to group body based on the context it is used +func (o *AssignRoleToGroupBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateGroupType(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *AssignRoleToGroupBody) contextValidateGroupType(ctx context.Context, formats strfmt.Registry) error { + + if err := o.GroupType.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "groupType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "groupType") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (o *AssignRoleToGroupBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *AssignRoleToGroupBody) UnmarshalBinary(b []byte) error { + var res AssignRoleToGroupBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_group_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_group_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..08c5cda993da1138ad2f52c390dbf48639295cce --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_group_parameters.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// 
Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewAssignRoleToGroupParams creates a new AssignRoleToGroupParams object +// +// There are no default values defined in the spec. +func NewAssignRoleToGroupParams() AssignRoleToGroupParams { + + return AssignRoleToGroupParams{} +} + +// AssignRoleToGroupParams contains all the bound params for the assign role to group operation +// typically these are obtained from a http.Request +// +// swagger:parameters assignRoleToGroup +type AssignRoleToGroupParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body AssignRoleToGroupBody + /*group name + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewAssignRoleToGroupParams() beforehand. 
+func (o *AssignRoleToGroupParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body AssignRoleToGroupBody + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *AssignRoleToGroupParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_group_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_group_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..fade02718717c5688433ae3d0832430ce12eb0a5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_group_responses.go @@ -0,0 +1,235 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// AssignRoleToGroupOKCode is the HTTP code returned for type AssignRoleToGroupOK +const AssignRoleToGroupOKCode int = 200 + +/* +AssignRoleToGroupOK Role assigned successfully + +swagger:response assignRoleToGroupOK +*/ +type AssignRoleToGroupOK struct { +} + +// NewAssignRoleToGroupOK creates AssignRoleToGroupOK with default headers values +func NewAssignRoleToGroupOK() *AssignRoleToGroupOK { + + return &AssignRoleToGroupOK{} +} + +// WriteResponse to the client +func (o *AssignRoleToGroupOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// AssignRoleToGroupBadRequestCode is the HTTP code returned for type AssignRoleToGroupBadRequest +const AssignRoleToGroupBadRequestCode int = 400 + +/* +AssignRoleToGroupBadRequest Bad request + +swagger:response assignRoleToGroupBadRequest +*/ +type AssignRoleToGroupBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAssignRoleToGroupBadRequest creates AssignRoleToGroupBadRequest with default headers values +func NewAssignRoleToGroupBadRequest() *AssignRoleToGroupBadRequest { + + return &AssignRoleToGroupBadRequest{} +} + +// WithPayload adds the payload to the assign role to group bad request response +func (o *AssignRoleToGroupBadRequest) WithPayload(payload *models.ErrorResponse) *AssignRoleToGroupBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the assign role to group bad request response +func (o *AssignRoleToGroupBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AssignRoleToGroupBadRequest) WriteResponse(rw 
http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AssignRoleToGroupUnauthorizedCode is the HTTP code returned for type AssignRoleToGroupUnauthorized +const AssignRoleToGroupUnauthorizedCode int = 401 + +/* +AssignRoleToGroupUnauthorized Unauthorized or invalid credentials. + +swagger:response assignRoleToGroupUnauthorized +*/ +type AssignRoleToGroupUnauthorized struct { +} + +// NewAssignRoleToGroupUnauthorized creates AssignRoleToGroupUnauthorized with default headers values +func NewAssignRoleToGroupUnauthorized() *AssignRoleToGroupUnauthorized { + + return &AssignRoleToGroupUnauthorized{} +} + +// WriteResponse to the client +func (o *AssignRoleToGroupUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// AssignRoleToGroupForbiddenCode is the HTTP code returned for type AssignRoleToGroupForbidden +const AssignRoleToGroupForbiddenCode int = 403 + +/* +AssignRoleToGroupForbidden Forbidden + +swagger:response assignRoleToGroupForbidden +*/ +type AssignRoleToGroupForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAssignRoleToGroupForbidden creates AssignRoleToGroupForbidden with default headers values +func NewAssignRoleToGroupForbidden() *AssignRoleToGroupForbidden { + + return &AssignRoleToGroupForbidden{} +} + +// WithPayload adds the payload to the assign role to group forbidden response +func (o *AssignRoleToGroupForbidden) WithPayload(payload *models.ErrorResponse) *AssignRoleToGroupForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the assign role to group forbidden response +func (o *AssignRoleToGroupForbidden) 
SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AssignRoleToGroupForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AssignRoleToGroupNotFoundCode is the HTTP code returned for type AssignRoleToGroupNotFound +const AssignRoleToGroupNotFoundCode int = 404 + +/* +AssignRoleToGroupNotFound role or group is not found. + +swagger:response assignRoleToGroupNotFound +*/ +type AssignRoleToGroupNotFound struct { +} + +// NewAssignRoleToGroupNotFound creates AssignRoleToGroupNotFound with default headers values +func NewAssignRoleToGroupNotFound() *AssignRoleToGroupNotFound { + + return &AssignRoleToGroupNotFound{} +} + +// WriteResponse to the client +func (o *AssignRoleToGroupNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// AssignRoleToGroupInternalServerErrorCode is the HTTP code returned for type AssignRoleToGroupInternalServerError +const AssignRoleToGroupInternalServerErrorCode int = 500 + +/* +AssignRoleToGroupInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response assignRoleToGroupInternalServerError +*/ +type AssignRoleToGroupInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAssignRoleToGroupInternalServerError creates AssignRoleToGroupInternalServerError with default headers values +func NewAssignRoleToGroupInternalServerError() *AssignRoleToGroupInternalServerError { + + return &AssignRoleToGroupInternalServerError{} +} + +// WithPayload adds the payload to the assign role to group internal server error response +func (o *AssignRoleToGroupInternalServerError) WithPayload(payload *models.ErrorResponse) *AssignRoleToGroupInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the assign role to group internal server error response +func (o *AssignRoleToGroupInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AssignRoleToGroupInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_group_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_group_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..e980fb40ca43c890dc3af5c92dec85799d9a336f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_group_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// AssignRoleToGroupURL generates an URL for the assign role to group operation +type AssignRoleToGroupURL struct { + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *AssignRoleToGroupURL) WithBasePath(bp string) *AssignRoleToGroupURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *AssignRoleToGroupURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *AssignRoleToGroupURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/groups/{id}/assign" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on AssignRoleToGroupURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *AssignRoleToGroupURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *AssignRoleToGroupURL) String() string { + return 
o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *AssignRoleToGroupURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on AssignRoleToGroupURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on AssignRoleToGroupURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *AssignRoleToGroupURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_user.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_user.go new file mode 100644 index 0000000000000000000000000000000000000000..76157194913213fa5a78880cb3308931de784546 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_user.go @@ -0,0 +1,175 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// AssignRoleToUserHandlerFunc turns a function with the right signature into a assign role to user handler +type AssignRoleToUserHandlerFunc func(AssignRoleToUserParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn AssignRoleToUserHandlerFunc) Handle(params AssignRoleToUserParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// AssignRoleToUserHandler interface for that can handle valid assign role to user params +type AssignRoleToUserHandler interface { + Handle(AssignRoleToUserParams, *models.Principal) middleware.Responder +} + +// NewAssignRoleToUser creates a new http.Handler for the assign role to user operation +func NewAssignRoleToUser(ctx *middleware.Context, handler AssignRoleToUserHandler) *AssignRoleToUser { + return &AssignRoleToUser{Context: ctx, Handler: handler} +} + +/* + AssignRoleToUser swagger:route POST /authz/users/{id}/assign authz assignRoleToUser + +Assign a role to a user +*/ +type AssignRoleToUser struct { + Context *middleware.Context + Handler AssignRoleToUserHandler +} + +func (o *AssignRoleToUser) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewAssignRoleToUserParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := 
o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// AssignRoleToUserBody assign role to user body +// +// swagger:model AssignRoleToUserBody +type AssignRoleToUserBody struct { + + // the roles that assigned to user + Roles []string `json:"roles" yaml:"roles"` + + // user type + UserType models.UserTypeInput `json:"userType,omitempty" yaml:"userType,omitempty"` +} + +// Validate validates this assign role to user body +func (o *AssignRoleToUserBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateUserType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *AssignRoleToUserBody) validateUserType(formats strfmt.Registry) error { + if swag.IsZero(o.UserType) { // not required + return nil + } + + if err := o.UserType.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "userType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "userType") + } + return err + } + + return nil +} + +// ContextValidate validate this assign role to user body based on the context it is used +func (o *AssignRoleToUserBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateUserType(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *AssignRoleToUserBody) contextValidateUserType(ctx context.Context, formats strfmt.Registry) error { + + if err := o.UserType.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "userType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "userType") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (o *AssignRoleToUserBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *AssignRoleToUserBody) UnmarshalBinary(b []byte) error { + var res AssignRoleToUserBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_user_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_user_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..fdadb5171cf3beed914e5398c09cf4d36ffe60b1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_user_parameters.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewAssignRoleToUserParams creates a new AssignRoleToUserParams object +// +// There are no default values defined in the spec. +func NewAssignRoleToUserParams() AssignRoleToUserParams { + + return AssignRoleToUserParams{} +} + +// AssignRoleToUserParams contains all the bound params for the assign role to user operation +// typically these are obtained from a http.Request +// +// swagger:parameters assignRoleToUser +type AssignRoleToUserParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body AssignRoleToUserBody + /*user name + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewAssignRoleToUserParams() beforehand. 
+func (o *AssignRoleToUserParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body AssignRoleToUserBody + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *AssignRoleToUserParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_user_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_user_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..449ab6d6ad6752b6f2d9b52356756447b075c42c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_user_responses.go @@ -0,0 +1,255 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// AssignRoleToUserOKCode is the HTTP code returned for type AssignRoleToUserOK +const AssignRoleToUserOKCode int = 200 + +/* +AssignRoleToUserOK Role assigned successfully + +swagger:response assignRoleToUserOK +*/ +type AssignRoleToUserOK struct { +} + +// NewAssignRoleToUserOK creates AssignRoleToUserOK with default headers values +func NewAssignRoleToUserOK() *AssignRoleToUserOK { + + return &AssignRoleToUserOK{} +} + +// WriteResponse to the client +func (o *AssignRoleToUserOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// AssignRoleToUserBadRequestCode is the HTTP code returned for type AssignRoleToUserBadRequest +const AssignRoleToUserBadRequestCode int = 400 + +/* +AssignRoleToUserBadRequest Bad request + +swagger:response assignRoleToUserBadRequest +*/ +type AssignRoleToUserBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAssignRoleToUserBadRequest creates AssignRoleToUserBadRequest with default headers values +func NewAssignRoleToUserBadRequest() *AssignRoleToUserBadRequest { + + return &AssignRoleToUserBadRequest{} +} + +// WithPayload adds the payload to the assign role to user bad request response +func (o *AssignRoleToUserBadRequest) WithPayload(payload *models.ErrorResponse) *AssignRoleToUserBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the assign role to user bad request response +func (o *AssignRoleToUserBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AssignRoleToUserBadRequest) WriteResponse(rw http.ResponseWriter, producer 
runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AssignRoleToUserUnauthorizedCode is the HTTP code returned for type AssignRoleToUserUnauthorized +const AssignRoleToUserUnauthorizedCode int = 401 + +/* +AssignRoleToUserUnauthorized Unauthorized or invalid credentials. + +swagger:response assignRoleToUserUnauthorized +*/ +type AssignRoleToUserUnauthorized struct { +} + +// NewAssignRoleToUserUnauthorized creates AssignRoleToUserUnauthorized with default headers values +func NewAssignRoleToUserUnauthorized() *AssignRoleToUserUnauthorized { + + return &AssignRoleToUserUnauthorized{} +} + +// WriteResponse to the client +func (o *AssignRoleToUserUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// AssignRoleToUserForbiddenCode is the HTTP code returned for type AssignRoleToUserForbidden +const AssignRoleToUserForbiddenCode int = 403 + +/* +AssignRoleToUserForbidden Forbidden + +swagger:response assignRoleToUserForbidden +*/ +type AssignRoleToUserForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAssignRoleToUserForbidden creates AssignRoleToUserForbidden with default headers values +func NewAssignRoleToUserForbidden() *AssignRoleToUserForbidden { + + return &AssignRoleToUserForbidden{} +} + +// WithPayload adds the payload to the assign role to user forbidden response +func (o *AssignRoleToUserForbidden) WithPayload(payload *models.ErrorResponse) *AssignRoleToUserForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the assign role to user forbidden response +func (o *AssignRoleToUserForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + 
+// WriteResponse to the client +func (o *AssignRoleToUserForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AssignRoleToUserNotFoundCode is the HTTP code returned for type AssignRoleToUserNotFound +const AssignRoleToUserNotFoundCode int = 404 + +/* +AssignRoleToUserNotFound role or user is not found. + +swagger:response assignRoleToUserNotFound +*/ +type AssignRoleToUserNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAssignRoleToUserNotFound creates AssignRoleToUserNotFound with default headers values +func NewAssignRoleToUserNotFound() *AssignRoleToUserNotFound { + + return &AssignRoleToUserNotFound{} +} + +// WithPayload adds the payload to the assign role to user not found response +func (o *AssignRoleToUserNotFound) WithPayload(payload *models.ErrorResponse) *AssignRoleToUserNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the assign role to user not found response +func (o *AssignRoleToUserNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AssignRoleToUserNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AssignRoleToUserInternalServerErrorCode is the HTTP code returned for type AssignRoleToUserInternalServerError +const AssignRoleToUserInternalServerErrorCode int = 500 + +/* +AssignRoleToUserInternalServerError An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error. + +swagger:response assignRoleToUserInternalServerError +*/ +type AssignRoleToUserInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAssignRoleToUserInternalServerError creates AssignRoleToUserInternalServerError with default headers values +func NewAssignRoleToUserInternalServerError() *AssignRoleToUserInternalServerError { + + return &AssignRoleToUserInternalServerError{} +} + +// WithPayload adds the payload to the assign role to user internal server error response +func (o *AssignRoleToUserInternalServerError) WithPayload(payload *models.ErrorResponse) *AssignRoleToUserInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the assign role to user internal server error response +func (o *AssignRoleToUserInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AssignRoleToUserInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_user_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_user_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..6c7dc7e79b144744df01c9725554e44df6ff94b6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/assign_role_to_user_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// AssignRoleToUserURL generates an URL for the assign role to user operation +type AssignRoleToUserURL struct { + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *AssignRoleToUserURL) WithBasePath(bp string) *AssignRoleToUserURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *AssignRoleToUserURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *AssignRoleToUserURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/users/{id}/assign" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on AssignRoleToUserURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *AssignRoleToUserURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *AssignRoleToUserURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *AssignRoleToUserURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on AssignRoleToUserURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on AssignRoleToUserURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *AssignRoleToUserURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/create_role.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/create_role.go new file mode 100644 index 
0000000000000000000000000000000000000000..8b37e7ed6f2ffec3187607c119797b510216bba1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/create_role.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// CreateRoleHandlerFunc turns a function with the right signature into a create role handler +type CreateRoleHandlerFunc func(CreateRoleParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn CreateRoleHandlerFunc) Handle(params CreateRoleParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// CreateRoleHandler interface for that can handle valid create role params +type CreateRoleHandler interface { + Handle(CreateRoleParams, *models.Principal) middleware.Responder +} + +// NewCreateRole creates a new http.Handler for the create role operation +func NewCreateRole(ctx *middleware.Context, handler CreateRoleHandler) *CreateRole { + return &CreateRole{Context: ctx, Handler: handler} +} + +/* + CreateRole swagger:route POST /authz/roles authz createRole + +create new role +*/ +type CreateRole struct { + Context *middleware.Context + Handler CreateRoleHandler +} + +func (o *CreateRole) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewCreateRoleParams() + 
uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/create_role_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/create_role_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..98b3e19392a6aa5b1f4ce46016815a8a0cd7d8c2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/create_role_parameters.go @@ -0,0 +1,95 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewCreateRoleParams creates a new CreateRoleParams object +// +// There are no default values defined in the spec. 
+func NewCreateRoleParams() CreateRoleParams { + + return CreateRoleParams{} +} + +// CreateRoleParams contains all the bound params for the create role operation +// typically these are obtained from a http.Request +// +// swagger:parameters createRole +type CreateRoleParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.Role +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewCreateRoleParams() beforehand. +func (o *CreateRoleParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.Role + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/create_role_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/create_role_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..eca14cb76486feb28fc86baf9c600e6d19cbe5aa --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/create_role_responses.go @@ -0,0 +1,300 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// CreateRoleCreatedCode is the HTTP code returned for type CreateRoleCreated +const CreateRoleCreatedCode int = 201 + +/* +CreateRoleCreated Role created successfully + +swagger:response createRoleCreated +*/ +type CreateRoleCreated struct { +} + +// NewCreateRoleCreated creates CreateRoleCreated with default headers values +func NewCreateRoleCreated() *CreateRoleCreated { + + return &CreateRoleCreated{} +} + +// WriteResponse to the client +func (o *CreateRoleCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(201) +} + +// CreateRoleBadRequestCode is the HTTP code returned for type CreateRoleBadRequest +const CreateRoleBadRequestCode int = 400 + +/* +CreateRoleBadRequest Malformed request. 
+ +swagger:response createRoleBadRequest +*/ +type CreateRoleBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCreateRoleBadRequest creates CreateRoleBadRequest with default headers values +func NewCreateRoleBadRequest() *CreateRoleBadRequest { + + return &CreateRoleBadRequest{} +} + +// WithPayload adds the payload to the create role bad request response +func (o *CreateRoleBadRequest) WithPayload(payload *models.ErrorResponse) *CreateRoleBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the create role bad request response +func (o *CreateRoleBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CreateRoleBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CreateRoleUnauthorizedCode is the HTTP code returned for type CreateRoleUnauthorized +const CreateRoleUnauthorizedCode int = 401 + +/* +CreateRoleUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response createRoleUnauthorized +*/ +type CreateRoleUnauthorized struct { +} + +// NewCreateRoleUnauthorized creates CreateRoleUnauthorized with default headers values +func NewCreateRoleUnauthorized() *CreateRoleUnauthorized { + + return &CreateRoleUnauthorized{} +} + +// WriteResponse to the client +func (o *CreateRoleUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// CreateRoleForbiddenCode is the HTTP code returned for type CreateRoleForbidden +const CreateRoleForbiddenCode int = 403 + +/* +CreateRoleForbidden Forbidden + +swagger:response createRoleForbidden +*/ +type CreateRoleForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCreateRoleForbidden creates CreateRoleForbidden with default headers values +func NewCreateRoleForbidden() *CreateRoleForbidden { + + return &CreateRoleForbidden{} +} + +// WithPayload adds the payload to the create role forbidden response +func (o *CreateRoleForbidden) WithPayload(payload *models.ErrorResponse) *CreateRoleForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the create role forbidden response +func (o *CreateRoleForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CreateRoleForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CreateRoleConflictCode is the HTTP code returned for type CreateRoleConflict +const CreateRoleConflictCode int = 409 + +/* +CreateRoleConflict Role already exists + +swagger:response createRoleConflict +*/ +type CreateRoleConflict struct { + + /* + In: Body 
+ */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCreateRoleConflict creates CreateRoleConflict with default headers values +func NewCreateRoleConflict() *CreateRoleConflict { + + return &CreateRoleConflict{} +} + +// WithPayload adds the payload to the create role conflict response +func (o *CreateRoleConflict) WithPayload(payload *models.ErrorResponse) *CreateRoleConflict { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the create role conflict response +func (o *CreateRoleConflict) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CreateRoleConflict) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(409) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CreateRoleUnprocessableEntityCode is the HTTP code returned for type CreateRoleUnprocessableEntity +const CreateRoleUnprocessableEntityCode int = 422 + +/* +CreateRoleUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? 
+ +swagger:response createRoleUnprocessableEntity +*/ +type CreateRoleUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCreateRoleUnprocessableEntity creates CreateRoleUnprocessableEntity with default headers values +func NewCreateRoleUnprocessableEntity() *CreateRoleUnprocessableEntity { + + return &CreateRoleUnprocessableEntity{} +} + +// WithPayload adds the payload to the create role unprocessable entity response +func (o *CreateRoleUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *CreateRoleUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the create role unprocessable entity response +func (o *CreateRoleUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CreateRoleUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CreateRoleInternalServerErrorCode is the HTTP code returned for type CreateRoleInternalServerError +const CreateRoleInternalServerErrorCode int = 500 + +/* +CreateRoleInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response createRoleInternalServerError +*/ +type CreateRoleInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCreateRoleInternalServerError creates CreateRoleInternalServerError with default headers values +func NewCreateRoleInternalServerError() *CreateRoleInternalServerError { + + return &CreateRoleInternalServerError{} +} + +// WithPayload adds the payload to the create role internal server error response +func (o *CreateRoleInternalServerError) WithPayload(payload *models.ErrorResponse) *CreateRoleInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the create role internal server error response +func (o *CreateRoleInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CreateRoleInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/create_role_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/create_role_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..6c15479d11bfb49bf6733e9eeac6af581199a0e5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/create_role_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// CreateRoleURL generates an URL for the create role operation +type CreateRoleURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *CreateRoleURL) WithBasePath(bp string) *CreateRoleURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *CreateRoleURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *CreateRoleURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/roles" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *CreateRoleURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *CreateRoleURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *CreateRoleURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on CreateRoleURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on CreateRoleURL") + } + + base, err := o.Build() + if 
err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *CreateRoleURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/delete_role.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/delete_role.go new file mode 100644 index 0000000000000000000000000000000000000000..952100180424797c06bd89f8e1b2bc913f539be9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/delete_role.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeleteRoleHandlerFunc turns a function with the right signature into a delete role handler +type DeleteRoleHandlerFunc func(DeleteRoleParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn DeleteRoleHandlerFunc) Handle(params DeleteRoleParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// DeleteRoleHandler interface for that can handle valid delete role params +type DeleteRoleHandler interface { + Handle(DeleteRoleParams, *models.Principal) middleware.Responder +} + +// NewDeleteRole creates a new http.Handler for the delete role operation +func NewDeleteRole(ctx *middleware.Context, handler DeleteRoleHandler) *DeleteRole { + return &DeleteRole{Context: ctx, Handler: handler} +} + +/* + DeleteRole swagger:route DELETE /authz/roles/{id} authz deleteRole + +Delete role +*/ +type DeleteRole struct { + Context *middleware.Context + Handler DeleteRoleHandler +} + +func (o *DeleteRole) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewDeleteRoleParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} 
diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/delete_role_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/delete_role_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..0eb29c4826c25443cfa3763fdd4bd5144a1eda9e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/delete_role_parameters.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewDeleteRoleParams creates a new DeleteRoleParams object +// +// There are no default values defined in the spec. +func NewDeleteRoleParams() DeleteRoleParams { + + return DeleteRoleParams{} +} + +// DeleteRoleParams contains all the bound params for the delete role operation +// typically these are obtained from a http.Request +// +// swagger:parameters deleteRole +type DeleteRoleParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*role name + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewDeleteRoleParams() beforehand. 
+func (o *DeleteRoleParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. +func (o *DeleteRoleParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/delete_role_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/delete_role_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..2a60e2af1199fe35eb75711caf03c5e8aec1f17b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/delete_role_responses.go @@ -0,0 +1,210 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeleteRoleNoContentCode is the HTTP code returned for type DeleteRoleNoContent +const DeleteRoleNoContentCode int = 204 + +/* +DeleteRoleNoContent Successfully deleted. 
+ +swagger:response deleteRoleNoContent +*/ +type DeleteRoleNoContent struct { +} + +// NewDeleteRoleNoContent creates DeleteRoleNoContent with default headers values +func NewDeleteRoleNoContent() *DeleteRoleNoContent { + + return &DeleteRoleNoContent{} +} + +// WriteResponse to the client +func (o *DeleteRoleNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// DeleteRoleBadRequestCode is the HTTP code returned for type DeleteRoleBadRequest +const DeleteRoleBadRequestCode int = 400 + +/* +DeleteRoleBadRequest Bad request + +swagger:response deleteRoleBadRequest +*/ +type DeleteRoleBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteRoleBadRequest creates DeleteRoleBadRequest with default headers values +func NewDeleteRoleBadRequest() *DeleteRoleBadRequest { + + return &DeleteRoleBadRequest{} +} + +// WithPayload adds the payload to the delete role bad request response +func (o *DeleteRoleBadRequest) WithPayload(payload *models.ErrorResponse) *DeleteRoleBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete role bad request response +func (o *DeleteRoleBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteRoleBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteRoleUnauthorizedCode is the HTTP code returned for type DeleteRoleUnauthorized +const DeleteRoleUnauthorizedCode int = 401 + +/* +DeleteRoleUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response deleteRoleUnauthorized +*/ +type DeleteRoleUnauthorized struct { +} + +// NewDeleteRoleUnauthorized creates DeleteRoleUnauthorized with default headers values +func NewDeleteRoleUnauthorized() *DeleteRoleUnauthorized { + + return &DeleteRoleUnauthorized{} +} + +// WriteResponse to the client +func (o *DeleteRoleUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// DeleteRoleForbiddenCode is the HTTP code returned for type DeleteRoleForbidden +const DeleteRoleForbiddenCode int = 403 + +/* +DeleteRoleForbidden Forbidden + +swagger:response deleteRoleForbidden +*/ +type DeleteRoleForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteRoleForbidden creates DeleteRoleForbidden with default headers values +func NewDeleteRoleForbidden() *DeleteRoleForbidden { + + return &DeleteRoleForbidden{} +} + +// WithPayload adds the payload to the delete role forbidden response +func (o *DeleteRoleForbidden) WithPayload(payload *models.ErrorResponse) *DeleteRoleForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete role forbidden response +func (o *DeleteRoleForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteRoleForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteRoleInternalServerErrorCode is the HTTP code returned for type DeleteRoleInternalServerError +const DeleteRoleInternalServerErrorCode int = 500 + +/* +DeleteRoleInternalServerError An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error. + +swagger:response deleteRoleInternalServerError +*/ +type DeleteRoleInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteRoleInternalServerError creates DeleteRoleInternalServerError with default headers values +func NewDeleteRoleInternalServerError() *DeleteRoleInternalServerError { + + return &DeleteRoleInternalServerError{} +} + +// WithPayload adds the payload to the delete role internal server error response +func (o *DeleteRoleInternalServerError) WithPayload(payload *models.ErrorResponse) *DeleteRoleInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete role internal server error response +func (o *DeleteRoleInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteRoleInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/delete_role_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/delete_role_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..3f061069b187641b26ccb0ef4afacb4917e654d3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/delete_role_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// DeleteRoleURL generates an URL for the delete role operation +type DeleteRoleURL struct { + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *DeleteRoleURL) WithBasePath(bp string) *DeleteRoleURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *DeleteRoleURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *DeleteRoleURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/roles/{id}" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on DeleteRoleURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *DeleteRoleURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *DeleteRoleURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query 
string +func (o *DeleteRoleURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on DeleteRoleURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on DeleteRoleURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *DeleteRoleURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups.go new file mode 100644 index 0000000000000000000000000000000000000000..1529a0ff309c1622517fa91ca3c07bded2edb927 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetGroupsHandlerFunc turns a function with the right signature into a get groups handler +type GetGroupsHandlerFunc func(GetGroupsParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetGroupsHandlerFunc) Handle(params GetGroupsParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GetGroupsHandler interface for that can handle valid get groups params +type GetGroupsHandler interface { + Handle(GetGroupsParams, *models.Principal) middleware.Responder +} + +// NewGetGroups creates a new http.Handler for the get groups operation +func NewGetGroups(ctx *middleware.Context, handler GetGroupsHandler) *GetGroups { + return &GetGroups{Context: ctx, Handler: handler} +} + +/* + GetGroups swagger:route GET /authz/groups/{groupType} authz getGroups + +# List all groups of a specific type + +Retrieves a list of all available group names for a specified group type (`oidc` or `db`). 
+*/ +type GetGroups struct { + Context *middleware.Context + Handler GetGroupsHandler +} + +func (o *GetGroups) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGetGroupsParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_for_role.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_for_role.go new file mode 100644 index 0000000000000000000000000000000000000000..33bc670acd64d2e939d11a04b5287d5804edf780 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_for_role.go @@ -0,0 +1,188 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetGroupsForRoleHandlerFunc turns a function with the right signature into a get groups for role handler +type GetGroupsForRoleHandlerFunc func(GetGroupsForRoleParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetGroupsForRoleHandlerFunc) Handle(params GetGroupsForRoleParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GetGroupsForRoleHandler interface for that can handle valid get groups for role params +type GetGroupsForRoleHandler interface { + Handle(GetGroupsForRoleParams, *models.Principal) middleware.Responder +} + +// NewGetGroupsForRole creates a new http.Handler for the get groups for role operation +func NewGetGroupsForRole(ctx *middleware.Context, handler GetGroupsForRoleHandler) *GetGroupsForRole { + return &GetGroupsForRole{Context: ctx, Handler: handler} +} + +/* + GetGroupsForRole swagger:route GET /authz/roles/{id}/group-assignments authz getGroupsForRole + +# Get groups that have a specific role assigned + +Retrieves a list of all groups that have been assigned a specific role, identified by its name. 
+*/ +type GetGroupsForRole struct { + Context *middleware.Context + Handler GetGroupsForRoleHandler +} + +func (o *GetGroupsForRole) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGetGroupsForRoleParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// GetGroupsForRoleOKBodyItems0 get groups for role o k body items0 +// +// swagger:model GetGroupsForRoleOKBodyItems0 +type GetGroupsForRoleOKBodyItems0 struct { + + // group Id + GroupID string `json:"groupId,omitempty" yaml:"groupId,omitempty"` + + // group type + // Required: true + GroupType *models.GroupType `json:"groupType" yaml:"groupType"` +} + +// Validate validates this get groups for role o k body items0 +func (o *GetGroupsForRoleOKBodyItems0) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateGroupType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *GetGroupsForRoleOKBodyItems0) validateGroupType(formats strfmt.Registry) error { + + if err := validate.Required("groupType", "body", o.GroupType); err != nil { + return err + } + + if err := validate.Required("groupType", "body", o.GroupType); err != nil { + return err + } + + if o.GroupType != nil { + if err := o.GroupType.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("groupType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("groupType") + } + return err + } + } + + return nil +} + +// ContextValidate validate this get groups for role o k body items0 based on the context it is used +func (o *GetGroupsForRoleOKBodyItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateGroupType(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *GetGroupsForRoleOKBodyItems0) contextValidateGroupType(ctx context.Context, formats strfmt.Registry) error { + + if o.GroupType != nil { + if err := o.GroupType.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("groupType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("groupType") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (o *GetGroupsForRoleOKBodyItems0) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetGroupsForRoleOKBodyItems0) UnmarshalBinary(b []byte) error { + var res GetGroupsForRoleOKBodyItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_for_role_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_for_role_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..9cc34a02cdec4e2d31ebcad20ab53397c55f5076 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_for_role_parameters.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewGetGroupsForRoleParams creates a new GetGroupsForRoleParams object +// +// There are no default values defined in the spec. +func NewGetGroupsForRoleParams() GetGroupsForRoleParams { + + return GetGroupsForRoleParams{} +} + +// GetGroupsForRoleParams contains all the bound params for the get groups for role operation +// typically these are obtained from a http.Request +// +// swagger:parameters getGroupsForRole +type GetGroupsForRoleParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*The unique name of the role. + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetGroupsForRoleParams() beforehand. +func (o *GetGroupsForRoleParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *GetGroupsForRoleParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_for_role_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_for_role_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..2f83170e382e6aa6d0d814fe13a26ad14135139a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_for_role_responses.go @@ -0,0 +1,258 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetGroupsForRoleOKCode is the HTTP code returned for type GetGroupsForRoleOK +const GetGroupsForRoleOKCode int = 200 + +/* +GetGroupsForRoleOK Successfully retrieved the list of groups that have the role assigned. 
+ +swagger:response getGroupsForRoleOK +*/ +type GetGroupsForRoleOK struct { + + /* + In: Body + */ + Payload []*GetGroupsForRoleOKBodyItems0 `json:"body,omitempty"` +} + +// NewGetGroupsForRoleOK creates GetGroupsForRoleOK with default headers values +func NewGetGroupsForRoleOK() *GetGroupsForRoleOK { + + return &GetGroupsForRoleOK{} +} + +// WithPayload adds the payload to the get groups for role o k response +func (o *GetGroupsForRoleOK) WithPayload(payload []*GetGroupsForRoleOKBodyItems0) *GetGroupsForRoleOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get groups for role o k response +func (o *GetGroupsForRoleOK) SetPayload(payload []*GetGroupsForRoleOKBodyItems0) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetGroupsForRoleOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = make([]*GetGroupsForRoleOKBodyItems0, 0, 50) + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// GetGroupsForRoleBadRequestCode is the HTTP code returned for type GetGroupsForRoleBadRequest +const GetGroupsForRoleBadRequestCode int = 400 + +/* +GetGroupsForRoleBadRequest Bad request + +swagger:response getGroupsForRoleBadRequest +*/ +type GetGroupsForRoleBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetGroupsForRoleBadRequest creates GetGroupsForRoleBadRequest with default headers values +func NewGetGroupsForRoleBadRequest() *GetGroupsForRoleBadRequest { + + return &GetGroupsForRoleBadRequest{} +} + +// WithPayload adds the payload to the get groups for role bad request response +func (o *GetGroupsForRoleBadRequest) WithPayload(payload *models.ErrorResponse) *GetGroupsForRoleBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the 
get groups for role bad request response +func (o *GetGroupsForRoleBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetGroupsForRoleBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetGroupsForRoleUnauthorizedCode is the HTTP code returned for type GetGroupsForRoleUnauthorized +const GetGroupsForRoleUnauthorizedCode int = 401 + +/* +GetGroupsForRoleUnauthorized Unauthorized or invalid credentials. + +swagger:response getGroupsForRoleUnauthorized +*/ +type GetGroupsForRoleUnauthorized struct { +} + +// NewGetGroupsForRoleUnauthorized creates GetGroupsForRoleUnauthorized with default headers values +func NewGetGroupsForRoleUnauthorized() *GetGroupsForRoleUnauthorized { + + return &GetGroupsForRoleUnauthorized{} +} + +// WriteResponse to the client +func (o *GetGroupsForRoleUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// GetGroupsForRoleForbiddenCode is the HTTP code returned for type GetGroupsForRoleForbidden +const GetGroupsForRoleForbiddenCode int = 403 + +/* +GetGroupsForRoleForbidden Forbidden + +swagger:response getGroupsForRoleForbidden +*/ +type GetGroupsForRoleForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetGroupsForRoleForbidden creates GetGroupsForRoleForbidden with default headers values +func NewGetGroupsForRoleForbidden() *GetGroupsForRoleForbidden { + + return &GetGroupsForRoleForbidden{} +} + +// WithPayload adds the payload to the get groups for role forbidden response +func (o *GetGroupsForRoleForbidden) WithPayload(payload 
*models.ErrorResponse) *GetGroupsForRoleForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get groups for role forbidden response +func (o *GetGroupsForRoleForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetGroupsForRoleForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetGroupsForRoleNotFoundCode is the HTTP code returned for type GetGroupsForRoleNotFound +const GetGroupsForRoleNotFoundCode int = 404 + +/* +GetGroupsForRoleNotFound The specified role was not found. + +swagger:response getGroupsForRoleNotFound +*/ +type GetGroupsForRoleNotFound struct { +} + +// NewGetGroupsForRoleNotFound creates GetGroupsForRoleNotFound with default headers values +func NewGetGroupsForRoleNotFound() *GetGroupsForRoleNotFound { + + return &GetGroupsForRoleNotFound{} +} + +// WriteResponse to the client +func (o *GetGroupsForRoleNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// GetGroupsForRoleInternalServerErrorCode is the HTTP code returned for type GetGroupsForRoleInternalServerError +const GetGroupsForRoleInternalServerErrorCode int = 500 + +/* +GetGroupsForRoleInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response getGroupsForRoleInternalServerError +*/ +type GetGroupsForRoleInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetGroupsForRoleInternalServerError creates GetGroupsForRoleInternalServerError with default headers values +func NewGetGroupsForRoleInternalServerError() *GetGroupsForRoleInternalServerError { + + return &GetGroupsForRoleInternalServerError{} +} + +// WithPayload adds the payload to the get groups for role internal server error response +func (o *GetGroupsForRoleInternalServerError) WithPayload(payload *models.ErrorResponse) *GetGroupsForRoleInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get groups for role internal server error response +func (o *GetGroupsForRoleInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetGroupsForRoleInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_for_role_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_for_role_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..89292632df0a97b97b6f5a0b341bbafc865d7c43 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_for_role_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// GetGroupsForRoleURL generates an URL for the get groups for role operation +type GetGroupsForRoleURL struct { + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetGroupsForRoleURL) WithBasePath(bp string) *GetGroupsForRoleURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetGroupsForRoleURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetGroupsForRoleURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/roles/{id}/group-assignments" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on GetGroupsForRoleURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetGroupsForRoleURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetGroupsForRoleURL) String() string { + return 
o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetGroupsForRoleURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetGroupsForRoleURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetGroupsForRoleURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetGroupsForRoleURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..bacf52988eb9ed068507eb6a1754d7644aff84ef --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_parameters.go @@ -0,0 +1,97 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewGetGroupsParams creates a new GetGroupsParams object +// +// There are no default values defined in the spec. 
+func NewGetGroupsParams() GetGroupsParams { + + return GetGroupsParams{} +} + +// GetGroupsParams contains all the bound params for the get groups operation +// typically these are obtained from a http.Request +// +// swagger:parameters getGroups +type GetGroupsParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*The type of group to retrieve. + Required: true + In: path + */ + GroupType string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetGroupsParams() beforehand. +func (o *GetGroupsParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rGroupType, rhkGroupType, _ := route.Params.GetOK("groupType") + if err := o.bindGroupType(rGroupType, rhkGroupType, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindGroupType binds and validates parameter GroupType from path. 
+func (o *GetGroupsParams) bindGroupType(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.GroupType = raw + + if err := o.validateGroupType(formats); err != nil { + return err + } + + return nil +} + +// validateGroupType carries on validations for parameter GroupType +func (o *GetGroupsParams) validateGroupType(formats strfmt.Registry) error { + + if err := validate.EnumCase("groupType", "path", o.GroupType, []interface{}{"oidc"}, true); err != nil { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..ef73e8f0acf027975334a276bd1565f989888ef6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_responses.go @@ -0,0 +1,278 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetGroupsOKCode is the HTTP code returned for type GetGroupsOK +const GetGroupsOKCode int = 200 + +/* +GetGroupsOK A list of group names for the specified type. 
+ +swagger:response getGroupsOK +*/ +type GetGroupsOK struct { + + /* + In: Body + */ + Payload []string `json:"body,omitempty"` +} + +// NewGetGroupsOK creates GetGroupsOK with default headers values +func NewGetGroupsOK() *GetGroupsOK { + + return &GetGroupsOK{} +} + +// WithPayload adds the payload to the get groups o k response +func (o *GetGroupsOK) WithPayload(payload []string) *GetGroupsOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get groups o k response +func (o *GetGroupsOK) SetPayload(payload []string) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetGroupsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = make([]string, 0, 50) + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// GetGroupsBadRequestCode is the HTTP code returned for type GetGroupsBadRequest +const GetGroupsBadRequestCode int = 400 + +/* +GetGroupsBadRequest Bad request + +swagger:response getGroupsBadRequest +*/ +type GetGroupsBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetGroupsBadRequest creates GetGroupsBadRequest with default headers values +func NewGetGroupsBadRequest() *GetGroupsBadRequest { + + return &GetGroupsBadRequest{} +} + +// WithPayload adds the payload to the get groups bad request response +func (o *GetGroupsBadRequest) WithPayload(payload *models.ErrorResponse) *GetGroupsBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get groups bad request response +func (o *GetGroupsBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetGroupsBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) 
+ if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetGroupsUnauthorizedCode is the HTTP code returned for type GetGroupsUnauthorized +const GetGroupsUnauthorizedCode int = 401 + +/* +GetGroupsUnauthorized Unauthorized or invalid credentials. + +swagger:response getGroupsUnauthorized +*/ +type GetGroupsUnauthorized struct { +} + +// NewGetGroupsUnauthorized creates GetGroupsUnauthorized with default headers values +func NewGetGroupsUnauthorized() *GetGroupsUnauthorized { + + return &GetGroupsUnauthorized{} +} + +// WriteResponse to the client +func (o *GetGroupsUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// GetGroupsForbiddenCode is the HTTP code returned for type GetGroupsForbidden +const GetGroupsForbiddenCode int = 403 + +/* +GetGroupsForbidden Forbidden + +swagger:response getGroupsForbidden +*/ +type GetGroupsForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetGroupsForbidden creates GetGroupsForbidden with default headers values +func NewGetGroupsForbidden() *GetGroupsForbidden { + + return &GetGroupsForbidden{} +} + +// WithPayload adds the payload to the get groups forbidden response +func (o *GetGroupsForbidden) WithPayload(payload *models.ErrorResponse) *GetGroupsForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get groups forbidden response +func (o *GetGroupsForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetGroupsForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); 
err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetGroupsUnprocessableEntityCode is the HTTP code returned for type GetGroupsUnprocessableEntity +const GetGroupsUnprocessableEntityCode int = 422 + +/* +GetGroupsUnprocessableEntity The request syntax is correct, but the server couldn't process it due to semantic issues. + +swagger:response getGroupsUnprocessableEntity +*/ +type GetGroupsUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetGroupsUnprocessableEntity creates GetGroupsUnprocessableEntity with default headers values +func NewGetGroupsUnprocessableEntity() *GetGroupsUnprocessableEntity { + + return &GetGroupsUnprocessableEntity{} +} + +// WithPayload adds the payload to the get groups unprocessable entity response +func (o *GetGroupsUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *GetGroupsUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get groups unprocessable entity response +func (o *GetGroupsUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetGroupsUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetGroupsInternalServerErrorCode is the HTTP code returned for type GetGroupsInternalServerError +const GetGroupsInternalServerErrorCode int = 500 + +/* +GetGroupsInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response getGroupsInternalServerError +*/ +type GetGroupsInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetGroupsInternalServerError creates GetGroupsInternalServerError with default headers values +func NewGetGroupsInternalServerError() *GetGroupsInternalServerError { + + return &GetGroupsInternalServerError{} +} + +// WithPayload adds the payload to the get groups internal server error response +func (o *GetGroupsInternalServerError) WithPayload(payload *models.ErrorResponse) *GetGroupsInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get groups internal server error response +func (o *GetGroupsInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetGroupsInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..6160c3dc38a79610b0074610a459704421a0c613 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_groups_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// GetGroupsURL generates an URL for the get groups operation +type GetGroupsURL struct { + GroupType string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetGroupsURL) WithBasePath(bp string) *GetGroupsURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetGroupsURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetGroupsURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/groups/{groupType}" + + groupType := o.GroupType + if groupType != "" { + _path = strings.Replace(_path, "{groupType}", groupType, -1) + } else { + return nil, errors.New("groupType is required on GetGroupsURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetGroupsURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetGroupsURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetGroupsURL) 
BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetGroupsURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetGroupsURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetGroupsURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_role.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_role.go new file mode 100644 index 0000000000000000000000000000000000000000..aa3cd52c1e0e0b43796bf70dcc5b486ce2da6911 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_role.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRoleHandlerFunc turns a function with the right signature into a get role handler +type GetRoleHandlerFunc func(GetRoleParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetRoleHandlerFunc) Handle(params GetRoleParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GetRoleHandler interface for that can handle valid get role params +type GetRoleHandler interface { + Handle(GetRoleParams, *models.Principal) middleware.Responder +} + +// NewGetRole creates a new http.Handler for the get role operation +func NewGetRole(ctx *middleware.Context, handler GetRoleHandler) *GetRole { + return &GetRole{Context: ctx, Handler: handler} +} + +/* + GetRole swagger:route GET /authz/roles/{id} authz getRole + +Get a role +*/ +type GetRole struct { + Context *middleware.Context + Handler GetRoleHandler +} + +func (o *GetRole) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGetRoleParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_role_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_role_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..bf00603188c740a1786805828b78305d79188dc9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_role_parameters.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewGetRoleParams creates a new GetRoleParams object +// +// There are no default values defined in the spec. +func NewGetRoleParams() GetRoleParams { + + return GetRoleParams{} +} + +// GetRoleParams contains all the bound params for the get role operation +// typically these are obtained from a http.Request +// +// swagger:parameters getRole +type GetRoleParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*role name + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetRoleParams() beforehand. 
+func (o *GetRoleParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. +func (o *GetRoleParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_role_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_role_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..78b0763f04b14d96db3ededc33d98002cdac4428 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_role_responses.go @@ -0,0 +1,255 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRoleOKCode is the HTTP code returned for type GetRoleOK +const GetRoleOKCode int = 200 + +/* +GetRoleOK Successful response. 
+ +swagger:response getRoleOK +*/ +type GetRoleOK struct { + + /* + In: Body + */ + Payload *models.Role `json:"body,omitempty"` +} + +// NewGetRoleOK creates GetRoleOK with default headers values +func NewGetRoleOK() *GetRoleOK { + + return &GetRoleOK{} +} + +// WithPayload adds the payload to the get role o k response +func (o *GetRoleOK) WithPayload(payload *models.Role) *GetRoleOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get role o k response +func (o *GetRoleOK) SetPayload(payload *models.Role) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRoleOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetRoleBadRequestCode is the HTTP code returned for type GetRoleBadRequest +const GetRoleBadRequestCode int = 400 + +/* +GetRoleBadRequest Malformed request. 
+ +swagger:response getRoleBadRequest +*/ +type GetRoleBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRoleBadRequest creates GetRoleBadRequest with default headers values +func NewGetRoleBadRequest() *GetRoleBadRequest { + + return &GetRoleBadRequest{} +} + +// WithPayload adds the payload to the get role bad request response +func (o *GetRoleBadRequest) WithPayload(payload *models.ErrorResponse) *GetRoleBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get role bad request response +func (o *GetRoleBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRoleBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetRoleUnauthorizedCode is the HTTP code returned for type GetRoleUnauthorized +const GetRoleUnauthorizedCode int = 401 + +/* +GetRoleUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response getRoleUnauthorized +*/ +type GetRoleUnauthorized struct { +} + +// NewGetRoleUnauthorized creates GetRoleUnauthorized with default headers values +func NewGetRoleUnauthorized() *GetRoleUnauthorized { + + return &GetRoleUnauthorized{} +} + +// WriteResponse to the client +func (o *GetRoleUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// GetRoleForbiddenCode is the HTTP code returned for type GetRoleForbidden +const GetRoleForbiddenCode int = 403 + +/* +GetRoleForbidden Forbidden + +swagger:response getRoleForbidden +*/ +type GetRoleForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRoleForbidden creates GetRoleForbidden with default headers values +func NewGetRoleForbidden() *GetRoleForbidden { + + return &GetRoleForbidden{} +} + +// WithPayload adds the payload to the get role forbidden response +func (o *GetRoleForbidden) WithPayload(payload *models.ErrorResponse) *GetRoleForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get role forbidden response +func (o *GetRoleForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRoleForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetRoleNotFoundCode is the HTTP code returned for type GetRoleNotFound +const GetRoleNotFoundCode int = 404 + +/* +GetRoleNotFound no role found + +swagger:response getRoleNotFound +*/ +type GetRoleNotFound struct { +} + +// NewGetRoleNotFound creates GetRoleNotFound with default headers values +func NewGetRoleNotFound() 
*GetRoleNotFound { + + return &GetRoleNotFound{} +} + +// WriteResponse to the client +func (o *GetRoleNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// GetRoleInternalServerErrorCode is the HTTP code returned for type GetRoleInternalServerError +const GetRoleInternalServerErrorCode int = 500 + +/* +GetRoleInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response getRoleInternalServerError +*/ +type GetRoleInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRoleInternalServerError creates GetRoleInternalServerError with default headers values +func NewGetRoleInternalServerError() *GetRoleInternalServerError { + + return &GetRoleInternalServerError{} +} + +// WithPayload adds the payload to the get role internal server error response +func (o *GetRoleInternalServerError) WithPayload(payload *models.ErrorResponse) *GetRoleInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get role internal server error response +func (o *GetRoleInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRoleInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_role_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_role_urlbuilder.go new file mode 100644 index 
0000000000000000000000000000000000000000..aa8b2e91813f025cc43a13190bd40fa160df1498 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_role_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// GetRoleURL generates an URL for the get role operation +type GetRoleURL struct { + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetRoleURL) WithBasePath(bp string) *GetRoleURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *GetRoleURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetRoleURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/roles/{id}" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on GetRoleURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetRoleURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetRoleURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetRoleURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetRoleURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetRoleURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetRoleURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles.go new file mode 100644 index 0000000000000000000000000000000000000000..6da318200cfef9c9ea3c4efc34c107718d6349b3 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRolesHandlerFunc turns a function with the right signature into a get roles handler +type GetRolesHandlerFunc func(GetRolesParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetRolesHandlerFunc) Handle(params GetRolesParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GetRolesHandler interface for that can handle valid get roles params +type GetRolesHandler interface { + Handle(GetRolesParams, *models.Principal) middleware.Responder +} + +// NewGetRoles creates a new http.Handler for the get roles operation +func NewGetRoles(ctx *middleware.Context, handler GetRolesHandler) *GetRoles { + return &GetRoles{Context: ctx, Handler: handler} +} + +/* + GetRoles swagger:route GET /authz/roles authz getRoles + +Get all roles +*/ +type GetRoles struct { + Context *middleware.Context + Handler GetRolesHandler +} + +func (o *GetRoles) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGetRolesParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil 
{ + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_group.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_group.go new file mode 100644 index 0000000000000000000000000000000000000000..e409e3c2b57f46566ccd2054310c7bfecc57666b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_group.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRolesForGroupHandlerFunc turns a function with the right signature into a get roles for group handler +type GetRolesForGroupHandlerFunc func(GetRolesForGroupParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetRolesForGroupHandlerFunc) Handle(params GetRolesForGroupParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GetRolesForGroupHandler interface for that can handle valid get roles for group params +type GetRolesForGroupHandler interface { + Handle(GetRolesForGroupParams, *models.Principal) middleware.Responder +} + +// NewGetRolesForGroup creates a new http.Handler for the get roles for group operation +func NewGetRolesForGroup(ctx *middleware.Context, handler GetRolesForGroupHandler) *GetRolesForGroup { + return &GetRolesForGroup{Context: ctx, Handler: handler} +} + +/* + GetRolesForGroup swagger:route GET /authz/groups/{id}/roles/{groupType} authz getRolesForGroup + +# Get roles assigned to a specific group + +Retrieves a list of all roles assigned to a specific group. The group must be identified by both its name (`id`) and its type (`db` or `oidc`). 
+*/ +type GetRolesForGroup struct { + Context *middleware.Context + Handler GetRolesForGroupHandler +} + +func (o *GetRolesForGroup) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGetRolesForGroupParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_group_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_group_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..288eb2a9cbfd9ae22ed7a6c645258eba4e6d591c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_group_parameters.go @@ -0,0 +1,166 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// NewGetRolesForGroupParams creates a new GetRolesForGroupParams object +// with the default values initialized. +func NewGetRolesForGroupParams() GetRolesForGroupParams { + + var ( + // initialize parameters with default values + + includeFullRolesDefault = bool(false) + ) + + return GetRolesForGroupParams{ + IncludeFullRoles: &includeFullRolesDefault, + } +} + +// GetRolesForGroupParams contains all the bound params for the get roles for group operation +// typically these are obtained from a http.Request +// +// swagger:parameters getRolesForGroup +type GetRolesForGroupParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*The type of the group. + Required: true + In: path + */ + GroupType string + /*The unique name of the group. + Required: true + In: path + */ + ID string + /*If true, the response will include the full role definitions with all associated permissions. If false, only role names are returned. + In: query + Default: false + */ + IncludeFullRoles *bool +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetRolesForGroupParams() beforehand. 
+func (o *GetRolesForGroupParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + rGroupType, rhkGroupType, _ := route.Params.GetOK("groupType") + if err := o.bindGroupType(rGroupType, rhkGroupType, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + qIncludeFullRoles, qhkIncludeFullRoles, _ := qs.GetOK("includeFullRoles") + if err := o.bindIncludeFullRoles(qIncludeFullRoles, qhkIncludeFullRoles, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindGroupType binds and validates parameter GroupType from path. +func (o *GetRolesForGroupParams) bindGroupType(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.GroupType = raw + + if err := o.validateGroupType(formats); err != nil { + return err + } + + return nil +} + +// validateGroupType carries on validations for parameter GroupType +func (o *GetRolesForGroupParams) validateGroupType(formats strfmt.Registry) error { + + if err := validate.EnumCase("groupType", "path", o.GroupType, []interface{}{"oidc"}, true); err != nil { + return err + } + + return nil +} + +// bindID binds and validates parameter ID from path. +func (o *GetRolesForGroupParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} + +// bindIncludeFullRoles binds and validates parameter IncludeFullRoles from query. 
+func (o *GetRolesForGroupParams) bindIncludeFullRoles(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + // Default values have been previously initialized by NewGetRolesForGroupParams() + return nil + } + + value, err := swag.ConvertBool(raw) + if err != nil { + return errors.InvalidType("includeFullRoles", "query", "bool", raw) + } + o.IncludeFullRoles = &value + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_group_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_group_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..18a036e2848136ed190ed4263aa77b0feb4d4d14 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_group_responses.go @@ -0,0 +1,303 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRolesForGroupOKCode is the HTTP code returned for type GetRolesForGroupOK +const GetRolesForGroupOKCode int = 200 + +/* +GetRolesForGroupOK A list of roles assigned to the specified group. 
+ +swagger:response getRolesForGroupOK +*/ +type GetRolesForGroupOK struct { + + /* + In: Body + */ + Payload models.RolesListResponse `json:"body,omitempty"` +} + +// NewGetRolesForGroupOK creates GetRolesForGroupOK with default headers values +func NewGetRolesForGroupOK() *GetRolesForGroupOK { + + return &GetRolesForGroupOK{} +} + +// WithPayload adds the payload to the get roles for group o k response +func (o *GetRolesForGroupOK) WithPayload(payload models.RolesListResponse) *GetRolesForGroupOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for group o k response +func (o *GetRolesForGroupOK) SetPayload(payload models.RolesListResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForGroupOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = models.RolesListResponse{} + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// GetRolesForGroupBadRequestCode is the HTTP code returned for type GetRolesForGroupBadRequest +const GetRolesForGroupBadRequestCode int = 400 + +/* +GetRolesForGroupBadRequest Bad request + +swagger:response getRolesForGroupBadRequest +*/ +type GetRolesForGroupBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesForGroupBadRequest creates GetRolesForGroupBadRequest with default headers values +func NewGetRolesForGroupBadRequest() *GetRolesForGroupBadRequest { + + return &GetRolesForGroupBadRequest{} +} + +// WithPayload adds the payload to the get roles for group bad request response +func (o *GetRolesForGroupBadRequest) WithPayload(payload *models.ErrorResponse) *GetRolesForGroupBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for group bad request response 
+func (o *GetRolesForGroupBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForGroupBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetRolesForGroupUnauthorizedCode is the HTTP code returned for type GetRolesForGroupUnauthorized +const GetRolesForGroupUnauthorizedCode int = 401 + +/* +GetRolesForGroupUnauthorized Unauthorized or invalid credentials. + +swagger:response getRolesForGroupUnauthorized +*/ +type GetRolesForGroupUnauthorized struct { +} + +// NewGetRolesForGroupUnauthorized creates GetRolesForGroupUnauthorized with default headers values +func NewGetRolesForGroupUnauthorized() *GetRolesForGroupUnauthorized { + + return &GetRolesForGroupUnauthorized{} +} + +// WriteResponse to the client +func (o *GetRolesForGroupUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// GetRolesForGroupForbiddenCode is the HTTP code returned for type GetRolesForGroupForbidden +const GetRolesForGroupForbiddenCode int = 403 + +/* +GetRolesForGroupForbidden Forbidden + +swagger:response getRolesForGroupForbidden +*/ +type GetRolesForGroupForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesForGroupForbidden creates GetRolesForGroupForbidden with default headers values +func NewGetRolesForGroupForbidden() *GetRolesForGroupForbidden { + + return &GetRolesForGroupForbidden{} +} + +// WithPayload adds the payload to the get roles for group forbidden response +func (o *GetRolesForGroupForbidden) WithPayload(payload *models.ErrorResponse) *GetRolesForGroupForbidden 
{ + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for group forbidden response +func (o *GetRolesForGroupForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForGroupForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetRolesForGroupNotFoundCode is the HTTP code returned for type GetRolesForGroupNotFound +const GetRolesForGroupNotFoundCode int = 404 + +/* +GetRolesForGroupNotFound The specified group was not found. + +swagger:response getRolesForGroupNotFound +*/ +type GetRolesForGroupNotFound struct { +} + +// NewGetRolesForGroupNotFound creates GetRolesForGroupNotFound with default headers values +func NewGetRolesForGroupNotFound() *GetRolesForGroupNotFound { + + return &GetRolesForGroupNotFound{} +} + +// WriteResponse to the client +func (o *GetRolesForGroupNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// GetRolesForGroupUnprocessableEntityCode is the HTTP code returned for type GetRolesForGroupUnprocessableEntity +const GetRolesForGroupUnprocessableEntityCode int = 422 + +/* +GetRolesForGroupUnprocessableEntity The request syntax is correct, but the server couldn't process it due to semantic issues. 
+ +swagger:response getRolesForGroupUnprocessableEntity +*/ +type GetRolesForGroupUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesForGroupUnprocessableEntity creates GetRolesForGroupUnprocessableEntity with default headers values +func NewGetRolesForGroupUnprocessableEntity() *GetRolesForGroupUnprocessableEntity { + + return &GetRolesForGroupUnprocessableEntity{} +} + +// WithPayload adds the payload to the get roles for group unprocessable entity response +func (o *GetRolesForGroupUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *GetRolesForGroupUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for group unprocessable entity response +func (o *GetRolesForGroupUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForGroupUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetRolesForGroupInternalServerErrorCode is the HTTP code returned for type GetRolesForGroupInternalServerError +const GetRolesForGroupInternalServerErrorCode int = 500 + +/* +GetRolesForGroupInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response getRolesForGroupInternalServerError +*/ +type GetRolesForGroupInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesForGroupInternalServerError creates GetRolesForGroupInternalServerError with default headers values +func NewGetRolesForGroupInternalServerError() *GetRolesForGroupInternalServerError { + + return &GetRolesForGroupInternalServerError{} +} + +// WithPayload adds the payload to the get roles for group internal server error response +func (o *GetRolesForGroupInternalServerError) WithPayload(payload *models.ErrorResponse) *GetRolesForGroupInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for group internal server error response +func (o *GetRolesForGroupInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForGroupInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_group_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_group_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..3ab4c2125c1f819ffb33887ffa0d25c6f08aaf7a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_group_urlbuilder.go @@ -0,0 +1,134 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/swag" +) + +// GetRolesForGroupURL generates an URL for the get roles for group operation +type GetRolesForGroupURL struct { + GroupType string + ID string + + IncludeFullRoles *bool + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetRolesForGroupURL) WithBasePath(bp string) *GetRolesForGroupURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *GetRolesForGroupURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetRolesForGroupURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/groups/{id}/roles/{groupType}" + + groupType := o.GroupType + if groupType != "" { + _path = strings.Replace(_path, "{groupType}", groupType, -1) + } else { + return nil, errors.New("groupType is required on GetRolesForGroupURL") + } + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on GetRolesForGroupURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var includeFullRolesQ string + if o.IncludeFullRoles != nil { + includeFullRolesQ = swag.FormatBool(*o.IncludeFullRoles) + } + if includeFullRolesQ != "" { + qs.Set("includeFullRoles", includeFullRolesQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetRolesForGroupURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetRolesForGroupURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetRolesForGroupURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetRolesForGroupURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetRolesForGroupURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = 
host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetRolesForGroupURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user.go new file mode 100644 index 0000000000000000000000000000000000000000..7c55a399a0ab832510543381e9043745fe38ad78 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRolesForUserHandlerFunc turns a function with the right signature into a get roles for user handler +type GetRolesForUserHandlerFunc func(GetRolesForUserParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetRolesForUserHandlerFunc) Handle(params GetRolesForUserParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GetRolesForUserHandler interface for that can handle valid get roles for user params +type GetRolesForUserHandler interface { + Handle(GetRolesForUserParams, *models.Principal) middleware.Responder +} + +// NewGetRolesForUser creates a new http.Handler for the get roles for user operation +func NewGetRolesForUser(ctx *middleware.Context, handler GetRolesForUserHandler) *GetRolesForUser { + return &GetRolesForUser{Context: ctx, Handler: handler} +} + +/* + GetRolesForUser swagger:route GET /authz/users/{id}/roles/{userType} authz getRolesForUser + +get roles assigned to user +*/ +type GetRolesForUser struct { + Context *middleware.Context + Handler GetRolesForUserHandler +} + +func (o *GetRolesForUser) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGetRolesForUserParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + 
return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_deprecated.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_deprecated.go new file mode 100644 index 0000000000000000000000000000000000000000..eae93e65c694ae134f3703e2c57c500c7270f9b1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_deprecated.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRolesForUserDeprecatedHandlerFunc turns a function with the right signature into a get roles for user deprecated handler +type GetRolesForUserDeprecatedHandlerFunc func(GetRolesForUserDeprecatedParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetRolesForUserDeprecatedHandlerFunc) Handle(params GetRolesForUserDeprecatedParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GetRolesForUserDeprecatedHandler interface for that can handle valid get roles for user deprecated params +type GetRolesForUserDeprecatedHandler interface { + Handle(GetRolesForUserDeprecatedParams, *models.Principal) middleware.Responder +} + +// NewGetRolesForUserDeprecated creates a new http.Handler for the get roles for user deprecated operation +func NewGetRolesForUserDeprecated(ctx *middleware.Context, handler GetRolesForUserDeprecatedHandler) *GetRolesForUserDeprecated { + return &GetRolesForUserDeprecated{Context: ctx, Handler: handler} +} + +/* + GetRolesForUserDeprecated swagger:route GET /authz/users/{id}/roles authz getRolesForUserDeprecated + +get roles assigned to user (DB + OIDC). 
Deprecated, will be removed when 1.29 is not supported anymore +*/ +type GetRolesForUserDeprecated struct { + Context *middleware.Context + Handler GetRolesForUserDeprecatedHandler +} + +func (o *GetRolesForUserDeprecated) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGetRolesForUserDeprecatedParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_deprecated_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_deprecated_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..7c734d7c73d24b9b23893c20686c7dfafd9eccc0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_deprecated_parameters.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewGetRolesForUserDeprecatedParams creates a new GetRolesForUserDeprecatedParams object +// +// There are no default values defined in the spec. +func NewGetRolesForUserDeprecatedParams() GetRolesForUserDeprecatedParams { + + return GetRolesForUserDeprecatedParams{} +} + +// GetRolesForUserDeprecatedParams contains all the bound params for the get roles for user deprecated operation +// typically these are obtained from a http.Request +// +// swagger:parameters getRolesForUserDeprecated +type GetRolesForUserDeprecatedParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*user name + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetRolesForUserDeprecatedParams() beforehand. +func (o *GetRolesForUserDeprecatedParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *GetRolesForUserDeprecatedParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_deprecated_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_deprecated_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..6cf3bb7d45322b3bf66457a46bbc4af65ab5552b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_deprecated_responses.go @@ -0,0 +1,303 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRolesForUserDeprecatedOKCode is the HTTP code returned for type GetRolesForUserDeprecatedOK +const GetRolesForUserDeprecatedOKCode int = 200 + +/* +GetRolesForUserDeprecatedOK Role assigned users + +swagger:response getRolesForUserDeprecatedOK +*/ +type GetRolesForUserDeprecatedOK struct { + + /* + In: Body + */ + Payload models.RolesListResponse `json:"body,omitempty"` +} + +// NewGetRolesForUserDeprecatedOK creates GetRolesForUserDeprecatedOK with default headers values +func NewGetRolesForUserDeprecatedOK() *GetRolesForUserDeprecatedOK { + + return &GetRolesForUserDeprecatedOK{} +} + +// WithPayload adds the payload to the get roles for user deprecated o k response +func (o *GetRolesForUserDeprecatedOK) WithPayload(payload models.RolesListResponse) *GetRolesForUserDeprecatedOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for user deprecated o k response +func (o *GetRolesForUserDeprecatedOK) SetPayload(payload models.RolesListResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForUserDeprecatedOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = models.RolesListResponse{} + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// GetRolesForUserDeprecatedBadRequestCode is the HTTP code returned for type GetRolesForUserDeprecatedBadRequest +const GetRolesForUserDeprecatedBadRequestCode int = 400 + +/* +GetRolesForUserDeprecatedBadRequest Bad request + +swagger:response getRolesForUserDeprecatedBadRequest +*/ +type GetRolesForUserDeprecatedBadRequest struct { + + /* + In: Body + */ + 
Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesForUserDeprecatedBadRequest creates GetRolesForUserDeprecatedBadRequest with default headers values +func NewGetRolesForUserDeprecatedBadRequest() *GetRolesForUserDeprecatedBadRequest { + + return &GetRolesForUserDeprecatedBadRequest{} +} + +// WithPayload adds the payload to the get roles for user deprecated bad request response +func (o *GetRolesForUserDeprecatedBadRequest) WithPayload(payload *models.ErrorResponse) *GetRolesForUserDeprecatedBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for user deprecated bad request response +func (o *GetRolesForUserDeprecatedBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForUserDeprecatedBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetRolesForUserDeprecatedUnauthorizedCode is the HTTP code returned for type GetRolesForUserDeprecatedUnauthorized +const GetRolesForUserDeprecatedUnauthorizedCode int = 401 + +/* +GetRolesForUserDeprecatedUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response getRolesForUserDeprecatedUnauthorized +*/ +type GetRolesForUserDeprecatedUnauthorized struct { +} + +// NewGetRolesForUserDeprecatedUnauthorized creates GetRolesForUserDeprecatedUnauthorized with default headers values +func NewGetRolesForUserDeprecatedUnauthorized() *GetRolesForUserDeprecatedUnauthorized { + + return &GetRolesForUserDeprecatedUnauthorized{} +} + +// WriteResponse to the client +func (o *GetRolesForUserDeprecatedUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// GetRolesForUserDeprecatedForbiddenCode is the HTTP code returned for type GetRolesForUserDeprecatedForbidden +const GetRolesForUserDeprecatedForbiddenCode int = 403 + +/* +GetRolesForUserDeprecatedForbidden Forbidden + +swagger:response getRolesForUserDeprecatedForbidden +*/ +type GetRolesForUserDeprecatedForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesForUserDeprecatedForbidden creates GetRolesForUserDeprecatedForbidden with default headers values +func NewGetRolesForUserDeprecatedForbidden() *GetRolesForUserDeprecatedForbidden { + + return &GetRolesForUserDeprecatedForbidden{} +} + +// WithPayload adds the payload to the get roles for user deprecated forbidden response +func (o *GetRolesForUserDeprecatedForbidden) WithPayload(payload *models.ErrorResponse) *GetRolesForUserDeprecatedForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for user deprecated forbidden response +func (o *GetRolesForUserDeprecatedForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForUserDeprecatedForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err 
:= producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetRolesForUserDeprecatedNotFoundCode is the HTTP code returned for type GetRolesForUserDeprecatedNotFound +const GetRolesForUserDeprecatedNotFoundCode int = 404 + +/* +GetRolesForUserDeprecatedNotFound no role found for user + +swagger:response getRolesForUserDeprecatedNotFound +*/ +type GetRolesForUserDeprecatedNotFound struct { +} + +// NewGetRolesForUserDeprecatedNotFound creates GetRolesForUserDeprecatedNotFound with default headers values +func NewGetRolesForUserDeprecatedNotFound() *GetRolesForUserDeprecatedNotFound { + + return &GetRolesForUserDeprecatedNotFound{} +} + +// WriteResponse to the client +func (o *GetRolesForUserDeprecatedNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// GetRolesForUserDeprecatedUnprocessableEntityCode is the HTTP code returned for type GetRolesForUserDeprecatedUnprocessableEntity +const GetRolesForUserDeprecatedUnprocessableEntityCode int = 422 + +/* +GetRolesForUserDeprecatedUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? 
+ +swagger:response getRolesForUserDeprecatedUnprocessableEntity +*/ +type GetRolesForUserDeprecatedUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesForUserDeprecatedUnprocessableEntity creates GetRolesForUserDeprecatedUnprocessableEntity with default headers values +func NewGetRolesForUserDeprecatedUnprocessableEntity() *GetRolesForUserDeprecatedUnprocessableEntity { + + return &GetRolesForUserDeprecatedUnprocessableEntity{} +} + +// WithPayload adds the payload to the get roles for user deprecated unprocessable entity response +func (o *GetRolesForUserDeprecatedUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *GetRolesForUserDeprecatedUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for user deprecated unprocessable entity response +func (o *GetRolesForUserDeprecatedUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForUserDeprecatedUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetRolesForUserDeprecatedInternalServerErrorCode is the HTTP code returned for type GetRolesForUserDeprecatedInternalServerError +const GetRolesForUserDeprecatedInternalServerErrorCode int = 500 + +/* +GetRolesForUserDeprecatedInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response getRolesForUserDeprecatedInternalServerError +*/ +type GetRolesForUserDeprecatedInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesForUserDeprecatedInternalServerError creates GetRolesForUserDeprecatedInternalServerError with default headers values +func NewGetRolesForUserDeprecatedInternalServerError() *GetRolesForUserDeprecatedInternalServerError { + + return &GetRolesForUserDeprecatedInternalServerError{} +} + +// WithPayload adds the payload to the get roles for user deprecated internal server error response +func (o *GetRolesForUserDeprecatedInternalServerError) WithPayload(payload *models.ErrorResponse) *GetRolesForUserDeprecatedInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for user deprecated internal server error response +func (o *GetRolesForUserDeprecatedInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForUserDeprecatedInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_deprecated_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_deprecated_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..35d7932d00d1cead4e94d0a2ec8fa59982b51937 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_deprecated_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ 
+// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// GetRolesForUserDeprecatedURL generates an URL for the get roles for user deprecated operation +type GetRolesForUserDeprecatedURL struct { + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetRolesForUserDeprecatedURL) WithBasePath(bp string) *GetRolesForUserDeprecatedURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *GetRolesForUserDeprecatedURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetRolesForUserDeprecatedURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/users/{id}/roles" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on GetRolesForUserDeprecatedURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetRolesForUserDeprecatedURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetRolesForUserDeprecatedURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetRolesForUserDeprecatedURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetRolesForUserDeprecatedURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetRolesForUserDeprecatedURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetRolesForUserDeprecatedURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_parameters.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..0753cbffcd898944e2984ff165d2ce946454dd7d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_parameters.go @@ -0,0 +1,166 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// NewGetRolesForUserParams creates a new GetRolesForUserParams object +// with the default values initialized. 
+func NewGetRolesForUserParams() GetRolesForUserParams { + + var ( + // initialize parameters with default values + + includeFullRolesDefault = bool(false) + ) + + return GetRolesForUserParams{ + IncludeFullRoles: &includeFullRolesDefault, + } +} + +// GetRolesForUserParams contains all the bound params for the get roles for user operation +// typically these are obtained from a http.Request +// +// swagger:parameters getRolesForUser +type GetRolesForUserParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*user name + Required: true + In: path + */ + ID string + /*Whether to include detailed role information needed the roles permission + In: query + Default: false + */ + IncludeFullRoles *bool + /*The type of user + Required: true + In: path + */ + UserType string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetRolesForUserParams() beforehand. +func (o *GetRolesForUserParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + qIncludeFullRoles, qhkIncludeFullRoles, _ := qs.GetOK("includeFullRoles") + if err := o.bindIncludeFullRoles(qIncludeFullRoles, qhkIncludeFullRoles, route.Formats); err != nil { + res = append(res, err) + } + + rUserType, rhkUserType, _ := route.Params.GetOK("userType") + if err := o.bindUserType(rUserType, rhkUserType, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *GetRolesForUserParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} + +// bindIncludeFullRoles binds and validates parameter IncludeFullRoles from query. +func (o *GetRolesForUserParams) bindIncludeFullRoles(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + // Default values have been previously initialized by NewGetRolesForUserParams() + return nil + } + + value, err := swag.ConvertBool(raw) + if err != nil { + return errors.InvalidType("includeFullRoles", "query", "bool", raw) + } + o.IncludeFullRoles = &value + + return nil +} + +// bindUserType binds and validates parameter UserType from path. 
+func (o *GetRolesForUserParams) bindUserType(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.UserType = raw + + if err := o.validateUserType(formats); err != nil { + return err + } + + return nil +} + +// validateUserType carries on validations for parameter UserType +func (o *GetRolesForUserParams) validateUserType(formats strfmt.Registry) error { + + if err := validate.EnumCase("userType", "path", o.UserType, []interface{}{"oidc", "db"}, true); err != nil { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..fc4f992337be9a46bbddeae6b8984d5f79e78cdc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_responses.go @@ -0,0 +1,303 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRolesForUserOKCode is the HTTP code returned for type GetRolesForUserOK +const GetRolesForUserOKCode int = 200 + +/* +GetRolesForUserOK Role assigned users + +swagger:response getRolesForUserOK +*/ +type GetRolesForUserOK struct { + + /* + In: Body + */ + Payload models.RolesListResponse `json:"body,omitempty"` +} + +// NewGetRolesForUserOK creates GetRolesForUserOK with default headers values +func NewGetRolesForUserOK() *GetRolesForUserOK { + + return &GetRolesForUserOK{} +} + +// WithPayload adds the payload to the get roles for user o k response +func (o *GetRolesForUserOK) WithPayload(payload models.RolesListResponse) *GetRolesForUserOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for user o k response +func (o *GetRolesForUserOK) SetPayload(payload models.RolesListResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForUserOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = models.RolesListResponse{} + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// GetRolesForUserBadRequestCode is the HTTP code returned for type GetRolesForUserBadRequest +const GetRolesForUserBadRequestCode int = 400 + +/* +GetRolesForUserBadRequest Bad request + +swagger:response getRolesForUserBadRequest +*/ +type GetRolesForUserBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesForUserBadRequest creates GetRolesForUserBadRequest with default headers values +func NewGetRolesForUserBadRequest() *GetRolesForUserBadRequest { + + return 
&GetRolesForUserBadRequest{} +} + +// WithPayload adds the payload to the get roles for user bad request response +func (o *GetRolesForUserBadRequest) WithPayload(payload *models.ErrorResponse) *GetRolesForUserBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for user bad request response +func (o *GetRolesForUserBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForUserBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetRolesForUserUnauthorizedCode is the HTTP code returned for type GetRolesForUserUnauthorized +const GetRolesForUserUnauthorizedCode int = 401 + +/* +GetRolesForUserUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response getRolesForUserUnauthorized +*/ +type GetRolesForUserUnauthorized struct { +} + +// NewGetRolesForUserUnauthorized creates GetRolesForUserUnauthorized with default headers values +func NewGetRolesForUserUnauthorized() *GetRolesForUserUnauthorized { + + return &GetRolesForUserUnauthorized{} +} + +// WriteResponse to the client +func (o *GetRolesForUserUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// GetRolesForUserForbiddenCode is the HTTP code returned for type GetRolesForUserForbidden +const GetRolesForUserForbiddenCode int = 403 + +/* +GetRolesForUserForbidden Forbidden + +swagger:response getRolesForUserForbidden +*/ +type GetRolesForUserForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesForUserForbidden creates GetRolesForUserForbidden with default headers values +func NewGetRolesForUserForbidden() *GetRolesForUserForbidden { + + return &GetRolesForUserForbidden{} +} + +// WithPayload adds the payload to the get roles for user forbidden response +func (o *GetRolesForUserForbidden) WithPayload(payload *models.ErrorResponse) *GetRolesForUserForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for user forbidden response +func (o *GetRolesForUserForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForUserForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetRolesForUserNotFoundCode is the HTTP code returned for type GetRolesForUserNotFound +const GetRolesForUserNotFoundCode int = 
404 + +/* +GetRolesForUserNotFound no role found for user + +swagger:response getRolesForUserNotFound +*/ +type GetRolesForUserNotFound struct { +} + +// NewGetRolesForUserNotFound creates GetRolesForUserNotFound with default headers values +func NewGetRolesForUserNotFound() *GetRolesForUserNotFound { + + return &GetRolesForUserNotFound{} +} + +// WriteResponse to the client +func (o *GetRolesForUserNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// GetRolesForUserUnprocessableEntityCode is the HTTP code returned for type GetRolesForUserUnprocessableEntity +const GetRolesForUserUnprocessableEntityCode int = 422 + +/* +GetRolesForUserUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? + +swagger:response getRolesForUserUnprocessableEntity +*/ +type GetRolesForUserUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesForUserUnprocessableEntity creates GetRolesForUserUnprocessableEntity with default headers values +func NewGetRolesForUserUnprocessableEntity() *GetRolesForUserUnprocessableEntity { + + return &GetRolesForUserUnprocessableEntity{} +} + +// WithPayload adds the payload to the get roles for user unprocessable entity response +func (o *GetRolesForUserUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *GetRolesForUserUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for user unprocessable entity response +func (o *GetRolesForUserUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForUserUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer 
runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetRolesForUserInternalServerErrorCode is the HTTP code returned for type GetRolesForUserInternalServerError +const GetRolesForUserInternalServerErrorCode int = 500 + +/* +GetRolesForUserInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response getRolesForUserInternalServerError +*/ +type GetRolesForUserInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesForUserInternalServerError creates GetRolesForUserInternalServerError with default headers values +func NewGetRolesForUserInternalServerError() *GetRolesForUserInternalServerError { + + return &GetRolesForUserInternalServerError{} +} + +// WithPayload adds the payload to the get roles for user internal server error response +func (o *GetRolesForUserInternalServerError) WithPayload(payload *models.ErrorResponse) *GetRolesForUserInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles for user internal server error response +func (o *GetRolesForUserInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForUserInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_urlbuilder.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..d4a539dd776bb37c54324570b3d85463f3613ad0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_for_user_urlbuilder.go @@ -0,0 +1,134 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/swag" +) + +// GetRolesForUserURL generates an URL for the get roles for user operation +type GetRolesForUserURL struct { + ID string + UserType string + + IncludeFullRoles *bool + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetRolesForUserURL) WithBasePath(bp string) *GetRolesForUserURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *GetRolesForUserURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetRolesForUserURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/users/{id}/roles/{userType}" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on GetRolesForUserURL") + } + + userType := o.UserType + if userType != "" { + _path = strings.Replace(_path, "{userType}", userType, -1) + } else { + return nil, errors.New("userType is required on GetRolesForUserURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var includeFullRolesQ string + if o.IncludeFullRoles != nil { + includeFullRolesQ = swag.FormatBool(*o.IncludeFullRoles) + } + if includeFullRolesQ != "" { + qs.Set("includeFullRoles", includeFullRolesQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetRolesForUserURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetRolesForUserURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetRolesForUserURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetRolesForUserURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetRolesForUserURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return 
base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetRolesForUserURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..ae29af6699da32c7cee41a7a50fc73fe985f1432 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_parameters.go @@ -0,0 +1,57 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" +) + +// NewGetRolesParams creates a new GetRolesParams object +// +// There are no default values defined in the spec. +func NewGetRolesParams() GetRolesParams { + + return GetRolesParams{} +} + +// GetRolesParams contains all the bound params for the get roles operation +// typically these are obtained from a http.Request +// +// swagger:parameters getRoles +type GetRolesParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. 
+// +// To ensure default values, the struct must have been initialized with NewGetRolesParams() beforehand. +func (o *GetRolesParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..01f1a4ca903c551ce79642f3a237b031e08da928 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_responses.go @@ -0,0 +1,233 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetRolesOKCode is the HTTP code returned for type GetRolesOK +const GetRolesOKCode int = 200 + +/* +GetRolesOK Successful response. 
+ +swagger:response getRolesOK +*/ +type GetRolesOK struct { + + /* + In: Body + */ + Payload models.RolesListResponse `json:"body,omitempty"` +} + +// NewGetRolesOK creates GetRolesOK with default headers values +func NewGetRolesOK() *GetRolesOK { + + return &GetRolesOK{} +} + +// WithPayload adds the payload to the get roles o k response +func (o *GetRolesOK) WithPayload(payload models.RolesListResponse) *GetRolesOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles o k response +func (o *GetRolesOK) SetPayload(payload models.RolesListResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = models.RolesListResponse{} + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// GetRolesBadRequestCode is the HTTP code returned for type GetRolesBadRequest +const GetRolesBadRequestCode int = 400 + +/* +GetRolesBadRequest Malformed request. 
+ +swagger:response getRolesBadRequest +*/ +type GetRolesBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesBadRequest creates GetRolesBadRequest with default headers values +func NewGetRolesBadRequest() *GetRolesBadRequest { + + return &GetRolesBadRequest{} +} + +// WithPayload adds the payload to the get roles bad request response +func (o *GetRolesBadRequest) WithPayload(payload *models.ErrorResponse) *GetRolesBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles bad request response +func (o *GetRolesBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetRolesUnauthorizedCode is the HTTP code returned for type GetRolesUnauthorized +const GetRolesUnauthorizedCode int = 401 + +/* +GetRolesUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response getRolesUnauthorized +*/ +type GetRolesUnauthorized struct { +} + +// NewGetRolesUnauthorized creates GetRolesUnauthorized with default headers values +func NewGetRolesUnauthorized() *GetRolesUnauthorized { + + return &GetRolesUnauthorized{} +} + +// WriteResponse to the client +func (o *GetRolesUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// GetRolesForbiddenCode is the HTTP code returned for type GetRolesForbidden +const GetRolesForbiddenCode int = 403 + +/* +GetRolesForbidden Forbidden + +swagger:response getRolesForbidden +*/ +type GetRolesForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesForbidden creates GetRolesForbidden with default headers values +func NewGetRolesForbidden() *GetRolesForbidden { + + return &GetRolesForbidden{} +} + +// WithPayload adds the payload to the get roles forbidden response +func (o *GetRolesForbidden) WithPayload(payload *models.ErrorResponse) *GetRolesForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles forbidden response +func (o *GetRolesForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetRolesInternalServerErrorCode is the HTTP code returned for type GetRolesInternalServerError +const GetRolesInternalServerErrorCode int = 500 + +/* +GetRolesInternalServerError An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error. + +swagger:response getRolesInternalServerError +*/ +type GetRolesInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetRolesInternalServerError creates GetRolesInternalServerError with default headers values +func NewGetRolesInternalServerError() *GetRolesInternalServerError { + + return &GetRolesInternalServerError{} +} + +// WithPayload adds the payload to the get roles internal server error response +func (o *GetRolesInternalServerError) WithPayload(payload *models.ErrorResponse) *GetRolesInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get roles internal server error response +func (o *GetRolesInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetRolesInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..7c3a8b2cf595a4d0f9724fa10bf55eedc1022b09 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_roles_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// GetRolesURL generates an URL for the get roles operation +type GetRolesURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetRolesURL) WithBasePath(bp string) *GetRolesURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetRolesURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetRolesURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/roles" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetRolesURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetRolesURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetRolesURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetRolesURL") + } + if host == "" { + return nil, errors.New("host is required for 
a full url on GetRolesURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetRolesURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role.go new file mode 100644 index 0000000000000000000000000000000000000000..075eac1a0a1492e4c7af8c6619dd92b03345486a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role.go @@ -0,0 +1,186 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetUsersForRoleHandlerFunc turns a function with the right signature into a get users for role handler +type GetUsersForRoleHandlerFunc func(GetUsersForRoleParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetUsersForRoleHandlerFunc) Handle(params GetUsersForRoleParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GetUsersForRoleHandler interface for that can handle valid get users for role params +type GetUsersForRoleHandler interface { + Handle(GetUsersForRoleParams, *models.Principal) middleware.Responder +} + +// NewGetUsersForRole creates a new http.Handler for the get users for role operation +func NewGetUsersForRole(ctx *middleware.Context, handler GetUsersForRoleHandler) *GetUsersForRole { + return &GetUsersForRole{Context: ctx, Handler: handler} +} + +/* + GetUsersForRole swagger:route GET /authz/roles/{id}/user-assignments authz getUsersForRole + +get users assigned to role +*/ +type GetUsersForRole struct { + Context *middleware.Context + Handler GetUsersForRoleHandler +} + +func (o *GetUsersForRole) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGetUsersForRoleParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if 
err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// GetUsersForRoleOKBodyItems0 get users for role o k body items0 +// +// swagger:model GetUsersForRoleOKBodyItems0 +type GetUsersForRoleOKBodyItems0 struct { + + // user Id + UserID string `json:"userId,omitempty" yaml:"userId,omitempty"` + + // user type + // Required: true + UserType *models.UserTypeOutput `json:"userType" yaml:"userType"` +} + +// Validate validates this get users for role o k body items0 +func (o *GetUsersForRoleOKBodyItems0) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateUserType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *GetUsersForRoleOKBodyItems0) validateUserType(formats strfmt.Registry) error { + + if err := validate.Required("userType", "body", o.UserType); err != nil { + return err + } + + if err := validate.Required("userType", "body", o.UserType); err != nil { + return err + } + + if o.UserType != nil { + if err := o.UserType.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("userType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("userType") + } + return err + } + } + + return nil +} + +// ContextValidate validate this get users for role o k body items0 based on the context it is used +func (o *GetUsersForRoleOKBodyItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateUserType(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *GetUsersForRoleOKBodyItems0) contextValidateUserType(ctx context.Context, formats strfmt.Registry) error { + + if o.UserType != nil { + if err := o.UserType.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("userType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("userType") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (o *GetUsersForRoleOKBodyItems0) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetUsersForRoleOKBodyItems0) UnmarshalBinary(b []byte) error { + var res GetUsersForRoleOKBodyItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_deprecated.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_deprecated.go new file mode 100644 index 0000000000000000000000000000000000000000..7493b2a63c60272b2889811438aa1d39fbf31a86 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_deprecated.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetUsersForRoleDeprecatedHandlerFunc turns a function with the right signature into a get users for role deprecated handler +type GetUsersForRoleDeprecatedHandlerFunc func(GetUsersForRoleDeprecatedParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetUsersForRoleDeprecatedHandlerFunc) Handle(params GetUsersForRoleDeprecatedParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GetUsersForRoleDeprecatedHandler interface for that can handle valid get users for role deprecated params +type GetUsersForRoleDeprecatedHandler interface { + Handle(GetUsersForRoleDeprecatedParams, *models.Principal) middleware.Responder +} + +// NewGetUsersForRoleDeprecated creates a new http.Handler for the get users for role deprecated operation +func NewGetUsersForRoleDeprecated(ctx *middleware.Context, handler GetUsersForRoleDeprecatedHandler) *GetUsersForRoleDeprecated { + return &GetUsersForRoleDeprecated{Context: ctx, Handler: handler} +} + +/* + GetUsersForRoleDeprecated swagger:route GET /authz/roles/{id}/users authz getUsersForRoleDeprecated + +get users (db + OIDC) assigned to role. 
Deprecated, will be removed when 1.29 is not supported anymore +*/ +type GetUsersForRoleDeprecated struct { + Context *middleware.Context + Handler GetUsersForRoleDeprecatedHandler +} + +func (o *GetUsersForRoleDeprecated) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGetUsersForRoleDeprecatedParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_deprecated_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_deprecated_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..ea565c84376d881c4846f45ae51d09712489b89f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_deprecated_parameters.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewGetUsersForRoleDeprecatedParams creates a new GetUsersForRoleDeprecatedParams object +// +// There are no default values defined in the spec. +func NewGetUsersForRoleDeprecatedParams() GetUsersForRoleDeprecatedParams { + + return GetUsersForRoleDeprecatedParams{} +} + +// GetUsersForRoleDeprecatedParams contains all the bound params for the get users for role deprecated operation +// typically these are obtained from a http.Request +// +// swagger:parameters getUsersForRoleDeprecated +type GetUsersForRoleDeprecatedParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*role name + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetUsersForRoleDeprecatedParams() beforehand. +func (o *GetUsersForRoleDeprecatedParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *GetUsersForRoleDeprecatedParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_deprecated_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_deprecated_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..6e1b7d7408621c3e4a6da1ff1663e9d142f0fb03 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_deprecated_responses.go @@ -0,0 +1,258 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetUsersForRoleDeprecatedOKCode is the HTTP code returned for type GetUsersForRoleDeprecatedOK +const GetUsersForRoleDeprecatedOKCode int = 200 + +/* +GetUsersForRoleDeprecatedOK Users assigned to this role + +swagger:response getUsersForRoleDeprecatedOK +*/ +type GetUsersForRoleDeprecatedOK struct { + + /* + In: Body + */ + Payload []string `json:"body,omitempty"` +} + +// NewGetUsersForRoleDeprecatedOK creates GetUsersForRoleDeprecatedOK with default headers values +func NewGetUsersForRoleDeprecatedOK() *GetUsersForRoleDeprecatedOK { + + return &GetUsersForRoleDeprecatedOK{} +} + +// WithPayload adds the payload to the get users for role deprecated o k response +func (o *GetUsersForRoleDeprecatedOK) WithPayload(payload []string) *GetUsersForRoleDeprecatedOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get users for role deprecated o k response +func (o *GetUsersForRoleDeprecatedOK) SetPayload(payload []string) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetUsersForRoleDeprecatedOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = make([]string, 0, 50) + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// GetUsersForRoleDeprecatedBadRequestCode is the HTTP code returned for type GetUsersForRoleDeprecatedBadRequest +const GetUsersForRoleDeprecatedBadRequestCode int = 400 + +/* +GetUsersForRoleDeprecatedBadRequest Bad request + +swagger:response getUsersForRoleDeprecatedBadRequest +*/ +type GetUsersForRoleDeprecatedBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse 
`json:"body,omitempty"` +} + +// NewGetUsersForRoleDeprecatedBadRequest creates GetUsersForRoleDeprecatedBadRequest with default headers values +func NewGetUsersForRoleDeprecatedBadRequest() *GetUsersForRoleDeprecatedBadRequest { + + return &GetUsersForRoleDeprecatedBadRequest{} +} + +// WithPayload adds the payload to the get users for role deprecated bad request response +func (o *GetUsersForRoleDeprecatedBadRequest) WithPayload(payload *models.ErrorResponse) *GetUsersForRoleDeprecatedBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get users for role deprecated bad request response +func (o *GetUsersForRoleDeprecatedBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetUsersForRoleDeprecatedBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetUsersForRoleDeprecatedUnauthorizedCode is the HTTP code returned for type GetUsersForRoleDeprecatedUnauthorized +const GetUsersForRoleDeprecatedUnauthorizedCode int = 401 + +/* +GetUsersForRoleDeprecatedUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response getUsersForRoleDeprecatedUnauthorized +*/ +type GetUsersForRoleDeprecatedUnauthorized struct { +} + +// NewGetUsersForRoleDeprecatedUnauthorized creates GetUsersForRoleDeprecatedUnauthorized with default headers values +func NewGetUsersForRoleDeprecatedUnauthorized() *GetUsersForRoleDeprecatedUnauthorized { + + return &GetUsersForRoleDeprecatedUnauthorized{} +} + +// WriteResponse to the client +func (o *GetUsersForRoleDeprecatedUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// GetUsersForRoleDeprecatedForbiddenCode is the HTTP code returned for type GetUsersForRoleDeprecatedForbidden +const GetUsersForRoleDeprecatedForbiddenCode int = 403 + +/* +GetUsersForRoleDeprecatedForbidden Forbidden + +swagger:response getUsersForRoleDeprecatedForbidden +*/ +type GetUsersForRoleDeprecatedForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetUsersForRoleDeprecatedForbidden creates GetUsersForRoleDeprecatedForbidden with default headers values +func NewGetUsersForRoleDeprecatedForbidden() *GetUsersForRoleDeprecatedForbidden { + + return &GetUsersForRoleDeprecatedForbidden{} +} + +// WithPayload adds the payload to the get users for role deprecated forbidden response +func (o *GetUsersForRoleDeprecatedForbidden) WithPayload(payload *models.ErrorResponse) *GetUsersForRoleDeprecatedForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get users for role deprecated forbidden response +func (o *GetUsersForRoleDeprecatedForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetUsersForRoleDeprecatedForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err 
:= producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetUsersForRoleDeprecatedNotFoundCode is the HTTP code returned for type GetUsersForRoleDeprecatedNotFound +const GetUsersForRoleDeprecatedNotFoundCode int = 404 + +/* +GetUsersForRoleDeprecatedNotFound no role found + +swagger:response getUsersForRoleDeprecatedNotFound +*/ +type GetUsersForRoleDeprecatedNotFound struct { +} + +// NewGetUsersForRoleDeprecatedNotFound creates GetUsersForRoleDeprecatedNotFound with default headers values +func NewGetUsersForRoleDeprecatedNotFound() *GetUsersForRoleDeprecatedNotFound { + + return &GetUsersForRoleDeprecatedNotFound{} +} + +// WriteResponse to the client +func (o *GetUsersForRoleDeprecatedNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// GetUsersForRoleDeprecatedInternalServerErrorCode is the HTTP code returned for type GetUsersForRoleDeprecatedInternalServerError +const GetUsersForRoleDeprecatedInternalServerErrorCode int = 500 + +/* +GetUsersForRoleDeprecatedInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response getUsersForRoleDeprecatedInternalServerError +*/ +type GetUsersForRoleDeprecatedInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetUsersForRoleDeprecatedInternalServerError creates GetUsersForRoleDeprecatedInternalServerError with default headers values +func NewGetUsersForRoleDeprecatedInternalServerError() *GetUsersForRoleDeprecatedInternalServerError { + + return &GetUsersForRoleDeprecatedInternalServerError{} +} + +// WithPayload adds the payload to the get users for role deprecated internal server error response +func (o *GetUsersForRoleDeprecatedInternalServerError) WithPayload(payload *models.ErrorResponse) *GetUsersForRoleDeprecatedInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get users for role deprecated internal server error response +func (o *GetUsersForRoleDeprecatedInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetUsersForRoleDeprecatedInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_deprecated_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_deprecated_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..bba5a4e602cd0090652b9cd153480b3a96b98ca2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_deprecated_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ 
+// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// GetUsersForRoleDeprecatedURL generates an URL for the get users for role deprecated operation +type GetUsersForRoleDeprecatedURL struct { + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetUsersForRoleDeprecatedURL) WithBasePath(bp string) *GetUsersForRoleDeprecatedURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *GetUsersForRoleDeprecatedURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetUsersForRoleDeprecatedURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/roles/{id}/users" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on GetUsersForRoleDeprecatedURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetUsersForRoleDeprecatedURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetUsersForRoleDeprecatedURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetUsersForRoleDeprecatedURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetUsersForRoleDeprecatedURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetUsersForRoleDeprecatedURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetUsersForRoleDeprecatedURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_parameters.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..376d65c09868875d1893708e6f90cd2dca0d2ff8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_parameters.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewGetUsersForRoleParams creates a new GetUsersForRoleParams object +// +// There are no default values defined in the spec. +func NewGetUsersForRoleParams() GetUsersForRoleParams { + + return GetUsersForRoleParams{} +} + +// GetUsersForRoleParams contains all the bound params for the get users for role operation +// typically these are obtained from a http.Request +// +// swagger:parameters getUsersForRole +type GetUsersForRoleParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*role name + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetUsersForRoleParams() beforehand. 
+func (o *GetUsersForRoleParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. +func (o *GetUsersForRoleParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..8c98962988a5fd3902b966fa26525dbde69f17a8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_responses.go @@ -0,0 +1,258 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetUsersForRoleOKCode is the HTTP code returned for type GetUsersForRoleOK +const GetUsersForRoleOKCode int = 200 + +/* +GetUsersForRoleOK Users assigned to this role + +swagger:response getUsersForRoleOK +*/ +type GetUsersForRoleOK struct { + + /* + In: Body + */ + Payload []*GetUsersForRoleOKBodyItems0 `json:"body,omitempty"` +} + +// NewGetUsersForRoleOK creates GetUsersForRoleOK with default headers values +func NewGetUsersForRoleOK() *GetUsersForRoleOK { + + return &GetUsersForRoleOK{} +} + +// WithPayload adds the payload to the get users for role o k response +func (o *GetUsersForRoleOK) WithPayload(payload []*GetUsersForRoleOKBodyItems0) *GetUsersForRoleOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get users for role o k response +func (o *GetUsersForRoleOK) SetPayload(payload []*GetUsersForRoleOKBodyItems0) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetUsersForRoleOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = make([]*GetUsersForRoleOKBodyItems0, 0, 50) + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// GetUsersForRoleBadRequestCode is the HTTP code returned for type GetUsersForRoleBadRequest +const GetUsersForRoleBadRequestCode int = 400 + +/* +GetUsersForRoleBadRequest Bad request + +swagger:response getUsersForRoleBadRequest +*/ +type GetUsersForRoleBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetUsersForRoleBadRequest creates GetUsersForRoleBadRequest with default headers values +func 
NewGetUsersForRoleBadRequest() *GetUsersForRoleBadRequest { + + return &GetUsersForRoleBadRequest{} +} + +// WithPayload adds the payload to the get users for role bad request response +func (o *GetUsersForRoleBadRequest) WithPayload(payload *models.ErrorResponse) *GetUsersForRoleBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get users for role bad request response +func (o *GetUsersForRoleBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetUsersForRoleBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetUsersForRoleUnauthorizedCode is the HTTP code returned for type GetUsersForRoleUnauthorized +const GetUsersForRoleUnauthorizedCode int = 401 + +/* +GetUsersForRoleUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response getUsersForRoleUnauthorized +*/ +type GetUsersForRoleUnauthorized struct { +} + +// NewGetUsersForRoleUnauthorized creates GetUsersForRoleUnauthorized with default headers values +func NewGetUsersForRoleUnauthorized() *GetUsersForRoleUnauthorized { + + return &GetUsersForRoleUnauthorized{} +} + +// WriteResponse to the client +func (o *GetUsersForRoleUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// GetUsersForRoleForbiddenCode is the HTTP code returned for type GetUsersForRoleForbidden +const GetUsersForRoleForbiddenCode int = 403 + +/* +GetUsersForRoleForbidden Forbidden + +swagger:response getUsersForRoleForbidden +*/ +type GetUsersForRoleForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetUsersForRoleForbidden creates GetUsersForRoleForbidden with default headers values +func NewGetUsersForRoleForbidden() *GetUsersForRoleForbidden { + + return &GetUsersForRoleForbidden{} +} + +// WithPayload adds the payload to the get users for role forbidden response +func (o *GetUsersForRoleForbidden) WithPayload(payload *models.ErrorResponse) *GetUsersForRoleForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get users for role forbidden response +func (o *GetUsersForRoleForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetUsersForRoleForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetUsersForRoleNotFoundCode is the HTTP code returned for type GetUsersForRoleNotFound +const GetUsersForRoleNotFoundCode int = 
404 + +/* +GetUsersForRoleNotFound no role found + +swagger:response getUsersForRoleNotFound +*/ +type GetUsersForRoleNotFound struct { +} + +// NewGetUsersForRoleNotFound creates GetUsersForRoleNotFound with default headers values +func NewGetUsersForRoleNotFound() *GetUsersForRoleNotFound { + + return &GetUsersForRoleNotFound{} +} + +// WriteResponse to the client +func (o *GetUsersForRoleNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// GetUsersForRoleInternalServerErrorCode is the HTTP code returned for type GetUsersForRoleInternalServerError +const GetUsersForRoleInternalServerErrorCode int = 500 + +/* +GetUsersForRoleInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response getUsersForRoleInternalServerError +*/ +type GetUsersForRoleInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetUsersForRoleInternalServerError creates GetUsersForRoleInternalServerError with default headers values +func NewGetUsersForRoleInternalServerError() *GetUsersForRoleInternalServerError { + + return &GetUsersForRoleInternalServerError{} +} + +// WithPayload adds the payload to the get users for role internal server error response +func (o *GetUsersForRoleInternalServerError) WithPayload(payload *models.ErrorResponse) *GetUsersForRoleInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get users for role internal server error response +func (o *GetUsersForRoleInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetUsersForRoleInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + 
rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..65473bcd5af8b03f04c6e5ccd29d4df32d3fd81b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/get_users_for_role_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// GetUsersForRoleURL generates an URL for the get users for role operation +type GetUsersForRoleURL struct { + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetUsersForRoleURL) WithBasePath(bp string) *GetUsersForRoleURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *GetUsersForRoleURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetUsersForRoleURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/roles/{id}/user-assignments" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on GetUsersForRoleURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetUsersForRoleURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetUsersForRoleURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetUsersForRoleURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetUsersForRoleURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetUsersForRoleURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetUsersForRoleURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/has_permission.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/has_permission.go new file mode 100644 index 
0000000000000000000000000000000000000000..c609ee2634cdfba11d8d378dc2680fb18e5127e4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/has_permission.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// HasPermissionHandlerFunc turns a function with the right signature into a has permission handler +type HasPermissionHandlerFunc func(HasPermissionParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn HasPermissionHandlerFunc) Handle(params HasPermissionParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// HasPermissionHandler interface for that can handle valid has permission params +type HasPermissionHandler interface { + Handle(HasPermissionParams, *models.Principal) middleware.Responder +} + +// NewHasPermission creates a new http.Handler for the has permission operation +func NewHasPermission(ctx *middleware.Context, handler HasPermissionHandler) *HasPermission { + return &HasPermission{Context: ctx, Handler: handler} +} + +/* + HasPermission swagger:route POST /authz/roles/{id}/has-permission authz hasPermission + +Check whether role possesses this permission. 
+*/ +type HasPermission struct { + Context *middleware.Context + Handler HasPermissionHandler +} + +func (o *HasPermission) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewHasPermissionParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/has_permission_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/has_permission_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..1606ed9cd7c20769273a22a2978efffb0c13684f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/has_permission_parameters.go @@ -0,0 +1,120 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewHasPermissionParams creates a new HasPermissionParams object +// +// There are no default values defined in the spec. +func NewHasPermissionParams() HasPermissionParams { + + return HasPermissionParams{} +} + +// HasPermissionParams contains all the bound params for the has permission operation +// typically these are obtained from a http.Request +// +// swagger:parameters hasPermission +type HasPermissionParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.Permission + /*role name + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewHasPermissionParams() beforehand. 
+func (o *HasPermissionParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.Permission + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *HasPermissionParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/has_permission_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/has_permission_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..47cd852dbe02bfd5ae1b86394d010eb22f3c8259 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/has_permission_responses.go @@ -0,0 +1,273 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// HasPermissionOKCode is the HTTP code returned for type HasPermissionOK +const HasPermissionOKCode int = 200 + +/* +HasPermissionOK Permission check was successful + +swagger:response hasPermissionOK +*/ +type HasPermissionOK struct { + + /* + In: Body + */ + Payload bool `json:"body,omitempty"` +} + +// NewHasPermissionOK creates HasPermissionOK with default headers values +func NewHasPermissionOK() *HasPermissionOK { + + return &HasPermissionOK{} +} + +// WithPayload adds the payload to the has permission o k response +func (o *HasPermissionOK) WithPayload(payload bool) *HasPermissionOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the has permission o k response +func (o *HasPermissionOK) SetPayload(payload bool) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *HasPermissionOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// HasPermissionBadRequestCode is the HTTP code returned for type HasPermissionBadRequest +const HasPermissionBadRequestCode int = 400 + +/* +HasPermissionBadRequest Malformed request. 
+ +swagger:response hasPermissionBadRequest +*/ +type HasPermissionBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewHasPermissionBadRequest creates HasPermissionBadRequest with default headers values +func NewHasPermissionBadRequest() *HasPermissionBadRequest { + + return &HasPermissionBadRequest{} +} + +// WithPayload adds the payload to the has permission bad request response +func (o *HasPermissionBadRequest) WithPayload(payload *models.ErrorResponse) *HasPermissionBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the has permission bad request response +func (o *HasPermissionBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *HasPermissionBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// HasPermissionUnauthorizedCode is the HTTP code returned for type HasPermissionUnauthorized +const HasPermissionUnauthorizedCode int = 401 + +/* +HasPermissionUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response hasPermissionUnauthorized +*/ +type HasPermissionUnauthorized struct { +} + +// NewHasPermissionUnauthorized creates HasPermissionUnauthorized with default headers values +func NewHasPermissionUnauthorized() *HasPermissionUnauthorized { + + return &HasPermissionUnauthorized{} +} + +// WriteResponse to the client +func (o *HasPermissionUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// HasPermissionForbiddenCode is the HTTP code returned for type HasPermissionForbidden +const HasPermissionForbiddenCode int = 403 + +/* +HasPermissionForbidden Forbidden + +swagger:response hasPermissionForbidden +*/ +type HasPermissionForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewHasPermissionForbidden creates HasPermissionForbidden with default headers values +func NewHasPermissionForbidden() *HasPermissionForbidden { + + return &HasPermissionForbidden{} +} + +// WithPayload adds the payload to the has permission forbidden response +func (o *HasPermissionForbidden) WithPayload(payload *models.ErrorResponse) *HasPermissionForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the has permission forbidden response +func (o *HasPermissionForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *HasPermissionForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// HasPermissionUnprocessableEntityCode is the HTTP code returned for type HasPermissionUnprocessableEntity +const HasPermissionUnprocessableEntityCode int = 422 + +/* 
+HasPermissionUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? + +swagger:response hasPermissionUnprocessableEntity +*/ +type HasPermissionUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewHasPermissionUnprocessableEntity creates HasPermissionUnprocessableEntity with default headers values +func NewHasPermissionUnprocessableEntity() *HasPermissionUnprocessableEntity { + + return &HasPermissionUnprocessableEntity{} +} + +// WithPayload adds the payload to the has permission unprocessable entity response +func (o *HasPermissionUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *HasPermissionUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the has permission unprocessable entity response +func (o *HasPermissionUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *HasPermissionUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// HasPermissionInternalServerErrorCode is the HTTP code returned for type HasPermissionInternalServerError +const HasPermissionInternalServerErrorCode int = 500 + +/* +HasPermissionInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response hasPermissionInternalServerError +*/ +type HasPermissionInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewHasPermissionInternalServerError creates HasPermissionInternalServerError with default headers values +func NewHasPermissionInternalServerError() *HasPermissionInternalServerError { + + return &HasPermissionInternalServerError{} +} + +// WithPayload adds the payload to the has permission internal server error response +func (o *HasPermissionInternalServerError) WithPayload(payload *models.ErrorResponse) *HasPermissionInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the has permission internal server error response +func (o *HasPermissionInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *HasPermissionInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/has_permission_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/has_permission_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..6b9764851f53e8f5ff0365350957ee8d9dd122f6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/has_permission_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// HasPermissionURL generates an URL for the has permission operation +type HasPermissionURL struct { + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *HasPermissionURL) WithBasePath(bp string) *HasPermissionURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *HasPermissionURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *HasPermissionURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/roles/{id}/has-permission" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on HasPermissionURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *HasPermissionURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *HasPermissionURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a 
full url with scheme, host, path and query string +func (o *HasPermissionURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on HasPermissionURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on HasPermissionURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *HasPermissionURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/remove_permissions.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/remove_permissions.go new file mode 100644 index 0000000000000000000000000000000000000000..31f5be315288710dbfff22ef5d8d3aa3550ed63b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/remove_permissions.go @@ -0,0 +1,191 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// RemovePermissionsHandlerFunc turns a function with the right signature into a remove permissions handler +type RemovePermissionsHandlerFunc func(RemovePermissionsParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn RemovePermissionsHandlerFunc) Handle(params RemovePermissionsParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// RemovePermissionsHandler interface for that can handle valid remove permissions params +type RemovePermissionsHandler interface { + Handle(RemovePermissionsParams, *models.Principal) middleware.Responder +} + +// NewRemovePermissions creates a new http.Handler for the remove permissions operation +func NewRemovePermissions(ctx *middleware.Context, handler RemovePermissionsHandler) *RemovePermissions { + return &RemovePermissions{Context: ctx, Handler: handler} +} + +/* + RemovePermissions swagger:route POST /authz/roles/{id}/remove-permissions authz removePermissions + +Remove permissions from a role. If this results in an empty role, the role will be deleted. 
+*/ +type RemovePermissions struct { + Context *middleware.Context + Handler RemovePermissionsHandler +} + +func (o *RemovePermissions) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewRemovePermissionsParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// RemovePermissionsBody remove permissions body +// +// swagger:model RemovePermissionsBody +type RemovePermissionsBody struct { + + // permissions to remove from the role + // Required: true + Permissions []*models.Permission `json:"permissions" yaml:"permissions"` +} + +// Validate validates this remove permissions body +func (o *RemovePermissionsBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validatePermissions(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *RemovePermissionsBody) validatePermissions(formats strfmt.Registry) error { + + if err := validate.Required("body"+"."+"permissions", "body", o.Permissions); err != nil { + return err + } + + for i := 0; i < len(o.Permissions); i++ { + if swag.IsZero(o.Permissions[i]) { // not required + continue + } + + if o.Permissions[i] != nil { + if err := o.Permissions[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "permissions" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "permissions" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this remove permissions body based on the context it is used +func (o *RemovePermissionsBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidatePermissions(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *RemovePermissionsBody) contextValidatePermissions(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(o.Permissions); i++ { + + if o.Permissions[i] != nil { + if err := o.Permissions[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "permissions" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "permissions" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (o *RemovePermissionsBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *RemovePermissionsBody) UnmarshalBinary(b []byte) error { + var res RemovePermissionsBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/remove_permissions_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/remove_permissions_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..71aef580d0d7aebb052cc785eb04b2fc3795cba9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/remove_permissions_parameters.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewRemovePermissionsParams creates a new RemovePermissionsParams object +// +// There are no default values defined in the spec. 
+func NewRemovePermissionsParams() RemovePermissionsParams { + + return RemovePermissionsParams{} +} + +// RemovePermissionsParams contains all the bound params for the remove permissions operation +// typically these are obtained from a http.Request +// +// swagger:parameters removePermissions +type RemovePermissionsParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body RemovePermissionsBody + /*role name + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewRemovePermissionsParams() beforehand. +func (o *RemovePermissionsParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body RemovePermissionsBody + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *RemovePermissionsParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/remove_permissions_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/remove_permissions_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..6ccd641cce82461bb79e8a65308c1aabde732f60 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/remove_permissions_responses.go @@ -0,0 +1,280 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// RemovePermissionsOKCode is the HTTP code returned for type RemovePermissionsOK +const RemovePermissionsOKCode int = 200 + +/* +RemovePermissionsOK Permissions removed successfully + +swagger:response removePermissionsOK +*/ +type RemovePermissionsOK struct { +} + +// NewRemovePermissionsOK creates RemovePermissionsOK with default headers values +func NewRemovePermissionsOK() *RemovePermissionsOK { + + return &RemovePermissionsOK{} +} + +// WriteResponse to the client +func (o *RemovePermissionsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// RemovePermissionsBadRequestCode is the HTTP code returned for type RemovePermissionsBadRequest +const RemovePermissionsBadRequestCode int = 400 + +/* +RemovePermissionsBadRequest Malformed request. 
+ +swagger:response removePermissionsBadRequest +*/ +type RemovePermissionsBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRemovePermissionsBadRequest creates RemovePermissionsBadRequest with default headers values +func NewRemovePermissionsBadRequest() *RemovePermissionsBadRequest { + + return &RemovePermissionsBadRequest{} +} + +// WithPayload adds the payload to the remove permissions bad request response +func (o *RemovePermissionsBadRequest) WithPayload(payload *models.ErrorResponse) *RemovePermissionsBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the remove permissions bad request response +func (o *RemovePermissionsBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RemovePermissionsBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// RemovePermissionsUnauthorizedCode is the HTTP code returned for type RemovePermissionsUnauthorized +const RemovePermissionsUnauthorizedCode int = 401 + +/* +RemovePermissionsUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response removePermissionsUnauthorized +*/ +type RemovePermissionsUnauthorized struct { +} + +// NewRemovePermissionsUnauthorized creates RemovePermissionsUnauthorized with default headers values +func NewRemovePermissionsUnauthorized() *RemovePermissionsUnauthorized { + + return &RemovePermissionsUnauthorized{} +} + +// WriteResponse to the client +func (o *RemovePermissionsUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// RemovePermissionsForbiddenCode is the HTTP code returned for type RemovePermissionsForbidden +const RemovePermissionsForbiddenCode int = 403 + +/* +RemovePermissionsForbidden Forbidden + +swagger:response removePermissionsForbidden +*/ +type RemovePermissionsForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRemovePermissionsForbidden creates RemovePermissionsForbidden with default headers values +func NewRemovePermissionsForbidden() *RemovePermissionsForbidden { + + return &RemovePermissionsForbidden{} +} + +// WithPayload adds the payload to the remove permissions forbidden response +func (o *RemovePermissionsForbidden) WithPayload(payload *models.ErrorResponse) *RemovePermissionsForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the remove permissions forbidden response +func (o *RemovePermissionsForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RemovePermissionsForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// RemovePermissionsNotFoundCode is the HTTP code returned for type 
RemovePermissionsNotFound +const RemovePermissionsNotFoundCode int = 404 + +/* +RemovePermissionsNotFound no role found + +swagger:response removePermissionsNotFound +*/ +type RemovePermissionsNotFound struct { +} + +// NewRemovePermissionsNotFound creates RemovePermissionsNotFound with default headers values +func NewRemovePermissionsNotFound() *RemovePermissionsNotFound { + + return &RemovePermissionsNotFound{} +} + +// WriteResponse to the client +func (o *RemovePermissionsNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// RemovePermissionsUnprocessableEntityCode is the HTTP code returned for type RemovePermissionsUnprocessableEntity +const RemovePermissionsUnprocessableEntityCode int = 422 + +/* +RemovePermissionsUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? 
+ +swagger:response removePermissionsUnprocessableEntity +*/ +type RemovePermissionsUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRemovePermissionsUnprocessableEntity creates RemovePermissionsUnprocessableEntity with default headers values +func NewRemovePermissionsUnprocessableEntity() *RemovePermissionsUnprocessableEntity { + + return &RemovePermissionsUnprocessableEntity{} +} + +// WithPayload adds the payload to the remove permissions unprocessable entity response +func (o *RemovePermissionsUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *RemovePermissionsUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the remove permissions unprocessable entity response +func (o *RemovePermissionsUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RemovePermissionsUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// RemovePermissionsInternalServerErrorCode is the HTTP code returned for type RemovePermissionsInternalServerError +const RemovePermissionsInternalServerErrorCode int = 500 + +/* +RemovePermissionsInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response removePermissionsInternalServerError +*/ +type RemovePermissionsInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRemovePermissionsInternalServerError creates RemovePermissionsInternalServerError with default headers values +func NewRemovePermissionsInternalServerError() *RemovePermissionsInternalServerError { + + return &RemovePermissionsInternalServerError{} +} + +// WithPayload adds the payload to the remove permissions internal server error response +func (o *RemovePermissionsInternalServerError) WithPayload(payload *models.ErrorResponse) *RemovePermissionsInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the remove permissions internal server error response +func (o *RemovePermissionsInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RemovePermissionsInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/remove_permissions_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/remove_permissions_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..0fd896ce1217d21dbb1a6b95b63f699b83641a93 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/remove_permissions_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// RemovePermissionsURL generates an URL for the remove permissions operation +type RemovePermissionsURL struct { + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *RemovePermissionsURL) WithBasePath(bp string) *RemovePermissionsURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *RemovePermissionsURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *RemovePermissionsURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/roles/{id}/remove-permissions" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on RemovePermissionsURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *RemovePermissionsURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *RemovePermissionsURL) String() string { + 
return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *RemovePermissionsURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on RemovePermissionsURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on RemovePermissionsURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *RemovePermissionsURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_group.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_group.go new file mode 100644 index 0000000000000000000000000000000000000000..5a1c9cee72dd7482a7b53d42da2cf41700eb17c3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_group.go @@ -0,0 +1,175 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// RevokeRoleFromGroupHandlerFunc turns a function with the right signature into a revoke role from group handler +type RevokeRoleFromGroupHandlerFunc func(RevokeRoleFromGroupParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn RevokeRoleFromGroupHandlerFunc) Handle(params RevokeRoleFromGroupParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// RevokeRoleFromGroupHandler interface for that can handle valid revoke role from group params +type RevokeRoleFromGroupHandler interface { + Handle(RevokeRoleFromGroupParams, *models.Principal) middleware.Responder +} + +// NewRevokeRoleFromGroup creates a new http.Handler for the revoke role from group operation +func NewRevokeRoleFromGroup(ctx *middleware.Context, handler RevokeRoleFromGroupHandler) *RevokeRoleFromGroup { + return &RevokeRoleFromGroup{Context: ctx, Handler: handler} +} + +/* + RevokeRoleFromGroup swagger:route POST /authz/groups/{id}/revoke authz revokeRoleFromGroup + +Revoke a role from a group +*/ +type RevokeRoleFromGroup struct { + Context *middleware.Context + Handler RevokeRoleFromGroupHandler +} + +func (o *RevokeRoleFromGroup) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewRevokeRoleFromGroupParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is 
really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// RevokeRoleFromGroupBody revoke role from group body +// +// swagger:model RevokeRoleFromGroupBody +type RevokeRoleFromGroupBody struct { + + // group type + GroupType models.GroupType `json:"groupType,omitempty" yaml:"groupType,omitempty"` + + // the roles that revoked from group + Roles []string `json:"roles" yaml:"roles"` +} + +// Validate validates this revoke role from group body +func (o *RevokeRoleFromGroupBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateGroupType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *RevokeRoleFromGroupBody) validateGroupType(formats strfmt.Registry) error { + if swag.IsZero(o.GroupType) { // not required + return nil + } + + if err := o.GroupType.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "groupType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "groupType") + } + return err + } + + return nil +} + +// ContextValidate validate this revoke role from group body based on the context it is used +func (o *RevokeRoleFromGroupBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateGroupType(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *RevokeRoleFromGroupBody) contextValidateGroupType(ctx context.Context, formats strfmt.Registry) error { + + if err := o.GroupType.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "groupType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "groupType") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (o *RevokeRoleFromGroupBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *RevokeRoleFromGroupBody) UnmarshalBinary(b []byte) error { + var res RevokeRoleFromGroupBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_group_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_group_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..4f7ad5f7bdd7c92370f4e12c24bb13bb14a023cf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_group_parameters.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewRevokeRoleFromGroupParams creates a new RevokeRoleFromGroupParams object +// +// There are no default values defined in the spec. +func NewRevokeRoleFromGroupParams() RevokeRoleFromGroupParams { + + return RevokeRoleFromGroupParams{} +} + +// RevokeRoleFromGroupParams contains all the bound params for the revoke role from group operation +// typically these are obtained from a http.Request +// +// swagger:parameters revokeRoleFromGroup +type RevokeRoleFromGroupParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body RevokeRoleFromGroupBody + /*group name + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewRevokeRoleFromGroupParams() beforehand. 
+func (o *RevokeRoleFromGroupParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body RevokeRoleFromGroupBody + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *RevokeRoleFromGroupParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_group_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_group_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..d8e10f889c24aeff81a009e29a9591035ad0d2a7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_group_responses.go @@ -0,0 +1,235 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// RevokeRoleFromGroupOKCode is the HTTP code returned for type RevokeRoleFromGroupOK +const RevokeRoleFromGroupOKCode int = 200 + +/* +RevokeRoleFromGroupOK Role revoked successfully + +swagger:response revokeRoleFromGroupOK +*/ +type RevokeRoleFromGroupOK struct { +} + +// NewRevokeRoleFromGroupOK creates RevokeRoleFromGroupOK with default headers values +func NewRevokeRoleFromGroupOK() *RevokeRoleFromGroupOK { + + return &RevokeRoleFromGroupOK{} +} + +// WriteResponse to the client +func (o *RevokeRoleFromGroupOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// RevokeRoleFromGroupBadRequestCode is the HTTP code returned for type RevokeRoleFromGroupBadRequest +const RevokeRoleFromGroupBadRequestCode int = 400 + +/* +RevokeRoleFromGroupBadRequest Bad request + +swagger:response revokeRoleFromGroupBadRequest +*/ +type RevokeRoleFromGroupBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRevokeRoleFromGroupBadRequest creates RevokeRoleFromGroupBadRequest with default headers values +func NewRevokeRoleFromGroupBadRequest() *RevokeRoleFromGroupBadRequest { + + return &RevokeRoleFromGroupBadRequest{} +} + +// WithPayload adds the payload to the revoke role from group bad request response +func (o *RevokeRoleFromGroupBadRequest) WithPayload(payload *models.ErrorResponse) *RevokeRoleFromGroupBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the revoke role from group bad request response +func (o *RevokeRoleFromGroupBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o 
*RevokeRoleFromGroupBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// RevokeRoleFromGroupUnauthorizedCode is the HTTP code returned for type RevokeRoleFromGroupUnauthorized +const RevokeRoleFromGroupUnauthorizedCode int = 401 + +/* +RevokeRoleFromGroupUnauthorized Unauthorized or invalid credentials. + +swagger:response revokeRoleFromGroupUnauthorized +*/ +type RevokeRoleFromGroupUnauthorized struct { +} + +// NewRevokeRoleFromGroupUnauthorized creates RevokeRoleFromGroupUnauthorized with default headers values +func NewRevokeRoleFromGroupUnauthorized() *RevokeRoleFromGroupUnauthorized { + + return &RevokeRoleFromGroupUnauthorized{} +} + +// WriteResponse to the client +func (o *RevokeRoleFromGroupUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// RevokeRoleFromGroupForbiddenCode is the HTTP code returned for type RevokeRoleFromGroupForbidden +const RevokeRoleFromGroupForbiddenCode int = 403 + +/* +RevokeRoleFromGroupForbidden Forbidden + +swagger:response revokeRoleFromGroupForbidden +*/ +type RevokeRoleFromGroupForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRevokeRoleFromGroupForbidden creates RevokeRoleFromGroupForbidden with default headers values +func NewRevokeRoleFromGroupForbidden() *RevokeRoleFromGroupForbidden { + + return &RevokeRoleFromGroupForbidden{} +} + +// WithPayload adds the payload to the revoke role from group forbidden response +func (o *RevokeRoleFromGroupForbidden) WithPayload(payload *models.ErrorResponse) *RevokeRoleFromGroupForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the 
payload to the revoke role from group forbidden response +func (o *RevokeRoleFromGroupForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RevokeRoleFromGroupForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// RevokeRoleFromGroupNotFoundCode is the HTTP code returned for type RevokeRoleFromGroupNotFound +const RevokeRoleFromGroupNotFoundCode int = 404 + +/* +RevokeRoleFromGroupNotFound role or group is not found. + +swagger:response revokeRoleFromGroupNotFound +*/ +type RevokeRoleFromGroupNotFound struct { +} + +// NewRevokeRoleFromGroupNotFound creates RevokeRoleFromGroupNotFound with default headers values +func NewRevokeRoleFromGroupNotFound() *RevokeRoleFromGroupNotFound { + + return &RevokeRoleFromGroupNotFound{} +} + +// WriteResponse to the client +func (o *RevokeRoleFromGroupNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// RevokeRoleFromGroupInternalServerErrorCode is the HTTP code returned for type RevokeRoleFromGroupInternalServerError +const RevokeRoleFromGroupInternalServerErrorCode int = 500 + +/* +RevokeRoleFromGroupInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response revokeRoleFromGroupInternalServerError +*/ +type RevokeRoleFromGroupInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRevokeRoleFromGroupInternalServerError creates RevokeRoleFromGroupInternalServerError with default headers values +func NewRevokeRoleFromGroupInternalServerError() *RevokeRoleFromGroupInternalServerError { + + return &RevokeRoleFromGroupInternalServerError{} +} + +// WithPayload adds the payload to the revoke role from group internal server error response +func (o *RevokeRoleFromGroupInternalServerError) WithPayload(payload *models.ErrorResponse) *RevokeRoleFromGroupInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the revoke role from group internal server error response +func (o *RevokeRoleFromGroupInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RevokeRoleFromGroupInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_group_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_group_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..9ce72360b33dbdd152fee8259db2945183c2dea8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_group_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// 
Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// RevokeRoleFromGroupURL generates an URL for the revoke role from group operation +type RevokeRoleFromGroupURL struct { + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *RevokeRoleFromGroupURL) WithBasePath(bp string) *RevokeRoleFromGroupURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *RevokeRoleFromGroupURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *RevokeRoleFromGroupURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/groups/{id}/revoke" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on RevokeRoleFromGroupURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *RevokeRoleFromGroupURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *RevokeRoleFromGroupURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *RevokeRoleFromGroupURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on RevokeRoleFromGroupURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on RevokeRoleFromGroupURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *RevokeRoleFromGroupURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_user.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_user.go new file mode 100644 
index 0000000000000000000000000000000000000000..f8f835711f6c8ba8e5aaf26d79585becdead9a8c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_user.go @@ -0,0 +1,175 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// RevokeRoleFromUserHandlerFunc turns a function with the right signature into a revoke role from user handler +type RevokeRoleFromUserHandlerFunc func(RevokeRoleFromUserParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn RevokeRoleFromUserHandlerFunc) Handle(params RevokeRoleFromUserParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// RevokeRoleFromUserHandler interface for that can handle valid revoke role from user params +type RevokeRoleFromUserHandler interface { + Handle(RevokeRoleFromUserParams, *models.Principal) middleware.Responder +} + +// NewRevokeRoleFromUser creates a new http.Handler for the revoke role from user operation +func NewRevokeRoleFromUser(ctx *middleware.Context, handler RevokeRoleFromUserHandler) *RevokeRoleFromUser { + return &RevokeRoleFromUser{Context: ctx, Handler: handler} +} + +/* + RevokeRoleFromUser swagger:route POST /authz/users/{id}/revoke authz revokeRoleFromUser + +Revoke a role from 
a user +*/ +type RevokeRoleFromUser struct { + Context *middleware.Context + Handler RevokeRoleFromUserHandler +} + +func (o *RevokeRoleFromUser) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewRevokeRoleFromUserParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// RevokeRoleFromUserBody revoke role from user body +// +// swagger:model RevokeRoleFromUserBody +type RevokeRoleFromUserBody struct { + + // the roles that revoked from the key or user + Roles []string `json:"roles" yaml:"roles"` + + // user type + UserType models.UserTypeInput `json:"userType,omitempty" yaml:"userType,omitempty"` +} + +// Validate validates this revoke role from user body +func (o *RevokeRoleFromUserBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateUserType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *RevokeRoleFromUserBody) validateUserType(formats strfmt.Registry) error { + if swag.IsZero(o.UserType) { // not required + return nil + } + + if err := o.UserType.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." 
+ "userType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "userType") + } + return err + } + + return nil +} + +// ContextValidate validate this revoke role from user body based on the context it is used +func (o *RevokeRoleFromUserBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateUserType(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *RevokeRoleFromUserBody) contextValidateUserType(ctx context.Context, formats strfmt.Registry) error { + + if err := o.UserType.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "userType") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "userType") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (o *RevokeRoleFromUserBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *RevokeRoleFromUserBody) UnmarshalBinary(b []byte) error { + var res RevokeRoleFromUserBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_user_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_user_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..eb852a26e6b24c05a8c09572690d80f099e44726 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_user_parameters.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ 
_` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewRevokeRoleFromUserParams creates a new RevokeRoleFromUserParams object +// +// There are no default values defined in the spec. +func NewRevokeRoleFromUserParams() RevokeRoleFromUserParams { + + return RevokeRoleFromUserParams{} +} + +// RevokeRoleFromUserParams contains all the bound params for the revoke role from user operation +// typically these are obtained from a http.Request +// +// swagger:parameters revokeRoleFromUser +type RevokeRoleFromUserParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body RevokeRoleFromUserBody + /*user name + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewRevokeRoleFromUserParams() beforehand. 
+func (o *RevokeRoleFromUserParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body RevokeRoleFromUserBody + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *RevokeRoleFromUserParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_user_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_user_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..2454a795541d937e6dc8dd7ed47bf12719da7245 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_user_responses.go @@ -0,0 +1,255 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// RevokeRoleFromUserOKCode is the HTTP code returned for type RevokeRoleFromUserOK +const RevokeRoleFromUserOKCode int = 200 + +/* +RevokeRoleFromUserOK Role revoked successfully + +swagger:response revokeRoleFromUserOK +*/ +type RevokeRoleFromUserOK struct { +} + +// NewRevokeRoleFromUserOK creates RevokeRoleFromUserOK with default headers values +func NewRevokeRoleFromUserOK() *RevokeRoleFromUserOK { + + return &RevokeRoleFromUserOK{} +} + +// WriteResponse to the client +func (o *RevokeRoleFromUserOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// RevokeRoleFromUserBadRequestCode is the HTTP code returned for type RevokeRoleFromUserBadRequest +const RevokeRoleFromUserBadRequestCode int = 400 + +/* +RevokeRoleFromUserBadRequest Bad request + +swagger:response revokeRoleFromUserBadRequest +*/ +type RevokeRoleFromUserBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRevokeRoleFromUserBadRequest creates RevokeRoleFromUserBadRequest with default headers values +func NewRevokeRoleFromUserBadRequest() *RevokeRoleFromUserBadRequest { + + return &RevokeRoleFromUserBadRequest{} +} + +// WithPayload adds the payload to the revoke role from user bad request response +func (o *RevokeRoleFromUserBadRequest) WithPayload(payload *models.ErrorResponse) *RevokeRoleFromUserBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the revoke role from user bad request response +func (o *RevokeRoleFromUserBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o 
*RevokeRoleFromUserBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// RevokeRoleFromUserUnauthorizedCode is the HTTP code returned for type RevokeRoleFromUserUnauthorized +const RevokeRoleFromUserUnauthorizedCode int = 401 + +/* +RevokeRoleFromUserUnauthorized Unauthorized or invalid credentials. + +swagger:response revokeRoleFromUserUnauthorized +*/ +type RevokeRoleFromUserUnauthorized struct { +} + +// NewRevokeRoleFromUserUnauthorized creates RevokeRoleFromUserUnauthorized with default headers values +func NewRevokeRoleFromUserUnauthorized() *RevokeRoleFromUserUnauthorized { + + return &RevokeRoleFromUserUnauthorized{} +} + +// WriteResponse to the client +func (o *RevokeRoleFromUserUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// RevokeRoleFromUserForbiddenCode is the HTTP code returned for type RevokeRoleFromUserForbidden +const RevokeRoleFromUserForbiddenCode int = 403 + +/* +RevokeRoleFromUserForbidden Forbidden + +swagger:response revokeRoleFromUserForbidden +*/ +type RevokeRoleFromUserForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRevokeRoleFromUserForbidden creates RevokeRoleFromUserForbidden with default headers values +func NewRevokeRoleFromUserForbidden() *RevokeRoleFromUserForbidden { + + return &RevokeRoleFromUserForbidden{} +} + +// WithPayload adds the payload to the revoke role from user forbidden response +func (o *RevokeRoleFromUserForbidden) WithPayload(payload *models.ErrorResponse) *RevokeRoleFromUserForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the revoke role from 
user forbidden response +func (o *RevokeRoleFromUserForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RevokeRoleFromUserForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// RevokeRoleFromUserNotFoundCode is the HTTP code returned for type RevokeRoleFromUserNotFound +const RevokeRoleFromUserNotFoundCode int = 404 + +/* +RevokeRoleFromUserNotFound role or user is not found. + +swagger:response revokeRoleFromUserNotFound +*/ +type RevokeRoleFromUserNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRevokeRoleFromUserNotFound creates RevokeRoleFromUserNotFound with default headers values +func NewRevokeRoleFromUserNotFound() *RevokeRoleFromUserNotFound { + + return &RevokeRoleFromUserNotFound{} +} + +// WithPayload adds the payload to the revoke role from user not found response +func (o *RevokeRoleFromUserNotFound) WithPayload(payload *models.ErrorResponse) *RevokeRoleFromUserNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the revoke role from user not found response +func (o *RevokeRoleFromUserNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RevokeRoleFromUserNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// RevokeRoleFromUserInternalServerErrorCode is the HTTP code returned for type RevokeRoleFromUserInternalServerError +const RevokeRoleFromUserInternalServerErrorCode int = 500 + 
+/* +RevokeRoleFromUserInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response revokeRoleFromUserInternalServerError +*/ +type RevokeRoleFromUserInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRevokeRoleFromUserInternalServerError creates RevokeRoleFromUserInternalServerError with default headers values +func NewRevokeRoleFromUserInternalServerError() *RevokeRoleFromUserInternalServerError { + + return &RevokeRoleFromUserInternalServerError{} +} + +// WithPayload adds the payload to the revoke role from user internal server error response +func (o *RevokeRoleFromUserInternalServerError) WithPayload(payload *models.ErrorResponse) *RevokeRoleFromUserInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the revoke role from user internal server error response +func (o *RevokeRoleFromUserInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RevokeRoleFromUserInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_user_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_user_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..929947abe63f4d1052b203d4c93dc2815d9260ff --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/authz/revoke_role_from_user_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ 
__ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package authz + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// RevokeRoleFromUserURL generates an URL for the revoke role from user operation +type RevokeRoleFromUserURL struct { + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *RevokeRoleFromUserURL) WithBasePath(bp string) *RevokeRoleFromUserURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *RevokeRoleFromUserURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *RevokeRoleFromUserURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/authz/users/{id}/revoke" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on RevokeRoleFromUserURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *RevokeRoleFromUserURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *RevokeRoleFromUserURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *RevokeRoleFromUserURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on RevokeRoleFromUserURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on RevokeRoleFromUserURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *RevokeRoleFromUserURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_cancel.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_cancel.go new file mode 100644 index 
0000000000000000000000000000000000000000..26c772ffad1844c769fa5a8ea0f6c1ce0e30664d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_cancel.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsCancelHandlerFunc turns a function with the right signature into a backups cancel handler +type BackupsCancelHandlerFunc func(BackupsCancelParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn BackupsCancelHandlerFunc) Handle(params BackupsCancelParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// BackupsCancelHandler interface for that can handle valid backups cancel params +type BackupsCancelHandler interface { + Handle(BackupsCancelParams, *models.Principal) middleware.Responder +} + +// NewBackupsCancel creates a new http.Handler for the backups cancel operation +func NewBackupsCancel(ctx *middleware.Context, handler BackupsCancelHandler) *BackupsCancel { + return &BackupsCancel{Context: ctx, Handler: handler} +} + +/* + BackupsCancel swagger:route DELETE /backups/{backend}/{id} backups backupsCancel + +# Cancel backup + +Cancel created backup with specified ID +*/ +type BackupsCancel struct { + Context *middleware.Context + Handler BackupsCancelHandler +} + +func (o *BackupsCancel) ServeHTTP(rw http.ResponseWriter, r 
*http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewBackupsCancelParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_cancel_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_cancel_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..afce5389cdd10a2702447dbe8cb3443a3514eae8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_cancel_parameters.go @@ -0,0 +1,163 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewBackupsCancelParams creates a new BackupsCancelParams object +// +// There are no default values defined in the spec. +func NewBackupsCancelParams() BackupsCancelParams { + + return BackupsCancelParams{} +} + +// BackupsCancelParams contains all the bound params for the backups cancel operation +// typically these are obtained from a http.Request +// +// swagger:parameters backups.cancel +type BackupsCancelParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*Backup backend name e.g. filesystem, gcs, s3. + Required: true + In: path + */ + Backend string + /*Name of the bucket, container, volume, etc + In: query + */ + Bucket *string + /*The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed. + Required: true + In: path + */ + ID string + /*The path within the bucket + In: query + */ + Path *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewBackupsCancelParams() beforehand. 
+func (o *BackupsCancelParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + rBackend, rhkBackend, _ := route.Params.GetOK("backend") + if err := o.bindBackend(rBackend, rhkBackend, route.Formats); err != nil { + res = append(res, err) + } + + qBucket, qhkBucket, _ := qs.GetOK("bucket") + if err := o.bindBucket(qBucket, qhkBucket, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + qPath, qhkPath, _ := qs.GetOK("path") + if err := o.bindPath(qPath, qhkPath, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindBackend binds and validates parameter Backend from path. +func (o *BackupsCancelParams) bindBackend(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.Backend = raw + + return nil +} + +// bindBucket binds and validates parameter Bucket from query. +func (o *BackupsCancelParams) bindBucket(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Bucket = &raw + + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *BackupsCancelParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} + +// bindPath binds and validates parameter Path from query. +func (o *BackupsCancelParams) bindPath(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Path = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_cancel_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_cancel_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..10256a22e4428b92372a50eb1a77f0683bfef0f0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_cancel_responses.go @@ -0,0 +1,210 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsCancelNoContentCode is the HTTP code returned for type BackupsCancelNoContent +const BackupsCancelNoContentCode int = 204 + +/* +BackupsCancelNoContent Successfully deleted. 
+ +swagger:response backupsCancelNoContent +*/ +type BackupsCancelNoContent struct { +} + +// NewBackupsCancelNoContent creates BackupsCancelNoContent with default headers values +func NewBackupsCancelNoContent() *BackupsCancelNoContent { + + return &BackupsCancelNoContent{} +} + +// WriteResponse to the client +func (o *BackupsCancelNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// BackupsCancelUnauthorizedCode is the HTTP code returned for type BackupsCancelUnauthorized +const BackupsCancelUnauthorizedCode int = 401 + +/* +BackupsCancelUnauthorized Unauthorized or invalid credentials. + +swagger:response backupsCancelUnauthorized +*/ +type BackupsCancelUnauthorized struct { +} + +// NewBackupsCancelUnauthorized creates BackupsCancelUnauthorized with default headers values +func NewBackupsCancelUnauthorized() *BackupsCancelUnauthorized { + + return &BackupsCancelUnauthorized{} +} + +// WriteResponse to the client +func (o *BackupsCancelUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// BackupsCancelForbiddenCode is the HTTP code returned for type BackupsCancelForbidden +const BackupsCancelForbiddenCode int = 403 + +/* +BackupsCancelForbidden Forbidden + +swagger:response backupsCancelForbidden +*/ +type BackupsCancelForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsCancelForbidden creates BackupsCancelForbidden with default headers values +func NewBackupsCancelForbidden() *BackupsCancelForbidden { + + return &BackupsCancelForbidden{} +} + +// WithPayload adds the payload to the backups cancel forbidden response +func (o *BackupsCancelForbidden) WithPayload(payload *models.ErrorResponse) 
*BackupsCancelForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups cancel forbidden response +func (o *BackupsCancelForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsCancelForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsCancelUnprocessableEntityCode is the HTTP code returned for type BackupsCancelUnprocessableEntity +const BackupsCancelUnprocessableEntityCode int = 422 + +/* +BackupsCancelUnprocessableEntity Invalid backup cancellation attempt. + +swagger:response backupsCancelUnprocessableEntity +*/ +type BackupsCancelUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsCancelUnprocessableEntity creates BackupsCancelUnprocessableEntity with default headers values +func NewBackupsCancelUnprocessableEntity() *BackupsCancelUnprocessableEntity { + + return &BackupsCancelUnprocessableEntity{} +} + +// WithPayload adds the payload to the backups cancel unprocessable entity response +func (o *BackupsCancelUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *BackupsCancelUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups cancel unprocessable entity response +func (o *BackupsCancelUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsCancelUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery 
middleware deal with this + } + } +} + +// BackupsCancelInternalServerErrorCode is the HTTP code returned for type BackupsCancelInternalServerError +const BackupsCancelInternalServerErrorCode int = 500 + +/* +BackupsCancelInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response backupsCancelInternalServerError +*/ +type BackupsCancelInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsCancelInternalServerError creates BackupsCancelInternalServerError with default headers values +func NewBackupsCancelInternalServerError() *BackupsCancelInternalServerError { + + return &BackupsCancelInternalServerError{} +} + +// WithPayload adds the payload to the backups cancel internal server error response +func (o *BackupsCancelInternalServerError) WithPayload(payload *models.ErrorResponse) *BackupsCancelInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups cancel internal server error response +func (o *BackupsCancelInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsCancelInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_cancel_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_cancel_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..d7fd6a72b3bc12e42e4522e776f7491e992dd78e --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_cancel_urlbuilder.go @@ -0,0 +1,141 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// BackupsCancelURL generates an URL for the backups cancel operation +type BackupsCancelURL struct { + Backend string + ID string + + Bucket *string + Path *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *BackupsCancelURL) WithBasePath(bp string) *BackupsCancelURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *BackupsCancelURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *BackupsCancelURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/backups/{backend}/{id}" + + backend := o.Backend + if backend != "" { + _path = strings.Replace(_path, "{backend}", backend, -1) + } else { + return nil, errors.New("backend is required on BackupsCancelURL") + } + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on BackupsCancelURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var bucketQ string + if o.Bucket != nil { + bucketQ = *o.Bucket + } + if bucketQ != "" { + qs.Set("bucket", bucketQ) + } + + var pathQ string + if o.Path != nil { + pathQ = *o.Path + } + if pathQ != "" { + qs.Set("path", pathQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *BackupsCancelURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *BackupsCancelURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *BackupsCancelURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on BackupsCancelURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on BackupsCancelURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + 
+// StringFull returns the string representation of a complete url +func (o *BackupsCancelURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create.go new file mode 100644 index 0000000000000000000000000000000000000000..3f8e0098a7ce5887edc1487b51a68ae5156d9825 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsCreateHandlerFunc turns a function with the right signature into a backups create handler +type BackupsCreateHandlerFunc func(BackupsCreateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn BackupsCreateHandlerFunc) Handle(params BackupsCreateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// BackupsCreateHandler interface for that can handle valid backups create params +type BackupsCreateHandler interface { + Handle(BackupsCreateParams, *models.Principal) middleware.Responder +} + +// NewBackupsCreate creates a new http.Handler for the backups create operation +func NewBackupsCreate(ctx *middleware.Context, handler BackupsCreateHandler) *BackupsCreate { + return &BackupsCreate{Context: ctx, Handler: handler} +} + +/* + BackupsCreate swagger:route POST /backups/{backend} backups backupsCreate + +# Start a backup process + +Start creating a backup for a set of collections.

    Notes:
    - Weaviate uses gzip compression by default.
    - Weaviate stays usable while a backup process is ongoing. +*/ +type BackupsCreate struct { + Context *middleware.Context + Handler BackupsCreateHandler +} + +func (o *BackupsCreate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewBackupsCreateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..f6698aa08991b48688f3b1aa79992082bb5dece0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_parameters.go @@ -0,0 +1,120 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewBackupsCreateParams creates a new BackupsCreateParams object +// +// There are no default values defined in the spec. +func NewBackupsCreateParams() BackupsCreateParams { + + return BackupsCreateParams{} +} + +// BackupsCreateParams contains all the bound params for the backups create operation +// typically these are obtained from a http.Request +// +// swagger:parameters backups.create +type BackupsCreateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*Backup backend name e.g. `filesystem`, `gcs`, `s3`, `azure`. + Required: true + In: path + */ + Backend string + /* + Required: true + In: body + */ + Body *models.BackupCreateRequest +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewBackupsCreateParams() beforehand. 
+func (o *BackupsCreateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rBackend, rhkBackend, _ := route.Params.GetOK("backend") + if err := o.bindBackend(rBackend, rhkBackend, route.Formats); err != nil { + res = append(res, err) + } + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.BackupCreateRequest + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindBackend binds and validates parameter Backend from path. 
+func (o *BackupsCreateParams) bindBackend(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.Backend = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..0fcf8e8a79a44ed03cda6a0496bdecbc4f148f04 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_responses.go @@ -0,0 +1,230 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsCreateOKCode is the HTTP code returned for type BackupsCreateOK +const BackupsCreateOKCode int = 200 + +/* +BackupsCreateOK Backup create process successfully started. 
+ +swagger:response backupsCreateOK +*/ +type BackupsCreateOK struct { + + /* + In: Body + */ + Payload *models.BackupCreateResponse `json:"body,omitempty"` +} + +// NewBackupsCreateOK creates BackupsCreateOK with default headers values +func NewBackupsCreateOK() *BackupsCreateOK { + + return &BackupsCreateOK{} +} + +// WithPayload adds the payload to the backups create o k response +func (o *BackupsCreateOK) WithPayload(payload *models.BackupCreateResponse) *BackupsCreateOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups create o k response +func (o *BackupsCreateOK) SetPayload(payload *models.BackupCreateResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsCreateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsCreateUnauthorizedCode is the HTTP code returned for type BackupsCreateUnauthorized +const BackupsCreateUnauthorizedCode int = 401 + +/* +BackupsCreateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response backupsCreateUnauthorized +*/ +type BackupsCreateUnauthorized struct { +} + +// NewBackupsCreateUnauthorized creates BackupsCreateUnauthorized with default headers values +func NewBackupsCreateUnauthorized() *BackupsCreateUnauthorized { + + return &BackupsCreateUnauthorized{} +} + +// WriteResponse to the client +func (o *BackupsCreateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// BackupsCreateForbiddenCode is the HTTP code returned for type BackupsCreateForbidden +const BackupsCreateForbiddenCode int = 403 + +/* +BackupsCreateForbidden Forbidden + +swagger:response backupsCreateForbidden +*/ +type BackupsCreateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsCreateForbidden creates BackupsCreateForbidden with default headers values +func NewBackupsCreateForbidden() *BackupsCreateForbidden { + + return &BackupsCreateForbidden{} +} + +// WithPayload adds the payload to the backups create forbidden response +func (o *BackupsCreateForbidden) WithPayload(payload *models.ErrorResponse) *BackupsCreateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups create forbidden response +func (o *BackupsCreateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsCreateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsCreateUnprocessableEntityCode is the HTTP code returned for type BackupsCreateUnprocessableEntity +const BackupsCreateUnprocessableEntityCode int = 422 + +/* 
+BackupsCreateUnprocessableEntity Invalid backup creation attempt. + +swagger:response backupsCreateUnprocessableEntity +*/ +type BackupsCreateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsCreateUnprocessableEntity creates BackupsCreateUnprocessableEntity with default headers values +func NewBackupsCreateUnprocessableEntity() *BackupsCreateUnprocessableEntity { + + return &BackupsCreateUnprocessableEntity{} +} + +// WithPayload adds the payload to the backups create unprocessable entity response +func (o *BackupsCreateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *BackupsCreateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups create unprocessable entity response +func (o *BackupsCreateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsCreateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsCreateInternalServerErrorCode is the HTTP code returned for type BackupsCreateInternalServerError +const BackupsCreateInternalServerErrorCode int = 500 + +/* +BackupsCreateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response backupsCreateInternalServerError +*/ +type BackupsCreateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsCreateInternalServerError creates BackupsCreateInternalServerError with default headers values +func NewBackupsCreateInternalServerError() *BackupsCreateInternalServerError { + + return &BackupsCreateInternalServerError{} +} + +// WithPayload adds the payload to the backups create internal server error response +func (o *BackupsCreateInternalServerError) WithPayload(payload *models.ErrorResponse) *BackupsCreateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups create internal server error response +func (o *BackupsCreateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsCreateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_status.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_status.go new file mode 100644 index 0000000000000000000000000000000000000000..cbc32c403f37497229ad3044c7de410e248e56ed --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_status.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsCreateStatusHandlerFunc turns a function with the right signature into a backups create status handler +type BackupsCreateStatusHandlerFunc func(BackupsCreateStatusParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn BackupsCreateStatusHandlerFunc) Handle(params BackupsCreateStatusParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// BackupsCreateStatusHandler interface for that can handle valid backups create status params +type BackupsCreateStatusHandler interface { + Handle(BackupsCreateStatusParams, *models.Principal) middleware.Responder +} + +// NewBackupsCreateStatus creates a new http.Handler for the backups create status operation +func NewBackupsCreateStatus(ctx *middleware.Context, handler BackupsCreateStatusHandler) *BackupsCreateStatus { + return &BackupsCreateStatus{Context: ctx, Handler: handler} +} + +/* + BackupsCreateStatus swagger:route GET /backups/{backend}/{id} backups backupsCreateStatus + +# Get backup process status + +Returns status of backup creation attempt for a set of collections.

    All client implementations have a `wait for completion` option which will poll the backup status in the background and only return once the backup has completed (successfully or unsuccessfully). If you set the `wait for completion` option to false, you can also check the status yourself using this endpoint. +*/ +type BackupsCreateStatus struct { + Context *middleware.Context + Handler BackupsCreateStatusHandler +} + +func (o *BackupsCreateStatus) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewBackupsCreateStatusParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_status_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_status_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..cc66eceea25b771e9922645c84e2f41f81705551 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_status_parameters.go @@ -0,0 +1,163 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewBackupsCreateStatusParams creates a new BackupsCreateStatusParams object +// +// There are no default values defined in the spec. +func NewBackupsCreateStatusParams() BackupsCreateStatusParams { + + return BackupsCreateStatusParams{} +} + +// BackupsCreateStatusParams contains all the bound params for the backups create status operation +// typically these are obtained from a http.Request +// +// swagger:parameters backups.create.status +type BackupsCreateStatusParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*Backup backend name e.g. filesystem, gcs, s3. + Required: true + In: path + */ + Backend string + /*Name of the bucket, container, volume, etc + In: query + */ + Bucket *string + /*The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed. + Required: true + In: path + */ + ID string + /*The path within the bucket + In: query + */ + Path *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewBackupsCreateStatusParams() beforehand. 
+func (o *BackupsCreateStatusParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + rBackend, rhkBackend, _ := route.Params.GetOK("backend") + if err := o.bindBackend(rBackend, rhkBackend, route.Formats); err != nil { + res = append(res, err) + } + + qBucket, qhkBucket, _ := qs.GetOK("bucket") + if err := o.bindBucket(qBucket, qhkBucket, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + qPath, qhkPath, _ := qs.GetOK("path") + if err := o.bindPath(qPath, qhkPath, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindBackend binds and validates parameter Backend from path. +func (o *BackupsCreateStatusParams) bindBackend(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.Backend = raw + + return nil +} + +// bindBucket binds and validates parameter Bucket from query. +func (o *BackupsCreateStatusParams) bindBucket(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Bucket = &raw + + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *BackupsCreateStatusParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} + +// bindPath binds and validates parameter Path from query. +func (o *BackupsCreateStatusParams) bindPath(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Path = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_status_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_status_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..6ab2cd8585147f6a1342ab3a0bd43383c26b6bf4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_status_responses.go @@ -0,0 +1,275 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsCreateStatusOKCode is the HTTP code returned for type BackupsCreateStatusOK +const BackupsCreateStatusOKCode int = 200 + +/* +BackupsCreateStatusOK Backup creation status successfully returned + +swagger:response backupsCreateStatusOK +*/ +type BackupsCreateStatusOK struct { + + /* + In: Body + */ + Payload *models.BackupCreateStatusResponse `json:"body,omitempty"` +} + +// NewBackupsCreateStatusOK creates BackupsCreateStatusOK with default headers values +func NewBackupsCreateStatusOK() *BackupsCreateStatusOK { + + return &BackupsCreateStatusOK{} +} + +// WithPayload adds the payload to the backups create status o k response +func (o *BackupsCreateStatusOK) WithPayload(payload *models.BackupCreateStatusResponse) *BackupsCreateStatusOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups create status o k response +func (o *BackupsCreateStatusOK) SetPayload(payload *models.BackupCreateStatusResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsCreateStatusOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsCreateStatusUnauthorizedCode is the HTTP code returned for type BackupsCreateStatusUnauthorized +const BackupsCreateStatusUnauthorizedCode int = 401 + +/* +BackupsCreateStatusUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response backupsCreateStatusUnauthorized +*/ +type BackupsCreateStatusUnauthorized struct { +} + +// NewBackupsCreateStatusUnauthorized creates BackupsCreateStatusUnauthorized with default headers values +func NewBackupsCreateStatusUnauthorized() *BackupsCreateStatusUnauthorized { + + return &BackupsCreateStatusUnauthorized{} +} + +// WriteResponse to the client +func (o *BackupsCreateStatusUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// BackupsCreateStatusForbiddenCode is the HTTP code returned for type BackupsCreateStatusForbidden +const BackupsCreateStatusForbiddenCode int = 403 + +/* +BackupsCreateStatusForbidden Forbidden + +swagger:response backupsCreateStatusForbidden +*/ +type BackupsCreateStatusForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsCreateStatusForbidden creates BackupsCreateStatusForbidden with default headers values +func NewBackupsCreateStatusForbidden() *BackupsCreateStatusForbidden { + + return &BackupsCreateStatusForbidden{} +} + +// WithPayload adds the payload to the backups create status forbidden response +func (o *BackupsCreateStatusForbidden) WithPayload(payload *models.ErrorResponse) *BackupsCreateStatusForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups create status forbidden response +func (o *BackupsCreateStatusForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsCreateStatusForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// 
BackupsCreateStatusNotFoundCode is the HTTP code returned for type BackupsCreateStatusNotFound +const BackupsCreateStatusNotFoundCode int = 404 + +/* +BackupsCreateStatusNotFound Not Found - Backup does not exist + +swagger:response backupsCreateStatusNotFound +*/ +type BackupsCreateStatusNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsCreateStatusNotFound creates BackupsCreateStatusNotFound with default headers values +func NewBackupsCreateStatusNotFound() *BackupsCreateStatusNotFound { + + return &BackupsCreateStatusNotFound{} +} + +// WithPayload adds the payload to the backups create status not found response +func (o *BackupsCreateStatusNotFound) WithPayload(payload *models.ErrorResponse) *BackupsCreateStatusNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups create status not found response +func (o *BackupsCreateStatusNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsCreateStatusNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsCreateStatusUnprocessableEntityCode is the HTTP code returned for type BackupsCreateStatusUnprocessableEntity +const BackupsCreateStatusUnprocessableEntityCode int = 422 + +/* +BackupsCreateStatusUnprocessableEntity Invalid backup restoration status attempt. 
+ +swagger:response backupsCreateStatusUnprocessableEntity +*/ +type BackupsCreateStatusUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsCreateStatusUnprocessableEntity creates BackupsCreateStatusUnprocessableEntity with default headers values +func NewBackupsCreateStatusUnprocessableEntity() *BackupsCreateStatusUnprocessableEntity { + + return &BackupsCreateStatusUnprocessableEntity{} +} + +// WithPayload adds the payload to the backups create status unprocessable entity response +func (o *BackupsCreateStatusUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *BackupsCreateStatusUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups create status unprocessable entity response +func (o *BackupsCreateStatusUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsCreateStatusUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsCreateStatusInternalServerErrorCode is the HTTP code returned for type BackupsCreateStatusInternalServerError +const BackupsCreateStatusInternalServerErrorCode int = 500 + +/* +BackupsCreateStatusInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response backupsCreateStatusInternalServerError +*/ +type BackupsCreateStatusInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsCreateStatusInternalServerError creates BackupsCreateStatusInternalServerError with default headers values +func NewBackupsCreateStatusInternalServerError() *BackupsCreateStatusInternalServerError { + + return &BackupsCreateStatusInternalServerError{} +} + +// WithPayload adds the payload to the backups create status internal server error response +func (o *BackupsCreateStatusInternalServerError) WithPayload(payload *models.ErrorResponse) *BackupsCreateStatusInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups create status internal server error response +func (o *BackupsCreateStatusInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsCreateStatusInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_status_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_status_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..adcce8677f071fafee35efdf101bd5e12764309a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_status_urlbuilder.go @@ -0,0 +1,141 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// 
Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// BackupsCreateStatusURL generates an URL for the backups create status operation +type BackupsCreateStatusURL struct { + Backend string + ID string + + Bucket *string + Path *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *BackupsCreateStatusURL) WithBasePath(bp string) *BackupsCreateStatusURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *BackupsCreateStatusURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *BackupsCreateStatusURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/backups/{backend}/{id}" + + backend := o.Backend + if backend != "" { + _path = strings.Replace(_path, "{backend}", backend, -1) + } else { + return nil, errors.New("backend is required on BackupsCreateStatusURL") + } + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on BackupsCreateStatusURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var bucketQ string + if o.Bucket != nil { + bucketQ = *o.Bucket + } + if bucketQ != "" { + qs.Set("bucket", bucketQ) + } + + var pathQ string + if o.Path != nil { + pathQ = *o.Path + } + if pathQ != "" { + qs.Set("path", pathQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *BackupsCreateStatusURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *BackupsCreateStatusURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *BackupsCreateStatusURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on BackupsCreateStatusURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on BackupsCreateStatusURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + 
base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *BackupsCreateStatusURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..11b128adcd9a5dcaa6de8f5c05a37087fd4f12b5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_create_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// BackupsCreateURL generates an URL for the backups create operation +type BackupsCreateURL struct { + Backend string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *BackupsCreateURL) WithBasePath(bp string) *BackupsCreateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *BackupsCreateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *BackupsCreateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/backups/{backend}" + + backend := o.Backend + if backend != "" { + _path = strings.Replace(_path, "{backend}", backend, -1) + } else { + return nil, errors.New("backend is required on BackupsCreateURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *BackupsCreateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *BackupsCreateURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *BackupsCreateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on BackupsCreateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on BackupsCreateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *BackupsCreateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_list.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_list.go new file mode 100644 index 
0000000000000000000000000000000000000000..c22b8c291b2d58b45dc77e9b8426ca2a128d5224 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_list.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsListHandlerFunc turns a function with the right signature into a backups list handler +type BackupsListHandlerFunc func(BackupsListParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn BackupsListHandlerFunc) Handle(params BackupsListParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// BackupsListHandler interface for that can handle valid backups list params +type BackupsListHandler interface { + Handle(BackupsListParams, *models.Principal) middleware.Responder +} + +// NewBackupsList creates a new http.Handler for the backups list operation +func NewBackupsList(ctx *middleware.Context, handler BackupsListHandler) *BackupsList { + return &BackupsList{Context: ctx, Handler: handler} +} + +/* + BackupsList swagger:route GET /backups/{backend} backups backupsList + +# List backups in progress + +[Coming soon] List all backups in progress not implemented yet. 
+*/ +type BackupsList struct { + Context *middleware.Context + Handler BackupsListHandler +} + +func (o *BackupsList) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewBackupsListParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_list_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_list_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..6d64f127c61be2f6beb17e2fce52e244010db5dc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_list_parameters.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewBackupsListParams creates a new BackupsListParams object +// +// There are no default values defined in the spec. +func NewBackupsListParams() BackupsListParams { + + return BackupsListParams{} +} + +// BackupsListParams contains all the bound params for the backups list operation +// typically these are obtained from a http.Request +// +// swagger:parameters backups.list +type BackupsListParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*Backup backend name e.g. filesystem, gcs, s3. + Required: true + In: path + */ + Backend string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewBackupsListParams() beforehand. +func (o *BackupsListParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rBackend, rhkBackend, _ := route.Params.GetOK("backend") + if err := o.bindBackend(rBackend, rhkBackend, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindBackend binds and validates parameter Backend from path. 
+func (o *BackupsListParams) bindBackend(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.Backend = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_list_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_list_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..4e16b062186110b587787179ea3eba4467ff487d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_list_responses.go @@ -0,0 +1,233 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsListOKCode is the HTTP code returned for type BackupsListOK +const BackupsListOKCode int = 200 + +/* +BackupsListOK Existed backups + +swagger:response backupsListOK +*/ +type BackupsListOK struct { + + /* + In: Body + */ + Payload models.BackupListResponse `json:"body,omitempty"` +} + +// NewBackupsListOK creates BackupsListOK with default headers values +func NewBackupsListOK() *BackupsListOK { + + return &BackupsListOK{} +} + +// WithPayload adds the payload to the backups list o k response +func (o *BackupsListOK) WithPayload(payload models.BackupListResponse) *BackupsListOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups list o k response +func (o *BackupsListOK) SetPayload(payload models.BackupListResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsListOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = models.BackupListResponse{} + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// BackupsListUnauthorizedCode is the HTTP code returned for type BackupsListUnauthorized +const BackupsListUnauthorizedCode int = 401 + +/* +BackupsListUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response backupsListUnauthorized +*/ +type BackupsListUnauthorized struct { +} + +// NewBackupsListUnauthorized creates BackupsListUnauthorized with default headers values +func NewBackupsListUnauthorized() *BackupsListUnauthorized { + + return &BackupsListUnauthorized{} +} + +// WriteResponse to the client +func (o *BackupsListUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// BackupsListForbiddenCode is the HTTP code returned for type BackupsListForbidden +const BackupsListForbiddenCode int = 403 + +/* +BackupsListForbidden Forbidden + +swagger:response backupsListForbidden +*/ +type BackupsListForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsListForbidden creates BackupsListForbidden with default headers values +func NewBackupsListForbidden() *BackupsListForbidden { + + return &BackupsListForbidden{} +} + +// WithPayload adds the payload to the backups list forbidden response +func (o *BackupsListForbidden) WithPayload(payload *models.ErrorResponse) *BackupsListForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups list forbidden response +func (o *BackupsListForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsListForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsListUnprocessableEntityCode is the HTTP code returned for type BackupsListUnprocessableEntity +const BackupsListUnprocessableEntityCode int = 422 + +/* +BackupsListUnprocessableEntity Invalid backup list. 
+ +swagger:response backupsListUnprocessableEntity +*/ +type BackupsListUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsListUnprocessableEntity creates BackupsListUnprocessableEntity with default headers values +func NewBackupsListUnprocessableEntity() *BackupsListUnprocessableEntity { + + return &BackupsListUnprocessableEntity{} +} + +// WithPayload adds the payload to the backups list unprocessable entity response +func (o *BackupsListUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *BackupsListUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups list unprocessable entity response +func (o *BackupsListUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsListUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsListInternalServerErrorCode is the HTTP code returned for type BackupsListInternalServerError +const BackupsListInternalServerErrorCode int = 500 + +/* +BackupsListInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response backupsListInternalServerError +*/ +type BackupsListInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsListInternalServerError creates BackupsListInternalServerError with default headers values +func NewBackupsListInternalServerError() *BackupsListInternalServerError { + + return &BackupsListInternalServerError{} +} + +// WithPayload adds the payload to the backups list internal server error response +func (o *BackupsListInternalServerError) WithPayload(payload *models.ErrorResponse) *BackupsListInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups list internal server error response +func (o *BackupsListInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsListInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_list_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_list_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..f51697aeb1967b14caddfefb0c05af232373818f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_list_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// BackupsListURL generates an URL for the backups list operation +type BackupsListURL struct { + Backend string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *BackupsListURL) WithBasePath(bp string) *BackupsListURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *BackupsListURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *BackupsListURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/backups/{backend}" + + backend := o.Backend + if backend != "" { + _path = strings.Replace(_path, "{backend}", backend, -1) + } else { + return nil, errors.New("backend is required on BackupsListURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *BackupsListURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *BackupsListURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a 
full url with scheme, host, path and query string +func (o *BackupsListURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on BackupsListURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on BackupsListURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *BackupsListURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore.go new file mode 100644 index 0000000000000000000000000000000000000000..d7e9b967a06560e87a433ce8d04bf90468dc9a73 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsRestoreHandlerFunc turns a function with the right signature into a backups restore handler +type BackupsRestoreHandlerFunc func(BackupsRestoreParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn BackupsRestoreHandlerFunc) Handle(params BackupsRestoreParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// BackupsRestoreHandler interface for that can handle valid backups restore params +type BackupsRestoreHandler interface { + Handle(BackupsRestoreParams, *models.Principal) middleware.Responder +} + +// NewBackupsRestore creates a new http.Handler for the backups restore operation +func NewBackupsRestore(ctx *middleware.Context, handler BackupsRestoreHandler) *BackupsRestore { + return &BackupsRestore{Context: ctx, Handler: handler} +} + +/* + BackupsRestore swagger:route POST /backups/{backend}/{id}/restore backups backupsRestore + +# Start a restoration process + +Starts a process of restoring a backup for a set of collections.

    Any backup can be restored to any machine, as long as the number of nodes between source and target are identical.

    Requrements:

    - None of the collections to be restored already exist on the target restoration node(s).
    - The node names of the backed-up collections' must match those of the target restoration node(s). +*/ +type BackupsRestore struct { + Context *middleware.Context + Handler BackupsRestoreHandler +} + +func (o *BackupsRestore) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewBackupsRestoreParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..2d6cebfefc98055448465822f185dff1f1d82129 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_parameters.go @@ -0,0 +1,144 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewBackupsRestoreParams creates a new BackupsRestoreParams object +// +// There are no default values defined in the spec. +func NewBackupsRestoreParams() BackupsRestoreParams { + + return BackupsRestoreParams{} +} + +// BackupsRestoreParams contains all the bound params for the backups restore operation +// typically these are obtained from a http.Request +// +// swagger:parameters backups.restore +type BackupsRestoreParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*Backup backend name e.g. `filesystem`, `gcs`, `s3`, `azure`. + Required: true + In: path + */ + Backend string + /* + Required: true + In: body + */ + Body *models.BackupRestoreRequest + /*The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed. + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewBackupsRestoreParams() beforehand. 
+func (o *BackupsRestoreParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rBackend, rhkBackend, _ := route.Params.GetOK("backend") + if err := o.bindBackend(rBackend, rhkBackend, route.Formats); err != nil { + res = append(res, err) + } + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.BackupRestoreRequest + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindBackend binds and validates parameter Backend from path. +func (o *BackupsRestoreParams) bindBackend(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.Backend = raw + + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *BackupsRestoreParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..fec63c9ad8e81691c9e92d260e647bf61155d30e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_responses.go @@ -0,0 +1,275 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsRestoreOKCode is the HTTP code returned for type BackupsRestoreOK +const BackupsRestoreOKCode int = 200 + +/* +BackupsRestoreOK Backup restoration process successfully started. 
+ +swagger:response backupsRestoreOK +*/ +type BackupsRestoreOK struct { + + /* + In: Body + */ + Payload *models.BackupRestoreResponse `json:"body,omitempty"` +} + +// NewBackupsRestoreOK creates BackupsRestoreOK with default headers values +func NewBackupsRestoreOK() *BackupsRestoreOK { + + return &BackupsRestoreOK{} +} + +// WithPayload adds the payload to the backups restore o k response +func (o *BackupsRestoreOK) WithPayload(payload *models.BackupRestoreResponse) *BackupsRestoreOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups restore o k response +func (o *BackupsRestoreOK) SetPayload(payload *models.BackupRestoreResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsRestoreOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsRestoreUnauthorizedCode is the HTTP code returned for type BackupsRestoreUnauthorized +const BackupsRestoreUnauthorizedCode int = 401 + +/* +BackupsRestoreUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response backupsRestoreUnauthorized +*/ +type BackupsRestoreUnauthorized struct { +} + +// NewBackupsRestoreUnauthorized creates BackupsRestoreUnauthorized with default headers values +func NewBackupsRestoreUnauthorized() *BackupsRestoreUnauthorized { + + return &BackupsRestoreUnauthorized{} +} + +// WriteResponse to the client +func (o *BackupsRestoreUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// BackupsRestoreForbiddenCode is the HTTP code returned for type BackupsRestoreForbidden +const BackupsRestoreForbiddenCode int = 403 + +/* +BackupsRestoreForbidden Forbidden + +swagger:response backupsRestoreForbidden +*/ +type BackupsRestoreForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsRestoreForbidden creates BackupsRestoreForbidden with default headers values +func NewBackupsRestoreForbidden() *BackupsRestoreForbidden { + + return &BackupsRestoreForbidden{} +} + +// WithPayload adds the payload to the backups restore forbidden response +func (o *BackupsRestoreForbidden) WithPayload(payload *models.ErrorResponse) *BackupsRestoreForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups restore forbidden response +func (o *BackupsRestoreForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsRestoreForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsRestoreNotFoundCode is the HTTP code returned for type BackupsRestoreNotFound +const BackupsRestoreNotFoundCode int = 404 + +/* 
+BackupsRestoreNotFound Not Found - Backup does not exist + +swagger:response backupsRestoreNotFound +*/ +type BackupsRestoreNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsRestoreNotFound creates BackupsRestoreNotFound with default headers values +func NewBackupsRestoreNotFound() *BackupsRestoreNotFound { + + return &BackupsRestoreNotFound{} +} + +// WithPayload adds the payload to the backups restore not found response +func (o *BackupsRestoreNotFound) WithPayload(payload *models.ErrorResponse) *BackupsRestoreNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups restore not found response +func (o *BackupsRestoreNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsRestoreNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsRestoreUnprocessableEntityCode is the HTTP code returned for type BackupsRestoreUnprocessableEntity +const BackupsRestoreUnprocessableEntityCode int = 422 + +/* +BackupsRestoreUnprocessableEntity Invalid backup restoration attempt. 
+ +swagger:response backupsRestoreUnprocessableEntity +*/ +type BackupsRestoreUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsRestoreUnprocessableEntity creates BackupsRestoreUnprocessableEntity with default headers values +func NewBackupsRestoreUnprocessableEntity() *BackupsRestoreUnprocessableEntity { + + return &BackupsRestoreUnprocessableEntity{} +} + +// WithPayload adds the payload to the backups restore unprocessable entity response +func (o *BackupsRestoreUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *BackupsRestoreUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups restore unprocessable entity response +func (o *BackupsRestoreUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsRestoreUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsRestoreInternalServerErrorCode is the HTTP code returned for type BackupsRestoreInternalServerError +const BackupsRestoreInternalServerErrorCode int = 500 + +/* +BackupsRestoreInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response backupsRestoreInternalServerError +*/ +type BackupsRestoreInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsRestoreInternalServerError creates BackupsRestoreInternalServerError with default headers values +func NewBackupsRestoreInternalServerError() *BackupsRestoreInternalServerError { + + return &BackupsRestoreInternalServerError{} +} + +// WithPayload adds the payload to the backups restore internal server error response +func (o *BackupsRestoreInternalServerError) WithPayload(payload *models.ErrorResponse) *BackupsRestoreInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups restore internal server error response +func (o *BackupsRestoreInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsRestoreInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_status.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_status.go new file mode 100644 index 0000000000000000000000000000000000000000..d546a8af418202e082f230d9966d797a0d5ff217 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_status.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsRestoreStatusHandlerFunc turns a function with the right signature into a backups restore status handler +type BackupsRestoreStatusHandlerFunc func(BackupsRestoreStatusParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn BackupsRestoreStatusHandlerFunc) Handle(params BackupsRestoreStatusParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// BackupsRestoreStatusHandler interface for that can handle valid backups restore status params +type BackupsRestoreStatusHandler interface { + Handle(BackupsRestoreStatusParams, *models.Principal) middleware.Responder +} + +// NewBackupsRestoreStatus creates a new http.Handler for the backups restore status operation +func NewBackupsRestoreStatus(ctx *middleware.Context, handler BackupsRestoreStatusHandler) *BackupsRestoreStatus { + return &BackupsRestoreStatus{Context: ctx, Handler: handler} +} + +/* + BackupsRestoreStatus swagger:route GET /backups/{backend}/{id}/restore backups backupsRestoreStatus + +# Get restore process status + +Returns status of a backup restoration attempt for a set of classes.

    All client implementations have a `wait for completion` option which will poll the backup status in the background and only return once the backup has completed (successfully or unsuccessfully). If you set the `wait for completion` option to false, you can also check the status yourself using the this endpoint. +*/ +type BackupsRestoreStatus struct { + Context *middleware.Context + Handler BackupsRestoreStatusHandler +} + +func (o *BackupsRestoreStatus) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewBackupsRestoreStatusParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_status_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_status_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..82e591419e41d4e64b68a9d502c86002d9cd5b05 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_status_parameters.go @@ -0,0 +1,163 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewBackupsRestoreStatusParams creates a new BackupsRestoreStatusParams object +// +// There are no default values defined in the spec. +func NewBackupsRestoreStatusParams() BackupsRestoreStatusParams { + + return BackupsRestoreStatusParams{} +} + +// BackupsRestoreStatusParams contains all the bound params for the backups restore status operation +// typically these are obtained from a http.Request +// +// swagger:parameters backups.restore.status +type BackupsRestoreStatusParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*Backup backend name e.g. `filesystem`, `gcs`, `s3`, `azure`. + Required: true + In: path + */ + Backend string + /*Name of the bucket, container, volume, etc + In: query + */ + Bucket *string + /*The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed. + Required: true + In: path + */ + ID string + /*The path within the bucket + In: query + */ + Path *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewBackupsRestoreStatusParams() beforehand. 
+func (o *BackupsRestoreStatusParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + rBackend, rhkBackend, _ := route.Params.GetOK("backend") + if err := o.bindBackend(rBackend, rhkBackend, route.Formats); err != nil { + res = append(res, err) + } + + qBucket, qhkBucket, _ := qs.GetOK("bucket") + if err := o.bindBucket(qBucket, qhkBucket, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + qPath, qhkPath, _ := qs.GetOK("path") + if err := o.bindPath(qPath, qhkPath, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindBackend binds and validates parameter Backend from path. +func (o *BackupsRestoreStatusParams) bindBackend(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.Backend = raw + + return nil +} + +// bindBucket binds and validates parameter Bucket from query. +func (o *BackupsRestoreStatusParams) bindBucket(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Bucket = &raw + + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *BackupsRestoreStatusParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} + +// bindPath binds and validates parameter Path from query. +func (o *BackupsRestoreStatusParams) bindPath(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Path = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_status_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_status_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..e46f6ea2d81531f9a23c131689f27f1f03bb0e28 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_status_responses.go @@ -0,0 +1,230 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// BackupsRestoreStatusOKCode is the HTTP code returned for type BackupsRestoreStatusOK +const BackupsRestoreStatusOKCode int = 200 + +/* +BackupsRestoreStatusOK Backup restoration status successfully returned + +swagger:response backupsRestoreStatusOK +*/ +type BackupsRestoreStatusOK struct { + + /* + In: Body + */ + Payload *models.BackupRestoreStatusResponse `json:"body,omitempty"` +} + +// NewBackupsRestoreStatusOK creates BackupsRestoreStatusOK with default headers values +func NewBackupsRestoreStatusOK() *BackupsRestoreStatusOK { + + return &BackupsRestoreStatusOK{} +} + +// WithPayload adds the payload to the backups restore status o k response +func (o *BackupsRestoreStatusOK) WithPayload(payload *models.BackupRestoreStatusResponse) *BackupsRestoreStatusOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups restore status o k response +func (o *BackupsRestoreStatusOK) SetPayload(payload *models.BackupRestoreStatusResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsRestoreStatusOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsRestoreStatusUnauthorizedCode is the HTTP code returned for type BackupsRestoreStatusUnauthorized +const BackupsRestoreStatusUnauthorizedCode int = 401 + +/* +BackupsRestoreStatusUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response backupsRestoreStatusUnauthorized +*/ +type BackupsRestoreStatusUnauthorized struct { +} + +// NewBackupsRestoreStatusUnauthorized creates BackupsRestoreStatusUnauthorized with default headers values +func NewBackupsRestoreStatusUnauthorized() *BackupsRestoreStatusUnauthorized { + + return &BackupsRestoreStatusUnauthorized{} +} + +// WriteResponse to the client +func (o *BackupsRestoreStatusUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// BackupsRestoreStatusForbiddenCode is the HTTP code returned for type BackupsRestoreStatusForbidden +const BackupsRestoreStatusForbiddenCode int = 403 + +/* +BackupsRestoreStatusForbidden Forbidden + +swagger:response backupsRestoreStatusForbidden +*/ +type BackupsRestoreStatusForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsRestoreStatusForbidden creates BackupsRestoreStatusForbidden with default headers values +func NewBackupsRestoreStatusForbidden() *BackupsRestoreStatusForbidden { + + return &BackupsRestoreStatusForbidden{} +} + +// WithPayload adds the payload to the backups restore status forbidden response +func (o *BackupsRestoreStatusForbidden) WithPayload(payload *models.ErrorResponse) *BackupsRestoreStatusForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups restore status forbidden response +func (o *BackupsRestoreStatusForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsRestoreStatusForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// 
BackupsRestoreStatusNotFoundCode is the HTTP code returned for type BackupsRestoreStatusNotFound +const BackupsRestoreStatusNotFoundCode int = 404 + +/* +BackupsRestoreStatusNotFound Not Found - Backup does not exist + +swagger:response backupsRestoreStatusNotFound +*/ +type BackupsRestoreStatusNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsRestoreStatusNotFound creates BackupsRestoreStatusNotFound with default headers values +func NewBackupsRestoreStatusNotFound() *BackupsRestoreStatusNotFound { + + return &BackupsRestoreStatusNotFound{} +} + +// WithPayload adds the payload to the backups restore status not found response +func (o *BackupsRestoreStatusNotFound) WithPayload(payload *models.ErrorResponse) *BackupsRestoreStatusNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups restore status not found response +func (o *BackupsRestoreStatusNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsRestoreStatusNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BackupsRestoreStatusInternalServerErrorCode is the HTTP code returned for type BackupsRestoreStatusInternalServerError +const BackupsRestoreStatusInternalServerErrorCode int = 500 + +/* +BackupsRestoreStatusInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response backupsRestoreStatusInternalServerError +*/ +type BackupsRestoreStatusInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBackupsRestoreStatusInternalServerError creates BackupsRestoreStatusInternalServerError with default headers values +func NewBackupsRestoreStatusInternalServerError() *BackupsRestoreStatusInternalServerError { + + return &BackupsRestoreStatusInternalServerError{} +} + +// WithPayload adds the payload to the backups restore status internal server error response +func (o *BackupsRestoreStatusInternalServerError) WithPayload(payload *models.ErrorResponse) *BackupsRestoreStatusInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the backups restore status internal server error response +func (o *BackupsRestoreStatusInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BackupsRestoreStatusInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_status_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_status_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..c56305faa89b83cd122c859940212c6e9f1b15de --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_status_urlbuilder.go @@ -0,0 +1,141 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// BackupsRestoreStatusURL generates an URL for the backups restore status operation +type BackupsRestoreStatusURL struct { + Backend string + ID string + + Bucket *string + Path *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *BackupsRestoreStatusURL) WithBasePath(bp string) *BackupsRestoreStatusURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *BackupsRestoreStatusURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *BackupsRestoreStatusURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/backups/{backend}/{id}/restore" + + backend := o.Backend + if backend != "" { + _path = strings.Replace(_path, "{backend}", backend, -1) + } else { + return nil, errors.New("backend is required on BackupsRestoreStatusURL") + } + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on BackupsRestoreStatusURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var bucketQ string + if o.Bucket != nil { + bucketQ = *o.Bucket + } + if bucketQ != "" { + qs.Set("bucket", bucketQ) + } + + var pathQ string + if o.Path != nil { + pathQ = *o.Path + } + if pathQ != "" { + qs.Set("path", pathQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *BackupsRestoreStatusURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *BackupsRestoreStatusURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *BackupsRestoreStatusURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on BackupsRestoreStatusURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on BackupsRestoreStatusURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + 
} + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *BackupsRestoreStatusURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..c088b9fb539354b4e7356fb900d2690c7684c4f1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/backups/backups_restore_urlbuilder.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package backups + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// BackupsRestoreURL generates an URL for the backups restore operation +type BackupsRestoreURL struct { + Backend string + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *BackupsRestoreURL) WithBasePath(bp string) *BackupsRestoreURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *BackupsRestoreURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *BackupsRestoreURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/backups/{backend}/{id}/restore" + + backend := o.Backend + if backend != "" { + _path = strings.Replace(_path, "{backend}", backend, -1) + } else { + return nil, errors.New("backend is required on BackupsRestoreURL") + } + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on BackupsRestoreURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *BackupsRestoreURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *BackupsRestoreURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *BackupsRestoreURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on BackupsRestoreURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on BackupsRestoreURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *BackupsRestoreURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_create.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_create.go new file mode 100644 index 0000000000000000000000000000000000000000..7ff4d0f7c28ae2e0dc11158399950bacd4931214 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_create.go @@ -0,0 +1,238 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "encoding/json" + "net/http" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// BatchObjectsCreateHandlerFunc turns a function with the right signature into a batch objects create handler +type BatchObjectsCreateHandlerFunc func(BatchObjectsCreateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn BatchObjectsCreateHandlerFunc) Handle(params BatchObjectsCreateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// BatchObjectsCreateHandler interface for that can handle valid batch objects create params +type BatchObjectsCreateHandler interface { + Handle(BatchObjectsCreateParams, *models.Principal) middleware.Responder +} + +// NewBatchObjectsCreate creates a new http.Handler for the batch objects create operation 
+func NewBatchObjectsCreate(ctx *middleware.Context, handler BatchObjectsCreateHandler) *BatchObjectsCreate { + return &BatchObjectsCreate{Context: ctx, Handler: handler} +} + +/* + BatchObjectsCreate swagger:route POST /batch/objects batch objects batchObjectsCreate + +Creates new Objects based on a Object template as a batch. + +Create new objects in bulk.

    Meta-data and schema values are validated.

    **Note: idempotence of `/batch/objects`**:
    `POST /batch/objects` is idempotent, and will overwrite any existing object given the same id. +*/ +type BatchObjectsCreate struct { + Context *middleware.Context + Handler BatchObjectsCreateHandler +} + +func (o *BatchObjectsCreate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewBatchObjectsCreateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// BatchObjectsCreateBody batch objects create body +// +// swagger:model BatchObjectsCreateBody +type BatchObjectsCreateBody struct { + + // Define which fields need to be returned. Default value is ALL + Fields []*string `json:"fields" yaml:"fields"` + + // objects + Objects []*models.Object `json:"objects" yaml:"objects"` +} + +// Validate validates this batch objects create body +func (o *BatchObjectsCreateBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateFields(formats); err != nil { + res = append(res, err) + } + + if err := o.validateObjects(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var batchObjectsCreateBodyFieldsItemsEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["ALL","class","schema","id","creationTimeUnix"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + batchObjectsCreateBodyFieldsItemsEnum = append(batchObjectsCreateBodyFieldsItemsEnum, v) + } +} + +func (o *BatchObjectsCreateBody) validateFieldsItemsEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, batchObjectsCreateBodyFieldsItemsEnum, true); err != nil { + return err + } + return nil +} + +func (o *BatchObjectsCreateBody) validateFields(formats strfmt.Registry) error { + if swag.IsZero(o.Fields) { // not required + return nil + } + + for i := 0; i < len(o.Fields); i++ { + if swag.IsZero(o.Fields[i]) { // not required + continue + } + + // value enum + if err := o.validateFieldsItemsEnum("body"+"."+"fields"+"."+strconv.Itoa(i), "body", *o.Fields[i]); err != nil { + return err + } + + } + + return nil +} + +func (o *BatchObjectsCreateBody) validateObjects(formats strfmt.Registry) error { + if swag.IsZero(o.Objects) { // not required + return nil + } + + for i := 0; i < len(o.Objects); i++ { + if swag.IsZero(o.Objects[i]) { // not required + continue + } + + if o.Objects[i] != nil { + if err := o.Objects[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "objects" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "objects" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this batch objects create body based on the context it is used +func (o *BatchObjectsCreateBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateObjects(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *BatchObjectsCreateBody) contextValidateObjects(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(o.Objects); i++ { + + if o.Objects[i] != nil { + if err := o.Objects[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("body" + "." + "objects" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("body" + "." + "objects" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (o *BatchObjectsCreateBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *BatchObjectsCreateBody) UnmarshalBinary(b []byte) error { + var res BatchObjectsCreateBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_create_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..d39be203860fafcdedaf53ce2b266aa6149878fc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_create_parameters.go @@ -0,0 +1,123 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ 
/\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewBatchObjectsCreateParams creates a new BatchObjectsCreateParams object +// +// There are no default values defined in the spec. +func NewBatchObjectsCreateParams() BatchObjectsCreateParams { + + return BatchObjectsCreateParams{} +} + +// BatchObjectsCreateParams contains all the bound params for the batch objects create operation +// typically these are obtained from a http.Request +// +// swagger:parameters batch.objects.create +type BatchObjectsCreateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body BatchObjectsCreateBody + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewBatchObjectsCreateParams() beforehand. 
+func (o *BatchObjectsCreateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + if runtime.HasBody(r) { + defer r.Body.Close() + var body BatchObjectsCreateBody + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. 
+func (o *BatchObjectsCreateParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_create_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..ec2f3e0469b9cbbd62d6bff2d7c530c02079a9a9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_create_responses.go @@ -0,0 +1,278 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// BatchObjectsCreateOKCode is the HTTP code returned for type BatchObjectsCreateOK +const BatchObjectsCreateOKCode int = 200 + +/* +BatchObjectsCreateOK Request succeeded, see response body to get detailed information about each batched item. 
+ +swagger:response batchObjectsCreateOK +*/ +type BatchObjectsCreateOK struct { + + /* + In: Body + */ + Payload []*models.ObjectsGetResponse `json:"body,omitempty"` +} + +// NewBatchObjectsCreateOK creates BatchObjectsCreateOK with default headers values +func NewBatchObjectsCreateOK() *BatchObjectsCreateOK { + + return &BatchObjectsCreateOK{} +} + +// WithPayload adds the payload to the batch objects create o k response +func (o *BatchObjectsCreateOK) WithPayload(payload []*models.ObjectsGetResponse) *BatchObjectsCreateOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch objects create o k response +func (o *BatchObjectsCreateOK) SetPayload(payload []*models.ObjectsGetResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchObjectsCreateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = make([]*models.ObjectsGetResponse, 0, 50) + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// BatchObjectsCreateBadRequestCode is the HTTP code returned for type BatchObjectsCreateBadRequest +const BatchObjectsCreateBadRequestCode int = 400 + +/* +BatchObjectsCreateBadRequest Malformed request. 
+ +swagger:response batchObjectsCreateBadRequest +*/ +type BatchObjectsCreateBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBatchObjectsCreateBadRequest creates BatchObjectsCreateBadRequest with default headers values +func NewBatchObjectsCreateBadRequest() *BatchObjectsCreateBadRequest { + + return &BatchObjectsCreateBadRequest{} +} + +// WithPayload adds the payload to the batch objects create bad request response +func (o *BatchObjectsCreateBadRequest) WithPayload(payload *models.ErrorResponse) *BatchObjectsCreateBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch objects create bad request response +func (o *BatchObjectsCreateBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchObjectsCreateBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BatchObjectsCreateUnauthorizedCode is the HTTP code returned for type BatchObjectsCreateUnauthorized +const BatchObjectsCreateUnauthorizedCode int = 401 + +/* +BatchObjectsCreateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response batchObjectsCreateUnauthorized +*/ +type BatchObjectsCreateUnauthorized struct { +} + +// NewBatchObjectsCreateUnauthorized creates BatchObjectsCreateUnauthorized with default headers values +func NewBatchObjectsCreateUnauthorized() *BatchObjectsCreateUnauthorized { + + return &BatchObjectsCreateUnauthorized{} +} + +// WriteResponse to the client +func (o *BatchObjectsCreateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// BatchObjectsCreateForbiddenCode is the HTTP code returned for type BatchObjectsCreateForbidden +const BatchObjectsCreateForbiddenCode int = 403 + +/* +BatchObjectsCreateForbidden Forbidden + +swagger:response batchObjectsCreateForbidden +*/ +type BatchObjectsCreateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBatchObjectsCreateForbidden creates BatchObjectsCreateForbidden with default headers values +func NewBatchObjectsCreateForbidden() *BatchObjectsCreateForbidden { + + return &BatchObjectsCreateForbidden{} +} + +// WithPayload adds the payload to the batch objects create forbidden response +func (o *BatchObjectsCreateForbidden) WithPayload(payload *models.ErrorResponse) *BatchObjectsCreateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch objects create forbidden response +func (o *BatchObjectsCreateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchObjectsCreateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BatchObjectsCreateUnprocessableEntityCode is the HTTP 
code returned for type BatchObjectsCreateUnprocessableEntity +const BatchObjectsCreateUnprocessableEntityCode int = 422 + +/* +BatchObjectsCreateUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? + +swagger:response batchObjectsCreateUnprocessableEntity +*/ +type BatchObjectsCreateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBatchObjectsCreateUnprocessableEntity creates BatchObjectsCreateUnprocessableEntity with default headers values +func NewBatchObjectsCreateUnprocessableEntity() *BatchObjectsCreateUnprocessableEntity { + + return &BatchObjectsCreateUnprocessableEntity{} +} + +// WithPayload adds the payload to the batch objects create unprocessable entity response +func (o *BatchObjectsCreateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *BatchObjectsCreateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch objects create unprocessable entity response +func (o *BatchObjectsCreateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchObjectsCreateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BatchObjectsCreateInternalServerErrorCode is the HTTP code returned for type BatchObjectsCreateInternalServerError +const BatchObjectsCreateInternalServerErrorCode int = 500 + +/* +BatchObjectsCreateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response batchObjectsCreateInternalServerError +*/ +type BatchObjectsCreateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBatchObjectsCreateInternalServerError creates BatchObjectsCreateInternalServerError with default headers values +func NewBatchObjectsCreateInternalServerError() *BatchObjectsCreateInternalServerError { + + return &BatchObjectsCreateInternalServerError{} +} + +// WithPayload adds the payload to the batch objects create internal server error response +func (o *BatchObjectsCreateInternalServerError) WithPayload(payload *models.ErrorResponse) *BatchObjectsCreateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch objects create internal server error response +func (o *BatchObjectsCreateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchObjectsCreateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_create_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_create_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..fddc1a800fd93a8155bf1affc3b034c31ffd7840 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_create_urlbuilder.go @@ -0,0 +1,114 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 
Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// BatchObjectsCreateURL generates an URL for the batch objects create operation +type BatchObjectsCreateURL struct { + ConsistencyLevel *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *BatchObjectsCreateURL) WithBasePath(bp string) *BatchObjectsCreateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *BatchObjectsCreateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *BatchObjectsCreateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/batch/objects" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *BatchObjectsCreateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *BatchObjectsCreateURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *BatchObjectsCreateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on BatchObjectsCreateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on BatchObjectsCreateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *BatchObjectsCreateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_delete.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..c9c8d8f1ecd49bf9cd227c2c4353a00c0b147ac8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_delete.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// BatchObjectsDeleteHandlerFunc turns a function with the right signature into a batch objects delete handler +type BatchObjectsDeleteHandlerFunc func(BatchObjectsDeleteParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn BatchObjectsDeleteHandlerFunc) Handle(params BatchObjectsDeleteParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// BatchObjectsDeleteHandler interface for that can handle valid batch objects delete params +type BatchObjectsDeleteHandler interface { + Handle(BatchObjectsDeleteParams, *models.Principal) middleware.Responder +} + +// NewBatchObjectsDelete creates a new http.Handler for the batch objects delete operation +func NewBatchObjectsDelete(ctx *middleware.Context, handler BatchObjectsDeleteHandler) *BatchObjectsDelete { + return &BatchObjectsDelete{Context: ctx, Handler: handler} +} + +/* + BatchObjectsDelete swagger:route DELETE /batch/objects batch objects batchObjectsDelete + +Deletes 
Objects based on a match filter as a batch. + +Batch delete objects that match a particular filter.

    The request body takes a single `where` filter and will delete all objects matched.

    Note that there is a limit to the number of objects to be deleted at once using this filter, in order to protect against unexpected memory surges and very-long-running requests. The default limit is 10,000 and may be configured by setting the `QUERY_MAXIMUM_RESULTS` environment variable.

    Objects are deleted in the same order that they would be returned in an equivalent Get query. To delete more objects than the limit, run the same query multiple times. +*/ +type BatchObjectsDelete struct { + Context *middleware.Context + Handler BatchObjectsDeleteHandler +} + +func (o *BatchObjectsDelete) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewBatchObjectsDeleteParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_delete_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..646c1e7a29ff76c0a996561df06f15e4939c6862 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_delete_parameters.go @@ -0,0 +1,152 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewBatchObjectsDeleteParams creates a new BatchObjectsDeleteParams object +// +// There are no default values defined in the spec. +func NewBatchObjectsDeleteParams() BatchObjectsDeleteParams { + + return BatchObjectsDeleteParams{} +} + +// BatchObjectsDeleteParams contains all the bound params for the batch objects delete operation +// typically these are obtained from a http.Request +// +// swagger:parameters batch.objects.delete +type BatchObjectsDeleteParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.BatchDelete + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string + /*Specifies the tenant in a request targeting a multi-tenant class + In: query + */ + Tenant *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewBatchObjectsDeleteParams() beforehand. 
+func (o *BatchObjectsDeleteParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.BatchDelete + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + + qTenant, qhkTenant, _ := qs.GetOK("tenant") + if err := o.bindTenant(qTenant, qhkTenant, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. +func (o *BatchObjectsDeleteParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} + +// bindTenant binds and validates parameter Tenant from query. 
+func (o *BatchObjectsDeleteParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Tenant = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_delete_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..d242e3ffc29dde71e08e53312bcc3142214e6b3f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_delete_responses.go @@ -0,0 +1,275 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// BatchObjectsDeleteOKCode is the HTTP code returned for type BatchObjectsDeleteOK +const BatchObjectsDeleteOKCode int = 200 + +/* +BatchObjectsDeleteOK Request succeeded, see response body to get detailed information about each batched item. 
+ +swagger:response batchObjectsDeleteOK +*/ +type BatchObjectsDeleteOK struct { + + /* + In: Body + */ + Payload *models.BatchDeleteResponse `json:"body,omitempty"` +} + +// NewBatchObjectsDeleteOK creates BatchObjectsDeleteOK with default headers values +func NewBatchObjectsDeleteOK() *BatchObjectsDeleteOK { + + return &BatchObjectsDeleteOK{} +} + +// WithPayload adds the payload to the batch objects delete o k response +func (o *BatchObjectsDeleteOK) WithPayload(payload *models.BatchDeleteResponse) *BatchObjectsDeleteOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch objects delete o k response +func (o *BatchObjectsDeleteOK) SetPayload(payload *models.BatchDeleteResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchObjectsDeleteOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BatchObjectsDeleteBadRequestCode is the HTTP code returned for type BatchObjectsDeleteBadRequest +const BatchObjectsDeleteBadRequestCode int = 400 + +/* +BatchObjectsDeleteBadRequest Malformed request. 
+ +swagger:response batchObjectsDeleteBadRequest +*/ +type BatchObjectsDeleteBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBatchObjectsDeleteBadRequest creates BatchObjectsDeleteBadRequest with default headers values +func NewBatchObjectsDeleteBadRequest() *BatchObjectsDeleteBadRequest { + + return &BatchObjectsDeleteBadRequest{} +} + +// WithPayload adds the payload to the batch objects delete bad request response +func (o *BatchObjectsDeleteBadRequest) WithPayload(payload *models.ErrorResponse) *BatchObjectsDeleteBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch objects delete bad request response +func (o *BatchObjectsDeleteBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchObjectsDeleteBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BatchObjectsDeleteUnauthorizedCode is the HTTP code returned for type BatchObjectsDeleteUnauthorized +const BatchObjectsDeleteUnauthorizedCode int = 401 + +/* +BatchObjectsDeleteUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response batchObjectsDeleteUnauthorized +*/ +type BatchObjectsDeleteUnauthorized struct { +} + +// NewBatchObjectsDeleteUnauthorized creates BatchObjectsDeleteUnauthorized with default headers values +func NewBatchObjectsDeleteUnauthorized() *BatchObjectsDeleteUnauthorized { + + return &BatchObjectsDeleteUnauthorized{} +} + +// WriteResponse to the client +func (o *BatchObjectsDeleteUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// BatchObjectsDeleteForbiddenCode is the HTTP code returned for type BatchObjectsDeleteForbidden +const BatchObjectsDeleteForbiddenCode int = 403 + +/* +BatchObjectsDeleteForbidden Forbidden + +swagger:response batchObjectsDeleteForbidden +*/ +type BatchObjectsDeleteForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBatchObjectsDeleteForbidden creates BatchObjectsDeleteForbidden with default headers values +func NewBatchObjectsDeleteForbidden() *BatchObjectsDeleteForbidden { + + return &BatchObjectsDeleteForbidden{} +} + +// WithPayload adds the payload to the batch objects delete forbidden response +func (o *BatchObjectsDeleteForbidden) WithPayload(payload *models.ErrorResponse) *BatchObjectsDeleteForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch objects delete forbidden response +func (o *BatchObjectsDeleteForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchObjectsDeleteForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BatchObjectsDeleteUnprocessableEntityCode is the HTTP 
code returned for type BatchObjectsDeleteUnprocessableEntity +const BatchObjectsDeleteUnprocessableEntityCode int = 422 + +/* +BatchObjectsDeleteUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? + +swagger:response batchObjectsDeleteUnprocessableEntity +*/ +type BatchObjectsDeleteUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBatchObjectsDeleteUnprocessableEntity creates BatchObjectsDeleteUnprocessableEntity with default headers values +func NewBatchObjectsDeleteUnprocessableEntity() *BatchObjectsDeleteUnprocessableEntity { + + return &BatchObjectsDeleteUnprocessableEntity{} +} + +// WithPayload adds the payload to the batch objects delete unprocessable entity response +func (o *BatchObjectsDeleteUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *BatchObjectsDeleteUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch objects delete unprocessable entity response +func (o *BatchObjectsDeleteUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchObjectsDeleteUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BatchObjectsDeleteInternalServerErrorCode is the HTTP code returned for type BatchObjectsDeleteInternalServerError +const BatchObjectsDeleteInternalServerErrorCode int = 500 + +/* +BatchObjectsDeleteInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response batchObjectsDeleteInternalServerError +*/ +type BatchObjectsDeleteInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBatchObjectsDeleteInternalServerError creates BatchObjectsDeleteInternalServerError with default headers values +func NewBatchObjectsDeleteInternalServerError() *BatchObjectsDeleteInternalServerError { + + return &BatchObjectsDeleteInternalServerError{} +} + +// WithPayload adds the payload to the batch objects delete internal server error response +func (o *BatchObjectsDeleteInternalServerError) WithPayload(payload *models.ErrorResponse) *BatchObjectsDeleteInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch objects delete internal server error response +func (o *BatchObjectsDeleteInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchObjectsDeleteInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_delete_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_delete_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..970d826f57b494ead52446a12a8ab6e45f377897 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_objects_delete_urlbuilder.go @@ -0,0 +1,123 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 
Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// BatchObjectsDeleteURL generates an URL for the batch objects delete operation +type BatchObjectsDeleteURL struct { + ConsistencyLevel *string + Tenant *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *BatchObjectsDeleteURL) WithBasePath(bp string) *BatchObjectsDeleteURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *BatchObjectsDeleteURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *BatchObjectsDeleteURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/batch/objects" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + var tenantQ string + if o.Tenant != nil { + tenantQ = *o.Tenant + } + if tenantQ != "" { + qs.Set("tenant", tenantQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *BatchObjectsDeleteURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *BatchObjectsDeleteURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *BatchObjectsDeleteURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on BatchObjectsDeleteURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on BatchObjectsDeleteURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *BatchObjectsDeleteURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_references_create.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_references_create.go new file mode 100644 index 0000000000000000000000000000000000000000..331192ae1738c3eb0d4578ee995caf790815543d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_references_create.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// BatchReferencesCreateHandlerFunc turns a function with the right signature into a batch references create handler +type BatchReferencesCreateHandlerFunc func(BatchReferencesCreateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn BatchReferencesCreateHandlerFunc) Handle(params BatchReferencesCreateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// BatchReferencesCreateHandler interface for that can handle valid batch references create params +type BatchReferencesCreateHandler interface { + Handle(BatchReferencesCreateParams, *models.Principal) middleware.Responder +} + +// NewBatchReferencesCreate creates a new http.Handler for the batch references create operation +func NewBatchReferencesCreate(ctx *middleware.Context, handler BatchReferencesCreateHandler) *BatchReferencesCreate { + return 
&BatchReferencesCreate{Context: ctx, Handler: handler} +} + +/* + BatchReferencesCreate swagger:route POST /batch/references batch references batchReferencesCreate + +Creates new Cross-References between arbitrary classes in bulk. + +Batch create cross-references between collections items (objects or objects) in bulk. +*/ +type BatchReferencesCreate struct { + Context *middleware.Context + Handler BatchReferencesCreateHandler +} + +func (o *BatchReferencesCreate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewBatchReferencesCreateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_references_create_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_references_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..1af82d4de9dba983911f7716a5e82309caa47695 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_references_create_parameters.go @@ -0,0 +1,126 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewBatchReferencesCreateParams creates a new BatchReferencesCreateParams object +// +// There are no default values defined in the spec. +func NewBatchReferencesCreateParams() BatchReferencesCreateParams { + + return BatchReferencesCreateParams{} +} + +// BatchReferencesCreateParams contains all the bound params for the batch references create operation +// typically these are obtained from a http.Request +// +// swagger:parameters batch.references.create +type BatchReferencesCreateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*A list of references to be batched. The ideal size depends on the used database connector. Please see the documentation of the used connector for help + Required: true + In: body + */ + Body []*models.BatchReference + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewBatchReferencesCreateParams() beforehand. 
+func (o *BatchReferencesCreateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + if runtime.HasBody(r) { + defer r.Body.Close() + var body []*models.BatchReference + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + + // validate array of body objects + for i := range body { + if body[i] == nil { + continue + } + if err := body[i].Validate(route.Formats); err != nil { + res = append(res, err) + break + } + } + + if len(res) == 0 { + o.Body = body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. 
+func (o *BatchReferencesCreateParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_references_create_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_references_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..4aaebbb3f9067909862f26eabe8965c42e783e6f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_references_create_responses.go @@ -0,0 +1,278 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// BatchReferencesCreateOKCode is the HTTP code returned for type BatchReferencesCreateOK +const BatchReferencesCreateOKCode int = 200 + +/* +BatchReferencesCreateOK Request Successful. Warning: A successful request does not guarantee that every batched reference was successfully created. Inspect the response body to see which references succeeded and which failed. 
+ +swagger:response batchReferencesCreateOK +*/ +type BatchReferencesCreateOK struct { + + /* + In: Body + */ + Payload []*models.BatchReferenceResponse `json:"body,omitempty"` +} + +// NewBatchReferencesCreateOK creates BatchReferencesCreateOK with default headers values +func NewBatchReferencesCreateOK() *BatchReferencesCreateOK { + + return &BatchReferencesCreateOK{} +} + +// WithPayload adds the payload to the batch references create o k response +func (o *BatchReferencesCreateOK) WithPayload(payload []*models.BatchReferenceResponse) *BatchReferencesCreateOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch references create o k response +func (o *BatchReferencesCreateOK) SetPayload(payload []*models.BatchReferenceResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchReferencesCreateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = make([]*models.BatchReferenceResponse, 0, 50) + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// BatchReferencesCreateBadRequestCode is the HTTP code returned for type BatchReferencesCreateBadRequest +const BatchReferencesCreateBadRequestCode int = 400 + +/* +BatchReferencesCreateBadRequest Malformed request. 
+ +swagger:response batchReferencesCreateBadRequest +*/ +type BatchReferencesCreateBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBatchReferencesCreateBadRequest creates BatchReferencesCreateBadRequest with default headers values +func NewBatchReferencesCreateBadRequest() *BatchReferencesCreateBadRequest { + + return &BatchReferencesCreateBadRequest{} +} + +// WithPayload adds the payload to the batch references create bad request response +func (o *BatchReferencesCreateBadRequest) WithPayload(payload *models.ErrorResponse) *BatchReferencesCreateBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch references create bad request response +func (o *BatchReferencesCreateBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchReferencesCreateBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BatchReferencesCreateUnauthorizedCode is the HTTP code returned for type BatchReferencesCreateUnauthorized +const BatchReferencesCreateUnauthorizedCode int = 401 + +/* +BatchReferencesCreateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response batchReferencesCreateUnauthorized +*/ +type BatchReferencesCreateUnauthorized struct { +} + +// NewBatchReferencesCreateUnauthorized creates BatchReferencesCreateUnauthorized with default headers values +func NewBatchReferencesCreateUnauthorized() *BatchReferencesCreateUnauthorized { + + return &BatchReferencesCreateUnauthorized{} +} + +// WriteResponse to the client +func (o *BatchReferencesCreateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// BatchReferencesCreateForbiddenCode is the HTTP code returned for type BatchReferencesCreateForbidden +const BatchReferencesCreateForbiddenCode int = 403 + +/* +BatchReferencesCreateForbidden Forbidden + +swagger:response batchReferencesCreateForbidden +*/ +type BatchReferencesCreateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBatchReferencesCreateForbidden creates BatchReferencesCreateForbidden with default headers values +func NewBatchReferencesCreateForbidden() *BatchReferencesCreateForbidden { + + return &BatchReferencesCreateForbidden{} +} + +// WithPayload adds the payload to the batch references create forbidden response +func (o *BatchReferencesCreateForbidden) WithPayload(payload *models.ErrorResponse) *BatchReferencesCreateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch references create forbidden response +func (o *BatchReferencesCreateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchReferencesCreateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with 
this + } + } +} + +// BatchReferencesCreateUnprocessableEntityCode is the HTTP code returned for type BatchReferencesCreateUnprocessableEntity +const BatchReferencesCreateUnprocessableEntityCode int = 422 + +/* +BatchReferencesCreateUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? + +swagger:response batchReferencesCreateUnprocessableEntity +*/ +type BatchReferencesCreateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBatchReferencesCreateUnprocessableEntity creates BatchReferencesCreateUnprocessableEntity with default headers values +func NewBatchReferencesCreateUnprocessableEntity() *BatchReferencesCreateUnprocessableEntity { + + return &BatchReferencesCreateUnprocessableEntity{} +} + +// WithPayload adds the payload to the batch references create unprocessable entity response +func (o *BatchReferencesCreateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *BatchReferencesCreateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch references create unprocessable entity response +func (o *BatchReferencesCreateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchReferencesCreateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// BatchReferencesCreateInternalServerErrorCode is the HTTP code returned for type BatchReferencesCreateInternalServerError +const BatchReferencesCreateInternalServerErrorCode int = 500 + +/* +BatchReferencesCreateInternalServerError An error has occurred while trying to 
fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response batchReferencesCreateInternalServerError +*/ +type BatchReferencesCreateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewBatchReferencesCreateInternalServerError creates BatchReferencesCreateInternalServerError with default headers values +func NewBatchReferencesCreateInternalServerError() *BatchReferencesCreateInternalServerError { + + return &BatchReferencesCreateInternalServerError{} +} + +// WithPayload adds the payload to the batch references create internal server error response +func (o *BatchReferencesCreateInternalServerError) WithPayload(payload *models.ErrorResponse) *BatchReferencesCreateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the batch references create internal server error response +func (o *BatchReferencesCreateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *BatchReferencesCreateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_references_create_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_references_create_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..f26a0ac368dff9e5b71546b7803bdb81b5d56710 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/batch/batch_references_create_urlbuilder.go @@ -0,0 +1,114 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ 
\ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package batch + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// BatchReferencesCreateURL generates an URL for the batch references create operation +type BatchReferencesCreateURL struct { + ConsistencyLevel *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *BatchReferencesCreateURL) WithBasePath(bp string) *BatchReferencesCreateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *BatchReferencesCreateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *BatchReferencesCreateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/batch/references" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *BatchReferencesCreateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *BatchReferencesCreateURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *BatchReferencesCreateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on BatchReferencesCreateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on BatchReferencesCreateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *BatchReferencesCreateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_get.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_get.go new file mode 100644 index 0000000000000000000000000000000000000000..11658a86ea46f6a125b06608ee05592a776432cf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_get.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package classifications + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ClassificationsGetHandlerFunc turns a function with the right signature into a classifications get handler +type ClassificationsGetHandlerFunc func(ClassificationsGetParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ClassificationsGetHandlerFunc) Handle(params ClassificationsGetParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ClassificationsGetHandler interface for that can handle valid classifications get params +type ClassificationsGetHandler interface { + Handle(ClassificationsGetParams, *models.Principal) middleware.Responder +} + +// NewClassificationsGet creates a new http.Handler for the classifications get operation +func NewClassificationsGet(ctx *middleware.Context, handler ClassificationsGetHandler) *ClassificationsGet { + return &ClassificationsGet{Context: ctx, Handler: handler} +} + +/* + ClassificationsGet swagger:route GET /classifications/{id} classifications 
classificationsGet + +# View previously created classification + +Get status, results and metadata of a previously created classification +*/ +type ClassificationsGet struct { + Context *middleware.Context + Handler ClassificationsGetHandler +} + +func (o *ClassificationsGet) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewClassificationsGetParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_get_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..3a73433d8295ba7fe2bba2d1e044733fdbd94fa8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_get_parameters.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package classifications + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewClassificationsGetParams creates a new ClassificationsGetParams object +// +// There are no default values defined in the spec. +func NewClassificationsGetParams() ClassificationsGetParams { + + return ClassificationsGetParams{} +} + +// ClassificationsGetParams contains all the bound params for the classifications get operation +// typically these are obtained from a http.Request +// +// swagger:parameters classifications.get +type ClassificationsGetParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*classification id + Required: true + In: path + */ + ID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewClassificationsGetParams() beforehand. +func (o *ClassificationsGetParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *ClassificationsGetParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_get_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..fe2df4a480942b86aebdbbbfcef31f718c939c13 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_get_responses.go @@ -0,0 +1,210 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package classifications + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ClassificationsGetOKCode is the HTTP code returned for type ClassificationsGetOK +const ClassificationsGetOKCode int = 200 + +/* +ClassificationsGetOK Found the classification, returned as body + +swagger:response classificationsGetOK +*/ +type ClassificationsGetOK struct { + + /* + In: Body + */ + Payload *models.Classification `json:"body,omitempty"` +} + +// NewClassificationsGetOK creates ClassificationsGetOK with default headers values +func NewClassificationsGetOK() *ClassificationsGetOK { + + return &ClassificationsGetOK{} +} + +// WithPayload adds the payload to the classifications get o k response +func (o *ClassificationsGetOK) WithPayload(payload *models.Classification) *ClassificationsGetOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the classifications get o k response +func (o *ClassificationsGetOK) SetPayload(payload *models.Classification) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ClassificationsGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ClassificationsGetUnauthorizedCode is the HTTP code returned for type ClassificationsGetUnauthorized +const ClassificationsGetUnauthorizedCode int = 401 + +/* +ClassificationsGetUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response classificationsGetUnauthorized +*/ +type ClassificationsGetUnauthorized struct { +} + +// NewClassificationsGetUnauthorized creates ClassificationsGetUnauthorized with default headers values +func NewClassificationsGetUnauthorized() *ClassificationsGetUnauthorized { + + return &ClassificationsGetUnauthorized{} +} + +// WriteResponse to the client +func (o *ClassificationsGetUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ClassificationsGetForbiddenCode is the HTTP code returned for type ClassificationsGetForbidden +const ClassificationsGetForbiddenCode int = 403 + +/* +ClassificationsGetForbidden Forbidden + +swagger:response classificationsGetForbidden +*/ +type ClassificationsGetForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewClassificationsGetForbidden creates ClassificationsGetForbidden with default headers values +func NewClassificationsGetForbidden() *ClassificationsGetForbidden { + + return &ClassificationsGetForbidden{} +} + +// WithPayload adds the payload to the classifications get forbidden response +func (o *ClassificationsGetForbidden) WithPayload(payload *models.ErrorResponse) *ClassificationsGetForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the classifications get forbidden response +func (o *ClassificationsGetForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ClassificationsGetForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ClassificationsGetNotFoundCode is the HTTP code returned 
for type ClassificationsGetNotFound +const ClassificationsGetNotFoundCode int = 404 + +/* +ClassificationsGetNotFound Not Found - Classification does not exist + +swagger:response classificationsGetNotFound +*/ +type ClassificationsGetNotFound struct { +} + +// NewClassificationsGetNotFound creates ClassificationsGetNotFound with default headers values +func NewClassificationsGetNotFound() *ClassificationsGetNotFound { + + return &ClassificationsGetNotFound{} +} + +// WriteResponse to the client +func (o *ClassificationsGetNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ClassificationsGetInternalServerErrorCode is the HTTP code returned for type ClassificationsGetInternalServerError +const ClassificationsGetInternalServerErrorCode int = 500 + +/* +ClassificationsGetInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response classificationsGetInternalServerError +*/ +type ClassificationsGetInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewClassificationsGetInternalServerError creates ClassificationsGetInternalServerError with default headers values +func NewClassificationsGetInternalServerError() *ClassificationsGetInternalServerError { + + return &ClassificationsGetInternalServerError{} +} + +// WithPayload adds the payload to the classifications get internal server error response +func (o *ClassificationsGetInternalServerError) WithPayload(payload *models.ErrorResponse) *ClassificationsGetInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the classifications get internal server error response +func (o *ClassificationsGetInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ClassificationsGetInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_get_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_get_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..a3ebaee372579ce6d865b6e06ec57ee5cd9bbe07 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_get_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// 
Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package classifications + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// ClassificationsGetURL generates an URL for the classifications get operation +type ClassificationsGetURL struct { + ID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ClassificationsGetURL) WithBasePath(bp string) *ClassificationsGetURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ClassificationsGetURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ClassificationsGetURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/classifications/{id}" + + id := o.ID + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ClassificationsGetURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ClassificationsGetURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ClassificationsGetURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ClassificationsGetURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ClassificationsGetURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ClassificationsGetURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ClassificationsGetURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_post.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_post.go new file mode 
100644 index 0000000000000000000000000000000000000000..e0ebca4463b817a02e6a79c6a54884c6493e8aa3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_post.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package classifications + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ClassificationsPostHandlerFunc turns a function with the right signature into a classifications post handler +type ClassificationsPostHandlerFunc func(ClassificationsPostParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ClassificationsPostHandlerFunc) Handle(params ClassificationsPostParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ClassificationsPostHandler interface for that can handle valid classifications post params +type ClassificationsPostHandler interface { + Handle(ClassificationsPostParams, *models.Principal) middleware.Responder +} + +// NewClassificationsPost creates a new http.Handler for the classifications post operation +func NewClassificationsPost(ctx *middleware.Context, handler ClassificationsPostHandler) *ClassificationsPost { + return &ClassificationsPost{Context: ctx, Handler: handler} +} + +/* + ClassificationsPost swagger:route POST /classifications/ classifications classificationsPost + +Starts a classification. + +Trigger a classification based on the specified params. 
Classifications will run in the background, use GET /classifications/ to retrieve the status of your classification. +*/ +type ClassificationsPost struct { + Context *middleware.Context + Handler ClassificationsPostHandler +} + +func (o *ClassificationsPost) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewClassificationsPostParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_post_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_post_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..ddb5bb47eb38d6f2148272c07f88a3547b808b9a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_post_parameters.go @@ -0,0 +1,95 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package classifications + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewClassificationsPostParams creates a new ClassificationsPostParams object +// +// There are no default values defined in the spec. +func NewClassificationsPostParams() ClassificationsPostParams { + + return ClassificationsPostParams{} +} + +// ClassificationsPostParams contains all the bound params for the classifications post operation +// typically these are obtained from a http.Request +// +// swagger:parameters classifications.post +type ClassificationsPostParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*parameters to start a classification + Required: true + In: body + */ + Params *models.Classification +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewClassificationsPostParams() beforehand. 
+func (o *ClassificationsPostParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.Classification + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("params", "body", "")) + } else { + res = append(res, errors.NewParseError("params", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Params = &body + } + } + } else { + res = append(res, errors.Required("params", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_post_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_post_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..9ab37ba8392303ab8076889ef3a8b0bb72b74f98 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_post_responses.go @@ -0,0 +1,230 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package classifications + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ClassificationsPostCreatedCode is the HTTP code returned for type ClassificationsPostCreated +const ClassificationsPostCreatedCode int = 201 + +/* +ClassificationsPostCreated Successfully started classification. + +swagger:response classificationsPostCreated +*/ +type ClassificationsPostCreated struct { + + /* + In: Body + */ + Payload *models.Classification `json:"body,omitempty"` +} + +// NewClassificationsPostCreated creates ClassificationsPostCreated with default headers values +func NewClassificationsPostCreated() *ClassificationsPostCreated { + + return &ClassificationsPostCreated{} +} + +// WithPayload adds the payload to the classifications post created response +func (o *ClassificationsPostCreated) WithPayload(payload *models.Classification) *ClassificationsPostCreated { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the classifications post created response +func (o *ClassificationsPostCreated) SetPayload(payload *models.Classification) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ClassificationsPostCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(201) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ClassificationsPostBadRequestCode is the HTTP code returned for type ClassificationsPostBadRequest +const ClassificationsPostBadRequestCode int = 400 + +/* +ClassificationsPostBadRequest Incorrect request + +swagger:response classificationsPostBadRequest +*/ +type ClassificationsPostBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewClassificationsPostBadRequest creates 
ClassificationsPostBadRequest with default headers values +func NewClassificationsPostBadRequest() *ClassificationsPostBadRequest { + + return &ClassificationsPostBadRequest{} +} + +// WithPayload adds the payload to the classifications post bad request response +func (o *ClassificationsPostBadRequest) WithPayload(payload *models.ErrorResponse) *ClassificationsPostBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the classifications post bad request response +func (o *ClassificationsPostBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ClassificationsPostBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ClassificationsPostUnauthorizedCode is the HTTP code returned for type ClassificationsPostUnauthorized +const ClassificationsPostUnauthorizedCode int = 401 + +/* +ClassificationsPostUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response classificationsPostUnauthorized +*/ +type ClassificationsPostUnauthorized struct { +} + +// NewClassificationsPostUnauthorized creates ClassificationsPostUnauthorized with default headers values +func NewClassificationsPostUnauthorized() *ClassificationsPostUnauthorized { + + return &ClassificationsPostUnauthorized{} +} + +// WriteResponse to the client +func (o *ClassificationsPostUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ClassificationsPostForbiddenCode is the HTTP code returned for type ClassificationsPostForbidden +const ClassificationsPostForbiddenCode int = 403 + +/* +ClassificationsPostForbidden Forbidden + +swagger:response classificationsPostForbidden +*/ +type ClassificationsPostForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewClassificationsPostForbidden creates ClassificationsPostForbidden with default headers values +func NewClassificationsPostForbidden() *ClassificationsPostForbidden { + + return &ClassificationsPostForbidden{} +} + +// WithPayload adds the payload to the classifications post forbidden response +func (o *ClassificationsPostForbidden) WithPayload(payload *models.ErrorResponse) *ClassificationsPostForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the classifications post forbidden response +func (o *ClassificationsPostForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ClassificationsPostForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// 
ClassificationsPostInternalServerErrorCode is the HTTP code returned for type ClassificationsPostInternalServerError +const ClassificationsPostInternalServerErrorCode int = 500 + +/* +ClassificationsPostInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response classificationsPostInternalServerError +*/ +type ClassificationsPostInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewClassificationsPostInternalServerError creates ClassificationsPostInternalServerError with default headers values +func NewClassificationsPostInternalServerError() *ClassificationsPostInternalServerError { + + return &ClassificationsPostInternalServerError{} +} + +// WithPayload adds the payload to the classifications post internal server error response +func (o *ClassificationsPostInternalServerError) WithPayload(payload *models.ErrorResponse) *ClassificationsPostInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the classifications post internal server error response +func (o *ClassificationsPostInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ClassificationsPostInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_post_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_post_urlbuilder.go new file mode 100644 index 
0000000000000000000000000000000000000000..fd3e5377326490a050fcb4f6fb2e7a8484b27569 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/classifications/classifications_post_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package classifications + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// ClassificationsPostURL generates an URL for the classifications post operation +type ClassificationsPostURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ClassificationsPostURL) WithBasePath(bp string) *ClassificationsPostURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ClassificationsPostURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ClassificationsPostURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/classifications/" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ClassificationsPostURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ClassificationsPostURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ClassificationsPostURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ClassificationsPostURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ClassificationsPostURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ClassificationsPostURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/cluster/cluster_get_statistics.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/cluster/cluster_get_statistics.go new file mode 100644 index 0000000000000000000000000000000000000000..e968be030b54774d9257f7152444843dda93b2fa --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/cluster/cluster_get_statistics.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package cluster + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ClusterGetStatisticsHandlerFunc turns a function with the right signature into a cluster get statistics handler +type ClusterGetStatisticsHandlerFunc func(ClusterGetStatisticsParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ClusterGetStatisticsHandlerFunc) Handle(params ClusterGetStatisticsParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ClusterGetStatisticsHandler interface for that can handle valid cluster get statistics params +type ClusterGetStatisticsHandler interface { + Handle(ClusterGetStatisticsParams, *models.Principal) middleware.Responder +} + +// NewClusterGetStatistics creates a new http.Handler for the cluster get statistics operation +func NewClusterGetStatistics(ctx *middleware.Context, handler ClusterGetStatisticsHandler) *ClusterGetStatistics { + return &ClusterGetStatistics{Context: ctx, Handler: handler} +} + +/* + ClusterGetStatistics swagger:route GET /cluster/statistics cluster clusterGetStatistics + +# See Raft cluster statistics + +Returns Raft cluster statistics of Weaviate DB. 
+*/ +type ClusterGetStatistics struct { + Context *middleware.Context + Handler ClusterGetStatisticsHandler +} + +func (o *ClusterGetStatistics) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewClusterGetStatisticsParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/cluster/cluster_get_statistics_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/cluster/cluster_get_statistics_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..30301b86445d656bfa77a86cd58bab6faace984a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/cluster/cluster_get_statistics_parameters.go @@ -0,0 +1,57 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package cluster + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" +) + +// NewClusterGetStatisticsParams creates a new ClusterGetStatisticsParams object +// +// There are no default values defined in the spec. +func NewClusterGetStatisticsParams() ClusterGetStatisticsParams { + + return ClusterGetStatisticsParams{} +} + +// ClusterGetStatisticsParams contains all the bound params for the cluster get statistics operation +// typically these are obtained from a http.Request +// +// swagger:parameters cluster.get.statistics +type ClusterGetStatisticsParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewClusterGetStatisticsParams() beforehand. +func (o *ClusterGetStatisticsParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/cluster/cluster_get_statistics_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/cluster/cluster_get_statistics_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..13a2a79fddb7aa0d71add9c4b5c441f540346543 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/cluster/cluster_get_statistics_responses.go @@ -0,0 +1,230 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package cluster + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ClusterGetStatisticsOKCode is the HTTP code returned for type ClusterGetStatisticsOK +const ClusterGetStatisticsOKCode int = 200 + +/* +ClusterGetStatisticsOK Cluster statistics successfully returned + +swagger:response clusterGetStatisticsOK +*/ +type ClusterGetStatisticsOK struct { + + /* + In: Body + */ + Payload *models.ClusterStatisticsResponse `json:"body,omitempty"` +} + +// NewClusterGetStatisticsOK creates ClusterGetStatisticsOK with default headers values +func NewClusterGetStatisticsOK() *ClusterGetStatisticsOK { + + return &ClusterGetStatisticsOK{} +} + +// WithPayload adds the payload to the cluster get statistics o k response +func (o *ClusterGetStatisticsOK) WithPayload(payload *models.ClusterStatisticsResponse) *ClusterGetStatisticsOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the cluster get 
statistics o k response +func (o *ClusterGetStatisticsOK) SetPayload(payload *models.ClusterStatisticsResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ClusterGetStatisticsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ClusterGetStatisticsUnauthorizedCode is the HTTP code returned for type ClusterGetStatisticsUnauthorized +const ClusterGetStatisticsUnauthorizedCode int = 401 + +/* +ClusterGetStatisticsUnauthorized Unauthorized or invalid credentials. + +swagger:response clusterGetStatisticsUnauthorized +*/ +type ClusterGetStatisticsUnauthorized struct { +} + +// NewClusterGetStatisticsUnauthorized creates ClusterGetStatisticsUnauthorized with default headers values +func NewClusterGetStatisticsUnauthorized() *ClusterGetStatisticsUnauthorized { + + return &ClusterGetStatisticsUnauthorized{} +} + +// WriteResponse to the client +func (o *ClusterGetStatisticsUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ClusterGetStatisticsForbiddenCode is the HTTP code returned for type ClusterGetStatisticsForbidden +const ClusterGetStatisticsForbiddenCode int = 403 + +/* +ClusterGetStatisticsForbidden Forbidden + +swagger:response clusterGetStatisticsForbidden +*/ +type ClusterGetStatisticsForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewClusterGetStatisticsForbidden creates ClusterGetStatisticsForbidden with default headers values +func NewClusterGetStatisticsForbidden() *ClusterGetStatisticsForbidden { + + return &ClusterGetStatisticsForbidden{} +} + +// WithPayload adds the payload to the cluster get statistics 
forbidden response +func (o *ClusterGetStatisticsForbidden) WithPayload(payload *models.ErrorResponse) *ClusterGetStatisticsForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the cluster get statistics forbidden response +func (o *ClusterGetStatisticsForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ClusterGetStatisticsForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ClusterGetStatisticsUnprocessableEntityCode is the HTTP code returned for type ClusterGetStatisticsUnprocessableEntity +const ClusterGetStatisticsUnprocessableEntityCode int = 422 + +/* +ClusterGetStatisticsUnprocessableEntity Invalid backup restoration status attempt. + +swagger:response clusterGetStatisticsUnprocessableEntity +*/ +type ClusterGetStatisticsUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewClusterGetStatisticsUnprocessableEntity creates ClusterGetStatisticsUnprocessableEntity with default headers values +func NewClusterGetStatisticsUnprocessableEntity() *ClusterGetStatisticsUnprocessableEntity { + + return &ClusterGetStatisticsUnprocessableEntity{} +} + +// WithPayload adds the payload to the cluster get statistics unprocessable entity response +func (o *ClusterGetStatisticsUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ClusterGetStatisticsUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the cluster get statistics unprocessable entity response +func (o *ClusterGetStatisticsUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o 
*ClusterGetStatisticsUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ClusterGetStatisticsInternalServerErrorCode is the HTTP code returned for type ClusterGetStatisticsInternalServerError +const ClusterGetStatisticsInternalServerErrorCode int = 500 + +/* +ClusterGetStatisticsInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response clusterGetStatisticsInternalServerError +*/ +type ClusterGetStatisticsInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewClusterGetStatisticsInternalServerError creates ClusterGetStatisticsInternalServerError with default headers values +func NewClusterGetStatisticsInternalServerError() *ClusterGetStatisticsInternalServerError { + + return &ClusterGetStatisticsInternalServerError{} +} + +// WithPayload adds the payload to the cluster get statistics internal server error response +func (o *ClusterGetStatisticsInternalServerError) WithPayload(payload *models.ErrorResponse) *ClusterGetStatisticsInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the cluster get statistics internal server error response +func (o *ClusterGetStatisticsInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ClusterGetStatisticsInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/cluster/cluster_get_statistics_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/cluster/cluster_get_statistics_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..2a72e79bd8091cad2cc157fc442e245e837125a7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/cluster/cluster_get_statistics_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package cluster + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// ClusterGetStatisticsURL generates an URL for the cluster get statistics operation +type ClusterGetStatisticsURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ClusterGetStatisticsURL) WithBasePath(bp string) *ClusterGetStatisticsURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ClusterGetStatisticsURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ClusterGetStatisticsURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/cluster/statistics" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ClusterGetStatisticsURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ClusterGetStatisticsURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ClusterGetStatisticsURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ClusterGetStatisticsURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ClusterGetStatisticsURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ClusterGetStatisticsURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/distributed_tasks/distributed_tasks_get.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/distributed_tasks/distributed_tasks_get.go new file mode 100644 index 0000000000000000000000000000000000000000..63b8f038b81b434cb6beccd7c4880ad64ef46a83 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/distributed_tasks/distributed_tasks_get.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package distributed_tasks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// DistributedTasksGetHandlerFunc turns a function with the right signature into a distributed tasks get handler +type DistributedTasksGetHandlerFunc func(DistributedTasksGetParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn DistributedTasksGetHandlerFunc) Handle(params DistributedTasksGetParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// DistributedTasksGetHandler interface for that can handle valid distributed tasks get params +type DistributedTasksGetHandler interface { + Handle(DistributedTasksGetParams, *models.Principal) middleware.Responder +} + +// NewDistributedTasksGet creates a new http.Handler for the distributed tasks get operation +func NewDistributedTasksGet(ctx *middleware.Context, handler DistributedTasksGetHandler) *DistributedTasksGet { + return &DistributedTasksGet{Context: ctx, Handler: handler} +} + +/* + DistributedTasksGet swagger:route GET /tasks distributedTasks distributedTasksGet + +Lists all distributed tasks in the cluster. 
+*/ +type DistributedTasksGet struct { + Context *middleware.Context + Handler DistributedTasksGetHandler +} + +func (o *DistributedTasksGet) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewDistributedTasksGetParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/distributed_tasks/distributed_tasks_get_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/distributed_tasks/distributed_tasks_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..a82d690cb19bb1e595c0e9de89f11e83cd90e9cf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/distributed_tasks/distributed_tasks_get_parameters.go @@ -0,0 +1,57 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package distributed_tasks + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" +) + +// NewDistributedTasksGetParams creates a new DistributedTasksGetParams object +// +// There are no default values defined in the spec. +func NewDistributedTasksGetParams() DistributedTasksGetParams { + + return DistributedTasksGetParams{} +} + +// DistributedTasksGetParams contains all the bound params for the distributed tasks get operation +// typically these are obtained from a http.Request +// +// swagger:parameters distributedTasks.get +type DistributedTasksGetParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewDistributedTasksGetParams() beforehand. +func (o *DistributedTasksGetParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/distributed_tasks/distributed_tasks_get_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/distributed_tasks/distributed_tasks_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..f66592853aa3d840c314461c7f77dcbbf61f651d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/distributed_tasks/distributed_tasks_get_responses.go @@ -0,0 +1,163 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package distributed_tasks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// DistributedTasksGetOKCode is the HTTP code returned for type DistributedTasksGetOK +const DistributedTasksGetOKCode int = 200 + +/* +DistributedTasksGetOK Distributed tasks successfully returned + +swagger:response distributedTasksGetOK +*/ +type DistributedTasksGetOK struct { + + /* + In: Body + */ + Payload models.DistributedTasks `json:"body,omitempty"` +} + +// NewDistributedTasksGetOK creates DistributedTasksGetOK with default headers values +func NewDistributedTasksGetOK() *DistributedTasksGetOK { + + return &DistributedTasksGetOK{} +} + +// WithPayload adds the payload to the distributed tasks get o k response +func (o *DistributedTasksGetOK) WithPayload(payload models.DistributedTasks) *DistributedTasksGetOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the distributed tasks 
get o k response +func (o *DistributedTasksGetOK) SetPayload(payload models.DistributedTasks) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DistributedTasksGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty map + payload = models.DistributedTasks{} + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// DistributedTasksGetForbiddenCode is the HTTP code returned for type DistributedTasksGetForbidden +const DistributedTasksGetForbiddenCode int = 403 + +/* +DistributedTasksGetForbidden Unauthorized or invalid credentials. + +swagger:response distributedTasksGetForbidden +*/ +type DistributedTasksGetForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDistributedTasksGetForbidden creates DistributedTasksGetForbidden with default headers values +func NewDistributedTasksGetForbidden() *DistributedTasksGetForbidden { + + return &DistributedTasksGetForbidden{} +} + +// WithPayload adds the payload to the distributed tasks get forbidden response +func (o *DistributedTasksGetForbidden) WithPayload(payload *models.ErrorResponse) *DistributedTasksGetForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the distributed tasks get forbidden response +func (o *DistributedTasksGetForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DistributedTasksGetForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DistributedTasksGetInternalServerErrorCode is the HTTP code returned for type 
DistributedTasksGetInternalServerError +const DistributedTasksGetInternalServerErrorCode int = 500 + +/* +DistributedTasksGetInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response distributedTasksGetInternalServerError +*/ +type DistributedTasksGetInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDistributedTasksGetInternalServerError creates DistributedTasksGetInternalServerError with default headers values +func NewDistributedTasksGetInternalServerError() *DistributedTasksGetInternalServerError { + + return &DistributedTasksGetInternalServerError{} +} + +// WithPayload adds the payload to the distributed tasks get internal server error response +func (o *DistributedTasksGetInternalServerError) WithPayload(payload *models.ErrorResponse) *DistributedTasksGetInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the distributed tasks get internal server error response +func (o *DistributedTasksGetInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DistributedTasksGetInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/distributed_tasks/distributed_tasks_get_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/distributed_tasks/distributed_tasks_get_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..1e680d117a1cd47c0e6e16b394a10b9989c4a883 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/distributed_tasks/distributed_tasks_get_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package distributed_tasks + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// DistributedTasksGetURL generates an URL for the distributed tasks get operation +type DistributedTasksGetURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *DistributedTasksGetURL) WithBasePath(bp string) *DistributedTasksGetURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *DistributedTasksGetURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *DistributedTasksGetURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/tasks" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *DistributedTasksGetURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *DistributedTasksGetURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *DistributedTasksGetURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on DistributedTasksGetURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on DistributedTasksGetURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *DistributedTasksGetURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_batch.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_batch.go new file mode 100644 index 0000000000000000000000000000000000000000..56299fb61323fc73394e177ac55a736e6894b8f0 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_batch.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package graphql + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// GraphqlBatchHandlerFunc turns a function with the right signature into a graphql batch handler +type GraphqlBatchHandlerFunc func(GraphqlBatchParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GraphqlBatchHandlerFunc) Handle(params GraphqlBatchParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GraphqlBatchHandler interface for that can handle valid graphql batch params +type GraphqlBatchHandler interface { + Handle(GraphqlBatchParams, *models.Principal) middleware.Responder +} + +// NewGraphqlBatch creates a new http.Handler for the graphql batch operation +func NewGraphqlBatch(ctx *middleware.Context, handler GraphqlBatchHandler) *GraphqlBatch { + return &GraphqlBatch{Context: ctx, Handler: handler} +} + +/* + GraphqlBatch swagger:route POST /graphql/batch graphql graphqlBatch + +Get a response based on GraphQL. 
+ +Perform a batched GraphQL query +*/ +type GraphqlBatch struct { + Context *middleware.Context + Handler GraphqlBatchHandler +} + +func (o *GraphqlBatch) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGraphqlBatchParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_batch_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_batch_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..52b4ff24940e004acea7a08bc5cc34410fcb27e0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_batch_parameters.go @@ -0,0 +1,95 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package graphql + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewGraphqlBatchParams creates a new GraphqlBatchParams object +// +// There are no default values defined in the spec. +func NewGraphqlBatchParams() GraphqlBatchParams { + + return GraphqlBatchParams{} +} + +// GraphqlBatchParams contains all the bound params for the graphql batch operation +// typically these are obtained from a http.Request +// +// swagger:parameters graphql.batch +type GraphqlBatchParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*The GraphQL queries. + Required: true + In: body + */ + Body models.GraphQLQueries +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGraphqlBatchParams() beforehand. 
+func (o *GraphqlBatchParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.GraphQLQueries + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_batch_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_batch_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..68056bc2286c9fdb054b5d456f4ea88c2254d175 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_batch_responses.go @@ -0,0 +1,233 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package graphql + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GraphqlBatchOKCode is the HTTP code returned for type GraphqlBatchOK +const GraphqlBatchOKCode int = 200 + +/* +GraphqlBatchOK Successful query (with select). + +swagger:response graphqlBatchOK +*/ +type GraphqlBatchOK struct { + + /* + In: Body + */ + Payload models.GraphQLResponses `json:"body,omitempty"` +} + +// NewGraphqlBatchOK creates GraphqlBatchOK with default headers values +func NewGraphqlBatchOK() *GraphqlBatchOK { + + return &GraphqlBatchOK{} +} + +// WithPayload adds the payload to the graphql batch o k response +func (o *GraphqlBatchOK) WithPayload(payload models.GraphQLResponses) *GraphqlBatchOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the graphql batch o k response +func (o *GraphqlBatchOK) SetPayload(payload models.GraphQLResponses) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GraphqlBatchOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = models.GraphQLResponses{} + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// GraphqlBatchUnauthorizedCode is the HTTP code returned for type GraphqlBatchUnauthorized +const GraphqlBatchUnauthorizedCode int = 401 + +/* +GraphqlBatchUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response graphqlBatchUnauthorized +*/ +type GraphqlBatchUnauthorized struct { +} + +// NewGraphqlBatchUnauthorized creates GraphqlBatchUnauthorized with default headers values +func NewGraphqlBatchUnauthorized() *GraphqlBatchUnauthorized { + + return &GraphqlBatchUnauthorized{} +} + +// WriteResponse to the client +func (o *GraphqlBatchUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// GraphqlBatchForbiddenCode is the HTTP code returned for type GraphqlBatchForbidden +const GraphqlBatchForbiddenCode int = 403 + +/* +GraphqlBatchForbidden Forbidden + +swagger:response graphqlBatchForbidden +*/ +type GraphqlBatchForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGraphqlBatchForbidden creates GraphqlBatchForbidden with default headers values +func NewGraphqlBatchForbidden() *GraphqlBatchForbidden { + + return &GraphqlBatchForbidden{} +} + +// WithPayload adds the payload to the graphql batch forbidden response +func (o *GraphqlBatchForbidden) WithPayload(payload *models.ErrorResponse) *GraphqlBatchForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the graphql batch forbidden response +func (o *GraphqlBatchForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GraphqlBatchForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GraphqlBatchUnprocessableEntityCode is the HTTP code returned for type GraphqlBatchUnprocessableEntity +const GraphqlBatchUnprocessableEntityCode int = 422 + +/* +GraphqlBatchUnprocessableEntity Request body 
is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? + +swagger:response graphqlBatchUnprocessableEntity +*/ +type GraphqlBatchUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGraphqlBatchUnprocessableEntity creates GraphqlBatchUnprocessableEntity with default headers values +func NewGraphqlBatchUnprocessableEntity() *GraphqlBatchUnprocessableEntity { + + return &GraphqlBatchUnprocessableEntity{} +} + +// WithPayload adds the payload to the graphql batch unprocessable entity response +func (o *GraphqlBatchUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *GraphqlBatchUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the graphql batch unprocessable entity response +func (o *GraphqlBatchUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GraphqlBatchUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GraphqlBatchInternalServerErrorCode is the HTTP code returned for type GraphqlBatchInternalServerError +const GraphqlBatchInternalServerErrorCode int = 500 + +/* +GraphqlBatchInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response graphqlBatchInternalServerError +*/ +type GraphqlBatchInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGraphqlBatchInternalServerError creates GraphqlBatchInternalServerError with default headers values +func NewGraphqlBatchInternalServerError() *GraphqlBatchInternalServerError { + + return &GraphqlBatchInternalServerError{} +} + +// WithPayload adds the payload to the graphql batch internal server error response +func (o *GraphqlBatchInternalServerError) WithPayload(payload *models.ErrorResponse) *GraphqlBatchInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the graphql batch internal server error response +func (o *GraphqlBatchInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GraphqlBatchInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_batch_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_batch_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..682a6291eb94462a9179c52fe07abe8570e6c96a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_batch_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package graphql + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// GraphqlBatchURL generates an URL for the graphql batch operation +type GraphqlBatchURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GraphqlBatchURL) WithBasePath(bp string) *GraphqlBatchURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GraphqlBatchURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GraphqlBatchURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/graphql/batch" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GraphqlBatchURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GraphqlBatchURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GraphqlBatchURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GraphqlBatchURL") + } + if host == "" 
{ + return nil, errors.New("host is required for a full url on GraphqlBatchURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GraphqlBatchURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_post.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_post.go new file mode 100644 index 0000000000000000000000000000000000000000..a40d9e5652034efd6f1d079e341184c8c6e3617d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_post.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package graphql + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// GraphqlPostHandlerFunc turns a function with the right signature into a graphql post handler +type GraphqlPostHandlerFunc func(GraphqlPostParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GraphqlPostHandlerFunc) Handle(params GraphqlPostParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GraphqlPostHandler interface for that can handle valid graphql post params +type GraphqlPostHandler interface { + Handle(GraphqlPostParams, *models.Principal) middleware.Responder +} + +// NewGraphqlPost creates a new http.Handler for the graphql post operation +func NewGraphqlPost(ctx *middleware.Context, handler GraphqlPostHandler) *GraphqlPost { + return &GraphqlPost{Context: ctx, Handler: handler} +} + +/* + GraphqlPost swagger:route POST /graphql graphql graphqlPost + +# Get a response based on GraphQL + +Get a response based on a GraphQL query +*/ +type GraphqlPost struct { + Context *middleware.Context + Handler GraphqlPostHandler +} + +func (o *GraphqlPost) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGraphqlPostParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually 
handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_post_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_post_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..8b18e6e59391d49fd33df9f122533fe145f6461f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_post_parameters.go @@ -0,0 +1,95 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package graphql + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewGraphqlPostParams creates a new GraphqlPostParams object +// +// There are no default values defined in the spec. +func NewGraphqlPostParams() GraphqlPostParams { + + return GraphqlPostParams{} +} + +// GraphqlPostParams contains all the bound params for the graphql post operation +// typically these are obtained from a http.Request +// +// swagger:parameters graphql.post +type GraphqlPostParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*The GraphQL query request parameters. 
+ Required: true + In: body + */ + Body *models.GraphQLQuery +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGraphqlPostParams() beforehand. +func (o *GraphqlPostParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.GraphQLQuery + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_post_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_post_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..ecb12486be8e7ba7e22526326d151be608cded88 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_post_responses.go @@ -0,0 +1,230 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package graphql + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GraphqlPostOKCode is the HTTP code returned for type GraphqlPostOK +const GraphqlPostOKCode int = 200 + +/* +GraphqlPostOK Successful query (with select). 
+ +swagger:response graphqlPostOK +*/ +type GraphqlPostOK struct { + + /* + In: Body + */ + Payload *models.GraphQLResponse `json:"body,omitempty"` +} + +// NewGraphqlPostOK creates GraphqlPostOK with default headers values +func NewGraphqlPostOK() *GraphqlPostOK { + + return &GraphqlPostOK{} +} + +// WithPayload adds the payload to the graphql post o k response +func (o *GraphqlPostOK) WithPayload(payload *models.GraphQLResponse) *GraphqlPostOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the graphql post o k response +func (o *GraphqlPostOK) SetPayload(payload *models.GraphQLResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GraphqlPostOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GraphqlPostUnauthorizedCode is the HTTP code returned for type GraphqlPostUnauthorized +const GraphqlPostUnauthorizedCode int = 401 + +/* +GraphqlPostUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response graphqlPostUnauthorized +*/ +type GraphqlPostUnauthorized struct { +} + +// NewGraphqlPostUnauthorized creates GraphqlPostUnauthorized with default headers values +func NewGraphqlPostUnauthorized() *GraphqlPostUnauthorized { + + return &GraphqlPostUnauthorized{} +} + +// WriteResponse to the client +func (o *GraphqlPostUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// GraphqlPostForbiddenCode is the HTTP code returned for type GraphqlPostForbidden +const GraphqlPostForbiddenCode int = 403 + +/* +GraphqlPostForbidden Forbidden + +swagger:response graphqlPostForbidden +*/ +type GraphqlPostForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGraphqlPostForbidden creates GraphqlPostForbidden with default headers values +func NewGraphqlPostForbidden() *GraphqlPostForbidden { + + return &GraphqlPostForbidden{} +} + +// WithPayload adds the payload to the graphql post forbidden response +func (o *GraphqlPostForbidden) WithPayload(payload *models.ErrorResponse) *GraphqlPostForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the graphql post forbidden response +func (o *GraphqlPostForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GraphqlPostForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GraphqlPostUnprocessableEntityCode is the HTTP code returned for type GraphqlPostUnprocessableEntity +const GraphqlPostUnprocessableEntityCode int = 422 + +/* +GraphqlPostUnprocessableEntity Request body is well-formed (i.e., 
syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? + +swagger:response graphqlPostUnprocessableEntity +*/ +type GraphqlPostUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGraphqlPostUnprocessableEntity creates GraphqlPostUnprocessableEntity with default headers values +func NewGraphqlPostUnprocessableEntity() *GraphqlPostUnprocessableEntity { + + return &GraphqlPostUnprocessableEntity{} +} + +// WithPayload adds the payload to the graphql post unprocessable entity response +func (o *GraphqlPostUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *GraphqlPostUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the graphql post unprocessable entity response +func (o *GraphqlPostUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GraphqlPostUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GraphqlPostInternalServerErrorCode is the HTTP code returned for type GraphqlPostInternalServerError +const GraphqlPostInternalServerErrorCode int = 500 + +/* +GraphqlPostInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response graphqlPostInternalServerError +*/ +type GraphqlPostInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGraphqlPostInternalServerError creates GraphqlPostInternalServerError with default headers values +func NewGraphqlPostInternalServerError() *GraphqlPostInternalServerError { + + return &GraphqlPostInternalServerError{} +} + +// WithPayload adds the payload to the graphql post internal server error response +func (o *GraphqlPostInternalServerError) WithPayload(payload *models.ErrorResponse) *GraphqlPostInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the graphql post internal server error response +func (o *GraphqlPostInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GraphqlPostInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_post_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_post_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..550ae7696f0f475bcb8c203d1163cd31270cc1a8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/graphql/graphql_post_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package graphql + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// GraphqlPostURL generates an URL for the graphql post operation +type GraphqlPostURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GraphqlPostURL) WithBasePath(bp string) *GraphqlPostURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GraphqlPostURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GraphqlPostURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/graphql" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GraphqlPostURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GraphqlPostURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GraphqlPostURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GraphqlPostURL") + } + if host == "" { + return nil, 
errors.New("host is required for a full url on GraphqlPostURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GraphqlPostURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/meta/meta_get.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/meta/meta_get.go new file mode 100644 index 0000000000000000000000000000000000000000..043fe4ca366135fe901d885b08c9db6c37f8803b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/meta/meta_get.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package meta + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// MetaGetHandlerFunc turns a function with the right signature into a meta get handler +type MetaGetHandlerFunc func(MetaGetParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn MetaGetHandlerFunc) Handle(params MetaGetParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// MetaGetHandler interface for that can handle valid meta get params +type MetaGetHandler interface { + Handle(MetaGetParams, *models.Principal) middleware.Responder +} + +// NewMetaGet creates a new http.Handler for the meta get operation +func NewMetaGet(ctx *middleware.Context, handler MetaGetHandler) *MetaGet { + return &MetaGet{Context: ctx, Handler: handler} +} + +/* + MetaGet swagger:route GET /meta meta metaGet + +Returns meta information of the current Weaviate instance. + +Returns meta information about the server. Can be used to provide information to another Weaviate instance that wants to interact with the current instance. 
+*/ +type MetaGet struct { + Context *middleware.Context + Handler MetaGetHandler +} + +func (o *MetaGet) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewMetaGetParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/meta/meta_get_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/meta/meta_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..b176291a29461dff92e93b0978f239c673a9cab3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/meta/meta_get_parameters.go @@ -0,0 +1,57 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package meta + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" +) + +// NewMetaGetParams creates a new MetaGetParams object +// +// There are no default values defined in the spec. +func NewMetaGetParams() MetaGetParams { + + return MetaGetParams{} +} + +// MetaGetParams contains all the bound params for the meta get operation +// typically these are obtained from a http.Request +// +// swagger:parameters meta.get +type MetaGetParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewMetaGetParams() beforehand. +func (o *MetaGetParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/meta/meta_get_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/meta/meta_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..8ebe4beeb3f52e808ca6855532e7f7970097a2c7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/meta/meta_get_responses.go @@ -0,0 +1,185 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package meta + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// MetaGetOKCode is the HTTP code returned for type MetaGetOK +const MetaGetOKCode int = 200 + +/* +MetaGetOK Successful response. + +swagger:response metaGetOK +*/ +type MetaGetOK struct { + + /* + In: Body + */ + Payload *models.Meta `json:"body,omitempty"` +} + +// NewMetaGetOK creates MetaGetOK with default headers values +func NewMetaGetOK() *MetaGetOK { + + return &MetaGetOK{} +} + +// WithPayload adds the payload to the meta get o k response +func (o *MetaGetOK) WithPayload(payload *models.Meta) *MetaGetOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the meta get o k response +func (o *MetaGetOK) SetPayload(payload *models.Meta) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *MetaGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// MetaGetUnauthorizedCode is the HTTP code returned for type MetaGetUnauthorized +const MetaGetUnauthorizedCode int = 401 + +/* +MetaGetUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response metaGetUnauthorized +*/ +type MetaGetUnauthorized struct { +} + +// NewMetaGetUnauthorized creates MetaGetUnauthorized with default headers values +func NewMetaGetUnauthorized() *MetaGetUnauthorized { + + return &MetaGetUnauthorized{} +} + +// WriteResponse to the client +func (o *MetaGetUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// MetaGetForbiddenCode is the HTTP code returned for type MetaGetForbidden +const MetaGetForbiddenCode int = 403 + +/* +MetaGetForbidden Forbidden + +swagger:response metaGetForbidden +*/ +type MetaGetForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewMetaGetForbidden creates MetaGetForbidden with default headers values +func NewMetaGetForbidden() *MetaGetForbidden { + + return &MetaGetForbidden{} +} + +// WithPayload adds the payload to the meta get forbidden response +func (o *MetaGetForbidden) WithPayload(payload *models.ErrorResponse) *MetaGetForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the meta get forbidden response +func (o *MetaGetForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *MetaGetForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// MetaGetInternalServerErrorCode is the HTTP code returned for type MetaGetInternalServerError +const MetaGetInternalServerErrorCode int = 500 + +/* +MetaGetInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response metaGetInternalServerError +*/ +type MetaGetInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewMetaGetInternalServerError creates MetaGetInternalServerError with default headers values +func NewMetaGetInternalServerError() *MetaGetInternalServerError { + + return &MetaGetInternalServerError{} +} + +// WithPayload adds the payload to the meta get internal server error response +func (o *MetaGetInternalServerError) WithPayload(payload *models.ErrorResponse) *MetaGetInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the meta get internal server error response +func (o *MetaGetInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *MetaGetInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/meta/meta_get_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/meta/meta_get_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..e60ef8d08e7faf33225318356187a74b5a590895 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/meta/meta_get_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package meta + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// MetaGetURL generates an URL for the meta get operation +type MetaGetURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *MetaGetURL) WithBasePath(bp string) *MetaGetURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *MetaGetURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *MetaGetURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/meta" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *MetaGetURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *MetaGetURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *MetaGetURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on MetaGetURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on MetaGetURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// 
StringFull returns the string representation of a complete url +func (o *MetaGetURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get.go new file mode 100644 index 0000000000000000000000000000000000000000..75b10ff760c3d26468d98e29973e4b737d0e6a5d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package nodes + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// NodesGetHandlerFunc turns a function with the right signature into a nodes get handler +type NodesGetHandlerFunc func(NodesGetParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn NodesGetHandlerFunc) Handle(params NodesGetParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// NodesGetHandler interface for that can handle valid nodes get params +type NodesGetHandler interface { + Handle(NodesGetParams, *models.Principal) middleware.Responder +} + +// NewNodesGet creates a new http.Handler for the nodes get operation +func NewNodesGet(ctx *middleware.Context, handler NodesGetHandler) *NodesGet { + return &NodesGet{Context: ctx, Handler: handler} +} + +/* + NodesGet swagger:route GET /nodes nodes nodesGet + +Node information for the database. + +Returns node information for the entire database. 
+*/ +type NodesGet struct { + Context *middleware.Context + Handler NodesGetHandler +} + +func (o *NodesGet) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewNodesGetParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_class.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_class.go new file mode 100644 index 0000000000000000000000000000000000000000..25790b3fe109639bd5b4daffcf4ae44652f16fb8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_class.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package nodes + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// NodesGetClassHandlerFunc turns a function with the right signature into a nodes get class handler +type NodesGetClassHandlerFunc func(NodesGetClassParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn NodesGetClassHandlerFunc) Handle(params NodesGetClassParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// NodesGetClassHandler interface for that can handle valid nodes get class params +type NodesGetClassHandler interface { + Handle(NodesGetClassParams, *models.Principal) middleware.Responder +} + +// NewNodesGetClass creates a new http.Handler for the nodes get class operation +func NewNodesGetClass(ctx *middleware.Context, handler NodesGetClassHandler) *NodesGetClass { + return &NodesGetClass{Context: ctx, Handler: handler} +} + +/* + NodesGetClass swagger:route GET /nodes/{className} nodes nodesGetClass + +Node information for a collection. + +Returns node information for the nodes relevant to the collection. 
+*/ +type NodesGetClass struct { + Context *middleware.Context + Handler NodesGetClassHandler +} + +func (o *NodesGetClass) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewNodesGetClassParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_class_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_class_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..62c5598f80eac7ef45157188abbe837a58c4bcec --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_class_parameters.go @@ -0,0 +1,148 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package nodes + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewNodesGetClassParams creates a new NodesGetClassParams object +// with the default values initialized. +func NewNodesGetClassParams() NodesGetClassParams { + + var ( + // initialize parameters with default values + + outputDefault = string("minimal") + ) + + return NodesGetClassParams{ + Output: &outputDefault, + } +} + +// NodesGetClassParams contains all the bound params for the nodes get class operation +// typically these are obtained from a http.Request +// +// swagger:parameters nodes.get.class +type NodesGetClassParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClassName string + /*Controls the verbosity of the output, possible values are: "minimal", "verbose". Defaults to "minimal". + In: query + Default: "minimal" + */ + Output *string + /* + In: query + */ + ShardName *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewNodesGetClassParams() beforehand. 
+func (o *NodesGetClassParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + qOutput, qhkOutput, _ := qs.GetOK("output") + if err := o.bindOutput(qOutput, qhkOutput, route.Formats); err != nil { + res = append(res, err) + } + + qShardName, qhkShardName, _ := qs.GetOK("shardName") + if err := o.bindShardName(qShardName, qhkShardName, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. +func (o *NodesGetClassParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindOutput binds and validates parameter Output from query. +func (o *NodesGetClassParams) bindOutput(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + // Default values have been previously initialized by NewNodesGetClassParams() + return nil + } + o.Output = &raw + + return nil +} + +// bindShardName binds and validates parameter ShardName from query. 
+func (o *NodesGetClassParams) bindShardName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ShardName = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_class_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_class_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..2b27cc4c59694477b23ef6fe7076431300a03536 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_class_responses.go @@ -0,0 +1,275 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package nodes + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// NodesGetClassOKCode is the HTTP code returned for type NodesGetClassOK +const NodesGetClassOKCode int = 200 + +/* +NodesGetClassOK Nodes status successfully returned + +swagger:response nodesGetClassOK +*/ +type NodesGetClassOK struct { + + /* + In: Body + */ + Payload *models.NodesStatusResponse `json:"body,omitempty"` +} + +// NewNodesGetClassOK creates NodesGetClassOK with default headers values +func NewNodesGetClassOK() *NodesGetClassOK { + + return &NodesGetClassOK{} +} + +// WithPayload adds the payload to the nodes get class o k response +func (o *NodesGetClassOK) WithPayload(payload *models.NodesStatusResponse) *NodesGetClassOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the nodes get class o k response +func (o *NodesGetClassOK) SetPayload(payload *models.NodesStatusResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *NodesGetClassOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// NodesGetClassUnauthorizedCode is the HTTP code returned for type NodesGetClassUnauthorized +const NodesGetClassUnauthorizedCode int = 401 + +/* +NodesGetClassUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response nodesGetClassUnauthorized +*/ +type NodesGetClassUnauthorized struct { +} + +// NewNodesGetClassUnauthorized creates NodesGetClassUnauthorized with default headers values +func NewNodesGetClassUnauthorized() *NodesGetClassUnauthorized { + + return &NodesGetClassUnauthorized{} +} + +// WriteResponse to the client +func (o *NodesGetClassUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// NodesGetClassForbiddenCode is the HTTP code returned for type NodesGetClassForbidden +const NodesGetClassForbiddenCode int = 403 + +/* +NodesGetClassForbidden Forbidden + +swagger:response nodesGetClassForbidden +*/ +type NodesGetClassForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewNodesGetClassForbidden creates NodesGetClassForbidden with default headers values +func NewNodesGetClassForbidden() *NodesGetClassForbidden { + + return &NodesGetClassForbidden{} +} + +// WithPayload adds the payload to the nodes get class forbidden response +func (o *NodesGetClassForbidden) WithPayload(payload *models.ErrorResponse) *NodesGetClassForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the nodes get class forbidden response +func (o *NodesGetClassForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *NodesGetClassForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// NodesGetClassNotFoundCode is the HTTP code returned for type NodesGetClassNotFound +const NodesGetClassNotFoundCode int = 404 + +/* +NodesGetClassNotFound Not Found - Backup does 
not exist + +swagger:response nodesGetClassNotFound +*/ +type NodesGetClassNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewNodesGetClassNotFound creates NodesGetClassNotFound with default headers values +func NewNodesGetClassNotFound() *NodesGetClassNotFound { + + return &NodesGetClassNotFound{} +} + +// WithPayload adds the payload to the nodes get class not found response +func (o *NodesGetClassNotFound) WithPayload(payload *models.ErrorResponse) *NodesGetClassNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the nodes get class not found response +func (o *NodesGetClassNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *NodesGetClassNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// NodesGetClassUnprocessableEntityCode is the HTTP code returned for type NodesGetClassUnprocessableEntity +const NodesGetClassUnprocessableEntityCode int = 422 + +/* +NodesGetClassUnprocessableEntity Invalid backup restoration status attempt. 
+ +swagger:response nodesGetClassUnprocessableEntity +*/ +type NodesGetClassUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewNodesGetClassUnprocessableEntity creates NodesGetClassUnprocessableEntity with default headers values +func NewNodesGetClassUnprocessableEntity() *NodesGetClassUnprocessableEntity { + + return &NodesGetClassUnprocessableEntity{} +} + +// WithPayload adds the payload to the nodes get class unprocessable entity response +func (o *NodesGetClassUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *NodesGetClassUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the nodes get class unprocessable entity response +func (o *NodesGetClassUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *NodesGetClassUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// NodesGetClassInternalServerErrorCode is the HTTP code returned for type NodesGetClassInternalServerError +const NodesGetClassInternalServerErrorCode int = 500 + +/* +NodesGetClassInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response nodesGetClassInternalServerError +*/ +type NodesGetClassInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewNodesGetClassInternalServerError creates NodesGetClassInternalServerError with default headers values +func NewNodesGetClassInternalServerError() *NodesGetClassInternalServerError { + + return &NodesGetClassInternalServerError{} +} + +// WithPayload adds the payload to the nodes get class internal server error response +func (o *NodesGetClassInternalServerError) WithPayload(payload *models.ErrorResponse) *NodesGetClassInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the nodes get class internal server error response +func (o *NodesGetClassInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *NodesGetClassInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_class_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_class_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..45d0e855f6700affe0cea5b90c37a3d567854914 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_class_urlbuilder.go @@ -0,0 +1,133 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package nodes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// NodesGetClassURL generates an URL for the nodes get class operation +type NodesGetClassURL struct { + ClassName string + + Output *string + ShardName *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *NodesGetClassURL) WithBasePath(bp string) *NodesGetClassURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *NodesGetClassURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *NodesGetClassURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/nodes/{className}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on NodesGetClassURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var outputQ string + if o.Output != nil { + outputQ = *o.Output + } + if outputQ != "" { + qs.Set("output", outputQ) + } + + var shardNameQ string + if o.ShardName != nil { + shardNameQ = *o.ShardName + } + if shardNameQ != "" { + qs.Set("shardName", shardNameQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper 
function to panic when the url builder returns an error +func (o *NodesGetClassURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *NodesGetClassURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *NodesGetClassURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on NodesGetClassURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on NodesGetClassURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *NodesGetClassURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..fc2c3bbaa357b4c9b98c2e1229cc76da0b5cd6c4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_parameters.go @@ -0,0 +1,97 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package nodes + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewNodesGetParams creates a new NodesGetParams object +// with the default values initialized. +func NewNodesGetParams() NodesGetParams { + + var ( + // initialize parameters with default values + + outputDefault = string("minimal") + ) + + return NodesGetParams{ + Output: &outputDefault, + } +} + +// NodesGetParams contains all the bound params for the nodes get operation +// typically these are obtained from a http.Request +// +// swagger:parameters nodes.get +type NodesGetParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*Controls the verbosity of the output, possible values are: "minimal", "verbose". Defaults to "minimal". + In: query + Default: "minimal" + */ + Output *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewNodesGetParams() beforehand. +func (o *NodesGetParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + qOutput, qhkOutput, _ := qs.GetOK("output") + if err := o.bindOutput(qOutput, qhkOutput, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindOutput binds and validates parameter Output from query. 
+func (o *NodesGetParams) bindOutput(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + // Default values have been previously initialized by NewNodesGetParams() + return nil + } + o.Output = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..5d8af2dab90c1188c388d4e8b8e2a18abfee0fcc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_responses.go @@ -0,0 +1,275 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package nodes + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// NodesGetOKCode is the HTTP code returned for type NodesGetOK +const NodesGetOKCode int = 200 + +/* +NodesGetOK Nodes status successfully returned + +swagger:response nodesGetOK +*/ +type NodesGetOK struct { + + /* + In: Body + */ + Payload *models.NodesStatusResponse `json:"body,omitempty"` +} + +// NewNodesGetOK creates NodesGetOK with default headers values +func NewNodesGetOK() *NodesGetOK { + + return &NodesGetOK{} +} + +// WithPayload adds the payload to the nodes get o k response +func (o *NodesGetOK) WithPayload(payload *models.NodesStatusResponse) *NodesGetOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the nodes get o k response +func (o *NodesGetOK) SetPayload(payload *models.NodesStatusResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *NodesGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// NodesGetUnauthorizedCode is the HTTP code returned for type NodesGetUnauthorized +const NodesGetUnauthorizedCode int = 401 + +/* +NodesGetUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response nodesGetUnauthorized +*/ +type NodesGetUnauthorized struct { +} + +// NewNodesGetUnauthorized creates NodesGetUnauthorized with default headers values +func NewNodesGetUnauthorized() *NodesGetUnauthorized { + + return &NodesGetUnauthorized{} +} + +// WriteResponse to the client +func (o *NodesGetUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// NodesGetForbiddenCode is the HTTP code returned for type NodesGetForbidden +const NodesGetForbiddenCode int = 403 + +/* +NodesGetForbidden Forbidden + +swagger:response nodesGetForbidden +*/ +type NodesGetForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewNodesGetForbidden creates NodesGetForbidden with default headers values +func NewNodesGetForbidden() *NodesGetForbidden { + + return &NodesGetForbidden{} +} + +// WithPayload adds the payload to the nodes get forbidden response +func (o *NodesGetForbidden) WithPayload(payload *models.ErrorResponse) *NodesGetForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the nodes get forbidden response +func (o *NodesGetForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *NodesGetForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// NodesGetNotFoundCode is the HTTP code returned for type NodesGetNotFound +const NodesGetNotFoundCode int = 404 + +/* +NodesGetNotFound Not Found - Backup does not exist + +swagger:response nodesGetNotFound +*/ +type NodesGetNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse 
`json:"body,omitempty"` +} + +// NewNodesGetNotFound creates NodesGetNotFound with default headers values +func NewNodesGetNotFound() *NodesGetNotFound { + + return &NodesGetNotFound{} +} + +// WithPayload adds the payload to the nodes get not found response +func (o *NodesGetNotFound) WithPayload(payload *models.ErrorResponse) *NodesGetNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the nodes get not found response +func (o *NodesGetNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *NodesGetNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// NodesGetUnprocessableEntityCode is the HTTP code returned for type NodesGetUnprocessableEntity +const NodesGetUnprocessableEntityCode int = 422 + +/* +NodesGetUnprocessableEntity Invalid backup restoration status attempt. 
+ +swagger:response nodesGetUnprocessableEntity +*/ +type NodesGetUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewNodesGetUnprocessableEntity creates NodesGetUnprocessableEntity with default headers values +func NewNodesGetUnprocessableEntity() *NodesGetUnprocessableEntity { + + return &NodesGetUnprocessableEntity{} +} + +// WithPayload adds the payload to the nodes get unprocessable entity response +func (o *NodesGetUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *NodesGetUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the nodes get unprocessable entity response +func (o *NodesGetUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *NodesGetUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// NodesGetInternalServerErrorCode is the HTTP code returned for type NodesGetInternalServerError +const NodesGetInternalServerErrorCode int = 500 + +/* +NodesGetInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response nodesGetInternalServerError +*/ +type NodesGetInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewNodesGetInternalServerError creates NodesGetInternalServerError with default headers values +func NewNodesGetInternalServerError() *NodesGetInternalServerError { + + return &NodesGetInternalServerError{} +} + +// WithPayload adds the payload to the nodes get internal server error response +func (o *NodesGetInternalServerError) WithPayload(payload *models.ErrorResponse) *NodesGetInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the nodes get internal server error response +func (o *NodesGetInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *NodesGetInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..efb8f39f0e84c6e25bb88ef71d99353ef96b3196 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/nodes/nodes_get_urlbuilder.go @@ -0,0 +1,114 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package nodes + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// NodesGetURL generates an URL for the nodes get operation +type NodesGetURL struct { + Output *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *NodesGetURL) WithBasePath(bp string) *NodesGetURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *NodesGetURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *NodesGetURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/nodes" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var outputQ string + if o.Output != nil { + outputQ = *o.Output + } + if outputQ != "" { + qs.Set("output", outputQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *NodesGetURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *NodesGetURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *NodesGetURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == 
"" { + return nil, errors.New("scheme is required for a full url on NodesGetURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on NodesGetURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *NodesGetURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_delete.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..9ef676a6ee500cb27e924e395f5e3a25c12bf968 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_delete.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassDeleteHandlerFunc turns a function with the right signature into a objects class delete handler +type ObjectsClassDeleteHandlerFunc func(ObjectsClassDeleteParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsClassDeleteHandlerFunc) Handle(params ObjectsClassDeleteParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsClassDeleteHandler interface for that can handle valid objects class delete params +type ObjectsClassDeleteHandler interface { + Handle(ObjectsClassDeleteParams, *models.Principal) middleware.Responder +} + +// NewObjectsClassDelete creates a new http.Handler for the objects class delete operation +func NewObjectsClassDelete(ctx *middleware.Context, handler ObjectsClassDeleteHandler) *ObjectsClassDelete { + return &ObjectsClassDelete{Context: ctx, Handler: handler} +} + +/* + ObjectsClassDelete swagger:route DELETE /objects/{className}/{id} objects objectsClassDelete + +Delete object based on its class and UUID. + +Delete an object based on its collection and UUID.

    Note: For backward compatibility, beacons also support an older, deprecated format without the collection name. As a result, when deleting a reference, the beacon specified has to match the beacon to be deleted exactly. In other words, if a beacon is present using the old format (without collection name) you also need to specify it the same way.

    In the beacon format, you need to always use `localhost` as the host, rather than the actual hostname. `localhost` here refers to the fact that the beacon's target is on the same Weaviate instance, as opposed to a foreign instance. +*/ +type ObjectsClassDelete struct { + Context *middleware.Context + Handler ObjectsClassDeleteHandler +} + +func (o *ObjectsClassDelete) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsClassDeleteParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_delete_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..2d98ef90a3d0699f6376457bc9d2c0743e543a2b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_delete_parameters.go @@ -0,0 +1,183 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewObjectsClassDeleteParams creates a new ObjectsClassDeleteParams object +// +// There are no default values defined in the spec. +func NewObjectsClassDeleteParams() ObjectsClassDeleteParams { + + return ObjectsClassDeleteParams{} +} + +// ObjectsClassDeleteParams contains all the bound params for the objects class delete operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.class.delete +type ObjectsClassDeleteParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClassName string + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string + /*Unique ID of the Object. + Required: true + In: path + */ + ID strfmt.UUID + /*Specifies the tenant in a request targeting a multi-tenant class + In: query + */ + Tenant *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsClassDeleteParams() beforehand. 
+func (o *ObjectsClassDeleteParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + qTenant, qhkTenant, _ := qs.GetOK("tenant") + if err := o.bindTenant(qTenant, qhkTenant, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. +func (o *ObjectsClassDeleteParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. +func (o *ObjectsClassDeleteParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *ObjectsClassDeleteParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsClassDeleteParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} + +// bindTenant binds and validates parameter Tenant from query. +func (o *ObjectsClassDeleteParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Tenant = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_delete_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..f97bfba36eed234e067d2233f5aafc9ffce88949 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_delete_responses.go @@ -0,0 +1,280 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassDeleteNoContentCode is the HTTP code returned for type ObjectsClassDeleteNoContent +const ObjectsClassDeleteNoContentCode int = 204 + +/* +ObjectsClassDeleteNoContent Successfully deleted. + +swagger:response objectsClassDeleteNoContent +*/ +type ObjectsClassDeleteNoContent struct { +} + +// NewObjectsClassDeleteNoContent creates ObjectsClassDeleteNoContent with default headers values +func NewObjectsClassDeleteNoContent() *ObjectsClassDeleteNoContent { + + return &ObjectsClassDeleteNoContent{} +} + +// WriteResponse to the client +func (o *ObjectsClassDeleteNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// ObjectsClassDeleteBadRequestCode is the HTTP code returned for type ObjectsClassDeleteBadRequest +const ObjectsClassDeleteBadRequestCode int = 400 + +/* +ObjectsClassDeleteBadRequest Malformed request. 
+ +swagger:response objectsClassDeleteBadRequest +*/ +type ObjectsClassDeleteBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassDeleteBadRequest creates ObjectsClassDeleteBadRequest with default headers values +func NewObjectsClassDeleteBadRequest() *ObjectsClassDeleteBadRequest { + + return &ObjectsClassDeleteBadRequest{} +} + +// WithPayload adds the payload to the objects class delete bad request response +func (o *ObjectsClassDeleteBadRequest) WithPayload(payload *models.ErrorResponse) *ObjectsClassDeleteBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class delete bad request response +func (o *ObjectsClassDeleteBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassDeleteBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassDeleteUnauthorizedCode is the HTTP code returned for type ObjectsClassDeleteUnauthorized +const ObjectsClassDeleteUnauthorizedCode int = 401 + +/* +ObjectsClassDeleteUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsClassDeleteUnauthorized +*/ +type ObjectsClassDeleteUnauthorized struct { +} + +// NewObjectsClassDeleteUnauthorized creates ObjectsClassDeleteUnauthorized with default headers values +func NewObjectsClassDeleteUnauthorized() *ObjectsClassDeleteUnauthorized { + + return &ObjectsClassDeleteUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsClassDeleteUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsClassDeleteForbiddenCode is the HTTP code returned for type ObjectsClassDeleteForbidden +const ObjectsClassDeleteForbiddenCode int = 403 + +/* +ObjectsClassDeleteForbidden Forbidden + +swagger:response objectsClassDeleteForbidden +*/ +type ObjectsClassDeleteForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassDeleteForbidden creates ObjectsClassDeleteForbidden with default headers values +func NewObjectsClassDeleteForbidden() *ObjectsClassDeleteForbidden { + + return &ObjectsClassDeleteForbidden{} +} + +// WithPayload adds the payload to the objects class delete forbidden response +func (o *ObjectsClassDeleteForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsClassDeleteForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class delete forbidden response +func (o *ObjectsClassDeleteForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassDeleteForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassDeleteNotFoundCode is the HTTP code 
returned for type ObjectsClassDeleteNotFound +const ObjectsClassDeleteNotFoundCode int = 404 + +/* +ObjectsClassDeleteNotFound Successful query result but no resource was found. + +swagger:response objectsClassDeleteNotFound +*/ +type ObjectsClassDeleteNotFound struct { +} + +// NewObjectsClassDeleteNotFound creates ObjectsClassDeleteNotFound with default headers values +func NewObjectsClassDeleteNotFound() *ObjectsClassDeleteNotFound { + + return &ObjectsClassDeleteNotFound{} +} + +// WriteResponse to the client +func (o *ObjectsClassDeleteNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ObjectsClassDeleteUnprocessableEntityCode is the HTTP code returned for type ObjectsClassDeleteUnprocessableEntity +const ObjectsClassDeleteUnprocessableEntityCode int = 422 + +/* +ObjectsClassDeleteUnprocessableEntity Request is well-formed (i.e., syntactically correct), but erroneous. 
+ +swagger:response objectsClassDeleteUnprocessableEntity +*/ +type ObjectsClassDeleteUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassDeleteUnprocessableEntity creates ObjectsClassDeleteUnprocessableEntity with default headers values +func NewObjectsClassDeleteUnprocessableEntity() *ObjectsClassDeleteUnprocessableEntity { + + return &ObjectsClassDeleteUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects class delete unprocessable entity response +func (o *ObjectsClassDeleteUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsClassDeleteUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class delete unprocessable entity response +func (o *ObjectsClassDeleteUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassDeleteUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassDeleteInternalServerErrorCode is the HTTP code returned for type ObjectsClassDeleteInternalServerError +const ObjectsClassDeleteInternalServerErrorCode int = 500 + +/* +ObjectsClassDeleteInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response objectsClassDeleteInternalServerError +*/ +type ObjectsClassDeleteInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassDeleteInternalServerError creates ObjectsClassDeleteInternalServerError with default headers values +func NewObjectsClassDeleteInternalServerError() *ObjectsClassDeleteInternalServerError { + + return &ObjectsClassDeleteInternalServerError{} +} + +// WithPayload adds the payload to the objects class delete internal server error response +func (o *ObjectsClassDeleteInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsClassDeleteInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class delete internal server error response +func (o *ObjectsClassDeleteInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassDeleteInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_delete_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_delete_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..21362f91472e8cf302ee403c62270e615a3bb8de --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_delete_urlbuilder.go @@ -0,0 +1,143 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 
2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsClassDeleteURL generates an URL for the objects class delete operation +type ObjectsClassDeleteURL struct { + ClassName string + ID strfmt.UUID + + ConsistencyLevel *string + Tenant *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsClassDeleteURL) WithBasePath(bp string) *ObjectsClassDeleteURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsClassDeleteURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsClassDeleteURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{className}/{id}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on ObjectsClassDeleteURL") + } + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsClassDeleteURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + var tenantQ string + if o.Tenant != nil { + tenantQ = *o.Tenant + } + if tenantQ != "" { + qs.Set("tenant", tenantQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsClassDeleteURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsClassDeleteURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsClassDeleteURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsClassDeleteURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on 
ObjectsClassDeleteURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsClassDeleteURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_get.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_get.go new file mode 100644 index 0000000000000000000000000000000000000000..991ff0046de36446d28c0508fcdfaf65bcf0bbab --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_get.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassGetHandlerFunc turns a function with the right signature into a objects class get handler +type ObjectsClassGetHandlerFunc func(ObjectsClassGetParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsClassGetHandlerFunc) Handle(params ObjectsClassGetParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsClassGetHandler interface for that can handle valid objects class get params +type ObjectsClassGetHandler interface { + Handle(ObjectsClassGetParams, *models.Principal) middleware.Responder +} + +// NewObjectsClassGet creates a new http.Handler for the objects class get operation +func NewObjectsClassGet(ctx *middleware.Context, handler ObjectsClassGetHandler) *ObjectsClassGet { + return &ObjectsClassGet{Context: ctx, Handler: handler} +} + +/* + ObjectsClassGet swagger:route GET /objects/{className}/{id} objects objectsClassGet + +Get a specific Object based on its class and UUID. Also available as Websocket bus. + +Get a data object based on its collection and UUID. 
+*/ +type ObjectsClassGet struct { + Context *middleware.Context + Handler ObjectsClassGetHandler +} + +func (o *ObjectsClassGet) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsClassGetParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_get_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..accf34a27544a672fbb9b69d4274f47bcd76cbdc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_get_parameters.go @@ -0,0 +1,237 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewObjectsClassGetParams creates a new ObjectsClassGetParams object +// +// There are no default values defined in the spec. +func NewObjectsClassGetParams() ObjectsClassGetParams { + + return ObjectsClassGetParams{} +} + +// ObjectsClassGetParams contains all the bound params for the objects class get operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.class.get +type ObjectsClassGetParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClassName string + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string + /*Unique ID of the Object. + Required: true + In: path + */ + ID strfmt.UUID + /*Include additional information, such as classification infos. Allowed values include: classification, vector, interpretation + In: query + */ + Include *string + /*The target node which should fulfill the request + In: query + */ + NodeName *string + /*Specifies the tenant in a request targeting a multi-tenant class + In: query + */ + Tenant *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsClassGetParams() beforehand. 
+func (o *ObjectsClassGetParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + qInclude, qhkInclude, _ := qs.GetOK("include") + if err := o.bindInclude(qInclude, qhkInclude, route.Formats); err != nil { + res = append(res, err) + } + + qNodeName, qhkNodeName, _ := qs.GetOK("node_name") + if err := o.bindNodeName(qNodeName, qhkNodeName, route.Formats); err != nil { + res = append(res, err) + } + + qTenant, qhkTenant, _ := qs.GetOK("tenant") + if err := o.bindTenant(qTenant, qhkTenant, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. +func (o *ObjectsClassGetParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. 
+func (o *ObjectsClassGetParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} + +// bindID binds and validates parameter ID from path. +func (o *ObjectsClassGetParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsClassGetParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} + +// bindInclude binds and validates parameter Include from query. +func (o *ObjectsClassGetParams) bindInclude(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Include = &raw + + return nil +} + +// bindNodeName binds and validates parameter NodeName from query. 
+func (o *ObjectsClassGetParams) bindNodeName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.NodeName = &raw + + return nil +} + +// bindTenant binds and validates parameter Tenant from query. +func (o *ObjectsClassGetParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Tenant = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_get_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..70e06b3653f821670d7ff48be1c087c59c345da2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_get_responses.go @@ -0,0 +1,300 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassGetOKCode is the HTTP code returned for type ObjectsClassGetOK +const ObjectsClassGetOKCode int = 200 + +/* +ObjectsClassGetOK Successful response. + +swagger:response objectsClassGetOK +*/ +type ObjectsClassGetOK struct { + + /* + In: Body + */ + Payload *models.Object `json:"body,omitempty"` +} + +// NewObjectsClassGetOK creates ObjectsClassGetOK with default headers values +func NewObjectsClassGetOK() *ObjectsClassGetOK { + + return &ObjectsClassGetOK{} +} + +// WithPayload adds the payload to the objects class get o k response +func (o *ObjectsClassGetOK) WithPayload(payload *models.Object) *ObjectsClassGetOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class get o k response +func (o *ObjectsClassGetOK) SetPayload(payload *models.Object) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassGetBadRequestCode is the HTTP code returned for type ObjectsClassGetBadRequest +const ObjectsClassGetBadRequestCode int = 400 + +/* +ObjectsClassGetBadRequest Malformed request. 
+ +swagger:response objectsClassGetBadRequest +*/ +type ObjectsClassGetBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassGetBadRequest creates ObjectsClassGetBadRequest with default headers values +func NewObjectsClassGetBadRequest() *ObjectsClassGetBadRequest { + + return &ObjectsClassGetBadRequest{} +} + +// WithPayload adds the payload to the objects class get bad request response +func (o *ObjectsClassGetBadRequest) WithPayload(payload *models.ErrorResponse) *ObjectsClassGetBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class get bad request response +func (o *ObjectsClassGetBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassGetBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassGetUnauthorizedCode is the HTTP code returned for type ObjectsClassGetUnauthorized +const ObjectsClassGetUnauthorizedCode int = 401 + +/* +ObjectsClassGetUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsClassGetUnauthorized +*/ +type ObjectsClassGetUnauthorized struct { +} + +// NewObjectsClassGetUnauthorized creates ObjectsClassGetUnauthorized with default headers values +func NewObjectsClassGetUnauthorized() *ObjectsClassGetUnauthorized { + + return &ObjectsClassGetUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsClassGetUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsClassGetForbiddenCode is the HTTP code returned for type ObjectsClassGetForbidden +const ObjectsClassGetForbiddenCode int = 403 + +/* +ObjectsClassGetForbidden Forbidden + +swagger:response objectsClassGetForbidden +*/ +type ObjectsClassGetForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassGetForbidden creates ObjectsClassGetForbidden with default headers values +func NewObjectsClassGetForbidden() *ObjectsClassGetForbidden { + + return &ObjectsClassGetForbidden{} +} + +// WithPayload adds the payload to the objects class get forbidden response +func (o *ObjectsClassGetForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsClassGetForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class get forbidden response +func (o *ObjectsClassGetForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassGetForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassGetNotFoundCode is the HTTP code returned for type ObjectsClassGetNotFound +const ObjectsClassGetNotFoundCode int = 
404 + +/* +ObjectsClassGetNotFound Successful query result but no resource was found. + +swagger:response objectsClassGetNotFound +*/ +type ObjectsClassGetNotFound struct { +} + +// NewObjectsClassGetNotFound creates ObjectsClassGetNotFound with default headers values +func NewObjectsClassGetNotFound() *ObjectsClassGetNotFound { + + return &ObjectsClassGetNotFound{} +} + +// WriteResponse to the client +func (o *ObjectsClassGetNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ObjectsClassGetUnprocessableEntityCode is the HTTP code returned for type ObjectsClassGetUnprocessableEntity +const ObjectsClassGetUnprocessableEntityCode int = 422 + +/* +ObjectsClassGetUnprocessableEntity Request is well-formed (i.e., syntactically correct), but erroneous. + +swagger:response objectsClassGetUnprocessableEntity +*/ +type ObjectsClassGetUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassGetUnprocessableEntity creates ObjectsClassGetUnprocessableEntity with default headers values +func NewObjectsClassGetUnprocessableEntity() *ObjectsClassGetUnprocessableEntity { + + return &ObjectsClassGetUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects class get unprocessable entity response +func (o *ObjectsClassGetUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsClassGetUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class get unprocessable entity response +func (o *ObjectsClassGetUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassGetUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { 
+ payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassGetInternalServerErrorCode is the HTTP code returned for type ObjectsClassGetInternalServerError +const ObjectsClassGetInternalServerErrorCode int = 500 + +/* +ObjectsClassGetInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response objectsClassGetInternalServerError +*/ +type ObjectsClassGetInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassGetInternalServerError creates ObjectsClassGetInternalServerError with default headers values +func NewObjectsClassGetInternalServerError() *ObjectsClassGetInternalServerError { + + return &ObjectsClassGetInternalServerError{} +} + +// WithPayload adds the payload to the objects class get internal server error response +func (o *ObjectsClassGetInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsClassGetInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class get internal server error response +func (o *ObjectsClassGetInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassGetInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_get_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_get_urlbuilder.go new file mode 
100644 index 0000000000000000000000000000000000000000..5a5a64c6cf2c5d1cdcce7596e9cba33804f867a0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_get_urlbuilder.go @@ -0,0 +1,161 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsClassGetURL generates an URL for the objects class get operation +type ObjectsClassGetURL struct { + ClassName string + ID strfmt.UUID + + ConsistencyLevel *string + Include *string + NodeName *string + Tenant *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsClassGetURL) WithBasePath(bp string) *ObjectsClassGetURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsClassGetURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsClassGetURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{className}/{id}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on ObjectsClassGetURL") + } + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsClassGetURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + var includeQ string + if o.Include != nil { + includeQ = *o.Include + } + if includeQ != "" { + qs.Set("include", includeQ) + } + + var nodeNameQ string + if o.NodeName != nil { + nodeNameQ = *o.NodeName + } + if nodeNameQ != "" { + qs.Set("node_name", nodeNameQ) + } + + var tenantQ string + if o.Tenant != nil { + tenantQ = *o.Tenant + } + if tenantQ != "" { + qs.Set("tenant", tenantQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsClassGetURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsClassGetURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsClassGetURL) 
BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsClassGetURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsClassGetURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsClassGetURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_head.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_head.go new file mode 100644 index 0000000000000000000000000000000000000000..805d0722a53dbfd8fa8d3cf209c947fc64a55b0e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_head.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassHeadHandlerFunc turns a function with the right signature into a objects class head handler +type ObjectsClassHeadHandlerFunc func(ObjectsClassHeadParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsClassHeadHandlerFunc) Handle(params ObjectsClassHeadParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsClassHeadHandler interface for that can handle valid objects class head params +type ObjectsClassHeadHandler interface { + Handle(ObjectsClassHeadParams, *models.Principal) middleware.Responder +} + +// NewObjectsClassHead creates a new http.Handler for the objects class head operation +func NewObjectsClassHead(ctx *middleware.Context, handler ObjectsClassHeadHandler) *ObjectsClassHead { + return &ObjectsClassHead{Context: ctx, Handler: handler} +} + +/* + ObjectsClassHead swagger:route HEAD /objects/{className}/{id} objects objectsClassHead + +Checks object's existence based on its class and uuid. + +Checks if a data object exists based on its collection and uuid without retrieving it.

    Internally it skips reading the object from disk other than checking if it is present. Thus it does not use resources on marshalling, parsing, etc., and is faster. Note the resulting HTTP request has no body; the existence of an object is indicated solely by the status code. +*/ +type ObjectsClassHead struct { + Context *middleware.Context + Handler ObjectsClassHeadHandler +} + +func (o *ObjectsClassHead) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsClassHeadParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_head_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_head_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..65a7af209df8c8f2dcd1a0c0b0ee9f8230b6be8a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_head_parameters.go @@ -0,0 +1,183 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewObjectsClassHeadParams creates a new ObjectsClassHeadParams object +// +// There are no default values defined in the spec. +func NewObjectsClassHeadParams() ObjectsClassHeadParams { + + return ObjectsClassHeadParams{} +} + +// ObjectsClassHeadParams contains all the bound params for the objects class head operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.class.head +type ObjectsClassHeadParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*The class name as defined in the schema + Required: true + In: path + */ + ClassName string + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string + /*The uuid of the data object + Required: true + In: path + */ + ID strfmt.UUID + /*Specifies the tenant in a request targeting a multi-tenant class + In: query + */ + Tenant *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsClassHeadParams() beforehand. 
+func (o *ObjectsClassHeadParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + qTenant, qhkTenant, _ := qs.GetOK("tenant") + if err := o.bindTenant(qTenant, qhkTenant, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. +func (o *ObjectsClassHeadParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. +func (o *ObjectsClassHeadParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *ObjectsClassHeadParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsClassHeadParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} + +// bindTenant binds and validates parameter Tenant from query. +func (o *ObjectsClassHeadParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Tenant = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_head_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_head_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..380fb5ea39d528a6da4c555454bbd0e0a4c57a7f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_head_responses.go @@ -0,0 +1,235 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassHeadNoContentCode is the HTTP code returned for type ObjectsClassHeadNoContent +const ObjectsClassHeadNoContentCode int = 204 + +/* +ObjectsClassHeadNoContent Object exists. + +swagger:response objectsClassHeadNoContent +*/ +type ObjectsClassHeadNoContent struct { +} + +// NewObjectsClassHeadNoContent creates ObjectsClassHeadNoContent with default headers values +func NewObjectsClassHeadNoContent() *ObjectsClassHeadNoContent { + + return &ObjectsClassHeadNoContent{} +} + +// WriteResponse to the client +func (o *ObjectsClassHeadNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// ObjectsClassHeadUnauthorizedCode is the HTTP code returned for type ObjectsClassHeadUnauthorized +const ObjectsClassHeadUnauthorizedCode int = 401 + +/* +ObjectsClassHeadUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsClassHeadUnauthorized +*/ +type ObjectsClassHeadUnauthorized struct { +} + +// NewObjectsClassHeadUnauthorized creates ObjectsClassHeadUnauthorized with default headers values +func NewObjectsClassHeadUnauthorized() *ObjectsClassHeadUnauthorized { + + return &ObjectsClassHeadUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsClassHeadUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsClassHeadForbiddenCode is the HTTP code returned for type ObjectsClassHeadForbidden +const ObjectsClassHeadForbiddenCode int = 403 + +/* +ObjectsClassHeadForbidden Forbidden + +swagger:response objectsClassHeadForbidden +*/ +type ObjectsClassHeadForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassHeadForbidden creates ObjectsClassHeadForbidden with default headers values +func NewObjectsClassHeadForbidden() *ObjectsClassHeadForbidden { + + return &ObjectsClassHeadForbidden{} +} + +// WithPayload adds the payload to the objects class head forbidden response +func (o *ObjectsClassHeadForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsClassHeadForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class head forbidden response +func (o *ObjectsClassHeadForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassHeadForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassHeadNotFoundCode is the HTTP code returned for type ObjectsClassHeadNotFound +const 
ObjectsClassHeadNotFoundCode int = 404 + +/* +ObjectsClassHeadNotFound Object doesn't exist. + +swagger:response objectsClassHeadNotFound +*/ +type ObjectsClassHeadNotFound struct { +} + +// NewObjectsClassHeadNotFound creates ObjectsClassHeadNotFound with default headers values +func NewObjectsClassHeadNotFound() *ObjectsClassHeadNotFound { + + return &ObjectsClassHeadNotFound{} +} + +// WriteResponse to the client +func (o *ObjectsClassHeadNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ObjectsClassHeadUnprocessableEntityCode is the HTTP code returned for type ObjectsClassHeadUnprocessableEntity +const ObjectsClassHeadUnprocessableEntityCode int = 422 + +/* +ObjectsClassHeadUnprocessableEntity Request is well-formed (i.e., syntactically correct), but erroneous. + +swagger:response objectsClassHeadUnprocessableEntity +*/ +type ObjectsClassHeadUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassHeadUnprocessableEntity creates ObjectsClassHeadUnprocessableEntity with default headers values +func NewObjectsClassHeadUnprocessableEntity() *ObjectsClassHeadUnprocessableEntity { + + return &ObjectsClassHeadUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects class head unprocessable entity response +func (o *ObjectsClassHeadUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsClassHeadUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class head unprocessable entity response +func (o *ObjectsClassHeadUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassHeadUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + 
rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassHeadInternalServerErrorCode is the HTTP code returned for type ObjectsClassHeadInternalServerError +const ObjectsClassHeadInternalServerErrorCode int = 500 + +/* +ObjectsClassHeadInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response objectsClassHeadInternalServerError +*/ +type ObjectsClassHeadInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassHeadInternalServerError creates ObjectsClassHeadInternalServerError with default headers values +func NewObjectsClassHeadInternalServerError() *ObjectsClassHeadInternalServerError { + + return &ObjectsClassHeadInternalServerError{} +} + +// WithPayload adds the payload to the objects class head internal server error response +func (o *ObjectsClassHeadInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsClassHeadInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class head internal server error response +func (o *ObjectsClassHeadInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassHeadInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_head_urlbuilder.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_head_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..93784868da36f80fd173485904e2228811b5d109 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_head_urlbuilder.go @@ -0,0 +1,143 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsClassHeadURL generates an URL for the objects class head operation +type ObjectsClassHeadURL struct { + ClassName string + ID strfmt.UUID + + ConsistencyLevel *string + Tenant *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsClassHeadURL) WithBasePath(bp string) *ObjectsClassHeadURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsClassHeadURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsClassHeadURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{className}/{id}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on ObjectsClassHeadURL") + } + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsClassHeadURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + var tenantQ string + if o.Tenant != nil { + tenantQ = *o.Tenant + } + if tenantQ != "" { + qs.Set("tenant", tenantQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsClassHeadURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsClassHeadURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsClassHeadURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsClassHeadURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsClassHeadURL") + } 
+ + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsClassHeadURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_patch.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_patch.go new file mode 100644 index 0000000000000000000000000000000000000000..49dfafc1decf4b976739566fad1237240ce02da3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_patch.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassPatchHandlerFunc turns a function with the right signature into a objects class patch handler +type ObjectsClassPatchHandlerFunc func(ObjectsClassPatchParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsClassPatchHandlerFunc) Handle(params ObjectsClassPatchParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsClassPatchHandler interface for that can handle valid objects class patch params +type ObjectsClassPatchHandler interface { + Handle(ObjectsClassPatchParams, *models.Principal) middleware.Responder +} + +// NewObjectsClassPatch creates a new http.Handler for the objects class patch operation +func NewObjectsClassPatch(ctx *middleware.Context, handler ObjectsClassPatchHandler) *ObjectsClassPatch { + return &ObjectsClassPatch{Context: ctx, Handler: handler} +} + +/* + ObjectsClassPatch swagger:route PATCH /objects/{className}/{id} objects objectsClassPatch + +Update an Object based on its UUID (using patch semantics). + +Update an individual data object based on its class and uuid. This method supports json-merge style patch semantics (RFC 7396). Provided meta-data and schema values are validated. LastUpdateTime is set to the time this function is called. 
+*/ +type ObjectsClassPatch struct { + Context *middleware.Context + Handler ObjectsClassPatchHandler +} + +func (o *ObjectsClassPatch) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsClassPatchParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_patch_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_patch_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..f312e8a211202d27765dbde03364747d49bd456a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_patch_parameters.go @@ -0,0 +1,184 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsClassPatchParams creates a new ObjectsClassPatchParams object +// +// There are no default values defined in the spec. +func NewObjectsClassPatchParams() ObjectsClassPatchParams { + + return ObjectsClassPatchParams{} +} + +// ObjectsClassPatchParams contains all the bound params for the objects class patch operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.class.patch +type ObjectsClassPatchParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*RFC 7396-style patch, the body contains the object to merge into the existing object. + In: body + */ + Body *models.Object + /*The class name as defined in the schema + Required: true + In: path + */ + ClassName string + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string + /*The uuid of the data object to update. + Required: true + In: path + */ + ID strfmt.UUID +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsClassPatchParams() beforehand. 
+func (o *ObjectsClassPatchParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.Object + if err := route.Consumer.Consume(r.Body, &body); err != nil { + res = append(res, errors.NewParseError("body", "body", "", err)) + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. +func (o *ObjectsClassPatchParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. 
+func (o *ObjectsClassPatchParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} + +// bindID binds and validates parameter ID from path. +func (o *ObjectsClassPatchParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsClassPatchParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_patch_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_patch_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..7bbfa5085db35f3ead55f8614549a5b5088f5b53 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_patch_responses.go @@ -0,0 +1,280 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassPatchNoContentCode is the HTTP code returned for type ObjectsClassPatchNoContent +const ObjectsClassPatchNoContentCode int = 204 + +/* +ObjectsClassPatchNoContent Successfully applied. No content provided. + +swagger:response objectsClassPatchNoContent +*/ +type ObjectsClassPatchNoContent struct { +} + +// NewObjectsClassPatchNoContent creates ObjectsClassPatchNoContent with default headers values +func NewObjectsClassPatchNoContent() *ObjectsClassPatchNoContent { + + return &ObjectsClassPatchNoContent{} +} + +// WriteResponse to the client +func (o *ObjectsClassPatchNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// ObjectsClassPatchBadRequestCode is the HTTP code returned for type ObjectsClassPatchBadRequest +const ObjectsClassPatchBadRequestCode int = 400 + +/* +ObjectsClassPatchBadRequest The patch-JSON is malformed. 
+ +swagger:response objectsClassPatchBadRequest +*/ +type ObjectsClassPatchBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassPatchBadRequest creates ObjectsClassPatchBadRequest with default headers values +func NewObjectsClassPatchBadRequest() *ObjectsClassPatchBadRequest { + + return &ObjectsClassPatchBadRequest{} +} + +// WithPayload adds the payload to the objects class patch bad request response +func (o *ObjectsClassPatchBadRequest) WithPayload(payload *models.ErrorResponse) *ObjectsClassPatchBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class patch bad request response +func (o *ObjectsClassPatchBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassPatchBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassPatchUnauthorizedCode is the HTTP code returned for type ObjectsClassPatchUnauthorized +const ObjectsClassPatchUnauthorizedCode int = 401 + +/* +ObjectsClassPatchUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsClassPatchUnauthorized +*/ +type ObjectsClassPatchUnauthorized struct { +} + +// NewObjectsClassPatchUnauthorized creates ObjectsClassPatchUnauthorized with default headers values +func NewObjectsClassPatchUnauthorized() *ObjectsClassPatchUnauthorized { + + return &ObjectsClassPatchUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsClassPatchUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsClassPatchForbiddenCode is the HTTP code returned for type ObjectsClassPatchForbidden +const ObjectsClassPatchForbiddenCode int = 403 + +/* +ObjectsClassPatchForbidden Forbidden + +swagger:response objectsClassPatchForbidden +*/ +type ObjectsClassPatchForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassPatchForbidden creates ObjectsClassPatchForbidden with default headers values +func NewObjectsClassPatchForbidden() *ObjectsClassPatchForbidden { + + return &ObjectsClassPatchForbidden{} +} + +// WithPayload adds the payload to the objects class patch forbidden response +func (o *ObjectsClassPatchForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsClassPatchForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class patch forbidden response +func (o *ObjectsClassPatchForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassPatchForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassPatchNotFoundCode is the HTTP code returned for type 
ObjectsClassPatchNotFound +const ObjectsClassPatchNotFoundCode int = 404 + +/* +ObjectsClassPatchNotFound Successful query result but no resource was found. + +swagger:response objectsClassPatchNotFound +*/ +type ObjectsClassPatchNotFound struct { +} + +// NewObjectsClassPatchNotFound creates ObjectsClassPatchNotFound with default headers values +func NewObjectsClassPatchNotFound() *ObjectsClassPatchNotFound { + + return &ObjectsClassPatchNotFound{} +} + +// WriteResponse to the client +func (o *ObjectsClassPatchNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ObjectsClassPatchUnprocessableEntityCode is the HTTP code returned for type ObjectsClassPatchUnprocessableEntity +const ObjectsClassPatchUnprocessableEntityCode int = 422 + +/* +ObjectsClassPatchUnprocessableEntity The patch-JSON is valid but unprocessable. + +swagger:response objectsClassPatchUnprocessableEntity +*/ +type ObjectsClassPatchUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassPatchUnprocessableEntity creates ObjectsClassPatchUnprocessableEntity with default headers values +func NewObjectsClassPatchUnprocessableEntity() *ObjectsClassPatchUnprocessableEntity { + + return &ObjectsClassPatchUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects class patch unprocessable entity response +func (o *ObjectsClassPatchUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsClassPatchUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class patch unprocessable entity response +func (o *ObjectsClassPatchUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassPatchUnprocessableEntity) WriteResponse(rw 
http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassPatchInternalServerErrorCode is the HTTP code returned for type ObjectsClassPatchInternalServerError +const ObjectsClassPatchInternalServerErrorCode int = 500 + +/* +ObjectsClassPatchInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response objectsClassPatchInternalServerError +*/ +type ObjectsClassPatchInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassPatchInternalServerError creates ObjectsClassPatchInternalServerError with default headers values +func NewObjectsClassPatchInternalServerError() *ObjectsClassPatchInternalServerError { + + return &ObjectsClassPatchInternalServerError{} +} + +// WithPayload adds the payload to the objects class patch internal server error response +func (o *ObjectsClassPatchInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsClassPatchInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class patch internal server error response +func (o *ObjectsClassPatchInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassPatchInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_patch_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_patch_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..9c32e5f1ef426b8f98f827d0f8928c51e1f25908 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_patch_urlbuilder.go @@ -0,0 +1,134 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsClassPatchURL generates an URL for the objects class patch operation +type ObjectsClassPatchURL struct { + ClassName string + ID strfmt.UUID + + ConsistencyLevel *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsClassPatchURL) WithBasePath(bp string) *ObjectsClassPatchURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsClassPatchURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsClassPatchURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{className}/{id}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on ObjectsClassPatchURL") + } + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsClassPatchURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsClassPatchURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsClassPatchURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsClassPatchURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsClassPatchURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsClassPatchURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + 
return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsClassPatchURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_put.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_put.go new file mode 100644 index 0000000000000000000000000000000000000000..f835eb8213d8697b93f626333d6a614621cfcbe3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_put.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassPutHandlerFunc turns a function with the right signature into a objects class put handler +type ObjectsClassPutHandlerFunc func(ObjectsClassPutParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsClassPutHandlerFunc) Handle(params ObjectsClassPutParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsClassPutHandler interface for that can handle valid objects class put params +type ObjectsClassPutHandler interface { + Handle(ObjectsClassPutParams, *models.Principal) middleware.Responder +} + +// NewObjectsClassPut creates a new http.Handler for the objects class put operation +func NewObjectsClassPut(ctx *middleware.Context, handler ObjectsClassPutHandler) *ObjectsClassPut { + return &ObjectsClassPut{Context: ctx, Handler: handler} +} + +/* + ObjectsClassPut swagger:route PUT /objects/{className}/{id} objects objectsClassPut + +# Update a class object based on its uuid + +Update an object based on its uuid and collection. This (`put`) method replaces the object with the provided object. 
+*/ +type ObjectsClassPut struct { + Context *middleware.Context + Handler ObjectsClassPutHandler +} + +func (o *ObjectsClassPut) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsClassPutParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_put_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_put_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..cc0ed13d46231bdb61c1320c0611fa61321a807d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_put_parameters.go @@ -0,0 +1,192 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsClassPutParams creates a new ObjectsClassPutParams object +// +// There are no default values defined in the spec. +func NewObjectsClassPutParams() ObjectsClassPutParams { + + return ObjectsClassPutParams{} +} + +// ObjectsClassPutParams contains all the bound params for the objects class put operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.class.put +type ObjectsClassPutParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.Object + /* + Required: true + In: path + */ + ClassName string + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string + /*The uuid of the data object to update. + Required: true + In: path + */ + ID strfmt.UUID +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsClassPutParams() beforehand. 
+func (o *ObjectsClassPutParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.Object + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. +func (o *ObjectsClassPutParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. 
+func (o *ObjectsClassPutParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} + +// bindID binds and validates parameter ID from path. +func (o *ObjectsClassPutParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsClassPutParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_put_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_put_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..f9122b79aa4c6066925f4f49ee42081f712556c1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_put_responses.go @@ -0,0 +1,255 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassPutOKCode is the HTTP code returned for type ObjectsClassPutOK +const ObjectsClassPutOKCode int = 200 + +/* +ObjectsClassPutOK Successfully received. + +swagger:response objectsClassPutOK +*/ +type ObjectsClassPutOK struct { + + /* + In: Body + */ + Payload *models.Object `json:"body,omitempty"` +} + +// NewObjectsClassPutOK creates ObjectsClassPutOK with default headers values +func NewObjectsClassPutOK() *ObjectsClassPutOK { + + return &ObjectsClassPutOK{} +} + +// WithPayload adds the payload to the objects class put o k response +func (o *ObjectsClassPutOK) WithPayload(payload *models.Object) *ObjectsClassPutOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class put o k response +func (o *ObjectsClassPutOK) SetPayload(payload *models.Object) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassPutOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassPutUnauthorizedCode is the HTTP code returned for type ObjectsClassPutUnauthorized +const ObjectsClassPutUnauthorizedCode int = 401 + +/* +ObjectsClassPutUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsClassPutUnauthorized +*/ +type ObjectsClassPutUnauthorized struct { +} + +// NewObjectsClassPutUnauthorized creates ObjectsClassPutUnauthorized with default headers values +func NewObjectsClassPutUnauthorized() *ObjectsClassPutUnauthorized { + + return &ObjectsClassPutUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsClassPutUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsClassPutForbiddenCode is the HTTP code returned for type ObjectsClassPutForbidden +const ObjectsClassPutForbiddenCode int = 403 + +/* +ObjectsClassPutForbidden Forbidden + +swagger:response objectsClassPutForbidden +*/ +type ObjectsClassPutForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassPutForbidden creates ObjectsClassPutForbidden with default headers values +func NewObjectsClassPutForbidden() *ObjectsClassPutForbidden { + + return &ObjectsClassPutForbidden{} +} + +// WithPayload adds the payload to the objects class put forbidden response +func (o *ObjectsClassPutForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsClassPutForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class put forbidden response +func (o *ObjectsClassPutForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassPutForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassPutNotFoundCode is the HTTP code returned for type ObjectsClassPutNotFound +const ObjectsClassPutNotFoundCode int = 
404 + +/* +ObjectsClassPutNotFound Successful query result but no resource was found. + +swagger:response objectsClassPutNotFound +*/ +type ObjectsClassPutNotFound struct { +} + +// NewObjectsClassPutNotFound creates ObjectsClassPutNotFound with default headers values +func NewObjectsClassPutNotFound() *ObjectsClassPutNotFound { + + return &ObjectsClassPutNotFound{} +} + +// WriteResponse to the client +func (o *ObjectsClassPutNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ObjectsClassPutUnprocessableEntityCode is the HTTP code returned for type ObjectsClassPutUnprocessableEntity +const ObjectsClassPutUnprocessableEntityCode int = 422 + +/* +ObjectsClassPutUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? + +swagger:response objectsClassPutUnprocessableEntity +*/ +type ObjectsClassPutUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassPutUnprocessableEntity creates ObjectsClassPutUnprocessableEntity with default headers values +func NewObjectsClassPutUnprocessableEntity() *ObjectsClassPutUnprocessableEntity { + + return &ObjectsClassPutUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects class put unprocessable entity response +func (o *ObjectsClassPutUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsClassPutUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class put unprocessable entity response +func (o *ObjectsClassPutUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassPutUnprocessableEntity) WriteResponse(rw 
http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassPutInternalServerErrorCode is the HTTP code returned for type ObjectsClassPutInternalServerError +const ObjectsClassPutInternalServerErrorCode int = 500 + +/* +ObjectsClassPutInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response objectsClassPutInternalServerError +*/ +type ObjectsClassPutInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassPutInternalServerError creates ObjectsClassPutInternalServerError with default headers values +func NewObjectsClassPutInternalServerError() *ObjectsClassPutInternalServerError { + + return &ObjectsClassPutInternalServerError{} +} + +// WithPayload adds the payload to the objects class put internal server error response +func (o *ObjectsClassPutInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsClassPutInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class put internal server error response +func (o *ObjectsClassPutInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassPutInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_put_urlbuilder.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_put_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..0552b997413c03a720a0dbaf3c964407df95fd37 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_put_urlbuilder.go @@ -0,0 +1,134 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsClassPutURL generates an URL for the objects class put operation +type ObjectsClassPutURL struct { + ClassName string + ID strfmt.UUID + + ConsistencyLevel *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsClassPutURL) WithBasePath(bp string) *ObjectsClassPutURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsClassPutURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsClassPutURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{className}/{id}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on ObjectsClassPutURL") + } + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsClassPutURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsClassPutURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsClassPutURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsClassPutURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsClassPutURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsClassPutURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + 
+// StringFull returns the string representation of a complete url +func (o *ObjectsClassPutURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_create.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_create.go new file mode 100644 index 0000000000000000000000000000000000000000..910d150499f51c87e95c5cd5bd6151108bacacba --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_create.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassReferencesCreateHandlerFunc turns a function with the right signature into a objects class references create handler +type ObjectsClassReferencesCreateHandlerFunc func(ObjectsClassReferencesCreateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsClassReferencesCreateHandlerFunc) Handle(params ObjectsClassReferencesCreateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsClassReferencesCreateHandler interface for that can handle valid objects class references create params +type ObjectsClassReferencesCreateHandler interface { + Handle(ObjectsClassReferencesCreateParams, *models.Principal) middleware.Responder +} + +// NewObjectsClassReferencesCreate creates a new http.Handler for the objects class references create operation +func NewObjectsClassReferencesCreate(ctx *middleware.Context, handler ObjectsClassReferencesCreateHandler) *ObjectsClassReferencesCreate { + return &ObjectsClassReferencesCreate{Context: ctx, Handler: handler} +} + +/* + ObjectsClassReferencesCreate swagger:route POST /objects/{className}/{id}/references/{propertyName} objects objectsClassReferencesCreate + +Add a single reference to a class-property. + +Add a single reference to an object. 
This adds a reference to the array of cross-references of the given property in the source object specified by its collection name and id +*/ +type ObjectsClassReferencesCreate struct { + Context *middleware.Context + Handler ObjectsClassReferencesCreateHandler +} + +func (o *ObjectsClassReferencesCreate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsClassReferencesCreateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_create_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..b9ad4e971a454b00ebfae4cd6f3d179f70ff01b8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_create_parameters.go @@ -0,0 +1,243 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsClassReferencesCreateParams creates a new ObjectsClassReferencesCreateParams object +// +// There are no default values defined in the spec. +func NewObjectsClassReferencesCreateParams() ObjectsClassReferencesCreateParams { + + return ObjectsClassReferencesCreateParams{} +} + +// ObjectsClassReferencesCreateParams contains all the bound params for the objects class references create operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.class.references.create +type ObjectsClassReferencesCreateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.SingleRef + /*The class name as defined in the schema + Required: true + In: path + */ + ClassName string + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string + /*Unique ID of the Object. + Required: true + In: path + */ + ID strfmt.UUID + /*Unique name of the property related to the Object. + Required: true + In: path + */ + PropertyName string + /*Specifies the tenant in a request targeting a multi-tenant class + In: query + */ + Tenant *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsClassReferencesCreateParams() beforehand. 
+func (o *ObjectsClassReferencesCreateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.SingleRef + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + rPropertyName, rhkPropertyName, _ := route.Params.GetOK("propertyName") + if err := o.bindPropertyName(rPropertyName, rhkPropertyName, route.Formats); err != nil { + res = append(res, err) + } + + qTenant, qhkTenant, _ := qs.GetOK("tenant") + if err := o.bindTenant(qTenant, qhkTenant, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. 
+func (o *ObjectsClassReferencesCreateParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. +func (o *ObjectsClassReferencesCreateParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} + +// bindID binds and validates parameter ID from path. +func (o *ObjectsClassReferencesCreateParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsClassReferencesCreateParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} + +// bindPropertyName binds and validates parameter PropertyName from path. 
+func (o *ObjectsClassReferencesCreateParams) bindPropertyName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.PropertyName = raw + + return nil +} + +// bindTenant binds and validates parameter Tenant from query. +func (o *ObjectsClassReferencesCreateParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Tenant = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_create_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..b9fd4beda528aafb95ad69415e773971394b713c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_create_responses.go @@ -0,0 +1,280 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassReferencesCreateOKCode is the HTTP code returned for type ObjectsClassReferencesCreateOK +const ObjectsClassReferencesCreateOKCode int = 200 + +/* +ObjectsClassReferencesCreateOK Successfully added the reference. + +swagger:response objectsClassReferencesCreateOK +*/ +type ObjectsClassReferencesCreateOK struct { +} + +// NewObjectsClassReferencesCreateOK creates ObjectsClassReferencesCreateOK with default headers values +func NewObjectsClassReferencesCreateOK() *ObjectsClassReferencesCreateOK { + + return &ObjectsClassReferencesCreateOK{} +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesCreateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// ObjectsClassReferencesCreateBadRequestCode is the HTTP code returned for type ObjectsClassReferencesCreateBadRequest +const ObjectsClassReferencesCreateBadRequestCode int = 400 + +/* +ObjectsClassReferencesCreateBadRequest Malformed request. 
+ +swagger:response objectsClassReferencesCreateBadRequest +*/ +type ObjectsClassReferencesCreateBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassReferencesCreateBadRequest creates ObjectsClassReferencesCreateBadRequest with default headers values +func NewObjectsClassReferencesCreateBadRequest() *ObjectsClassReferencesCreateBadRequest { + + return &ObjectsClassReferencesCreateBadRequest{} +} + +// WithPayload adds the payload to the objects class references create bad request response +func (o *ObjectsClassReferencesCreateBadRequest) WithPayload(payload *models.ErrorResponse) *ObjectsClassReferencesCreateBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class references create bad request response +func (o *ObjectsClassReferencesCreateBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesCreateBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassReferencesCreateUnauthorizedCode is the HTTP code returned for type ObjectsClassReferencesCreateUnauthorized +const ObjectsClassReferencesCreateUnauthorizedCode int = 401 + +/* +ObjectsClassReferencesCreateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsClassReferencesCreateUnauthorized +*/ +type ObjectsClassReferencesCreateUnauthorized struct { +} + +// NewObjectsClassReferencesCreateUnauthorized creates ObjectsClassReferencesCreateUnauthorized with default headers values +func NewObjectsClassReferencesCreateUnauthorized() *ObjectsClassReferencesCreateUnauthorized { + + return &ObjectsClassReferencesCreateUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesCreateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsClassReferencesCreateForbiddenCode is the HTTP code returned for type ObjectsClassReferencesCreateForbidden +const ObjectsClassReferencesCreateForbiddenCode int = 403 + +/* +ObjectsClassReferencesCreateForbidden Forbidden + +swagger:response objectsClassReferencesCreateForbidden +*/ +type ObjectsClassReferencesCreateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassReferencesCreateForbidden creates ObjectsClassReferencesCreateForbidden with default headers values +func NewObjectsClassReferencesCreateForbidden() *ObjectsClassReferencesCreateForbidden { + + return &ObjectsClassReferencesCreateForbidden{} +} + +// WithPayload adds the payload to the objects class references create forbidden response +func (o *ObjectsClassReferencesCreateForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsClassReferencesCreateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class references create forbidden response +func (o *ObjectsClassReferencesCreateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesCreateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + 
rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassReferencesCreateNotFoundCode is the HTTP code returned for type ObjectsClassReferencesCreateNotFound +const ObjectsClassReferencesCreateNotFoundCode int = 404 + +/* +ObjectsClassReferencesCreateNotFound Source object doesn't exist. + +swagger:response objectsClassReferencesCreateNotFound +*/ +type ObjectsClassReferencesCreateNotFound struct { +} + +// NewObjectsClassReferencesCreateNotFound creates ObjectsClassReferencesCreateNotFound with default headers values +func NewObjectsClassReferencesCreateNotFound() *ObjectsClassReferencesCreateNotFound { + + return &ObjectsClassReferencesCreateNotFound{} +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesCreateNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ObjectsClassReferencesCreateUnprocessableEntityCode is the HTTP code returned for type ObjectsClassReferencesCreateUnprocessableEntity +const ObjectsClassReferencesCreateUnprocessableEntityCode int = 422 + +/* +ObjectsClassReferencesCreateUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class? 
+ +swagger:response objectsClassReferencesCreateUnprocessableEntity +*/ +type ObjectsClassReferencesCreateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassReferencesCreateUnprocessableEntity creates ObjectsClassReferencesCreateUnprocessableEntity with default headers values +func NewObjectsClassReferencesCreateUnprocessableEntity() *ObjectsClassReferencesCreateUnprocessableEntity { + + return &ObjectsClassReferencesCreateUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects class references create unprocessable entity response +func (o *ObjectsClassReferencesCreateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsClassReferencesCreateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class references create unprocessable entity response +func (o *ObjectsClassReferencesCreateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesCreateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassReferencesCreateInternalServerErrorCode is the HTTP code returned for type ObjectsClassReferencesCreateInternalServerError +const ObjectsClassReferencesCreateInternalServerErrorCode int = 500 + +/* +ObjectsClassReferencesCreateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response objectsClassReferencesCreateInternalServerError +*/ +type ObjectsClassReferencesCreateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassReferencesCreateInternalServerError creates ObjectsClassReferencesCreateInternalServerError with default headers values +func NewObjectsClassReferencesCreateInternalServerError() *ObjectsClassReferencesCreateInternalServerError { + + return &ObjectsClassReferencesCreateInternalServerError{} +} + +// WithPayload adds the payload to the objects class references create internal server error response +func (o *ObjectsClassReferencesCreateInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsClassReferencesCreateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class references create internal server error response +func (o *ObjectsClassReferencesCreateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesCreateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_create_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_create_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..4bb01f3049287fff05df77512fb67a6abe111da7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_create_urlbuilder.go @@ -0,0 +1,151 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ 
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsClassReferencesCreateURL generates an URL for the objects class references create operation +type ObjectsClassReferencesCreateURL struct { + ClassName string + ID strfmt.UUID + PropertyName string + + ConsistencyLevel *string + Tenant *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsClassReferencesCreateURL) WithBasePath(bp string) *ObjectsClassReferencesCreateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsClassReferencesCreateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsClassReferencesCreateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{className}/{id}/references/{propertyName}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on ObjectsClassReferencesCreateURL") + } + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsClassReferencesCreateURL") + } + + propertyName := o.PropertyName + if propertyName != "" { + _path = strings.Replace(_path, "{propertyName}", propertyName, -1) + } else { + return nil, errors.New("propertyName is required on ObjectsClassReferencesCreateURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + var tenantQ string + if o.Tenant != nil { + tenantQ = *o.Tenant + } + if tenantQ != "" { + qs.Set("tenant", tenantQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsClassReferencesCreateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsClassReferencesCreateURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with 
scheme, host, path and query string +func (o *ObjectsClassReferencesCreateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsClassReferencesCreateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsClassReferencesCreateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsClassReferencesCreateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_delete.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..a99f1a56e9574cce6bff0cb298e7daaf2d70afef --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_delete.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassReferencesDeleteHandlerFunc turns a function with the right signature into a objects class references delete handler +type ObjectsClassReferencesDeleteHandlerFunc func(ObjectsClassReferencesDeleteParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsClassReferencesDeleteHandlerFunc) Handle(params ObjectsClassReferencesDeleteParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsClassReferencesDeleteHandler interface for that can handle valid objects class references delete params +type ObjectsClassReferencesDeleteHandler interface { + Handle(ObjectsClassReferencesDeleteParams, *models.Principal) middleware.Responder +} + +// NewObjectsClassReferencesDelete creates a new http.Handler for the objects class references delete operation +func NewObjectsClassReferencesDelete(ctx *middleware.Context, handler ObjectsClassReferencesDeleteHandler) *ObjectsClassReferencesDelete { + return &ObjectsClassReferencesDelete{Context: ctx, Handler: handler} +} + +/* + ObjectsClassReferencesDelete swagger:route DELETE /objects/{className}/{id}/references/{propertyName} objects objectsClassReferencesDelete + +Delete a single reference from the list of references. + +Delete the single reference that is given in the body from the list of references that this property has. 
+*/ +type ObjectsClassReferencesDelete struct { + Context *middleware.Context + Handler ObjectsClassReferencesDeleteHandler +} + +func (o *ObjectsClassReferencesDelete) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsClassReferencesDeleteParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_delete_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..5bd632d628cbde5a23064208d852cd53480ce759 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_delete_parameters.go @@ -0,0 +1,243 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsClassReferencesDeleteParams creates a new ObjectsClassReferencesDeleteParams object +// +// There are no default values defined in the spec. +func NewObjectsClassReferencesDeleteParams() ObjectsClassReferencesDeleteParams { + + return ObjectsClassReferencesDeleteParams{} +} + +// ObjectsClassReferencesDeleteParams contains all the bound params for the objects class references delete operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.class.references.delete +type ObjectsClassReferencesDeleteParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.SingleRef + /*The class name as defined in the schema + Required: true + In: path + */ + ClassName string + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string + /*Unique ID of the Object. + Required: true + In: path + */ + ID strfmt.UUID + /*Unique name of the property related to the Object. + Required: true + In: path + */ + PropertyName string + /*Specifies the tenant in a request targeting a multi-tenant class + In: query + */ + Tenant *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsClassReferencesDeleteParams() beforehand. 
+func (o *ObjectsClassReferencesDeleteParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.SingleRef + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + rPropertyName, rhkPropertyName, _ := route.Params.GetOK("propertyName") + if err := o.bindPropertyName(rPropertyName, rhkPropertyName, route.Formats); err != nil { + res = append(res, err) + } + + qTenant, qhkTenant, _ := qs.GetOK("tenant") + if err := o.bindTenant(qTenant, qhkTenant, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. 
+func (o *ObjectsClassReferencesDeleteParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. +func (o *ObjectsClassReferencesDeleteParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} + +// bindID binds and validates parameter ID from path. +func (o *ObjectsClassReferencesDeleteParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsClassReferencesDeleteParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} + +// bindPropertyName binds and validates parameter PropertyName from path. 
+func (o *ObjectsClassReferencesDeleteParams) bindPropertyName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.PropertyName = raw + + return nil +} + +// bindTenant binds and validates parameter Tenant from query. +func (o *ObjectsClassReferencesDeleteParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Tenant = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_delete_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..f5c1825de3622e2c54fa15ec1cb392f2a6f08dd5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_delete_responses.go @@ -0,0 +1,300 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassReferencesDeleteNoContentCode is the HTTP code returned for type ObjectsClassReferencesDeleteNoContent +const ObjectsClassReferencesDeleteNoContentCode int = 204 + +/* +ObjectsClassReferencesDeleteNoContent Successfully deleted. + +swagger:response objectsClassReferencesDeleteNoContent +*/ +type ObjectsClassReferencesDeleteNoContent struct { +} + +// NewObjectsClassReferencesDeleteNoContent creates ObjectsClassReferencesDeleteNoContent with default headers values +func NewObjectsClassReferencesDeleteNoContent() *ObjectsClassReferencesDeleteNoContent { + + return &ObjectsClassReferencesDeleteNoContent{} +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesDeleteNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// ObjectsClassReferencesDeleteBadRequestCode is the HTTP code returned for type ObjectsClassReferencesDeleteBadRequest +const ObjectsClassReferencesDeleteBadRequestCode int = 400 + +/* +ObjectsClassReferencesDeleteBadRequest Malformed request. 
+ +swagger:response objectsClassReferencesDeleteBadRequest +*/ +type ObjectsClassReferencesDeleteBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassReferencesDeleteBadRequest creates ObjectsClassReferencesDeleteBadRequest with default headers values +func NewObjectsClassReferencesDeleteBadRequest() *ObjectsClassReferencesDeleteBadRequest { + + return &ObjectsClassReferencesDeleteBadRequest{} +} + +// WithPayload adds the payload to the objects class references delete bad request response +func (o *ObjectsClassReferencesDeleteBadRequest) WithPayload(payload *models.ErrorResponse) *ObjectsClassReferencesDeleteBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class references delete bad request response +func (o *ObjectsClassReferencesDeleteBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesDeleteBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassReferencesDeleteUnauthorizedCode is the HTTP code returned for type ObjectsClassReferencesDeleteUnauthorized +const ObjectsClassReferencesDeleteUnauthorizedCode int = 401 + +/* +ObjectsClassReferencesDeleteUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsClassReferencesDeleteUnauthorized +*/ +type ObjectsClassReferencesDeleteUnauthorized struct { +} + +// NewObjectsClassReferencesDeleteUnauthorized creates ObjectsClassReferencesDeleteUnauthorized with default headers values +func NewObjectsClassReferencesDeleteUnauthorized() *ObjectsClassReferencesDeleteUnauthorized { + + return &ObjectsClassReferencesDeleteUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesDeleteUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsClassReferencesDeleteForbiddenCode is the HTTP code returned for type ObjectsClassReferencesDeleteForbidden +const ObjectsClassReferencesDeleteForbiddenCode int = 403 + +/* +ObjectsClassReferencesDeleteForbidden Forbidden + +swagger:response objectsClassReferencesDeleteForbidden +*/ +type ObjectsClassReferencesDeleteForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassReferencesDeleteForbidden creates ObjectsClassReferencesDeleteForbidden with default headers values +func NewObjectsClassReferencesDeleteForbidden() *ObjectsClassReferencesDeleteForbidden { + + return &ObjectsClassReferencesDeleteForbidden{} +} + +// WithPayload adds the payload to the objects class references delete forbidden response +func (o *ObjectsClassReferencesDeleteForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsClassReferencesDeleteForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class references delete forbidden response +func (o *ObjectsClassReferencesDeleteForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesDeleteForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + 
rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassReferencesDeleteNotFoundCode is the HTTP code returned for type ObjectsClassReferencesDeleteNotFound +const ObjectsClassReferencesDeleteNotFoundCode int = 404 + +/* +ObjectsClassReferencesDeleteNotFound Successful query result but no resource was found. + +swagger:response objectsClassReferencesDeleteNotFound +*/ +type ObjectsClassReferencesDeleteNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassReferencesDeleteNotFound creates ObjectsClassReferencesDeleteNotFound with default headers values +func NewObjectsClassReferencesDeleteNotFound() *ObjectsClassReferencesDeleteNotFound { + + return &ObjectsClassReferencesDeleteNotFound{} +} + +// WithPayload adds the payload to the objects class references delete not found response +func (o *ObjectsClassReferencesDeleteNotFound) WithPayload(payload *models.ErrorResponse) *ObjectsClassReferencesDeleteNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class references delete not found response +func (o *ObjectsClassReferencesDeleteNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesDeleteNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassReferencesDeleteUnprocessableEntityCode is the HTTP code returned for type ObjectsClassReferencesDeleteUnprocessableEntity +const ObjectsClassReferencesDeleteUnprocessableEntityCode int = 422 + +/* 
+ObjectsClassReferencesDeleteUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class? + +swagger:response objectsClassReferencesDeleteUnprocessableEntity +*/ +type ObjectsClassReferencesDeleteUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassReferencesDeleteUnprocessableEntity creates ObjectsClassReferencesDeleteUnprocessableEntity with default headers values +func NewObjectsClassReferencesDeleteUnprocessableEntity() *ObjectsClassReferencesDeleteUnprocessableEntity { + + return &ObjectsClassReferencesDeleteUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects class references delete unprocessable entity response +func (o *ObjectsClassReferencesDeleteUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsClassReferencesDeleteUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class references delete unprocessable entity response +func (o *ObjectsClassReferencesDeleteUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesDeleteUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassReferencesDeleteInternalServerErrorCode is the HTTP code returned for type ObjectsClassReferencesDeleteInternalServerError +const ObjectsClassReferencesDeleteInternalServerErrorCode int = 500 + +/* +ObjectsClassReferencesDeleteInternalServerError An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error. + +swagger:response objectsClassReferencesDeleteInternalServerError +*/ +type ObjectsClassReferencesDeleteInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassReferencesDeleteInternalServerError creates ObjectsClassReferencesDeleteInternalServerError with default headers values +func NewObjectsClassReferencesDeleteInternalServerError() *ObjectsClassReferencesDeleteInternalServerError { + + return &ObjectsClassReferencesDeleteInternalServerError{} +} + +// WithPayload adds the payload to the objects class references delete internal server error response +func (o *ObjectsClassReferencesDeleteInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsClassReferencesDeleteInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class references delete internal server error response +func (o *ObjectsClassReferencesDeleteInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesDeleteInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_delete_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_delete_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..922e992a96dc924eb27e0c00e69ff61c37b4b406 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_delete_urlbuilder.go @@ -0,0 +1,151 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsClassReferencesDeleteURL generates an URL for the objects class references delete operation +type ObjectsClassReferencesDeleteURL struct { + ClassName string + ID strfmt.UUID + PropertyName string + + ConsistencyLevel *string + Tenant *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsClassReferencesDeleteURL) WithBasePath(bp string) *ObjectsClassReferencesDeleteURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsClassReferencesDeleteURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsClassReferencesDeleteURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{className}/{id}/references/{propertyName}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on ObjectsClassReferencesDeleteURL") + } + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsClassReferencesDeleteURL") + } + + propertyName := o.PropertyName + if propertyName != "" { + _path = strings.Replace(_path, "{propertyName}", propertyName, -1) + } else { + return nil, errors.New("propertyName is required on ObjectsClassReferencesDeleteURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + var tenantQ string + if o.Tenant != nil { + tenantQ = *o.Tenant + } + if tenantQ != "" { + qs.Set("tenant", tenantQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsClassReferencesDeleteURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsClassReferencesDeleteURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with 
scheme, host, path and query string +func (o *ObjectsClassReferencesDeleteURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsClassReferencesDeleteURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsClassReferencesDeleteURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsClassReferencesDeleteURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_put.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_put.go new file mode 100644 index 0000000000000000000000000000000000000000..0990ff69c916aad05ee666987bcf39edb367832e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_put.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassReferencesPutHandlerFunc turns a function with the right signature into a objects class references put handler +type ObjectsClassReferencesPutHandlerFunc func(ObjectsClassReferencesPutParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsClassReferencesPutHandlerFunc) Handle(params ObjectsClassReferencesPutParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsClassReferencesPutHandler interface for that can handle valid objects class references put params +type ObjectsClassReferencesPutHandler interface { + Handle(ObjectsClassReferencesPutParams, *models.Principal) middleware.Responder +} + +// NewObjectsClassReferencesPut creates a new http.Handler for the objects class references put operation +func NewObjectsClassReferencesPut(ctx *middleware.Context, handler ObjectsClassReferencesPutHandler) *ObjectsClassReferencesPut { + return &ObjectsClassReferencesPut{Context: ctx, Handler: handler} +} + +/* + ObjectsClassReferencesPut swagger:route PUT /objects/{className}/{id}/references/{propertyName} objects objectsClassReferencesPut + +Replace all references to a class-property. + +Replace **all** references in cross-reference property of an object. 
+*/ +type ObjectsClassReferencesPut struct { + Context *middleware.Context + Handler ObjectsClassReferencesPutHandler +} + +func (o *ObjectsClassReferencesPut) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsClassReferencesPutParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_put_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_put_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..493205e6694cba297e8abf54ae4e492344fe5977 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_put_parameters.go @@ -0,0 +1,243 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsClassReferencesPutParams creates a new ObjectsClassReferencesPutParams object +// +// There are no default values defined in the spec. +func NewObjectsClassReferencesPutParams() ObjectsClassReferencesPutParams { + + return ObjectsClassReferencesPutParams{} +} + +// ObjectsClassReferencesPutParams contains all the bound params for the objects class references put operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.class.references.put +type ObjectsClassReferencesPutParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body models.MultipleRef + /*The class name as defined in the schema + Required: true + In: path + */ + ClassName string + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string + /*Unique ID of the Object. + Required: true + In: path + */ + ID strfmt.UUID + /*Unique name of the property related to the Object. + Required: true + In: path + */ + PropertyName string + /*Specifies the tenant in a request targeting a multi-tenant class + In: query + */ + Tenant *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsClassReferencesPutParams() beforehand. 
+func (o *ObjectsClassReferencesPutParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.MultipleRef + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + rPropertyName, rhkPropertyName, _ := route.Params.GetOK("propertyName") + if err := o.bindPropertyName(rPropertyName, rhkPropertyName, route.Formats); err != nil { + res = append(res, err) + } + + qTenant, qhkTenant, _ := qs.GetOK("tenant") + if err := o.bindTenant(qTenant, qhkTenant, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. 
+func (o *ObjectsClassReferencesPutParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. +func (o *ObjectsClassReferencesPutParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} + +// bindID binds and validates parameter ID from path. +func (o *ObjectsClassReferencesPutParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsClassReferencesPutParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} + +// bindPropertyName binds and validates parameter PropertyName from path. 
+func (o *ObjectsClassReferencesPutParams) bindPropertyName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.PropertyName = raw + + return nil +} + +// bindTenant binds and validates parameter Tenant from query. +func (o *ObjectsClassReferencesPutParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Tenant = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_put_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_put_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..6bc006855e9b71cfac6112fd3e969c2f3615afde --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_put_responses.go @@ -0,0 +1,280 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsClassReferencesPutOKCode is the HTTP code returned for type ObjectsClassReferencesPutOK +const ObjectsClassReferencesPutOKCode int = 200 + +/* +ObjectsClassReferencesPutOK Successfully replaced all the references. + +swagger:response objectsClassReferencesPutOK +*/ +type ObjectsClassReferencesPutOK struct { +} + +// NewObjectsClassReferencesPutOK creates ObjectsClassReferencesPutOK with default headers values +func NewObjectsClassReferencesPutOK() *ObjectsClassReferencesPutOK { + + return &ObjectsClassReferencesPutOK{} +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesPutOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// ObjectsClassReferencesPutBadRequestCode is the HTTP code returned for type ObjectsClassReferencesPutBadRequest +const ObjectsClassReferencesPutBadRequestCode int = 400 + +/* +ObjectsClassReferencesPutBadRequest Malformed request. 
+ +swagger:response objectsClassReferencesPutBadRequest +*/ +type ObjectsClassReferencesPutBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassReferencesPutBadRequest creates ObjectsClassReferencesPutBadRequest with default headers values +func NewObjectsClassReferencesPutBadRequest() *ObjectsClassReferencesPutBadRequest { + + return &ObjectsClassReferencesPutBadRequest{} +} + +// WithPayload adds the payload to the objects class references put bad request response +func (o *ObjectsClassReferencesPutBadRequest) WithPayload(payload *models.ErrorResponse) *ObjectsClassReferencesPutBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class references put bad request response +func (o *ObjectsClassReferencesPutBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesPutBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassReferencesPutUnauthorizedCode is the HTTP code returned for type ObjectsClassReferencesPutUnauthorized +const ObjectsClassReferencesPutUnauthorizedCode int = 401 + +/* +ObjectsClassReferencesPutUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsClassReferencesPutUnauthorized +*/ +type ObjectsClassReferencesPutUnauthorized struct { +} + +// NewObjectsClassReferencesPutUnauthorized creates ObjectsClassReferencesPutUnauthorized with default headers values +func NewObjectsClassReferencesPutUnauthorized() *ObjectsClassReferencesPutUnauthorized { + + return &ObjectsClassReferencesPutUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesPutUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsClassReferencesPutForbiddenCode is the HTTP code returned for type ObjectsClassReferencesPutForbidden +const ObjectsClassReferencesPutForbiddenCode int = 403 + +/* +ObjectsClassReferencesPutForbidden Forbidden + +swagger:response objectsClassReferencesPutForbidden +*/ +type ObjectsClassReferencesPutForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassReferencesPutForbidden creates ObjectsClassReferencesPutForbidden with default headers values +func NewObjectsClassReferencesPutForbidden() *ObjectsClassReferencesPutForbidden { + + return &ObjectsClassReferencesPutForbidden{} +} + +// WithPayload adds the payload to the objects class references put forbidden response +func (o *ObjectsClassReferencesPutForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsClassReferencesPutForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class references put forbidden response +func (o *ObjectsClassReferencesPutForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesPutForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err 
:= producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassReferencesPutNotFoundCode is the HTTP code returned for type ObjectsClassReferencesPutNotFound +const ObjectsClassReferencesPutNotFoundCode int = 404 + +/* +ObjectsClassReferencesPutNotFound Source object doesn't exist. + +swagger:response objectsClassReferencesPutNotFound +*/ +type ObjectsClassReferencesPutNotFound struct { +} + +// NewObjectsClassReferencesPutNotFound creates ObjectsClassReferencesPutNotFound with default headers values +func NewObjectsClassReferencesPutNotFound() *ObjectsClassReferencesPutNotFound { + + return &ObjectsClassReferencesPutNotFound{} +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesPutNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ObjectsClassReferencesPutUnprocessableEntityCode is the HTTP code returned for type ObjectsClassReferencesPutUnprocessableEntity +const ObjectsClassReferencesPutUnprocessableEntityCode int = 422 + +/* +ObjectsClassReferencesPutUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class? 
+ +swagger:response objectsClassReferencesPutUnprocessableEntity +*/ +type ObjectsClassReferencesPutUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassReferencesPutUnprocessableEntity creates ObjectsClassReferencesPutUnprocessableEntity with default headers values +func NewObjectsClassReferencesPutUnprocessableEntity() *ObjectsClassReferencesPutUnprocessableEntity { + + return &ObjectsClassReferencesPutUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects class references put unprocessable entity response +func (o *ObjectsClassReferencesPutUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsClassReferencesPutUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class references put unprocessable entity response +func (o *ObjectsClassReferencesPutUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesPutUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsClassReferencesPutInternalServerErrorCode is the HTTP code returned for type ObjectsClassReferencesPutInternalServerError +const ObjectsClassReferencesPutInternalServerErrorCode int = 500 + +/* +ObjectsClassReferencesPutInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response objectsClassReferencesPutInternalServerError +*/ +type ObjectsClassReferencesPutInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsClassReferencesPutInternalServerError creates ObjectsClassReferencesPutInternalServerError with default headers values +func NewObjectsClassReferencesPutInternalServerError() *ObjectsClassReferencesPutInternalServerError { + + return &ObjectsClassReferencesPutInternalServerError{} +} + +// WithPayload adds the payload to the objects class references put internal server error response +func (o *ObjectsClassReferencesPutInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsClassReferencesPutInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects class references put internal server error response +func (o *ObjectsClassReferencesPutInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsClassReferencesPutInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_put_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_put_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..55b4dfb937b236b41ffa21882b27cd357f66001e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_class_references_put_urlbuilder.go @@ -0,0 +1,151 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ 
+// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsClassReferencesPutURL generates an URL for the objects class references put operation +type ObjectsClassReferencesPutURL struct { + ClassName string + ID strfmt.UUID + PropertyName string + + ConsistencyLevel *string + Tenant *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsClassReferencesPutURL) WithBasePath(bp string) *ObjectsClassReferencesPutURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsClassReferencesPutURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsClassReferencesPutURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{className}/{id}/references/{propertyName}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on ObjectsClassReferencesPutURL") + } + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsClassReferencesPutURL") + } + + propertyName := o.PropertyName + if propertyName != "" { + _path = strings.Replace(_path, "{propertyName}", propertyName, -1) + } else { + return nil, errors.New("propertyName is required on ObjectsClassReferencesPutURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + var tenantQ string + if o.Tenant != nil { + tenantQ = *o.Tenant + } + if tenantQ != "" { + qs.Set("tenant", tenantQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsClassReferencesPutURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsClassReferencesPutURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and 
query string +func (o *ObjectsClassReferencesPutURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsClassReferencesPutURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsClassReferencesPutURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsClassReferencesPutURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_create.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_create.go new file mode 100644 index 0000000000000000000000000000000000000000..23a26d4212dcb52329fcc093e160178dd99496f6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_create.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsCreateHandlerFunc turns a function with the right signature into a objects create handler +type ObjectsCreateHandlerFunc func(ObjectsCreateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsCreateHandlerFunc) Handle(params ObjectsCreateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsCreateHandler interface for that can handle valid objects create params +type ObjectsCreateHandler interface { + Handle(ObjectsCreateParams, *models.Principal) middleware.Responder +} + +// NewObjectsCreate creates a new http.Handler for the objects create operation +func NewObjectsCreate(ctx *middleware.Context, handler ObjectsCreateHandler) *ObjectsCreate { + return &ObjectsCreate{Context: ctx, Handler: handler} +} + +/* + ObjectsCreate swagger:route POST /objects objects objectsCreate + +Create a new object. + +Create a new object.

    Meta-data and schema values are validated.

    **Note: Use `/batch` for importing many objects**:
    If you plan on importing a large number of objects, it's much more efficient to use the `/batch` endpoint. Otherwise, sending multiple single requests sequentially would incur a large performance penalty.

    **Note: idempotence of `/objects`**:
    POST /objects will fail if an id is provided which already exists in the class. To update an existing object with the objects endpoint, use the PUT or PATCH method. +*/ +type ObjectsCreate struct { + Context *middleware.Context + Handler ObjectsCreateHandler +} + +func (o *ObjectsCreate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsCreateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_create_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..1c3fae6d5aa0ce80ceff4f00602b869f1ccca497 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_create_parameters.go @@ -0,0 +1,125 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsCreateParams creates a new ObjectsCreateParams object +// +// There are no default values defined in the spec. +func NewObjectsCreateParams() ObjectsCreateParams { + + return ObjectsCreateParams{} +} + +// ObjectsCreateParams contains all the bound params for the objects create operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.create +type ObjectsCreateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.Object + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsCreateParams() beforehand. 
+func (o *ObjectsCreateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.Object + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. 
+func (o *ObjectsCreateParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_create_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..7781465ca8c411f9d9314a21c00002d66c54f9c1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_create_responses.go @@ -0,0 +1,275 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsCreateOKCode is the HTTP code returned for type ObjectsCreateOK +const ObjectsCreateOKCode int = 200 + +/* +ObjectsCreateOK Object created. 
+ +swagger:response objectsCreateOK +*/ +type ObjectsCreateOK struct { + + /* + In: Body + */ + Payload *models.Object `json:"body,omitempty"` +} + +// NewObjectsCreateOK creates ObjectsCreateOK with default headers values +func NewObjectsCreateOK() *ObjectsCreateOK { + + return &ObjectsCreateOK{} +} + +// WithPayload adds the payload to the objects create o k response +func (o *ObjectsCreateOK) WithPayload(payload *models.Object) *ObjectsCreateOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects create o k response +func (o *ObjectsCreateOK) SetPayload(payload *models.Object) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsCreateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsCreateBadRequestCode is the HTTP code returned for type ObjectsCreateBadRequest +const ObjectsCreateBadRequestCode int = 400 + +/* +ObjectsCreateBadRequest Malformed request. 
+ +swagger:response objectsCreateBadRequest +*/ +type ObjectsCreateBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsCreateBadRequest creates ObjectsCreateBadRequest with default headers values +func NewObjectsCreateBadRequest() *ObjectsCreateBadRequest { + + return &ObjectsCreateBadRequest{} +} + +// WithPayload adds the payload to the objects create bad request response +func (o *ObjectsCreateBadRequest) WithPayload(payload *models.ErrorResponse) *ObjectsCreateBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects create bad request response +func (o *ObjectsCreateBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsCreateBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsCreateUnauthorizedCode is the HTTP code returned for type ObjectsCreateUnauthorized +const ObjectsCreateUnauthorizedCode int = 401 + +/* +ObjectsCreateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsCreateUnauthorized +*/ +type ObjectsCreateUnauthorized struct { +} + +// NewObjectsCreateUnauthorized creates ObjectsCreateUnauthorized with default headers values +func NewObjectsCreateUnauthorized() *ObjectsCreateUnauthorized { + + return &ObjectsCreateUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsCreateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsCreateForbiddenCode is the HTTP code returned for type ObjectsCreateForbidden +const ObjectsCreateForbiddenCode int = 403 + +/* +ObjectsCreateForbidden Forbidden + +swagger:response objectsCreateForbidden +*/ +type ObjectsCreateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsCreateForbidden creates ObjectsCreateForbidden with default headers values +func NewObjectsCreateForbidden() *ObjectsCreateForbidden { + + return &ObjectsCreateForbidden{} +} + +// WithPayload adds the payload to the objects create forbidden response +func (o *ObjectsCreateForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsCreateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects create forbidden response +func (o *ObjectsCreateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsCreateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsCreateUnprocessableEntityCode is the HTTP code returned for type ObjectsCreateUnprocessableEntity +const ObjectsCreateUnprocessableEntityCode int = 422 + +/* 
+ObjectsCreateUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? + +swagger:response objectsCreateUnprocessableEntity +*/ +type ObjectsCreateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsCreateUnprocessableEntity creates ObjectsCreateUnprocessableEntity with default headers values +func NewObjectsCreateUnprocessableEntity() *ObjectsCreateUnprocessableEntity { + + return &ObjectsCreateUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects create unprocessable entity response +func (o *ObjectsCreateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsCreateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects create unprocessable entity response +func (o *ObjectsCreateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsCreateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsCreateInternalServerErrorCode is the HTTP code returned for type ObjectsCreateInternalServerError +const ObjectsCreateInternalServerErrorCode int = 500 + +/* +ObjectsCreateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response objectsCreateInternalServerError +*/ +type ObjectsCreateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsCreateInternalServerError creates ObjectsCreateInternalServerError with default headers values +func NewObjectsCreateInternalServerError() *ObjectsCreateInternalServerError { + + return &ObjectsCreateInternalServerError{} +} + +// WithPayload adds the payload to the objects create internal server error response +func (o *ObjectsCreateInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsCreateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects create internal server error response +func (o *ObjectsCreateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsCreateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_create_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_create_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..6a640d94270c23ac21c2745d6a8f452a159216f2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_create_urlbuilder.go @@ -0,0 +1,114 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// ObjectsCreateURL generates an URL for the objects create operation +type ObjectsCreateURL struct { + ConsistencyLevel *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsCreateURL) WithBasePath(bp string) *ObjectsCreateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsCreateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsCreateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsCreateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsCreateURL) 
String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsCreateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsCreateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsCreateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsCreateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_delete.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..c30c0c1aa0a483495b81a712ef985176f82e0f70 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_delete.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsDeleteHandlerFunc turns a function with the right signature into a objects delete handler +type ObjectsDeleteHandlerFunc func(ObjectsDeleteParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsDeleteHandlerFunc) Handle(params ObjectsDeleteParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsDeleteHandler interface for that can handle valid objects delete params +type ObjectsDeleteHandler interface { + Handle(ObjectsDeleteParams, *models.Principal) middleware.Responder +} + +// NewObjectsDelete creates a new http.Handler for the objects delete operation +func NewObjectsDelete(ctx *middleware.Context, handler ObjectsDeleteHandler) *ObjectsDelete { + return &ObjectsDelete{Context: ctx, Handler: handler} +} + +/* + ObjectsDelete swagger:route DELETE /objects/{id} objects objectsDelete + +Delete an Object based on its UUID. + +Deletes an object from the database based on its UUID.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead. +*/ +type ObjectsDelete struct { + Context *middleware.Context + Handler ObjectsDeleteHandler +} + +func (o *ObjectsDelete) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsDeleteParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_delete_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..adcefabcfde4ac5000ec38abe0cb339a83fd994a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_delete_parameters.go @@ -0,0 +1,159 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewObjectsDeleteParams creates a new ObjectsDeleteParams object +// +// There are no default values defined in the spec. +func NewObjectsDeleteParams() ObjectsDeleteParams { + + return ObjectsDeleteParams{} +} + +// ObjectsDeleteParams contains all the bound params for the objects delete operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.delete +type ObjectsDeleteParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string + /*Unique ID of the Object. + Required: true + In: path + */ + ID strfmt.UUID + /*Specifies the tenant in a request targeting a multi-tenant class + In: query + */ + Tenant *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsDeleteParams() beforehand. 
+func (o *ObjectsDeleteParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + qTenant, qhkTenant, _ := qs.GetOK("tenant") + if err := o.bindTenant(qTenant, qhkTenant, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. +func (o *ObjectsDeleteParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *ObjectsDeleteParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsDeleteParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} + +// bindTenant binds and validates parameter Tenant from query. +func (o *ObjectsDeleteParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Tenant = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_delete_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..af3a31797f0c2ba3b5025c5e8adcd6c55796e87d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_delete_responses.go @@ -0,0 +1,190 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsDeleteNoContentCode is the HTTP code returned for type ObjectsDeleteNoContent +const ObjectsDeleteNoContentCode int = 204 + +/* +ObjectsDeleteNoContent Successfully deleted. + +swagger:response objectsDeleteNoContent +*/ +type ObjectsDeleteNoContent struct { +} + +// NewObjectsDeleteNoContent creates ObjectsDeleteNoContent with default headers values +func NewObjectsDeleteNoContent() *ObjectsDeleteNoContent { + + return &ObjectsDeleteNoContent{} +} + +// WriteResponse to the client +func (o *ObjectsDeleteNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// ObjectsDeleteUnauthorizedCode is the HTTP code returned for type ObjectsDeleteUnauthorized +const ObjectsDeleteUnauthorizedCode int = 401 + +/* +ObjectsDeleteUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsDeleteUnauthorized +*/ +type ObjectsDeleteUnauthorized struct { +} + +// NewObjectsDeleteUnauthorized creates ObjectsDeleteUnauthorized with default headers values +func NewObjectsDeleteUnauthorized() *ObjectsDeleteUnauthorized { + + return &ObjectsDeleteUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsDeleteUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsDeleteForbiddenCode is the HTTP code returned for type ObjectsDeleteForbidden +const ObjectsDeleteForbiddenCode int = 403 + +/* +ObjectsDeleteForbidden Forbidden + +swagger:response objectsDeleteForbidden +*/ +type ObjectsDeleteForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsDeleteForbidden creates ObjectsDeleteForbidden with default headers values +func NewObjectsDeleteForbidden() *ObjectsDeleteForbidden { + + return &ObjectsDeleteForbidden{} +} + +// WithPayload adds the payload to the objects delete forbidden response +func (o *ObjectsDeleteForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsDeleteForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects delete forbidden response +func (o *ObjectsDeleteForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsDeleteForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsDeleteNotFoundCode is the HTTP code returned for type ObjectsDeleteNotFound +const ObjectsDeleteNotFoundCode int = 404 + +/* +ObjectsDeleteNotFound Successful query result but 
no resource was found. + +swagger:response objectsDeleteNotFound +*/ +type ObjectsDeleteNotFound struct { +} + +// NewObjectsDeleteNotFound creates ObjectsDeleteNotFound with default headers values +func NewObjectsDeleteNotFound() *ObjectsDeleteNotFound { + + return &ObjectsDeleteNotFound{} +} + +// WriteResponse to the client +func (o *ObjectsDeleteNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ObjectsDeleteInternalServerErrorCode is the HTTP code returned for type ObjectsDeleteInternalServerError +const ObjectsDeleteInternalServerErrorCode int = 500 + +/* +ObjectsDeleteInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response objectsDeleteInternalServerError +*/ +type ObjectsDeleteInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsDeleteInternalServerError creates ObjectsDeleteInternalServerError with default headers values +func NewObjectsDeleteInternalServerError() *ObjectsDeleteInternalServerError { + + return &ObjectsDeleteInternalServerError{} +} + +// WithPayload adds the payload to the objects delete internal server error response +func (o *ObjectsDeleteInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsDeleteInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects delete internal server error response +func (o *ObjectsDeleteInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsDeleteInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := 
producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_delete_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_delete_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..786cfb5400044d9c9288f50008175f85d4312bb0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_delete_urlbuilder.go @@ -0,0 +1,135 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsDeleteURL generates an URL for the objects delete operation +type ObjectsDeleteURL struct { + ID strfmt.UUID + + ConsistencyLevel *string + Tenant *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsDeleteURL) WithBasePath(bp string) *ObjectsDeleteURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsDeleteURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsDeleteURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{id}" + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsDeleteURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + var tenantQ string + if o.Tenant != nil { + tenantQ = *o.Tenant + } + if tenantQ != "" { + qs.Set("tenant", tenantQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsDeleteURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsDeleteURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsDeleteURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsDeleteURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsDeleteURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsDeleteURL) 
StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_get.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_get.go new file mode 100644 index 0000000000000000000000000000000000000000..a18c85f4ab08da76842fdf041aa4272095f1c96e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_get.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsGetHandlerFunc turns a function with the right signature into a objects get handler +type ObjectsGetHandlerFunc func(ObjectsGetParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsGetHandlerFunc) Handle(params ObjectsGetParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsGetHandler interface for that can handle valid objects get params +type ObjectsGetHandler interface { + Handle(ObjectsGetParams, *models.Principal) middleware.Responder +} + +// NewObjectsGet creates a new http.Handler for the objects get operation +func NewObjectsGet(ctx *middleware.Context, handler ObjectsGetHandler) *ObjectsGet { + return &ObjectsGet{Context: ctx, Handler: handler} +} + +/* + ObjectsGet swagger:route GET 
/objects/{id} objects objectsGet + +Get a specific Object based on its UUID. + +Get a specific object based on its UUID. Also available as Websocket bus.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead. +*/ +type ObjectsGet struct { + Context *middleware.Context + Handler ObjectsGetHandler +} + +func (o *ObjectsGet) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsGetParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_get_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..2fd05992ae3257f2a3c06728f4f1e60af2d9d56c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_get_parameters.go @@ -0,0 +1,132 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewObjectsGetParams creates a new ObjectsGetParams object +// +// There are no default values defined in the spec. +func NewObjectsGetParams() ObjectsGetParams { + + return ObjectsGetParams{} +} + +// ObjectsGetParams contains all the bound params for the objects get operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.get +type ObjectsGetParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*Unique ID of the Object. + Required: true + In: path + */ + ID strfmt.UUID + /*Include additional information, such as classification infos. Allowed values include: classification, vector, interpretation + In: query + */ + Include *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsGetParams() beforehand. +func (o *ObjectsGetParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + qInclude, qhkInclude, _ := qs.GetOK("include") + if err := o.bindInclude(qInclude, qhkInclude, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *ObjectsGetParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsGetParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} + +// bindInclude binds and validates parameter Include from query. +func (o *ObjectsGetParams) bindInclude(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Include = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_get_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..6e69516b589cf3a7096e39934bd26d844efd2a04 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_get_responses.go @@ -0,0 +1,255 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsGetOKCode is the HTTP code returned for type ObjectsGetOK +const ObjectsGetOKCode int = 200 + +/* +ObjectsGetOK Successful response. + +swagger:response objectsGetOK +*/ +type ObjectsGetOK struct { + + /* + In: Body + */ + Payload *models.Object `json:"body,omitempty"` +} + +// NewObjectsGetOK creates ObjectsGetOK with default headers values +func NewObjectsGetOK() *ObjectsGetOK { + + return &ObjectsGetOK{} +} + +// WithPayload adds the payload to the objects get o k response +func (o *ObjectsGetOK) WithPayload(payload *models.Object) *ObjectsGetOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects get o k response +func (o *ObjectsGetOK) SetPayload(payload *models.Object) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsGetBadRequestCode is the HTTP code returned for type ObjectsGetBadRequest +const ObjectsGetBadRequestCode int = 400 + +/* +ObjectsGetBadRequest Malformed request. 
+ +swagger:response objectsGetBadRequest +*/ +type ObjectsGetBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsGetBadRequest creates ObjectsGetBadRequest with default headers values +func NewObjectsGetBadRequest() *ObjectsGetBadRequest { + + return &ObjectsGetBadRequest{} +} + +// WithPayload adds the payload to the objects get bad request response +func (o *ObjectsGetBadRequest) WithPayload(payload *models.ErrorResponse) *ObjectsGetBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects get bad request response +func (o *ObjectsGetBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsGetBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsGetUnauthorizedCode is the HTTP code returned for type ObjectsGetUnauthorized +const ObjectsGetUnauthorizedCode int = 401 + +/* +ObjectsGetUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsGetUnauthorized +*/ +type ObjectsGetUnauthorized struct { +} + +// NewObjectsGetUnauthorized creates ObjectsGetUnauthorized with default headers values +func NewObjectsGetUnauthorized() *ObjectsGetUnauthorized { + + return &ObjectsGetUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsGetUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsGetForbiddenCode is the HTTP code returned for type ObjectsGetForbidden +const ObjectsGetForbiddenCode int = 403 + +/* +ObjectsGetForbidden Forbidden + +swagger:response objectsGetForbidden +*/ +type ObjectsGetForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsGetForbidden creates ObjectsGetForbidden with default headers values +func NewObjectsGetForbidden() *ObjectsGetForbidden { + + return &ObjectsGetForbidden{} +} + +// WithPayload adds the payload to the objects get forbidden response +func (o *ObjectsGetForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsGetForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects get forbidden response +func (o *ObjectsGetForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsGetForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsGetNotFoundCode is the HTTP code returned for type ObjectsGetNotFound +const ObjectsGetNotFoundCode int = 404 + +/* +ObjectsGetNotFound Successful query result but no resource was found. 
+ +swagger:response objectsGetNotFound +*/ +type ObjectsGetNotFound struct { +} + +// NewObjectsGetNotFound creates ObjectsGetNotFound with default headers values +func NewObjectsGetNotFound() *ObjectsGetNotFound { + + return &ObjectsGetNotFound{} +} + +// WriteResponse to the client +func (o *ObjectsGetNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ObjectsGetInternalServerErrorCode is the HTTP code returned for type ObjectsGetInternalServerError +const ObjectsGetInternalServerErrorCode int = 500 + +/* +ObjectsGetInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response objectsGetInternalServerError +*/ +type ObjectsGetInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsGetInternalServerError creates ObjectsGetInternalServerError with default headers values +func NewObjectsGetInternalServerError() *ObjectsGetInternalServerError { + + return &ObjectsGetInternalServerError{} +} + +// WithPayload adds the payload to the objects get internal server error response +func (o *ObjectsGetInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsGetInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects get internal server error response +func (o *ObjectsGetInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsGetInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + 
} +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_get_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_get_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..d3e0383ee2dbddc682604bb15795d86752d2c440 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_get_urlbuilder.go @@ -0,0 +1,126 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsGetURL generates an URL for the objects get operation +type ObjectsGetURL struct { + ID strfmt.UUID + + Include *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsGetURL) WithBasePath(bp string) *ObjectsGetURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsGetURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsGetURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{id}" + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsGetURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var includeQ string + if o.Include != nil { + includeQ = *o.Include + } + if includeQ != "" { + qs.Set("include", includeQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsGetURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsGetURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsGetURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsGetURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsGetURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsGetURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_head.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_head.go new file mode 100644 index 0000000000000000000000000000000000000000..49e94155dd67556923ffcd881216f27516499787 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_head.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsHeadHandlerFunc turns a function with the right signature into a objects head handler +type ObjectsHeadHandlerFunc func(ObjectsHeadParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsHeadHandlerFunc) Handle(params ObjectsHeadParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsHeadHandler interface for that can handle valid objects head params +type ObjectsHeadHandler interface { + Handle(ObjectsHeadParams, *models.Principal) middleware.Responder +} + +// NewObjectsHead creates a new http.Handler for the objects head operation +func NewObjectsHead(ctx *middleware.Context, handler ObjectsHeadHandler) *ObjectsHead { + return &ObjectsHead{Context: ctx, Handler: handler} +} + +/* + ObjectsHead swagger:route HEAD /objects/{id} objects objectsHead + +Checks Object's existence based on its UUID. + +Checks if an object exists in the system based on its UUID.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead. +*/ +type ObjectsHead struct { + Context *middleware.Context + Handler ObjectsHeadHandler +} + +func (o *ObjectsHead) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsHeadParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_head_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_head_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..abb5a1d4f8e38e973d6acee88b279dbf55edb652 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_head_parameters.go @@ -0,0 +1,102 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewObjectsHeadParams creates a new ObjectsHeadParams object +// +// There are no default values defined in the spec. +func NewObjectsHeadParams() ObjectsHeadParams { + + return ObjectsHeadParams{} +} + +// ObjectsHeadParams contains all the bound params for the objects head operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.head +type ObjectsHeadParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*Unique ID of the Object. + Required: true + In: path + */ + ID strfmt.UUID +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsHeadParams() beforehand. +func (o *ObjectsHeadParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *ObjectsHeadParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsHeadParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_head_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_head_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..b28e4094a155f131e9126fd3bd9a8d127bbb05e8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_head_responses.go @@ -0,0 +1,190 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsHeadNoContentCode is the HTTP code returned for type ObjectsHeadNoContent +const ObjectsHeadNoContentCode int = 204 + +/* +ObjectsHeadNoContent Object exists. + +swagger:response objectsHeadNoContent +*/ +type ObjectsHeadNoContent struct { +} + +// NewObjectsHeadNoContent creates ObjectsHeadNoContent with default headers values +func NewObjectsHeadNoContent() *ObjectsHeadNoContent { + + return &ObjectsHeadNoContent{} +} + +// WriteResponse to the client +func (o *ObjectsHeadNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// ObjectsHeadUnauthorizedCode is the HTTP code returned for type ObjectsHeadUnauthorized +const ObjectsHeadUnauthorizedCode int = 401 + +/* +ObjectsHeadUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsHeadUnauthorized +*/ +type ObjectsHeadUnauthorized struct { +} + +// NewObjectsHeadUnauthorized creates ObjectsHeadUnauthorized with default headers values +func NewObjectsHeadUnauthorized() *ObjectsHeadUnauthorized { + + return &ObjectsHeadUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsHeadUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsHeadForbiddenCode is the HTTP code returned for type ObjectsHeadForbidden +const ObjectsHeadForbiddenCode int = 403 + +/* +ObjectsHeadForbidden Forbidden + +swagger:response objectsHeadForbidden +*/ +type ObjectsHeadForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsHeadForbidden creates ObjectsHeadForbidden with default headers values +func NewObjectsHeadForbidden() *ObjectsHeadForbidden { + + return &ObjectsHeadForbidden{} +} + +// WithPayload adds the payload to the objects head forbidden response +func (o *ObjectsHeadForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsHeadForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects head forbidden response +func (o *ObjectsHeadForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsHeadForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsHeadNotFoundCode is the HTTP code returned for type ObjectsHeadNotFound +const ObjectsHeadNotFoundCode int = 404 + +/* +ObjectsHeadNotFound Object doesn't exist. 
+ +swagger:response objectsHeadNotFound +*/ +type ObjectsHeadNotFound struct { +} + +// NewObjectsHeadNotFound creates ObjectsHeadNotFound with default headers values +func NewObjectsHeadNotFound() *ObjectsHeadNotFound { + + return &ObjectsHeadNotFound{} +} + +// WriteResponse to the client +func (o *ObjectsHeadNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ObjectsHeadInternalServerErrorCode is the HTTP code returned for type ObjectsHeadInternalServerError +const ObjectsHeadInternalServerErrorCode int = 500 + +/* +ObjectsHeadInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response objectsHeadInternalServerError +*/ +type ObjectsHeadInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsHeadInternalServerError creates ObjectsHeadInternalServerError with default headers values +func NewObjectsHeadInternalServerError() *ObjectsHeadInternalServerError { + + return &ObjectsHeadInternalServerError{} +} + +// WithPayload adds the payload to the objects head internal server error response +func (o *ObjectsHeadInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsHeadInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects head internal server error response +func (o *ObjectsHeadInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsHeadInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery 
middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_head_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_head_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..6c1f57db3921a39f05acc978916dc0f73b8c6f8f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_head_urlbuilder.go @@ -0,0 +1,112 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsHeadURL generates an URL for the objects head operation +type ObjectsHeadURL struct { + ID strfmt.UUID + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsHeadURL) WithBasePath(bp string) *ObjectsHeadURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsHeadURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsHeadURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{id}" + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsHeadURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsHeadURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsHeadURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsHeadURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsHeadURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsHeadURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsHeadURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_list.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_list.go new file mode 100644 index 0000000000000000000000000000000000000000..74fe4981fef7cdf735d63dff35f6d21e4f3bc15a 
--- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_list.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsListHandlerFunc turns a function with the right signature into a objects list handler +type ObjectsListHandlerFunc func(ObjectsListParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsListHandlerFunc) Handle(params ObjectsListParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsListHandler interface for that can handle valid objects list params +type ObjectsListHandler interface { + Handle(ObjectsListParams, *models.Principal) middleware.Responder +} + +// NewObjectsList creates a new http.Handler for the objects list operation +func NewObjectsList(ctx *middleware.Context, handler ObjectsListHandler) *ObjectsList { + return &ObjectsList{Context: ctx, Handler: handler} +} + +/* + ObjectsList swagger:route GET /objects objects objectsList + +Get a list of Objects. + +Lists all Objects in reverse order of creation, owned by the user that belongs to the used token. 
+*/ +type ObjectsList struct { + Context *middleware.Context + Handler ObjectsListHandler +} + +func (o *ObjectsList) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsListParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_list_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_list_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..e5308caf49b0a97d82b805207e05b636b1d81c6a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_list_parameters.go @@ -0,0 +1,297 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewObjectsListParams creates a new ObjectsListParams object +// with the default values initialized. +func NewObjectsListParams() ObjectsListParams { + + var ( + // initialize parameters with default values + + offsetDefault = int64(0) + ) + + return ObjectsListParams{ + Offset: &offsetDefault, + } +} + +// ObjectsListParams contains all the bound params for the objects list operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.list +type ObjectsListParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*A threshold UUID of the objects to retrieve after, using an UUID-based ordering. This object is not part of the set.

    Must be used with `class`, typically in conjunction with `limit`.

    Note `after` cannot be used with `offset` or `sort`.

    For a null value similar to offset=0, set an empty string in the request, i.e. `after=` or `after`. + In: query + */ + After *string + /*The collection from which to query objects.

    Note that if `class` is not provided, the response will not include any objects. + In: query + */ + Class *string + /*Include additional information, such as classification infos. Allowed values include: classification, vector, interpretation + In: query + */ + Include *string + /*The maximum number of items to be returned per page. The default is 25 unless set otherwise as an environment variable. + In: query + */ + Limit *int64 + /*The starting index of the result window. Note `offset` will retrieve `offset+limit` results and return `limit` results from the object with index `offset` onwards. Limited by the value of `QUERY_MAXIMUM_RESULTS`.

    Should be used in conjunction with `limit`.

    Cannot be used with `after`. + In: query + Default: 0 + */ + Offset *int64 + /*Order parameter to tell how to order (asc or desc) data within given field. Should be used in conjunction with `sort` parameter. If providing multiple `sort` values, provide multiple `order` values in corresponding order, e.g.: `sort=author_name,title&order=desc,asc`. + In: query + */ + Order *string + /*Name(s) of the property to sort by - e.g. `city`, or `country,city`. + In: query + */ + Sort *string + /*Specifies the tenant in a request targeting a multi-tenant class + In: query + */ + Tenant *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsListParams() beforehand. +func (o *ObjectsListParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + qAfter, qhkAfter, _ := qs.GetOK("after") + if err := o.bindAfter(qAfter, qhkAfter, route.Formats); err != nil { + res = append(res, err) + } + + qClass, qhkClass, _ := qs.GetOK("class") + if err := o.bindClass(qClass, qhkClass, route.Formats); err != nil { + res = append(res, err) + } + + qInclude, qhkInclude, _ := qs.GetOK("include") + if err := o.bindInclude(qInclude, qhkInclude, route.Formats); err != nil { + res = append(res, err) + } + + qLimit, qhkLimit, _ := qs.GetOK("limit") + if err := o.bindLimit(qLimit, qhkLimit, route.Formats); err != nil { + res = append(res, err) + } + + qOffset, qhkOffset, _ := qs.GetOK("offset") + if err := o.bindOffset(qOffset, qhkOffset, route.Formats); err != nil { + res = append(res, err) + } + + qOrder, qhkOrder, _ := qs.GetOK("order") + if err := o.bindOrder(qOrder, qhkOrder, route.Formats); err != nil { + res = append(res, err) + } + + qSort, qhkSort, _ := 
qs.GetOK("sort") + if err := o.bindSort(qSort, qhkSort, route.Formats); err != nil { + res = append(res, err) + } + + qTenant, qhkTenant, _ := qs.GetOK("tenant") + if err := o.bindTenant(qTenant, qhkTenant, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindAfter binds and validates parameter After from query. +func (o *ObjectsListParams) bindAfter(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.After = &raw + + return nil +} + +// bindClass binds and validates parameter Class from query. +func (o *ObjectsListParams) bindClass(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Class = &raw + + return nil +} + +// bindInclude binds and validates parameter Include from query. +func (o *ObjectsListParams) bindInclude(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Include = &raw + + return nil +} + +// bindLimit binds and validates parameter Limit from query. 
+func (o *ObjectsListParams) bindLimit(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + + value, err := swag.ConvertInt64(raw) + if err != nil { + return errors.InvalidType("limit", "query", "int64", raw) + } + o.Limit = &value + + return nil +} + +// bindOffset binds and validates parameter Offset from query. +func (o *ObjectsListParams) bindOffset(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + // Default values have been previously initialized by NewObjectsListParams() + return nil + } + + value, err := swag.ConvertInt64(raw) + if err != nil { + return errors.InvalidType("offset", "query", "int64", raw) + } + o.Offset = &value + + return nil +} + +// bindOrder binds and validates parameter Order from query. +func (o *ObjectsListParams) bindOrder(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Order = &raw + + return nil +} + +// bindSort binds and validates parameter Sort from query. +func (o *ObjectsListParams) bindSort(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Sort = &raw + + return nil +} + +// bindTenant binds and validates parameter Tenant from query. 
+func (o *ObjectsListParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Tenant = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_list_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_list_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..637d7e3a97c18f9d030c7bc5ed3f53c0f7f98ac5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_list_responses.go @@ -0,0 +1,300 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsListOKCode is the HTTP code returned for type ObjectsListOK +const ObjectsListOKCode int = 200 + +/* +ObjectsListOK Successful response.

    If `class` is not provided, the response will not include any objects. + +swagger:response objectsListOK +*/ +type ObjectsListOK struct { + + /* + In: Body + */ + Payload *models.ObjectsListResponse `json:"body,omitempty"` +} + +// NewObjectsListOK creates ObjectsListOK with default headers values +func NewObjectsListOK() *ObjectsListOK { + + return &ObjectsListOK{} +} + +// WithPayload adds the payload to the objects list o k response +func (o *ObjectsListOK) WithPayload(payload *models.ObjectsListResponse) *ObjectsListOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects list o k response +func (o *ObjectsListOK) SetPayload(payload *models.ObjectsListResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsListOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsListBadRequestCode is the HTTP code returned for type ObjectsListBadRequest +const ObjectsListBadRequestCode int = 400 + +/* +ObjectsListBadRequest Malformed request. 
+ +swagger:response objectsListBadRequest +*/ +type ObjectsListBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsListBadRequest creates ObjectsListBadRequest with default headers values +func NewObjectsListBadRequest() *ObjectsListBadRequest { + + return &ObjectsListBadRequest{} +} + +// WithPayload adds the payload to the objects list bad request response +func (o *ObjectsListBadRequest) WithPayload(payload *models.ErrorResponse) *ObjectsListBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects list bad request response +func (o *ObjectsListBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsListBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsListUnauthorizedCode is the HTTP code returned for type ObjectsListUnauthorized +const ObjectsListUnauthorizedCode int = 401 + +/* +ObjectsListUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsListUnauthorized +*/ +type ObjectsListUnauthorized struct { +} + +// NewObjectsListUnauthorized creates ObjectsListUnauthorized with default headers values +func NewObjectsListUnauthorized() *ObjectsListUnauthorized { + + return &ObjectsListUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsListUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsListForbiddenCode is the HTTP code returned for type ObjectsListForbidden +const ObjectsListForbiddenCode int = 403 + +/* +ObjectsListForbidden Forbidden + +swagger:response objectsListForbidden +*/ +type ObjectsListForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsListForbidden creates ObjectsListForbidden with default headers values +func NewObjectsListForbidden() *ObjectsListForbidden { + + return &ObjectsListForbidden{} +} + +// WithPayload adds the payload to the objects list forbidden response +func (o *ObjectsListForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsListForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects list forbidden response +func (o *ObjectsListForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsListForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsListNotFoundCode is the HTTP code returned for type ObjectsListNotFound +const ObjectsListNotFoundCode int = 404 + +/* +ObjectsListNotFound Successful query result but no resource was found. 
+ +swagger:response objectsListNotFound +*/ +type ObjectsListNotFound struct { +} + +// NewObjectsListNotFound creates ObjectsListNotFound with default headers values +func NewObjectsListNotFound() *ObjectsListNotFound { + + return &ObjectsListNotFound{} +} + +// WriteResponse to the client +func (o *ObjectsListNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ObjectsListUnprocessableEntityCode is the HTTP code returned for type ObjectsListUnprocessableEntity +const ObjectsListUnprocessableEntityCode int = 422 + +/* +ObjectsListUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? + +swagger:response objectsListUnprocessableEntity +*/ +type ObjectsListUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsListUnprocessableEntity creates ObjectsListUnprocessableEntity with default headers values +func NewObjectsListUnprocessableEntity() *ObjectsListUnprocessableEntity { + + return &ObjectsListUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects list unprocessable entity response +func (o *ObjectsListUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsListUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects list unprocessable entity response +func (o *ObjectsListUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsListUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the 
recovery middleware deal with this + } + } +} + +// ObjectsListInternalServerErrorCode is the HTTP code returned for type ObjectsListInternalServerError +const ObjectsListInternalServerErrorCode int = 500 + +/* +ObjectsListInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response objectsListInternalServerError +*/ +type ObjectsListInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsListInternalServerError creates ObjectsListInternalServerError with default headers values +func NewObjectsListInternalServerError() *ObjectsListInternalServerError { + + return &ObjectsListInternalServerError{} +} + +// WithPayload adds the payload to the objects list internal server error response +func (o *ObjectsListInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsListInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects list internal server error response +func (o *ObjectsListInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsListInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_list_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_list_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..fbf4f3599f770a9b1c8bac98192bbfa57ecf5a08 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_list_urlbuilder.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + + "github.com/go-openapi/swag" +) + +// ObjectsListURL generates an URL for the objects list operation +type ObjectsListURL struct { + After *string + Class *string + Include *string + Limit *int64 + Offset *int64 + Order *string + Sort *string + Tenant *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsListURL) WithBasePath(bp string) *ObjectsListURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsListURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsListURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var afterQ string + if o.After != nil { + afterQ = *o.After + } + if afterQ != "" { + qs.Set("after", afterQ) + } + + var classQ string + if o.Class != nil { + classQ = *o.Class + } + if classQ != "" { + qs.Set("class", classQ) + } + + var includeQ string + if o.Include != nil { + includeQ = *o.Include + } + if includeQ != "" { + qs.Set("include", includeQ) + } + + var limitQ string + if o.Limit != nil { + limitQ = swag.FormatInt64(*o.Limit) + } + if limitQ != "" { + qs.Set("limit", limitQ) + } + + var offsetQ string + if o.Offset != nil { + offsetQ = swag.FormatInt64(*o.Offset) + } + if offsetQ != "" { + qs.Set("offset", offsetQ) + } + + var orderQ string + if o.Order != nil { + orderQ = *o.Order + } + if orderQ != "" { + qs.Set("order", orderQ) + } + + var sortQ string + if o.Sort != nil { + sortQ = *o.Sort + } + if sortQ != "" { + qs.Set("sort", sortQ) + } + + var tenantQ string + if o.Tenant != nil { + tenantQ = *o.Tenant + } + if tenantQ != "" { + qs.Set("tenant", tenantQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsListURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsListURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsListURL) 
BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsListURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsListURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsListURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_patch.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_patch.go new file mode 100644 index 0000000000000000000000000000000000000000..d8a4737b3d0eed22f11f28a60c7a2e09a85d72bb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_patch.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsPatchHandlerFunc turns a function with the right signature into a objects patch handler +type ObjectsPatchHandlerFunc func(ObjectsPatchParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsPatchHandlerFunc) Handle(params ObjectsPatchParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsPatchHandler interface for that can handle valid objects patch params +type ObjectsPatchHandler interface { + Handle(ObjectsPatchParams, *models.Principal) middleware.Responder +} + +// NewObjectsPatch creates a new http.Handler for the objects patch operation +func NewObjectsPatch(ctx *middleware.Context, handler ObjectsPatchHandler) *ObjectsPatch { + return &ObjectsPatch{Context: ctx, Handler: handler} +} + +/* + ObjectsPatch swagger:route PATCH /objects/{id} objects objectsPatch + +Update an Object based on its UUID (using patch semantics). + +Update an object based on its UUID (using patch semantics). This method supports json-merge style patch semantics (RFC 7396). Provided meta-data and schema values are validated. LastUpdateTime is set to the time this function is called.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead. +*/ +type ObjectsPatch struct { + Context *middleware.Context + Handler ObjectsPatchHandler +} + +func (o *ObjectsPatch) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsPatchParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_patch_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_patch_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..57e39f39e721920e03bafd07b84c9febe2328467 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_patch_parameters.go @@ -0,0 +1,160 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsPatchParams creates a new ObjectsPatchParams object +// +// There are no default values defined in the spec. +func NewObjectsPatchParams() ObjectsPatchParams { + + return ObjectsPatchParams{} +} + +// ObjectsPatchParams contains all the bound params for the objects patch operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.patch +type ObjectsPatchParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*RFC 7396-style patch, the body contains the object to merge into the existing object. + In: body + */ + Body *models.Object + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string + /*Unique ID of the Object. + Required: true + In: path + */ + ID strfmt.UUID +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsPatchParams() beforehand. 
+func (o *ObjectsPatchParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.Object + if err := route.Consumer.Consume(r.Body, &body); err != nil { + res = append(res, errors.NewParseError("body", "body", "", err)) + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. +func (o *ObjectsPatchParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *ObjectsPatchParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsPatchParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_patch_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_patch_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..24b40cd1ddef2aaaa102b5a3e6c37f204ea04a6a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_patch_responses.go @@ -0,0 +1,260 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsPatchNoContentCode is the HTTP code returned for type ObjectsPatchNoContent +const ObjectsPatchNoContentCode int = 204 + +/* +ObjectsPatchNoContent Successfully applied. No content provided. + +swagger:response objectsPatchNoContent +*/ +type ObjectsPatchNoContent struct { +} + +// NewObjectsPatchNoContent creates ObjectsPatchNoContent with default headers values +func NewObjectsPatchNoContent() *ObjectsPatchNoContent { + + return &ObjectsPatchNoContent{} +} + +// WriteResponse to the client +func (o *ObjectsPatchNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// ObjectsPatchBadRequestCode is the HTTP code returned for type ObjectsPatchBadRequest +const ObjectsPatchBadRequestCode int = 400 + +/* +ObjectsPatchBadRequest The patch-JSON is malformed. + +swagger:response objectsPatchBadRequest +*/ +type ObjectsPatchBadRequest struct { +} + +// NewObjectsPatchBadRequest creates ObjectsPatchBadRequest with default headers values +func NewObjectsPatchBadRequest() *ObjectsPatchBadRequest { + + return &ObjectsPatchBadRequest{} +} + +// WriteResponse to the client +func (o *ObjectsPatchBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(400) +} + +// ObjectsPatchUnauthorizedCode is the HTTP code returned for type ObjectsPatchUnauthorized +const ObjectsPatchUnauthorizedCode int = 401 + +/* +ObjectsPatchUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsPatchUnauthorized +*/ +type ObjectsPatchUnauthorized struct { +} + +// NewObjectsPatchUnauthorized creates ObjectsPatchUnauthorized with default headers values +func NewObjectsPatchUnauthorized() *ObjectsPatchUnauthorized { + + return &ObjectsPatchUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsPatchUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsPatchForbiddenCode is the HTTP code returned for type ObjectsPatchForbidden +const ObjectsPatchForbiddenCode int = 403 + +/* +ObjectsPatchForbidden Forbidden + +swagger:response objectsPatchForbidden +*/ +type ObjectsPatchForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsPatchForbidden creates ObjectsPatchForbidden with default headers values +func NewObjectsPatchForbidden() *ObjectsPatchForbidden { + + return &ObjectsPatchForbidden{} +} + +// WithPayload adds the payload to the objects patch forbidden response +func (o *ObjectsPatchForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsPatchForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects patch forbidden response +func (o *ObjectsPatchForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsPatchForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsPatchNotFoundCode is the HTTP code returned for type ObjectsPatchNotFound +const ObjectsPatchNotFoundCode int = 404 + +/* +ObjectsPatchNotFound Successful query result but no resource was found. 
+ +swagger:response objectsPatchNotFound +*/ +type ObjectsPatchNotFound struct { +} + +// NewObjectsPatchNotFound creates ObjectsPatchNotFound with default headers values +func NewObjectsPatchNotFound() *ObjectsPatchNotFound { + + return &ObjectsPatchNotFound{} +} + +// WriteResponse to the client +func (o *ObjectsPatchNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ObjectsPatchUnprocessableEntityCode is the HTTP code returned for type ObjectsPatchUnprocessableEntity +const ObjectsPatchUnprocessableEntityCode int = 422 + +/* +ObjectsPatchUnprocessableEntity The patch-JSON is valid but unprocessable. + +swagger:response objectsPatchUnprocessableEntity +*/ +type ObjectsPatchUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsPatchUnprocessableEntity creates ObjectsPatchUnprocessableEntity with default headers values +func NewObjectsPatchUnprocessableEntity() *ObjectsPatchUnprocessableEntity { + + return &ObjectsPatchUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects patch unprocessable entity response +func (o *ObjectsPatchUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsPatchUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects patch unprocessable entity response +func (o *ObjectsPatchUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsPatchUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// 
ObjectsPatchInternalServerErrorCode is the HTTP code returned for type ObjectsPatchInternalServerError +const ObjectsPatchInternalServerErrorCode int = 500 + +/* +ObjectsPatchInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response objectsPatchInternalServerError +*/ +type ObjectsPatchInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsPatchInternalServerError creates ObjectsPatchInternalServerError with default headers values +func NewObjectsPatchInternalServerError() *ObjectsPatchInternalServerError { + + return &ObjectsPatchInternalServerError{} +} + +// WithPayload adds the payload to the objects patch internal server error response +func (o *ObjectsPatchInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsPatchInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects patch internal server error response +func (o *ObjectsPatchInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsPatchInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_patch_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_patch_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..1480d71e92758f592f0e43124b059beef8485c71 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_patch_urlbuilder.go @@ -0,0 +1,126 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsPatchURL generates an URL for the objects patch operation +type ObjectsPatchURL struct { + ID strfmt.UUID + + ConsistencyLevel *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsPatchURL) WithBasePath(bp string) *ObjectsPatchURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsPatchURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsPatchURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{id}" + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsPatchURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsPatchURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsPatchURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsPatchURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsPatchURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsPatchURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsPatchURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_create.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_create.go new file mode 100644 index 0000000000000000000000000000000000000000..462ec6957610f4eabacfa1b29ca9d2f5414a244c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_create.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsReferencesCreateHandlerFunc turns a function with the right signature into a objects references create handler +type ObjectsReferencesCreateHandlerFunc func(ObjectsReferencesCreateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsReferencesCreateHandlerFunc) Handle(params ObjectsReferencesCreateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsReferencesCreateHandler interface for that can handle valid objects references create params +type ObjectsReferencesCreateHandler interface { + Handle(ObjectsReferencesCreateParams, *models.Principal) middleware.Responder +} + +// NewObjectsReferencesCreate creates a new http.Handler for the objects references create operation +func NewObjectsReferencesCreate(ctx *middleware.Context, handler 
ObjectsReferencesCreateHandler) *ObjectsReferencesCreate { + return &ObjectsReferencesCreate{Context: ctx, Handler: handler} +} + +/* + ObjectsReferencesCreate swagger:route POST /objects/{id}/references/{propertyName} objects objectsReferencesCreate + +Add a single reference to a class-property. + +Add a cross-reference.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}/references/{propertyName}` endpoint instead. +*/ +type ObjectsReferencesCreate struct { + Context *middleware.Context + Handler ObjectsReferencesCreateHandler +} + +func (o *ObjectsReferencesCreate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsReferencesCreateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_create_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..653301b50800d8778efe34c1c76f101490094bb0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_create_parameters.go @@ -0,0 +1,192 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsReferencesCreateParams creates a new ObjectsReferencesCreateParams object +// +// There are no default values defined in the spec. +func NewObjectsReferencesCreateParams() ObjectsReferencesCreateParams { + + return ObjectsReferencesCreateParams{} +} + +// ObjectsReferencesCreateParams contains all the bound params for the objects references create operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.references.create +type ObjectsReferencesCreateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.SingleRef + /*Unique ID of the Object. + Required: true + In: path + */ + ID strfmt.UUID + /*Unique name of the property related to the Object. + Required: true + In: path + */ + PropertyName string + /*Specifies the tenant in a request targeting a multi-tenant class + In: query + */ + Tenant *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsReferencesCreateParams() beforehand. 
+func (o *ObjectsReferencesCreateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.SingleRef + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + rPropertyName, rhkPropertyName, _ := route.Params.GetOK("propertyName") + if err := o.bindPropertyName(rPropertyName, rhkPropertyName, route.Formats); err != nil { + res = append(res, err) + } + + qTenant, qhkTenant, _ := qs.GetOK("tenant") + if err := o.bindTenant(qTenant, qhkTenant, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *ObjectsReferencesCreateParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsReferencesCreateParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} + +// bindPropertyName binds and validates parameter PropertyName from path. +func (o *ObjectsReferencesCreateParams) bindPropertyName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.PropertyName = raw + + return nil +} + +// bindTenant binds and validates parameter Tenant from query. 
+func (o *ObjectsReferencesCreateParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Tenant = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_create_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..cfd3a5f325051bbc6146bda90d9159daf2ce8188 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_create_responses.go @@ -0,0 +1,210 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsReferencesCreateOKCode is the HTTP code returned for type ObjectsReferencesCreateOK +const ObjectsReferencesCreateOKCode int = 200 + +/* +ObjectsReferencesCreateOK Successfully added the reference. 
+ +swagger:response objectsReferencesCreateOK +*/ +type ObjectsReferencesCreateOK struct { +} + +// NewObjectsReferencesCreateOK creates ObjectsReferencesCreateOK with default headers values +func NewObjectsReferencesCreateOK() *ObjectsReferencesCreateOK { + + return &ObjectsReferencesCreateOK{} +} + +// WriteResponse to the client +func (o *ObjectsReferencesCreateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// ObjectsReferencesCreateUnauthorizedCode is the HTTP code returned for type ObjectsReferencesCreateUnauthorized +const ObjectsReferencesCreateUnauthorizedCode int = 401 + +/* +ObjectsReferencesCreateUnauthorized Unauthorized or invalid credentials. + +swagger:response objectsReferencesCreateUnauthorized +*/ +type ObjectsReferencesCreateUnauthorized struct { +} + +// NewObjectsReferencesCreateUnauthorized creates ObjectsReferencesCreateUnauthorized with default headers values +func NewObjectsReferencesCreateUnauthorized() *ObjectsReferencesCreateUnauthorized { + + return &ObjectsReferencesCreateUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsReferencesCreateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsReferencesCreateForbiddenCode is the HTTP code returned for type ObjectsReferencesCreateForbidden +const ObjectsReferencesCreateForbiddenCode int = 403 + +/* +ObjectsReferencesCreateForbidden Forbidden + +swagger:response objectsReferencesCreateForbidden +*/ +type ObjectsReferencesCreateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsReferencesCreateForbidden creates ObjectsReferencesCreateForbidden with default headers values +func NewObjectsReferencesCreateForbidden() 
*ObjectsReferencesCreateForbidden { + + return &ObjectsReferencesCreateForbidden{} +} + +// WithPayload adds the payload to the objects references create forbidden response +func (o *ObjectsReferencesCreateForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsReferencesCreateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects references create forbidden response +func (o *ObjectsReferencesCreateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsReferencesCreateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsReferencesCreateUnprocessableEntityCode is the HTTP code returned for type ObjectsReferencesCreateUnprocessableEntity +const ObjectsReferencesCreateUnprocessableEntityCode int = 422 + +/* +ObjectsReferencesCreateUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class? 
+ +swagger:response objectsReferencesCreateUnprocessableEntity +*/ +type ObjectsReferencesCreateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsReferencesCreateUnprocessableEntity creates ObjectsReferencesCreateUnprocessableEntity with default headers values +func NewObjectsReferencesCreateUnprocessableEntity() *ObjectsReferencesCreateUnprocessableEntity { + + return &ObjectsReferencesCreateUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects references create unprocessable entity response +func (o *ObjectsReferencesCreateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsReferencesCreateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects references create unprocessable entity response +func (o *ObjectsReferencesCreateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsReferencesCreateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsReferencesCreateInternalServerErrorCode is the HTTP code returned for type ObjectsReferencesCreateInternalServerError +const ObjectsReferencesCreateInternalServerErrorCode int = 500 + +/* +ObjectsReferencesCreateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response objectsReferencesCreateInternalServerError +*/ +type ObjectsReferencesCreateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsReferencesCreateInternalServerError creates ObjectsReferencesCreateInternalServerError with default headers values +func NewObjectsReferencesCreateInternalServerError() *ObjectsReferencesCreateInternalServerError { + + return &ObjectsReferencesCreateInternalServerError{} +} + +// WithPayload adds the payload to the objects references create internal server error response +func (o *ObjectsReferencesCreateInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsReferencesCreateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects references create internal server error response +func (o *ObjectsReferencesCreateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsReferencesCreateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_create_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_create_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..5c4a3b3a1035bc6e5466d9d9ca69ed00b443b1d9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_create_urlbuilder.go @@ -0,0 +1,134 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || 
__/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsReferencesCreateURL generates an URL for the objects references create operation +type ObjectsReferencesCreateURL struct { + ID strfmt.UUID + PropertyName string + + Tenant *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsReferencesCreateURL) WithBasePath(bp string) *ObjectsReferencesCreateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsReferencesCreateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsReferencesCreateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{id}/references/{propertyName}" + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsReferencesCreateURL") + } + + propertyName := o.PropertyName + if propertyName != "" { + _path = strings.Replace(_path, "{propertyName}", propertyName, -1) + } else { + return nil, errors.New("propertyName is required on ObjectsReferencesCreateURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var tenantQ string + if o.Tenant != nil { + tenantQ = *o.Tenant + } + if tenantQ != "" { + qs.Set("tenant", tenantQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsReferencesCreateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsReferencesCreateURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsReferencesCreateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsReferencesCreateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsReferencesCreateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + 
base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsReferencesCreateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_delete.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..4ace245fe28a3b147720a467ff045d4069a0dcba --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_delete.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsReferencesDeleteHandlerFunc turns a function with the right signature into a objects references delete handler +type ObjectsReferencesDeleteHandlerFunc func(ObjectsReferencesDeleteParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsReferencesDeleteHandlerFunc) Handle(params ObjectsReferencesDeleteParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsReferencesDeleteHandler interface for that can handle valid objects references delete params +type ObjectsReferencesDeleteHandler interface { + Handle(ObjectsReferencesDeleteParams, *models.Principal) middleware.Responder +} + +// NewObjectsReferencesDelete creates a new http.Handler for the objects references delete operation +func NewObjectsReferencesDelete(ctx *middleware.Context, handler ObjectsReferencesDeleteHandler) *ObjectsReferencesDelete { + return &ObjectsReferencesDelete{Context: ctx, Handler: handler} +} + +/* + ObjectsReferencesDelete swagger:route DELETE /objects/{id}/references/{propertyName} objects objectsReferencesDelete + +Delete a single reference from the list of references. + +Delete the single reference that is given in the body from the list of references that this property has.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}/references/{propertyName}` endpoint instead. +*/ +type ObjectsReferencesDelete struct { + Context *middleware.Context + Handler ObjectsReferencesDeleteHandler +} + +func (o *ObjectsReferencesDelete) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsReferencesDeleteParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_delete_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..ac275f716922294848cfbd1c9bbb4d4488b7fd54 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_delete_parameters.go @@ -0,0 +1,192 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsReferencesDeleteParams creates a new ObjectsReferencesDeleteParams object +// +// There are no default values defined in the spec. +func NewObjectsReferencesDeleteParams() ObjectsReferencesDeleteParams { + + return ObjectsReferencesDeleteParams{} +} + +// ObjectsReferencesDeleteParams contains all the bound params for the objects references delete operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.references.delete +type ObjectsReferencesDeleteParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.SingleRef + /*Unique ID of the Object. + Required: true + In: path + */ + ID strfmt.UUID + /*Unique name of the property related to the Object. + Required: true + In: path + */ + PropertyName string + /*Specifies the tenant in a request targeting a multi-tenant class + In: query + */ + Tenant *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsReferencesDeleteParams() beforehand. 
+func (o *ObjectsReferencesDeleteParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.SingleRef + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + rPropertyName, rhkPropertyName, _ := route.Params.GetOK("propertyName") + if err := o.bindPropertyName(rPropertyName, rhkPropertyName, route.Formats); err != nil { + res = append(res, err) + } + + qTenant, qhkTenant, _ := qs.GetOK("tenant") + if err := o.bindTenant(qTenant, qhkTenant, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *ObjectsReferencesDeleteParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsReferencesDeleteParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} + +// bindPropertyName binds and validates parameter PropertyName from path. +func (o *ObjectsReferencesDeleteParams) bindPropertyName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.PropertyName = raw + + return nil +} + +// bindTenant binds and validates parameter Tenant from query. 
+func (o *ObjectsReferencesDeleteParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Tenant = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_delete_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..42b14730274baf31df38ea14bb04e8f1ec0b137e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_delete_responses.go @@ -0,0 +1,210 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsReferencesDeleteNoContentCode is the HTTP code returned for type ObjectsReferencesDeleteNoContent +const ObjectsReferencesDeleteNoContentCode int = 204 + +/* +ObjectsReferencesDeleteNoContent Successfully deleted. 
+ +swagger:response objectsReferencesDeleteNoContent +*/ +type ObjectsReferencesDeleteNoContent struct { +} + +// NewObjectsReferencesDeleteNoContent creates ObjectsReferencesDeleteNoContent with default headers values +func NewObjectsReferencesDeleteNoContent() *ObjectsReferencesDeleteNoContent { + + return &ObjectsReferencesDeleteNoContent{} +} + +// WriteResponse to the client +func (o *ObjectsReferencesDeleteNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// ObjectsReferencesDeleteUnauthorizedCode is the HTTP code returned for type ObjectsReferencesDeleteUnauthorized +const ObjectsReferencesDeleteUnauthorizedCode int = 401 + +/* +ObjectsReferencesDeleteUnauthorized Unauthorized or invalid credentials. + +swagger:response objectsReferencesDeleteUnauthorized +*/ +type ObjectsReferencesDeleteUnauthorized struct { +} + +// NewObjectsReferencesDeleteUnauthorized creates ObjectsReferencesDeleteUnauthorized with default headers values +func NewObjectsReferencesDeleteUnauthorized() *ObjectsReferencesDeleteUnauthorized { + + return &ObjectsReferencesDeleteUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsReferencesDeleteUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsReferencesDeleteForbiddenCode is the HTTP code returned for type ObjectsReferencesDeleteForbidden +const ObjectsReferencesDeleteForbiddenCode int = 403 + +/* +ObjectsReferencesDeleteForbidden Forbidden + +swagger:response objectsReferencesDeleteForbidden +*/ +type ObjectsReferencesDeleteForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsReferencesDeleteForbidden creates ObjectsReferencesDeleteForbidden with default 
headers values +func NewObjectsReferencesDeleteForbidden() *ObjectsReferencesDeleteForbidden { + + return &ObjectsReferencesDeleteForbidden{} +} + +// WithPayload adds the payload to the objects references delete forbidden response +func (o *ObjectsReferencesDeleteForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsReferencesDeleteForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects references delete forbidden response +func (o *ObjectsReferencesDeleteForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsReferencesDeleteForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsReferencesDeleteNotFoundCode is the HTTP code returned for type ObjectsReferencesDeleteNotFound +const ObjectsReferencesDeleteNotFoundCode int = 404 + +/* +ObjectsReferencesDeleteNotFound Successful query result but no resource was found. 
+ +swagger:response objectsReferencesDeleteNotFound +*/ +type ObjectsReferencesDeleteNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsReferencesDeleteNotFound creates ObjectsReferencesDeleteNotFound with default headers values +func NewObjectsReferencesDeleteNotFound() *ObjectsReferencesDeleteNotFound { + + return &ObjectsReferencesDeleteNotFound{} +} + +// WithPayload adds the payload to the objects references delete not found response +func (o *ObjectsReferencesDeleteNotFound) WithPayload(payload *models.ErrorResponse) *ObjectsReferencesDeleteNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects references delete not found response +func (o *ObjectsReferencesDeleteNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsReferencesDeleteNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsReferencesDeleteInternalServerErrorCode is the HTTP code returned for type ObjectsReferencesDeleteInternalServerError +const ObjectsReferencesDeleteInternalServerErrorCode int = 500 + +/* +ObjectsReferencesDeleteInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response objectsReferencesDeleteInternalServerError +*/ +type ObjectsReferencesDeleteInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsReferencesDeleteInternalServerError creates ObjectsReferencesDeleteInternalServerError with default headers values +func NewObjectsReferencesDeleteInternalServerError() *ObjectsReferencesDeleteInternalServerError { + + return &ObjectsReferencesDeleteInternalServerError{} +} + +// WithPayload adds the payload to the objects references delete internal server error response +func (o *ObjectsReferencesDeleteInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsReferencesDeleteInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects references delete internal server error response +func (o *ObjectsReferencesDeleteInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsReferencesDeleteInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_delete_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_delete_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..95e7cf77ba8ac19ec9fbe0499ee8707b0eccd4a7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_delete_urlbuilder.go @@ -0,0 +1,134 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || 
__/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsReferencesDeleteURL generates an URL for the objects references delete operation +type ObjectsReferencesDeleteURL struct { + ID strfmt.UUID + PropertyName string + + Tenant *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsReferencesDeleteURL) WithBasePath(bp string) *ObjectsReferencesDeleteURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsReferencesDeleteURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsReferencesDeleteURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{id}/references/{propertyName}" + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsReferencesDeleteURL") + } + + propertyName := o.PropertyName + if propertyName != "" { + _path = strings.Replace(_path, "{propertyName}", propertyName, -1) + } else { + return nil, errors.New("propertyName is required on ObjectsReferencesDeleteURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var tenantQ string + if o.Tenant != nil { + tenantQ = *o.Tenant + } + if tenantQ != "" { + qs.Set("tenant", tenantQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsReferencesDeleteURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsReferencesDeleteURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsReferencesDeleteURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsReferencesDeleteURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsReferencesDeleteURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + 
base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsReferencesDeleteURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_update.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_update.go new file mode 100644 index 0000000000000000000000000000000000000000..6d2c809beae52fed48a1c044dba716a539b1b616 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_update.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsReferencesUpdateHandlerFunc turns a function with the right signature into a objects references update handler +type ObjectsReferencesUpdateHandlerFunc func(ObjectsReferencesUpdateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsReferencesUpdateHandlerFunc) Handle(params ObjectsReferencesUpdateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsReferencesUpdateHandler interface for that can handle valid objects references update params +type ObjectsReferencesUpdateHandler interface { + Handle(ObjectsReferencesUpdateParams, *models.Principal) middleware.Responder +} + +// NewObjectsReferencesUpdate creates a new http.Handler for the objects references update operation +func NewObjectsReferencesUpdate(ctx *middleware.Context, handler ObjectsReferencesUpdateHandler) *ObjectsReferencesUpdate { + return &ObjectsReferencesUpdate{Context: ctx, Handler: handler} +} + +/* + ObjectsReferencesUpdate swagger:route PUT /objects/{id}/references/{propertyName} objects objectsReferencesUpdate + +Replace all references to a class-property. + +Replace all references in cross-reference property of an object.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}/references/{propertyName}` endpoint instead. +*/ +type ObjectsReferencesUpdate struct { + Context *middleware.Context + Handler ObjectsReferencesUpdateHandler +} + +func (o *ObjectsReferencesUpdate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsReferencesUpdateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_update_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_update_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..c5f701574e4f6c9cd3d35db515577266720c0ef6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_update_parameters.go @@ -0,0 +1,192 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsReferencesUpdateParams creates a new ObjectsReferencesUpdateParams object +// +// There are no default values defined in the spec. +func NewObjectsReferencesUpdateParams() ObjectsReferencesUpdateParams { + + return ObjectsReferencesUpdateParams{} +} + +// ObjectsReferencesUpdateParams contains all the bound params for the objects references update operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.references.update +type ObjectsReferencesUpdateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body models.MultipleRef + /*Unique ID of the Object. + Required: true + In: path + */ + ID strfmt.UUID + /*Unique name of the property related to the Object. + Required: true + In: path + */ + PropertyName string + /*Specifies the tenant in a request targeting a multi-tenant class + In: query + */ + Tenant *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsReferencesUpdateParams() beforehand. 
+func (o *ObjectsReferencesUpdateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.MultipleRef + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + rPropertyName, rhkPropertyName, _ := route.Params.GetOK("propertyName") + if err := o.bindPropertyName(rPropertyName, rhkPropertyName, route.Formats); err != nil { + res = append(res, err) + } + + qTenant, qhkTenant, _ := qs.GetOK("tenant") + if err := o.bindTenant(qTenant, qhkTenant, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *ObjectsReferencesUpdateParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsReferencesUpdateParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} + +// bindPropertyName binds and validates parameter PropertyName from path. +func (o *ObjectsReferencesUpdateParams) bindPropertyName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.PropertyName = raw + + return nil +} + +// bindTenant binds and validates parameter Tenant from query. 
+func (o *ObjectsReferencesUpdateParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Tenant = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_update_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_update_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..e776362bb724f326648cdaed534efacae2c7dbbc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_update_responses.go @@ -0,0 +1,210 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsReferencesUpdateOKCode is the HTTP code returned for type ObjectsReferencesUpdateOK +const ObjectsReferencesUpdateOKCode int = 200 + +/* +ObjectsReferencesUpdateOK Successfully replaced all the references. 
+ +swagger:response objectsReferencesUpdateOK +*/ +type ObjectsReferencesUpdateOK struct { +} + +// NewObjectsReferencesUpdateOK creates ObjectsReferencesUpdateOK with default headers values +func NewObjectsReferencesUpdateOK() *ObjectsReferencesUpdateOK { + + return &ObjectsReferencesUpdateOK{} +} + +// WriteResponse to the client +func (o *ObjectsReferencesUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// ObjectsReferencesUpdateUnauthorizedCode is the HTTP code returned for type ObjectsReferencesUpdateUnauthorized +const ObjectsReferencesUpdateUnauthorizedCode int = 401 + +/* +ObjectsReferencesUpdateUnauthorized Unauthorized or invalid credentials. + +swagger:response objectsReferencesUpdateUnauthorized +*/ +type ObjectsReferencesUpdateUnauthorized struct { +} + +// NewObjectsReferencesUpdateUnauthorized creates ObjectsReferencesUpdateUnauthorized with default headers values +func NewObjectsReferencesUpdateUnauthorized() *ObjectsReferencesUpdateUnauthorized { + + return &ObjectsReferencesUpdateUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsReferencesUpdateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsReferencesUpdateForbiddenCode is the HTTP code returned for type ObjectsReferencesUpdateForbidden +const ObjectsReferencesUpdateForbiddenCode int = 403 + +/* +ObjectsReferencesUpdateForbidden Forbidden + +swagger:response objectsReferencesUpdateForbidden +*/ +type ObjectsReferencesUpdateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsReferencesUpdateForbidden creates ObjectsReferencesUpdateForbidden with default headers values +func NewObjectsReferencesUpdateForbidden() 
*ObjectsReferencesUpdateForbidden { + + return &ObjectsReferencesUpdateForbidden{} +} + +// WithPayload adds the payload to the objects references update forbidden response +func (o *ObjectsReferencesUpdateForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsReferencesUpdateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects references update forbidden response +func (o *ObjectsReferencesUpdateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsReferencesUpdateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsReferencesUpdateUnprocessableEntityCode is the HTTP code returned for type ObjectsReferencesUpdateUnprocessableEntity +const ObjectsReferencesUpdateUnprocessableEntityCode int = 422 + +/* +ObjectsReferencesUpdateUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class? 
+ +swagger:response objectsReferencesUpdateUnprocessableEntity +*/ +type ObjectsReferencesUpdateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsReferencesUpdateUnprocessableEntity creates ObjectsReferencesUpdateUnprocessableEntity with default headers values +func NewObjectsReferencesUpdateUnprocessableEntity() *ObjectsReferencesUpdateUnprocessableEntity { + + return &ObjectsReferencesUpdateUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects references update unprocessable entity response +func (o *ObjectsReferencesUpdateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsReferencesUpdateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects references update unprocessable entity response +func (o *ObjectsReferencesUpdateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsReferencesUpdateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsReferencesUpdateInternalServerErrorCode is the HTTP code returned for type ObjectsReferencesUpdateInternalServerError +const ObjectsReferencesUpdateInternalServerErrorCode int = 500 + +/* +ObjectsReferencesUpdateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response objectsReferencesUpdateInternalServerError +*/ +type ObjectsReferencesUpdateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsReferencesUpdateInternalServerError creates ObjectsReferencesUpdateInternalServerError with default headers values +func NewObjectsReferencesUpdateInternalServerError() *ObjectsReferencesUpdateInternalServerError { + + return &ObjectsReferencesUpdateInternalServerError{} +} + +// WithPayload adds the payload to the objects references update internal server error response +func (o *ObjectsReferencesUpdateInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsReferencesUpdateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects references update internal server error response +func (o *ObjectsReferencesUpdateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsReferencesUpdateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_update_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_update_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..144216fcd53800ce17e443bfc0d3e2633cc21a5f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_references_update_urlbuilder.go @@ -0,0 +1,134 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || 
__/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsReferencesUpdateURL generates an URL for the objects references update operation +type ObjectsReferencesUpdateURL struct { + ID strfmt.UUID + PropertyName string + + Tenant *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsReferencesUpdateURL) WithBasePath(bp string) *ObjectsReferencesUpdateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsReferencesUpdateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsReferencesUpdateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{id}/references/{propertyName}" + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsReferencesUpdateURL") + } + + propertyName := o.PropertyName + if propertyName != "" { + _path = strings.Replace(_path, "{propertyName}", propertyName, -1) + } else { + return nil, errors.New("propertyName is required on ObjectsReferencesUpdateURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var tenantQ string + if o.Tenant != nil { + tenantQ = *o.Tenant + } + if tenantQ != "" { + qs.Set("tenant", tenantQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsReferencesUpdateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsReferencesUpdateURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsReferencesUpdateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsReferencesUpdateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsReferencesUpdateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + 
base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsReferencesUpdateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_update.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_update.go new file mode 100644 index 0000000000000000000000000000000000000000..d1da410d0444db660282d78643dc40f0effc8179 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_update.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsUpdateHandlerFunc turns a function with the right signature into a objects update handler +type ObjectsUpdateHandlerFunc func(ObjectsUpdateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsUpdateHandlerFunc) Handle(params ObjectsUpdateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsUpdateHandler interface for that can handle valid objects update params +type ObjectsUpdateHandler interface { + Handle(ObjectsUpdateParams, *models.Principal) middleware.Responder +} + +// NewObjectsUpdate creates a new http.Handler for the objects update operation +func NewObjectsUpdate(ctx *middleware.Context, handler ObjectsUpdateHandler) *ObjectsUpdate { + return &ObjectsUpdate{Context: ctx, Handler: handler} +} + +/* + ObjectsUpdate swagger:route PUT /objects/{id} objects objectsUpdate + +Update an Object based on its UUID. + +Updates an object based on its UUID. Given meta-data and schema values are validated. LastUpdateTime is set to the time this function is called.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead. +*/ +type ObjectsUpdate struct { + Context *middleware.Context + Handler ObjectsUpdateHandler +} + +func (o *ObjectsUpdate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsUpdateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_update_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_update_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..4b354c3e447b2fe94187802ad3d0dc022d12fe88 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_update_parameters.go @@ -0,0 +1,168 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsUpdateParams creates a new ObjectsUpdateParams object +// +// There are no default values defined in the spec. +func NewObjectsUpdateParams() ObjectsUpdateParams { + + return ObjectsUpdateParams{} +} + +// ObjectsUpdateParams contains all the bound params for the objects update operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.update +type ObjectsUpdateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.Object + /*Determines how many replicas must acknowledge a request before it is considered successful + In: query + */ + ConsistencyLevel *string + /*Unique ID of the Object. + Required: true + In: path + */ + ID strfmt.UUID +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsUpdateParams() beforehand. 
+func (o *ObjectsUpdateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.Object + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + qConsistencyLevel, qhkConsistencyLevel, _ := qs.GetOK("consistency_level") + if err := o.bindConsistencyLevel(qConsistencyLevel, qhkConsistencyLevel, route.Formats); err != nil { + res = append(res, err) + } + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindConsistencyLevel binds and validates parameter ConsistencyLevel from query. +func (o *ObjectsUpdateParams) bindConsistencyLevel(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.ConsistencyLevel = &raw + + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *ObjectsUpdateParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ObjectsUpdateParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_update_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_update_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..356b97b2baea40936a822a9232baa2f39be04e09 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_update_responses.go @@ -0,0 +1,255 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsUpdateOKCode is the HTTP code returned for type ObjectsUpdateOK +const ObjectsUpdateOKCode int = 200 + +/* +ObjectsUpdateOK Successfully received. + +swagger:response objectsUpdateOK +*/ +type ObjectsUpdateOK struct { + + /* + In: Body + */ + Payload *models.Object `json:"body,omitempty"` +} + +// NewObjectsUpdateOK creates ObjectsUpdateOK with default headers values +func NewObjectsUpdateOK() *ObjectsUpdateOK { + + return &ObjectsUpdateOK{} +} + +// WithPayload adds the payload to the objects update o k response +func (o *ObjectsUpdateOK) WithPayload(payload *models.Object) *ObjectsUpdateOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects update o k response +func (o *ObjectsUpdateOK) SetPayload(payload *models.Object) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsUpdateUnauthorizedCode is the HTTP code returned for type ObjectsUpdateUnauthorized +const ObjectsUpdateUnauthorizedCode int = 401 + +/* +ObjectsUpdateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsUpdateUnauthorized +*/ +type ObjectsUpdateUnauthorized struct { +} + +// NewObjectsUpdateUnauthorized creates ObjectsUpdateUnauthorized with default headers values +func NewObjectsUpdateUnauthorized() *ObjectsUpdateUnauthorized { + + return &ObjectsUpdateUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsUpdateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsUpdateForbiddenCode is the HTTP code returned for type ObjectsUpdateForbidden +const ObjectsUpdateForbiddenCode int = 403 + +/* +ObjectsUpdateForbidden Forbidden + +swagger:response objectsUpdateForbidden +*/ +type ObjectsUpdateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsUpdateForbidden creates ObjectsUpdateForbidden with default headers values +func NewObjectsUpdateForbidden() *ObjectsUpdateForbidden { + + return &ObjectsUpdateForbidden{} +} + +// WithPayload adds the payload to the objects update forbidden response +func (o *ObjectsUpdateForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsUpdateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects update forbidden response +func (o *ObjectsUpdateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsUpdateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsUpdateNotFoundCode is the HTTP code returned for type ObjectsUpdateNotFound +const ObjectsUpdateNotFoundCode int = 404 + +/* +ObjectsUpdateNotFound Successful query result but 
no resource was found. + +swagger:response objectsUpdateNotFound +*/ +type ObjectsUpdateNotFound struct { +} + +// NewObjectsUpdateNotFound creates ObjectsUpdateNotFound with default headers values +func NewObjectsUpdateNotFound() *ObjectsUpdateNotFound { + + return &ObjectsUpdateNotFound{} +} + +// WriteResponse to the client +func (o *ObjectsUpdateNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ObjectsUpdateUnprocessableEntityCode is the HTTP code returned for type ObjectsUpdateUnprocessableEntity +const ObjectsUpdateUnprocessableEntityCode int = 422 + +/* +ObjectsUpdateUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? + +swagger:response objectsUpdateUnprocessableEntity +*/ +type ObjectsUpdateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsUpdateUnprocessableEntity creates ObjectsUpdateUnprocessableEntity with default headers values +func NewObjectsUpdateUnprocessableEntity() *ObjectsUpdateUnprocessableEntity { + + return &ObjectsUpdateUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects update unprocessable entity response +func (o *ObjectsUpdateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsUpdateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects update unprocessable entity response +func (o *ObjectsUpdateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsUpdateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := 
producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsUpdateInternalServerErrorCode is the HTTP code returned for type ObjectsUpdateInternalServerError +const ObjectsUpdateInternalServerErrorCode int = 500 + +/* +ObjectsUpdateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response objectsUpdateInternalServerError +*/ +type ObjectsUpdateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsUpdateInternalServerError creates ObjectsUpdateInternalServerError with default headers values +func NewObjectsUpdateInternalServerError() *ObjectsUpdateInternalServerError { + + return &ObjectsUpdateInternalServerError{} +} + +// WithPayload adds the payload to the objects update internal server error response +func (o *ObjectsUpdateInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsUpdateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects update internal server error response +func (o *ObjectsUpdateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsUpdateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_update_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_update_urlbuilder.go new file mode 100644 index 
0000000000000000000000000000000000000000..e4a34a129d282037718e5fb8a3786824ed20f65d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_update_urlbuilder.go @@ -0,0 +1,126 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ObjectsUpdateURL generates an URL for the objects update operation +type ObjectsUpdateURL struct { + ID strfmt.UUID + + ConsistencyLevel *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsUpdateURL) WithBasePath(bp string) *ObjectsUpdateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ObjectsUpdateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsUpdateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/{id}" + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ObjectsUpdateURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var consistencyLevelQ string + if o.ConsistencyLevel != nil { + consistencyLevelQ = *o.ConsistencyLevel + } + if consistencyLevelQ != "" { + qs.Set("consistency_level", consistencyLevelQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsUpdateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsUpdateURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsUpdateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ObjectsUpdateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsUpdateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsUpdateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_validate.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_validate.go new file mode 100644 index 0000000000000000000000000000000000000000..b529d692c30bebb020fc4218854f9ecd4f41345f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_validate.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsValidateHandlerFunc turns a function with the right signature into a objects validate handler +type ObjectsValidateHandlerFunc func(ObjectsValidateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ObjectsValidateHandlerFunc) Handle(params ObjectsValidateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ObjectsValidateHandler interface for that can handle valid objects validate params +type ObjectsValidateHandler interface { + Handle(ObjectsValidateParams, *models.Principal) middleware.Responder +} + +// NewObjectsValidate creates a new http.Handler for the objects validate operation +func NewObjectsValidate(ctx *middleware.Context, handler ObjectsValidateHandler) *ObjectsValidate { + return &ObjectsValidate{Context: ctx, Handler: handler} +} + +/* + ObjectsValidate swagger:route POST 
/objects/validate objects objectsValidate + +Validate an Object based on a schema. + +Validate an object's schema and meta-data without creating it.

    If the schema of the object is valid, the request should return nothing with a plain RESTful request. Otherwise, an error object will be returned. +*/ +type ObjectsValidate struct { + Context *middleware.Context + Handler ObjectsValidateHandler +} + +func (o *ObjectsValidate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewObjectsValidateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_validate_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_validate_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..8ac319804e88d9f7f23a30c80691e72fbf92ae52 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_validate_parameters.go @@ -0,0 +1,95 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewObjectsValidateParams creates a new ObjectsValidateParams object +// +// There are no default values defined in the spec. +func NewObjectsValidateParams() ObjectsValidateParams { + + return ObjectsValidateParams{} +} + +// ObjectsValidateParams contains all the bound params for the objects validate operation +// typically these are obtained from a http.Request +// +// swagger:parameters objects.validate +type ObjectsValidateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.Object +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewObjectsValidateParams() beforehand. 
+func (o *ObjectsValidateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.Object + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_validate_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_validate_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..96713ec63bb01da95ef6c17e657fd5a0a756bdc4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_validate_responses.go @@ -0,0 +1,210 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ObjectsValidateOKCode is the HTTP code returned for type ObjectsValidateOK +const ObjectsValidateOKCode int = 200 + +/* +ObjectsValidateOK Successfully validated. + +swagger:response objectsValidateOK +*/ +type ObjectsValidateOK struct { +} + +// NewObjectsValidateOK creates ObjectsValidateOK with default headers values +func NewObjectsValidateOK() *ObjectsValidateOK { + + return &ObjectsValidateOK{} +} + +// WriteResponse to the client +func (o *ObjectsValidateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// ObjectsValidateUnauthorizedCode is the HTTP code returned for type ObjectsValidateUnauthorized +const ObjectsValidateUnauthorizedCode int = 401 + +/* +ObjectsValidateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response objectsValidateUnauthorized +*/ +type ObjectsValidateUnauthorized struct { +} + +// NewObjectsValidateUnauthorized creates ObjectsValidateUnauthorized with default headers values +func NewObjectsValidateUnauthorized() *ObjectsValidateUnauthorized { + + return &ObjectsValidateUnauthorized{} +} + +// WriteResponse to the client +func (o *ObjectsValidateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ObjectsValidateForbiddenCode is the HTTP code returned for type ObjectsValidateForbidden +const ObjectsValidateForbiddenCode int = 403 + +/* +ObjectsValidateForbidden Forbidden + +swagger:response objectsValidateForbidden +*/ +type ObjectsValidateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsValidateForbidden creates ObjectsValidateForbidden with default headers values +func NewObjectsValidateForbidden() *ObjectsValidateForbidden { + + return &ObjectsValidateForbidden{} +} + +// WithPayload adds the payload to the objects validate forbidden response +func (o *ObjectsValidateForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsValidateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects validate forbidden response +func (o *ObjectsValidateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsValidateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsValidateUnprocessableEntityCode is the HTTP code returned for type ObjectsValidateUnprocessableEntity +const 
ObjectsValidateUnprocessableEntityCode int = 422 + +/* +ObjectsValidateUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? + +swagger:response objectsValidateUnprocessableEntity +*/ +type ObjectsValidateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsValidateUnprocessableEntity creates ObjectsValidateUnprocessableEntity with default headers values +func NewObjectsValidateUnprocessableEntity() *ObjectsValidateUnprocessableEntity { + + return &ObjectsValidateUnprocessableEntity{} +} + +// WithPayload adds the payload to the objects validate unprocessable entity response +func (o *ObjectsValidateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ObjectsValidateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects validate unprocessable entity response +func (o *ObjectsValidateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsValidateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ObjectsValidateInternalServerErrorCode is the HTTP code returned for type ObjectsValidateInternalServerError +const ObjectsValidateInternalServerErrorCode int = 500 + +/* +ObjectsValidateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response objectsValidateInternalServerError +*/ +type ObjectsValidateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewObjectsValidateInternalServerError creates ObjectsValidateInternalServerError with default headers values +func NewObjectsValidateInternalServerError() *ObjectsValidateInternalServerError { + + return &ObjectsValidateInternalServerError{} +} + +// WithPayload adds the payload to the objects validate internal server error response +func (o *ObjectsValidateInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsValidateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the objects validate internal server error response +func (o *ObjectsValidateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ObjectsValidateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_validate_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_validate_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..ee35394d48ba248bc082e94207805ea4de832051 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/objects/objects_validate_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package objects + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// ObjectsValidateURL generates an URL for the objects validate operation +type ObjectsValidateURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsValidateURL) WithBasePath(bp string) *ObjectsValidateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ObjectsValidateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ObjectsValidateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/objects/validate" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ObjectsValidateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ObjectsValidateURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ObjectsValidateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on 
ObjectsValidateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ObjectsValidateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ObjectsValidateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/cancel_replication.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/cancel_replication.go new file mode 100644 index 0000000000000000000000000000000000000000..805c48d989be0944b90b7e6873a673cc02104229 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/cancel_replication.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// CancelReplicationHandlerFunc turns a function with the right signature into a cancel replication handler +type CancelReplicationHandlerFunc func(CancelReplicationParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn CancelReplicationHandlerFunc) Handle(params CancelReplicationParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// CancelReplicationHandler interface for that can handle valid cancel replication params +type CancelReplicationHandler interface { + Handle(CancelReplicationParams, *models.Principal) middleware.Responder +} + +// NewCancelReplication creates a new http.Handler for the cancel replication operation +func NewCancelReplication(ctx *middleware.Context, handler CancelReplicationHandler) *CancelReplication { + return &CancelReplication{Context: ctx, Handler: handler} +} + +/* + CancelReplication swagger:route POST /replication/replicate/{id}/cancel replication cancelReplication + +# Cancel a replication operation + +Requests the cancellation of an active replication operation identified by its ID. The operation will be stopped, but its record will remain in the 'CANCELLED' state (can't be resumed) and will not be automatically deleted. 
+*/ +type CancelReplication struct { + Context *middleware.Context + Handler CancelReplicationHandler +} + +func (o *CancelReplication) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewCancelReplicationParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/cancel_replication_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/cancel_replication_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..16062e9ebd282d2f058193caeddea40af980093b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/cancel_replication_parameters.go @@ -0,0 +1,102 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewCancelReplicationParams creates a new CancelReplicationParams object +// +// There are no default values defined in the spec. +func NewCancelReplicationParams() CancelReplicationParams { + + return CancelReplicationParams{} +} + +// CancelReplicationParams contains all the bound params for the cancel replication operation +// typically these are obtained from a http.Request +// +// swagger:parameters cancelReplication +type CancelReplicationParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*The ID of the replication operation to cancel. + Required: true + In: path + */ + ID strfmt.UUID +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewCancelReplicationParams() beforehand. +func (o *CancelReplicationParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *CancelReplicationParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *CancelReplicationParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/cancel_replication_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/cancel_replication_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..9b3f40d54ac23812046d8d0b36f1812f65c4c2d7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/cancel_replication_responses.go @@ -0,0 +1,325 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// CancelReplicationNoContentCode is the HTTP code returned for type CancelReplicationNoContent +const CancelReplicationNoContentCode int = 204 + +/* +CancelReplicationNoContent Successfully cancelled. + +swagger:response cancelReplicationNoContent +*/ +type CancelReplicationNoContent struct { +} + +// NewCancelReplicationNoContent creates CancelReplicationNoContent with default headers values +func NewCancelReplicationNoContent() *CancelReplicationNoContent { + + return &CancelReplicationNoContent{} +} + +// WriteResponse to the client +func (o *CancelReplicationNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// CancelReplicationUnauthorizedCode is the HTTP code returned for type CancelReplicationUnauthorized +const CancelReplicationUnauthorizedCode int = 401 + +/* +CancelReplicationUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response cancelReplicationUnauthorized +*/ +type CancelReplicationUnauthorized struct { +} + +// NewCancelReplicationUnauthorized creates CancelReplicationUnauthorized with default headers values +func NewCancelReplicationUnauthorized() *CancelReplicationUnauthorized { + + return &CancelReplicationUnauthorized{} +} + +// WriteResponse to the client +func (o *CancelReplicationUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// CancelReplicationForbiddenCode is the HTTP code returned for type CancelReplicationForbidden +const CancelReplicationForbiddenCode int = 403 + +/* +CancelReplicationForbidden Forbidden + +swagger:response cancelReplicationForbidden +*/ +type CancelReplicationForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCancelReplicationForbidden creates CancelReplicationForbidden with default headers values +func NewCancelReplicationForbidden() *CancelReplicationForbidden { + + return &CancelReplicationForbidden{} +} + +// WithPayload adds the payload to the cancel replication forbidden response +func (o *CancelReplicationForbidden) WithPayload(payload *models.ErrorResponse) *CancelReplicationForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the cancel replication forbidden response +func (o *CancelReplicationForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CancelReplicationForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CancelReplicationNotFoundCode is the HTTP code returned for type 
CancelReplicationNotFound +const CancelReplicationNotFoundCode int = 404 + +/* +CancelReplicationNotFound Shard replica operation not found. + +swagger:response cancelReplicationNotFound +*/ +type CancelReplicationNotFound struct { +} + +// NewCancelReplicationNotFound creates CancelReplicationNotFound with default headers values +func NewCancelReplicationNotFound() *CancelReplicationNotFound { + + return &CancelReplicationNotFound{} +} + +// WriteResponse to the client +func (o *CancelReplicationNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// CancelReplicationConflictCode is the HTTP code returned for type CancelReplicationConflict +const CancelReplicationConflictCode int = 409 + +/* +CancelReplicationConflict The operation is not in a cancellable state, e.g. it is READY or is a MOVE op in the DEHYDRATING state. + +swagger:response cancelReplicationConflict +*/ +type CancelReplicationConflict struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCancelReplicationConflict creates CancelReplicationConflict with default headers values +func NewCancelReplicationConflict() *CancelReplicationConflict { + + return &CancelReplicationConflict{} +} + +// WithPayload adds the payload to the cancel replication conflict response +func (o *CancelReplicationConflict) WithPayload(payload *models.ErrorResponse) *CancelReplicationConflict { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the cancel replication conflict response +func (o *CancelReplicationConflict) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CancelReplicationConflict) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(409) + if o.Payload != nil { + payload := o.Payload + if err := 
producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CancelReplicationUnprocessableEntityCode is the HTTP code returned for type CancelReplicationUnprocessableEntity +const CancelReplicationUnprocessableEntityCode int = 422 + +/* +CancelReplicationUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. + +swagger:response cancelReplicationUnprocessableEntity +*/ +type CancelReplicationUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCancelReplicationUnprocessableEntity creates CancelReplicationUnprocessableEntity with default headers values +func NewCancelReplicationUnprocessableEntity() *CancelReplicationUnprocessableEntity { + + return &CancelReplicationUnprocessableEntity{} +} + +// WithPayload adds the payload to the cancel replication unprocessable entity response +func (o *CancelReplicationUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *CancelReplicationUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the cancel replication unprocessable entity response +func (o *CancelReplicationUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CancelReplicationUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CancelReplicationInternalServerErrorCode is the HTTP code returned for type CancelReplicationInternalServerError +const CancelReplicationInternalServerErrorCode int = 500 + +/* +CancelReplicationInternalServerError An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error. + +swagger:response cancelReplicationInternalServerError +*/ +type CancelReplicationInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCancelReplicationInternalServerError creates CancelReplicationInternalServerError with default headers values +func NewCancelReplicationInternalServerError() *CancelReplicationInternalServerError { + + return &CancelReplicationInternalServerError{} +} + +// WithPayload adds the payload to the cancel replication internal server error response +func (o *CancelReplicationInternalServerError) WithPayload(payload *models.ErrorResponse) *CancelReplicationInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the cancel replication internal server error response +func (o *CancelReplicationInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CancelReplicationInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CancelReplicationNotImplementedCode is the HTTP code returned for type CancelReplicationNotImplemented +const CancelReplicationNotImplementedCode int = 501 + +/* +CancelReplicationNotImplemented Replica movement operations are disabled. 
+ +swagger:response cancelReplicationNotImplemented +*/ +type CancelReplicationNotImplemented struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCancelReplicationNotImplemented creates CancelReplicationNotImplemented with default headers values +func NewCancelReplicationNotImplemented() *CancelReplicationNotImplemented { + + return &CancelReplicationNotImplemented{} +} + +// WithPayload adds the payload to the cancel replication not implemented response +func (o *CancelReplicationNotImplemented) WithPayload(payload *models.ErrorResponse) *CancelReplicationNotImplemented { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the cancel replication not implemented response +func (o *CancelReplicationNotImplemented) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CancelReplicationNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(501) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/cancel_replication_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/cancel_replication_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..39ab94ac65ca804d5e9f34671531a5807f34832b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/cancel_replication_urlbuilder.go @@ -0,0 +1,112 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// CancelReplicationURL generates an URL for the cancel replication operation +type CancelReplicationURL struct { + ID strfmt.UUID + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *CancelReplicationURL) WithBasePath(bp string) *CancelReplicationURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *CancelReplicationURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *CancelReplicationURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/replication/replicate/{id}/cancel" + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on CancelReplicationURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *CancelReplicationURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *CancelReplicationURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *CancelReplicationURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on CancelReplicationURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on CancelReplicationURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *CancelReplicationURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_all_replications.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_all_replications.go new 
file mode 100644 index 0000000000000000000000000000000000000000..3e29e17128ae9d49b4cc6f5f592425b816d1bbfe --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_all_replications.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeleteAllReplicationsHandlerFunc turns a function with the right signature into a delete all replications handler +type DeleteAllReplicationsHandlerFunc func(DeleteAllReplicationsParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn DeleteAllReplicationsHandlerFunc) Handle(params DeleteAllReplicationsParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// DeleteAllReplicationsHandler interface for that can handle valid delete all replications params +type DeleteAllReplicationsHandler interface { + Handle(DeleteAllReplicationsParams, *models.Principal) middleware.Responder +} + +// NewDeleteAllReplications creates a new http.Handler for the delete all replications operation +func NewDeleteAllReplications(ctx *middleware.Context, handler DeleteAllReplicationsHandler) *DeleteAllReplications { + return &DeleteAllReplications{Context: ctx, Handler: handler} +} + +/* + DeleteAllReplications swagger:route DELETE /replication/replicate replication deleteAllReplications + +Schedules all replication operations for 
deletion across all collections, shards, and nodes. +*/ +type DeleteAllReplications struct { + Context *middleware.Context + Handler DeleteAllReplicationsHandler +} + +func (o *DeleteAllReplications) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewDeleteAllReplicationsParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_all_replications_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_all_replications_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..86ba19db108ec9d3ee2c2ddce226fe99d6572feb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_all_replications_parameters.go @@ -0,0 +1,57 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" +) + +// NewDeleteAllReplicationsParams creates a new DeleteAllReplicationsParams object +// +// There are no default values defined in the spec. +func NewDeleteAllReplicationsParams() DeleteAllReplicationsParams { + + return DeleteAllReplicationsParams{} +} + +// DeleteAllReplicationsParams contains all the bound params for the delete all replications operation +// typically these are obtained from a http.Request +// +// swagger:parameters deleteAllReplications +type DeleteAllReplicationsParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewDeleteAllReplicationsParams() beforehand. +func (o *DeleteAllReplicationsParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_all_replications_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_all_replications_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..70216b0ec1bcd24e2068333f925cb462c35214ac --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_all_replications_responses.go @@ -0,0 +1,300 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeleteAllReplicationsNoContentCode is the HTTP code returned for type DeleteAllReplicationsNoContent +const DeleteAllReplicationsNoContentCode int = 204 + +/* +DeleteAllReplicationsNoContent Replication operation registered successfully + +swagger:response deleteAllReplicationsNoContent +*/ +type DeleteAllReplicationsNoContent struct { +} + +// NewDeleteAllReplicationsNoContent creates DeleteAllReplicationsNoContent with default headers values +func NewDeleteAllReplicationsNoContent() *DeleteAllReplicationsNoContent { + + return &DeleteAllReplicationsNoContent{} +} + +// WriteResponse to the client +func (o *DeleteAllReplicationsNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// 
DeleteAllReplicationsBadRequestCode is the HTTP code returned for type DeleteAllReplicationsBadRequest +const DeleteAllReplicationsBadRequestCode int = 400 + +/* +DeleteAllReplicationsBadRequest Malformed request. + +swagger:response deleteAllReplicationsBadRequest +*/ +type DeleteAllReplicationsBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteAllReplicationsBadRequest creates DeleteAllReplicationsBadRequest with default headers values +func NewDeleteAllReplicationsBadRequest() *DeleteAllReplicationsBadRequest { + + return &DeleteAllReplicationsBadRequest{} +} + +// WithPayload adds the payload to the delete all replications bad request response +func (o *DeleteAllReplicationsBadRequest) WithPayload(payload *models.ErrorResponse) *DeleteAllReplicationsBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete all replications bad request response +func (o *DeleteAllReplicationsBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteAllReplicationsBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteAllReplicationsUnauthorizedCode is the HTTP code returned for type DeleteAllReplicationsUnauthorized +const DeleteAllReplicationsUnauthorizedCode int = 401 + +/* +DeleteAllReplicationsUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response deleteAllReplicationsUnauthorized +*/ +type DeleteAllReplicationsUnauthorized struct { +} + +// NewDeleteAllReplicationsUnauthorized creates DeleteAllReplicationsUnauthorized with default headers values +func NewDeleteAllReplicationsUnauthorized() *DeleteAllReplicationsUnauthorized { + + return &DeleteAllReplicationsUnauthorized{} +} + +// WriteResponse to the client +func (o *DeleteAllReplicationsUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// DeleteAllReplicationsForbiddenCode is the HTTP code returned for type DeleteAllReplicationsForbidden +const DeleteAllReplicationsForbiddenCode int = 403 + +/* +DeleteAllReplicationsForbidden Forbidden + +swagger:response deleteAllReplicationsForbidden +*/ +type DeleteAllReplicationsForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteAllReplicationsForbidden creates DeleteAllReplicationsForbidden with default headers values +func NewDeleteAllReplicationsForbidden() *DeleteAllReplicationsForbidden { + + return &DeleteAllReplicationsForbidden{} +} + +// WithPayload adds the payload to the delete all replications forbidden response +func (o *DeleteAllReplicationsForbidden) WithPayload(payload *models.ErrorResponse) *DeleteAllReplicationsForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete all replications forbidden response +func (o *DeleteAllReplicationsForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteAllReplicationsForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with 
this + } + } +} + +// DeleteAllReplicationsUnprocessableEntityCode is the HTTP code returned for type DeleteAllReplicationsUnprocessableEntity +const DeleteAllReplicationsUnprocessableEntityCode int = 422 + +/* +DeleteAllReplicationsUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. + +swagger:response deleteAllReplicationsUnprocessableEntity +*/ +type DeleteAllReplicationsUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteAllReplicationsUnprocessableEntity creates DeleteAllReplicationsUnprocessableEntity with default headers values +func NewDeleteAllReplicationsUnprocessableEntity() *DeleteAllReplicationsUnprocessableEntity { + + return &DeleteAllReplicationsUnprocessableEntity{} +} + +// WithPayload adds the payload to the delete all replications unprocessable entity response +func (o *DeleteAllReplicationsUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *DeleteAllReplicationsUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete all replications unprocessable entity response +func (o *DeleteAllReplicationsUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteAllReplicationsUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteAllReplicationsInternalServerErrorCode is the HTTP code returned for type DeleteAllReplicationsInternalServerError +const DeleteAllReplicationsInternalServerErrorCode int = 500 + +/* +DeleteAllReplicationsInternalServerError An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error. + +swagger:response deleteAllReplicationsInternalServerError +*/ +type DeleteAllReplicationsInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteAllReplicationsInternalServerError creates DeleteAllReplicationsInternalServerError with default headers values +func NewDeleteAllReplicationsInternalServerError() *DeleteAllReplicationsInternalServerError { + + return &DeleteAllReplicationsInternalServerError{} +} + +// WithPayload adds the payload to the delete all replications internal server error response +func (o *DeleteAllReplicationsInternalServerError) WithPayload(payload *models.ErrorResponse) *DeleteAllReplicationsInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete all replications internal server error response +func (o *DeleteAllReplicationsInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteAllReplicationsInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteAllReplicationsNotImplementedCode is the HTTP code returned for type DeleteAllReplicationsNotImplemented +const DeleteAllReplicationsNotImplementedCode int = 501 + +/* +DeleteAllReplicationsNotImplemented Replica movement operations are disabled. 
+ +swagger:response deleteAllReplicationsNotImplemented +*/ +type DeleteAllReplicationsNotImplemented struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteAllReplicationsNotImplemented creates DeleteAllReplicationsNotImplemented with default headers values +func NewDeleteAllReplicationsNotImplemented() *DeleteAllReplicationsNotImplemented { + + return &DeleteAllReplicationsNotImplemented{} +} + +// WithPayload adds the payload to the delete all replications not implemented response +func (o *DeleteAllReplicationsNotImplemented) WithPayload(payload *models.ErrorResponse) *DeleteAllReplicationsNotImplemented { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete all replications not implemented response +func (o *DeleteAllReplicationsNotImplemented) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteAllReplicationsNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(501) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_all_replications_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_all_replications_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..4052ca439994b1be6ab34f591b2e24bc035cfb84 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_all_replications_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 
Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// DeleteAllReplicationsURL generates an URL for the delete all replications operation +type DeleteAllReplicationsURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *DeleteAllReplicationsURL) WithBasePath(bp string) *DeleteAllReplicationsURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *DeleteAllReplicationsURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *DeleteAllReplicationsURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/replication/replicate" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *DeleteAllReplicationsURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *DeleteAllReplicationsURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *DeleteAllReplicationsURL) BuildFull(scheme, host string) 
(*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on DeleteAllReplicationsURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on DeleteAllReplicationsURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *DeleteAllReplicationsURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_replication.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_replication.go new file mode 100644 index 0000000000000000000000000000000000000000..6566910cf2b0a0e3d7a87dd8b5e6601fd08e7d71 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_replication.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeleteReplicationHandlerFunc turns a function with the right signature into a delete replication handler +type DeleteReplicationHandlerFunc func(DeleteReplicationParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn DeleteReplicationHandlerFunc) Handle(params DeleteReplicationParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// DeleteReplicationHandler interface for that can handle valid delete replication params +type DeleteReplicationHandler interface { + Handle(DeleteReplicationParams, *models.Principal) middleware.Responder +} + +// NewDeleteReplication creates a new http.Handler for the delete replication operation +func NewDeleteReplication(ctx *middleware.Context, handler DeleteReplicationHandler) *DeleteReplication { + return &DeleteReplication{Context: ctx, Handler: handler} +} + +/* + DeleteReplication swagger:route DELETE /replication/replicate/{id} replication deleteReplication + +# Delete a replication operation + +Removes a specific replication operation. If the operation is currently active, it will be cancelled and its resources cleaned up before the operation is deleted. 
+*/ +type DeleteReplication struct { + Context *middleware.Context + Handler DeleteReplicationHandler +} + +func (o *DeleteReplication) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewDeleteReplicationParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_replication_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_replication_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..ad62eabfb5d91fd0213e43ee225df5310fe001b7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_replication_parameters.go @@ -0,0 +1,102 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewDeleteReplicationParams creates a new DeleteReplicationParams object +// +// There are no default values defined in the spec. +func NewDeleteReplicationParams() DeleteReplicationParams { + + return DeleteReplicationParams{} +} + +// DeleteReplicationParams contains all the bound params for the delete replication operation +// typically these are obtained from a http.Request +// +// swagger:parameters deleteReplication +type DeleteReplicationParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*The ID of the replication operation to delete. + Required: true + In: path + */ + ID strfmt.UUID +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewDeleteReplicationParams() beforehand. +func (o *DeleteReplicationParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindID binds and validates parameter ID from path. 
+func (o *DeleteReplicationParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *DeleteReplicationParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_replication_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_replication_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..a024337e9e429f8eb9eefc9ab1304d3c6cfac12d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_replication_responses.go @@ -0,0 +1,325 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeleteReplicationNoContentCode is the HTTP code returned for type DeleteReplicationNoContent +const DeleteReplicationNoContentCode int = 204 + +/* +DeleteReplicationNoContent Successfully deleted. + +swagger:response deleteReplicationNoContent +*/ +type DeleteReplicationNoContent struct { +} + +// NewDeleteReplicationNoContent creates DeleteReplicationNoContent with default headers values +func NewDeleteReplicationNoContent() *DeleteReplicationNoContent { + + return &DeleteReplicationNoContent{} +} + +// WriteResponse to the client +func (o *DeleteReplicationNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// DeleteReplicationUnauthorizedCode is the HTTP code returned for type DeleteReplicationUnauthorized +const DeleteReplicationUnauthorizedCode int = 401 + +/* +DeleteReplicationUnauthorized Unauthorized or invalid credentials. + +swagger:response deleteReplicationUnauthorized +*/ +type DeleteReplicationUnauthorized struct { +} + +// NewDeleteReplicationUnauthorized creates DeleteReplicationUnauthorized with default headers values +func NewDeleteReplicationUnauthorized() *DeleteReplicationUnauthorized { + + return &DeleteReplicationUnauthorized{} +} + +// WriteResponse to the client +func (o *DeleteReplicationUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// DeleteReplicationForbiddenCode is the HTTP code returned for type DeleteReplicationForbidden +const DeleteReplicationForbiddenCode int = 403 + +/* +DeleteReplicationForbidden Forbidden. 
+ +swagger:response deleteReplicationForbidden +*/ +type DeleteReplicationForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteReplicationForbidden creates DeleteReplicationForbidden with default headers values +func NewDeleteReplicationForbidden() *DeleteReplicationForbidden { + + return &DeleteReplicationForbidden{} +} + +// WithPayload adds the payload to the delete replication forbidden response +func (o *DeleteReplicationForbidden) WithPayload(payload *models.ErrorResponse) *DeleteReplicationForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete replication forbidden response +func (o *DeleteReplicationForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteReplicationForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteReplicationNotFoundCode is the HTTP code returned for type DeleteReplicationNotFound +const DeleteReplicationNotFoundCode int = 404 + +/* +DeleteReplicationNotFound Shard replica operation not found. 
+ +swagger:response deleteReplicationNotFound +*/ +type DeleteReplicationNotFound struct { +} + +// NewDeleteReplicationNotFound creates DeleteReplicationNotFound with default headers values +func NewDeleteReplicationNotFound() *DeleteReplicationNotFound { + + return &DeleteReplicationNotFound{} +} + +// WriteResponse to the client +func (o *DeleteReplicationNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// DeleteReplicationConflictCode is the HTTP code returned for type DeleteReplicationConflict +const DeleteReplicationConflictCode int = 409 + +/* +DeleteReplicationConflict The operation is not in a deletable state, e.g. it is a MOVE op in the DEHYDRATING state. + +swagger:response deleteReplicationConflict +*/ +type DeleteReplicationConflict struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteReplicationConflict creates DeleteReplicationConflict with default headers values +func NewDeleteReplicationConflict() *DeleteReplicationConflict { + + return &DeleteReplicationConflict{} +} + +// WithPayload adds the payload to the delete replication conflict response +func (o *DeleteReplicationConflict) WithPayload(payload *models.ErrorResponse) *DeleteReplicationConflict { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete replication conflict response +func (o *DeleteReplicationConflict) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteReplicationConflict) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(409) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteReplicationUnprocessableEntityCode is the 
HTTP code returned for type DeleteReplicationUnprocessableEntity +const DeleteReplicationUnprocessableEntityCode int = 422 + +/* +DeleteReplicationUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. + +swagger:response deleteReplicationUnprocessableEntity +*/ +type DeleteReplicationUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteReplicationUnprocessableEntity creates DeleteReplicationUnprocessableEntity with default headers values +func NewDeleteReplicationUnprocessableEntity() *DeleteReplicationUnprocessableEntity { + + return &DeleteReplicationUnprocessableEntity{} +} + +// WithPayload adds the payload to the delete replication unprocessable entity response +func (o *DeleteReplicationUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *DeleteReplicationUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete replication unprocessable entity response +func (o *DeleteReplicationUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteReplicationUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteReplicationInternalServerErrorCode is the HTTP code returned for type DeleteReplicationInternalServerError +const DeleteReplicationInternalServerErrorCode int = 500 + +/* +DeleteReplicationInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response deleteReplicationInternalServerError +*/ +type DeleteReplicationInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteReplicationInternalServerError creates DeleteReplicationInternalServerError with default headers values +func NewDeleteReplicationInternalServerError() *DeleteReplicationInternalServerError { + + return &DeleteReplicationInternalServerError{} +} + +// WithPayload adds the payload to the delete replication internal server error response +func (o *DeleteReplicationInternalServerError) WithPayload(payload *models.ErrorResponse) *DeleteReplicationInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete replication internal server error response +func (o *DeleteReplicationInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteReplicationInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteReplicationNotImplementedCode is the HTTP code returned for type DeleteReplicationNotImplemented +const DeleteReplicationNotImplementedCode int = 501 + +/* +DeleteReplicationNotImplemented Replica movement operations are disabled. 
+ +swagger:response deleteReplicationNotImplemented +*/ +type DeleteReplicationNotImplemented struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteReplicationNotImplemented creates DeleteReplicationNotImplemented with default headers values +func NewDeleteReplicationNotImplemented() *DeleteReplicationNotImplemented { + + return &DeleteReplicationNotImplemented{} +} + +// WithPayload adds the payload to the delete replication not implemented response +func (o *DeleteReplicationNotImplemented) WithPayload(payload *models.ErrorResponse) *DeleteReplicationNotImplemented { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete replication not implemented response +func (o *DeleteReplicationNotImplemented) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteReplicationNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(501) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_replication_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_replication_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..1088b7c0165785f0c7947f369a37dacae08950bf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/delete_replication_urlbuilder.go @@ -0,0 +1,112 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// DeleteReplicationURL generates an URL for the delete replication operation +type DeleteReplicationURL struct { + ID strfmt.UUID + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *DeleteReplicationURL) WithBasePath(bp string) *DeleteReplicationURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *DeleteReplicationURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *DeleteReplicationURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/replication/replicate/{id}" + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on DeleteReplicationURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *DeleteReplicationURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *DeleteReplicationURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *DeleteReplicationURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on DeleteReplicationURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on DeleteReplicationURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *DeleteReplicationURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/force_delete_replications.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/force_delete_replications.go new 
file mode 100644 index 0000000000000000000000000000000000000000..5691c9812494e1ed16c3c35baab2f8c8b6120910 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/force_delete_replications.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ForceDeleteReplicationsHandlerFunc turns a function with the right signature into a force delete replications handler +type ForceDeleteReplicationsHandlerFunc func(ForceDeleteReplicationsParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ForceDeleteReplicationsHandlerFunc) Handle(params ForceDeleteReplicationsParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ForceDeleteReplicationsHandler interface for that can handle valid force delete replications params +type ForceDeleteReplicationsHandler interface { + Handle(ForceDeleteReplicationsParams, *models.Principal) middleware.Responder +} + +// NewForceDeleteReplications creates a new http.Handler for the force delete replications operation +func NewForceDeleteReplications(ctx *middleware.Context, handler ForceDeleteReplicationsHandler) *ForceDeleteReplications { + return &ForceDeleteReplications{Context: ctx, Handler: handler} +} + +/* + ForceDeleteReplications swagger:route POST /replication/replicate/force-delete replication 
forceDeleteReplications + +# Force delete replication operations + +USE AT OWN RISK! Synchronously force delete operations from the FSM. This will not perform any checks on which state the operation is in so may lead to data corruption or loss. It is recommended to first scale the number of replication engine workers to 0 before calling this endpoint to ensure no operations are in-flight. +*/ +type ForceDeleteReplications struct { + Context *middleware.Context + Handler ForceDeleteReplicationsHandler +} + +func (o *ForceDeleteReplications) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewForceDeleteReplicationsParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/force_delete_replications_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/force_delete_replications_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..10b0af66b33e9bebd09f7685eb86031f19602a02 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/force_delete_replications_parameters.go @@ -0,0 +1,87 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | 
(_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewForceDeleteReplicationsParams creates a new ForceDeleteReplicationsParams object +// +// There are no default values defined in the spec. +func NewForceDeleteReplicationsParams() ForceDeleteReplicationsParams { + + return ForceDeleteReplicationsParams{} +} + +// ForceDeleteReplicationsParams contains all the bound params for the force delete replications operation +// typically these are obtained from a http.Request +// +// swagger:parameters forceDeleteReplications +type ForceDeleteReplicationsParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + In: body + */ + Body *models.ReplicationReplicateForceDeleteRequest +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewForceDeleteReplicationsParams() beforehand. 
+func (o *ForceDeleteReplicationsParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.ReplicationReplicateForceDeleteRequest + if err := route.Consumer.Consume(r.Body, &body); err != nil { + res = append(res, errors.NewParseError("body", "body", "", err)) + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/force_delete_replications_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/force_delete_replications_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..b4031cb85b49ac3a02bcdadbed781f92318c7320 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/force_delete_replications_responses.go @@ -0,0 +1,275 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ForceDeleteReplicationsOKCode is the HTTP code returned for type ForceDeleteReplicationsOK +const ForceDeleteReplicationsOKCode int = 200 + +/* +ForceDeleteReplicationsOK Replication operations force deleted successfully. + +swagger:response forceDeleteReplicationsOK +*/ +type ForceDeleteReplicationsOK struct { + + /* + In: Body + */ + Payload *models.ReplicationReplicateForceDeleteResponse `json:"body,omitempty"` +} + +// NewForceDeleteReplicationsOK creates ForceDeleteReplicationsOK with default headers values +func NewForceDeleteReplicationsOK() *ForceDeleteReplicationsOK { + + return &ForceDeleteReplicationsOK{} +} + +// WithPayload adds the payload to the force delete replications o k response +func (o *ForceDeleteReplicationsOK) WithPayload(payload *models.ReplicationReplicateForceDeleteResponse) *ForceDeleteReplicationsOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the force delete replications o k response +func (o *ForceDeleteReplicationsOK) SetPayload(payload *models.ReplicationReplicateForceDeleteResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ForceDeleteReplicationsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ForceDeleteReplicationsBadRequestCode is the HTTP code returned for type ForceDeleteReplicationsBadRequest +const ForceDeleteReplicationsBadRequestCode int = 400 + +/* +ForceDeleteReplicationsBadRequest Malformed request. 
+ +swagger:response forceDeleteReplicationsBadRequest +*/ +type ForceDeleteReplicationsBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewForceDeleteReplicationsBadRequest creates ForceDeleteReplicationsBadRequest with default headers values +func NewForceDeleteReplicationsBadRequest() *ForceDeleteReplicationsBadRequest { + + return &ForceDeleteReplicationsBadRequest{} +} + +// WithPayload adds the payload to the force delete replications bad request response +func (o *ForceDeleteReplicationsBadRequest) WithPayload(payload *models.ErrorResponse) *ForceDeleteReplicationsBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the force delete replications bad request response +func (o *ForceDeleteReplicationsBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ForceDeleteReplicationsBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ForceDeleteReplicationsUnauthorizedCode is the HTTP code returned for type ForceDeleteReplicationsUnauthorized +const ForceDeleteReplicationsUnauthorizedCode int = 401 + +/* +ForceDeleteReplicationsUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response forceDeleteReplicationsUnauthorized +*/ +type ForceDeleteReplicationsUnauthorized struct { +} + +// NewForceDeleteReplicationsUnauthorized creates ForceDeleteReplicationsUnauthorized with default headers values +func NewForceDeleteReplicationsUnauthorized() *ForceDeleteReplicationsUnauthorized { + + return &ForceDeleteReplicationsUnauthorized{} +} + +// WriteResponse to the client +func (o *ForceDeleteReplicationsUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ForceDeleteReplicationsForbiddenCode is the HTTP code returned for type ForceDeleteReplicationsForbidden +const ForceDeleteReplicationsForbiddenCode int = 403 + +/* +ForceDeleteReplicationsForbidden Forbidden + +swagger:response forceDeleteReplicationsForbidden +*/ +type ForceDeleteReplicationsForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewForceDeleteReplicationsForbidden creates ForceDeleteReplicationsForbidden with default headers values +func NewForceDeleteReplicationsForbidden() *ForceDeleteReplicationsForbidden { + + return &ForceDeleteReplicationsForbidden{} +} + +// WithPayload adds the payload to the force delete replications forbidden response +func (o *ForceDeleteReplicationsForbidden) WithPayload(payload *models.ErrorResponse) *ForceDeleteReplicationsForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the force delete replications forbidden response +func (o *ForceDeleteReplicationsForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ForceDeleteReplicationsForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + 
panic(err) // let the recovery middleware deal with this + } + } +} + +// ForceDeleteReplicationsUnprocessableEntityCode is the HTTP code returned for type ForceDeleteReplicationsUnprocessableEntity +const ForceDeleteReplicationsUnprocessableEntityCode int = 422 + +/* +ForceDeleteReplicationsUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. + +swagger:response forceDeleteReplicationsUnprocessableEntity +*/ +type ForceDeleteReplicationsUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewForceDeleteReplicationsUnprocessableEntity creates ForceDeleteReplicationsUnprocessableEntity with default headers values +func NewForceDeleteReplicationsUnprocessableEntity() *ForceDeleteReplicationsUnprocessableEntity { + + return &ForceDeleteReplicationsUnprocessableEntity{} +} + +// WithPayload adds the payload to the force delete replications unprocessable entity response +func (o *ForceDeleteReplicationsUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ForceDeleteReplicationsUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the force delete replications unprocessable entity response +func (o *ForceDeleteReplicationsUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ForceDeleteReplicationsUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ForceDeleteReplicationsInternalServerErrorCode is the HTTP code returned for type ForceDeleteReplicationsInternalServerError +const ForceDeleteReplicationsInternalServerErrorCode int = 500 + +/* +ForceDeleteReplicationsInternalServerError An 
error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response forceDeleteReplicationsInternalServerError +*/ +type ForceDeleteReplicationsInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewForceDeleteReplicationsInternalServerError creates ForceDeleteReplicationsInternalServerError with default headers values +func NewForceDeleteReplicationsInternalServerError() *ForceDeleteReplicationsInternalServerError { + + return &ForceDeleteReplicationsInternalServerError{} +} + +// WithPayload adds the payload to the force delete replications internal server error response +func (o *ForceDeleteReplicationsInternalServerError) WithPayload(payload *models.ErrorResponse) *ForceDeleteReplicationsInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the force delete replications internal server error response +func (o *ForceDeleteReplicationsInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ForceDeleteReplicationsInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/force_delete_replications_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/force_delete_replications_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..f80d93b91bc765ffe340e467cd6663f3455d1793 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/force_delete_replications_urlbuilder.go @@ 
-0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// ForceDeleteReplicationsURL generates an URL for the force delete replications operation +type ForceDeleteReplicationsURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ForceDeleteReplicationsURL) WithBasePath(bp string) *ForceDeleteReplicationsURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ForceDeleteReplicationsURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ForceDeleteReplicationsURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/replication/replicate/force-delete" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ForceDeleteReplicationsURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ForceDeleteReplicationsURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ForceDeleteReplicationsURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ForceDeleteReplicationsURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ForceDeleteReplicationsURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ForceDeleteReplicationsURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/get_collection_sharding_state.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/get_collection_sharding_state.go new file mode 100644 index 0000000000000000000000000000000000000000..06bfb6bcc4bade9227c9e7fbffa43d884169eebc --- 
/dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/get_collection_sharding_state.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetCollectionShardingStateHandlerFunc turns a function with the right signature into a get collection sharding state handler +type GetCollectionShardingStateHandlerFunc func(GetCollectionShardingStateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetCollectionShardingStateHandlerFunc) Handle(params GetCollectionShardingStateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GetCollectionShardingStateHandler interface for that can handle valid get collection sharding state params +type GetCollectionShardingStateHandler interface { + Handle(GetCollectionShardingStateParams, *models.Principal) middleware.Responder +} + +// NewGetCollectionShardingState creates a new http.Handler for the get collection sharding state operation +func NewGetCollectionShardingState(ctx *middleware.Context, handler GetCollectionShardingStateHandler) *GetCollectionShardingState { + return &GetCollectionShardingState{Context: ctx, Handler: handler} +} + +/* + GetCollectionShardingState swagger:route GET /replication/sharding-state replication getCollectionShardingState + +# Get sharding state + +Fetches the current 
sharding state, including replica locations and statuses, for all collections or a specified collection. If a shard name is provided along with a collection, the state for that specific shard is returned. +*/ +type GetCollectionShardingState struct { + Context *middleware.Context + Handler GetCollectionShardingStateHandler +} + +func (o *GetCollectionShardingState) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGetCollectionShardingStateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/get_collection_sharding_state_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/get_collection_sharding_state_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..1f42fe357f69428f458bda1f6ce055d7fb1836bd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/get_collection_sharding_state_parameters.go @@ -0,0 +1,115 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewGetCollectionShardingStateParams creates a new GetCollectionShardingStateParams object +// +// There are no default values defined in the spec. +func NewGetCollectionShardingStateParams() GetCollectionShardingStateParams { + + return GetCollectionShardingStateParams{} +} + +// GetCollectionShardingStateParams contains all the bound params for the get collection sharding state operation +// typically these are obtained from a http.Request +// +// swagger:parameters getCollectionShardingState +type GetCollectionShardingStateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*The collection name to get the sharding state for. + In: query + */ + Collection *string + /*The shard to get the sharding state for. + In: query + */ + Shard *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetCollectionShardingStateParams() beforehand. 
+func (o *GetCollectionShardingStateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + qCollection, qhkCollection, _ := qs.GetOK("collection") + if err := o.bindCollection(qCollection, qhkCollection, route.Formats); err != nil { + res = append(res, err) + } + + qShard, qhkShard, _ := qs.GetOK("shard") + if err := o.bindShard(qShard, qhkShard, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindCollection binds and validates parameter Collection from query. +func (o *GetCollectionShardingStateParams) bindCollection(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Collection = &raw + + return nil +} + +// bindShard binds and validates parameter Shard from query. 
+func (o *GetCollectionShardingStateParams) bindShard(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Shard = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/get_collection_sharding_state_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/get_collection_sharding_state_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..25ece08ad4811b5996addc5137c8d52e2c8d2957 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/get_collection_sharding_state_responses.go @@ -0,0 +1,320 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetCollectionShardingStateOKCode is the HTTP code returned for type GetCollectionShardingStateOK +const GetCollectionShardingStateOKCode int = 200 + +/* +GetCollectionShardingStateOK Successfully retrieved sharding state. 
+ +swagger:response getCollectionShardingStateOK +*/ +type GetCollectionShardingStateOK struct { + + /* + In: Body + */ + Payload *models.ReplicationShardingStateResponse `json:"body,omitempty"` +} + +// NewGetCollectionShardingStateOK creates GetCollectionShardingStateOK with default headers values +func NewGetCollectionShardingStateOK() *GetCollectionShardingStateOK { + + return &GetCollectionShardingStateOK{} +} + +// WithPayload adds the payload to the get collection sharding state o k response +func (o *GetCollectionShardingStateOK) WithPayload(payload *models.ReplicationShardingStateResponse) *GetCollectionShardingStateOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get collection sharding state o k response +func (o *GetCollectionShardingStateOK) SetPayload(payload *models.ReplicationShardingStateResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetCollectionShardingStateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetCollectionShardingStateBadRequestCode is the HTTP code returned for type GetCollectionShardingStateBadRequest +const GetCollectionShardingStateBadRequestCode int = 400 + +/* +GetCollectionShardingStateBadRequest Bad request. 
+ +swagger:response getCollectionShardingStateBadRequest +*/ +type GetCollectionShardingStateBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetCollectionShardingStateBadRequest creates GetCollectionShardingStateBadRequest with default headers values +func NewGetCollectionShardingStateBadRequest() *GetCollectionShardingStateBadRequest { + + return &GetCollectionShardingStateBadRequest{} +} + +// WithPayload adds the payload to the get collection sharding state bad request response +func (o *GetCollectionShardingStateBadRequest) WithPayload(payload *models.ErrorResponse) *GetCollectionShardingStateBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get collection sharding state bad request response +func (o *GetCollectionShardingStateBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetCollectionShardingStateBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetCollectionShardingStateUnauthorizedCode is the HTTP code returned for type GetCollectionShardingStateUnauthorized +const GetCollectionShardingStateUnauthorizedCode int = 401 + +/* +GetCollectionShardingStateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response getCollectionShardingStateUnauthorized +*/ +type GetCollectionShardingStateUnauthorized struct { +} + +// NewGetCollectionShardingStateUnauthorized creates GetCollectionShardingStateUnauthorized with default headers values +func NewGetCollectionShardingStateUnauthorized() *GetCollectionShardingStateUnauthorized { + + return &GetCollectionShardingStateUnauthorized{} +} + +// WriteResponse to the client +func (o *GetCollectionShardingStateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// GetCollectionShardingStateForbiddenCode is the HTTP code returned for type GetCollectionShardingStateForbidden +const GetCollectionShardingStateForbiddenCode int = 403 + +/* +GetCollectionShardingStateForbidden Forbidden + +swagger:response getCollectionShardingStateForbidden +*/ +type GetCollectionShardingStateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetCollectionShardingStateForbidden creates GetCollectionShardingStateForbidden with default headers values +func NewGetCollectionShardingStateForbidden() *GetCollectionShardingStateForbidden { + + return &GetCollectionShardingStateForbidden{} +} + +// WithPayload adds the payload to the get collection sharding state forbidden response +func (o *GetCollectionShardingStateForbidden) WithPayload(payload *models.ErrorResponse) *GetCollectionShardingStateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get collection sharding state forbidden response +func (o *GetCollectionShardingStateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetCollectionShardingStateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + 
payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetCollectionShardingStateNotFoundCode is the HTTP code returned for type GetCollectionShardingStateNotFound +const GetCollectionShardingStateNotFoundCode int = 404 + +/* +GetCollectionShardingStateNotFound Collection or shard not found. + +swagger:response getCollectionShardingStateNotFound +*/ +type GetCollectionShardingStateNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetCollectionShardingStateNotFound creates GetCollectionShardingStateNotFound with default headers values +func NewGetCollectionShardingStateNotFound() *GetCollectionShardingStateNotFound { + + return &GetCollectionShardingStateNotFound{} +} + +// WithPayload adds the payload to the get collection sharding state not found response +func (o *GetCollectionShardingStateNotFound) WithPayload(payload *models.ErrorResponse) *GetCollectionShardingStateNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get collection sharding state not found response +func (o *GetCollectionShardingStateNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetCollectionShardingStateNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetCollectionShardingStateInternalServerErrorCode is the HTTP code returned for type GetCollectionShardingStateInternalServerError +const GetCollectionShardingStateInternalServerErrorCode int = 500 + +/* +GetCollectionShardingStateInternalServerError An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error. + +swagger:response getCollectionShardingStateInternalServerError +*/ +type GetCollectionShardingStateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetCollectionShardingStateInternalServerError creates GetCollectionShardingStateInternalServerError with default headers values +func NewGetCollectionShardingStateInternalServerError() *GetCollectionShardingStateInternalServerError { + + return &GetCollectionShardingStateInternalServerError{} +} + +// WithPayload adds the payload to the get collection sharding state internal server error response +func (o *GetCollectionShardingStateInternalServerError) WithPayload(payload *models.ErrorResponse) *GetCollectionShardingStateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get collection sharding state internal server error response +func (o *GetCollectionShardingStateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetCollectionShardingStateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetCollectionShardingStateNotImplementedCode is the HTTP code returned for type GetCollectionShardingStateNotImplemented +const GetCollectionShardingStateNotImplementedCode int = 501 + +/* +GetCollectionShardingStateNotImplemented Replica movement operations are disabled. 
+ +swagger:response getCollectionShardingStateNotImplemented +*/ +type GetCollectionShardingStateNotImplemented struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetCollectionShardingStateNotImplemented creates GetCollectionShardingStateNotImplemented with default headers values +func NewGetCollectionShardingStateNotImplemented() *GetCollectionShardingStateNotImplemented { + + return &GetCollectionShardingStateNotImplemented{} +} + +// WithPayload adds the payload to the get collection sharding state not implemented response +func (o *GetCollectionShardingStateNotImplemented) WithPayload(payload *models.ErrorResponse) *GetCollectionShardingStateNotImplemented { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get collection sharding state not implemented response +func (o *GetCollectionShardingStateNotImplemented) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetCollectionShardingStateNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(501) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/get_collection_sharding_state_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/get_collection_sharding_state_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..7cac7f8ca2ac72709cee81467aed93c501028e20 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/get_collection_sharding_state_urlbuilder.go @@ -0,0 +1,123 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || 
__/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// GetCollectionShardingStateURL generates an URL for the get collection sharding state operation +type GetCollectionShardingStateURL struct { + Collection *string + Shard *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetCollectionShardingStateURL) WithBasePath(bp string) *GetCollectionShardingStateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *GetCollectionShardingStateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetCollectionShardingStateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/replication/sharding-state" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var collectionQ string + if o.Collection != nil { + collectionQ = *o.Collection + } + if collectionQ != "" { + qs.Set("collection", collectionQ) + } + + var shardQ string + if o.Shard != nil { + shardQ = *o.Shard + } + if shardQ != "" { + qs.Set("shard", shardQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetCollectionShardingStateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetCollectionShardingStateURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetCollectionShardingStateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetCollectionShardingStateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetCollectionShardingStateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetCollectionShardingStateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/list_replication.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/list_replication.go new file mode 100644 index 0000000000000000000000000000000000000000..438f7e8c4ffc09ecc1c71dbd2c2921c106dcf054 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/list_replication.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ListReplicationHandlerFunc turns a function with the right signature into a list replication handler +type ListReplicationHandlerFunc func(ListReplicationParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ListReplicationHandlerFunc) Handle(params ListReplicationParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ListReplicationHandler interface for that can handle valid list replication params +type ListReplicationHandler interface { + Handle(ListReplicationParams, *models.Principal) middleware.Responder +} + +// NewListReplication creates a new http.Handler for the list replication operation +func NewListReplication(ctx *middleware.Context, handler ListReplicationHandler) *ListReplication { + return &ListReplication{Context: ctx, Handler: handler} +} + +/* + ListReplication swagger:route GET 
/replication/replicate/list replication listReplication + +# List replication operations + +Retrieves a list of currently registered replication operations, optionally filtered by collection, shard, or node ID. +*/ +type ListReplication struct { + Context *middleware.Context + Handler ListReplicationHandler +} + +func (o *ListReplication) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewListReplicationParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/list_replication_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/list_replication_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..1c6425c5ba250b036871635f7ebc5ccd244ec940 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/list_replication_parameters.go @@ -0,0 +1,175 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewListReplicationParams creates a new ListReplicationParams object +// +// There are no default values defined in the spec. +func NewListReplicationParams() ListReplicationParams { + + return ListReplicationParams{} +} + +// ListReplicationParams contains all the bound params for the list replication operation +// typically these are obtained from a http.Request +// +// swagger:parameters listReplication +type ListReplicationParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*The name of the collection to get details for. + In: query + */ + Collection *string + /*Whether to include the history of the replication operation. + In: query + */ + IncludeHistory *bool + /*The shard to get details for. + In: query + */ + Shard *string + /*The name of the target node to get details for. + In: query + */ + TargetNode *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewListReplicationParams() beforehand. 
+func (o *ListReplicationParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + qCollection, qhkCollection, _ := qs.GetOK("collection") + if err := o.bindCollection(qCollection, qhkCollection, route.Formats); err != nil { + res = append(res, err) + } + + qIncludeHistory, qhkIncludeHistory, _ := qs.GetOK("includeHistory") + if err := o.bindIncludeHistory(qIncludeHistory, qhkIncludeHistory, route.Formats); err != nil { + res = append(res, err) + } + + qShard, qhkShard, _ := qs.GetOK("shard") + if err := o.bindShard(qShard, qhkShard, route.Formats); err != nil { + res = append(res, err) + } + + qTargetNode, qhkTargetNode, _ := qs.GetOK("targetNode") + if err := o.bindTargetNode(qTargetNode, qhkTargetNode, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindCollection binds and validates parameter Collection from query. +func (o *ListReplicationParams) bindCollection(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Collection = &raw + + return nil +} + +// bindIncludeHistory binds and validates parameter IncludeHistory from query. 
+func (o *ListReplicationParams) bindIncludeHistory(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + + value, err := swag.ConvertBool(raw) + if err != nil { + return errors.InvalidType("includeHistory", "query", "bool", raw) + } + o.IncludeHistory = &value + + return nil +} + +// bindShard binds and validates parameter Shard from query. +func (o *ListReplicationParams) bindShard(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Shard = &raw + + return nil +} + +// bindTargetNode binds and validates parameter TargetNode from query. +func (o *ListReplicationParams) bindTargetNode(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.TargetNode = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/list_replication_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/list_replication_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..d7154dede25afe1629448e977eb4a6c56656fd6a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/list_replication_responses.go @@ -0,0 +1,278 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ 
\___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ListReplicationOKCode is the HTTP code returned for type ListReplicationOK +const ListReplicationOKCode int = 200 + +/* +ListReplicationOK The details of the replication operations. + +swagger:response listReplicationOK +*/ +type ListReplicationOK struct { + + /* + In: Body + */ + Payload []*models.ReplicationReplicateDetailsReplicaResponse `json:"body,omitempty"` +} + +// NewListReplicationOK creates ListReplicationOK with default headers values +func NewListReplicationOK() *ListReplicationOK { + + return &ListReplicationOK{} +} + +// WithPayload adds the payload to the list replication o k response +func (o *ListReplicationOK) WithPayload(payload []*models.ReplicationReplicateDetailsReplicaResponse) *ListReplicationOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the list replication o k response +func (o *ListReplicationOK) SetPayload(payload []*models.ReplicationReplicateDetailsReplicaResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ListReplicationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = make([]*models.ReplicationReplicateDetailsReplicaResponse, 0, 50) + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// ListReplicationBadRequestCode is the HTTP code returned for type ListReplicationBadRequest +const 
ListReplicationBadRequestCode int = 400 + +/* +ListReplicationBadRequest Bad request. + +swagger:response listReplicationBadRequest +*/ +type ListReplicationBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewListReplicationBadRequest creates ListReplicationBadRequest with default headers values +func NewListReplicationBadRequest() *ListReplicationBadRequest { + + return &ListReplicationBadRequest{} +} + +// WithPayload adds the payload to the list replication bad request response +func (o *ListReplicationBadRequest) WithPayload(payload *models.ErrorResponse) *ListReplicationBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the list replication bad request response +func (o *ListReplicationBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ListReplicationBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ListReplicationUnauthorizedCode is the HTTP code returned for type ListReplicationUnauthorized +const ListReplicationUnauthorizedCode int = 401 + +/* +ListReplicationUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response listReplicationUnauthorized +*/ +type ListReplicationUnauthorized struct { +} + +// NewListReplicationUnauthorized creates ListReplicationUnauthorized with default headers values +func NewListReplicationUnauthorized() *ListReplicationUnauthorized { + + return &ListReplicationUnauthorized{} +} + +// WriteResponse to the client +func (o *ListReplicationUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ListReplicationForbiddenCode is the HTTP code returned for type ListReplicationForbidden +const ListReplicationForbiddenCode int = 403 + +/* +ListReplicationForbidden Forbidden + +swagger:response listReplicationForbidden +*/ +type ListReplicationForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewListReplicationForbidden creates ListReplicationForbidden with default headers values +func NewListReplicationForbidden() *ListReplicationForbidden { + + return &ListReplicationForbidden{} +} + +// WithPayload adds the payload to the list replication forbidden response +func (o *ListReplicationForbidden) WithPayload(payload *models.ErrorResponse) *ListReplicationForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the list replication forbidden response +func (o *ListReplicationForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ListReplicationForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ListReplicationInternalServerErrorCode is the HTTP code returned for type ListReplicationInternalServerError +const 
ListReplicationInternalServerErrorCode int = 500 + +/* +ListReplicationInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response listReplicationInternalServerError +*/ +type ListReplicationInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewListReplicationInternalServerError creates ListReplicationInternalServerError with default headers values +func NewListReplicationInternalServerError() *ListReplicationInternalServerError { + + return &ListReplicationInternalServerError{} +} + +// WithPayload adds the payload to the list replication internal server error response +func (o *ListReplicationInternalServerError) WithPayload(payload *models.ErrorResponse) *ListReplicationInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the list replication internal server error response +func (o *ListReplicationInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ListReplicationInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ListReplicationNotImplementedCode is the HTTP code returned for type ListReplicationNotImplemented +const ListReplicationNotImplementedCode int = 501 + +/* +ListReplicationNotImplemented Replica movement operations are disabled. 
+ +swagger:response listReplicationNotImplemented +*/ +type ListReplicationNotImplemented struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewListReplicationNotImplemented creates ListReplicationNotImplemented with default headers values +func NewListReplicationNotImplemented() *ListReplicationNotImplemented { + + return &ListReplicationNotImplemented{} +} + +// WithPayload adds the payload to the list replication not implemented response +func (o *ListReplicationNotImplemented) WithPayload(payload *models.ErrorResponse) *ListReplicationNotImplemented { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the list replication not implemented response +func (o *ListReplicationNotImplemented) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ListReplicationNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(501) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/list_replication_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/list_replication_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..768311862a7521005b85be35cde9214a752876ee --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/list_replication_urlbuilder.go @@ -0,0 +1,143 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + + "github.com/go-openapi/swag" +) + +// ListReplicationURL generates an URL for the list replication operation +type ListReplicationURL struct { + Collection *string + IncludeHistory *bool + Shard *string + TargetNode *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ListReplicationURL) WithBasePath(bp string) *ListReplicationURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ListReplicationURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ListReplicationURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/replication/replicate/list" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var collectionQ string + if o.Collection != nil { + collectionQ = *o.Collection + } + if collectionQ != "" { + qs.Set("collection", collectionQ) + } + + var includeHistoryQ string + if o.IncludeHistory != nil { + includeHistoryQ = swag.FormatBool(*o.IncludeHistory) + } + if includeHistoryQ != "" { + qs.Set("includeHistory", includeHistoryQ) + } + + var shardQ string + if o.Shard != nil { + shardQ = *o.Shard + } + if shardQ != "" { + qs.Set("shard", shardQ) + } + + var targetNodeQ string + if o.TargetNode != nil { + targetNodeQ = *o.TargetNode + } + if targetNodeQ != "" { + qs.Set("targetNode", targetNodeQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ListReplicationURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ListReplicationURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ListReplicationURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ListReplicationURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ListReplicationURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + 
base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ListReplicationURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replicate.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replicate.go new file mode 100644 index 0000000000000000000000000000000000000000..a8948184b5b5b710c29e7fc3e72d1a0ba650ce55 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replicate.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ReplicateHandlerFunc turns a function with the right signature into a replicate handler +type ReplicateHandlerFunc func(ReplicateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ReplicateHandlerFunc) Handle(params ReplicateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ReplicateHandler interface for that can handle valid replicate params +type ReplicateHandler interface { + Handle(ReplicateParams, *models.Principal) middleware.Responder +} + +// NewReplicate creates a new http.Handler for the replicate operation +func NewReplicate(ctx *middleware.Context, handler ReplicateHandler) *Replicate { + return &Replicate{Context: ctx, Handler: handler} +} + +/* + Replicate swagger:route POST /replication/replicate replication replicate + +# Initiate a replica movement + +Begins an asynchronous operation to move or copy a specific shard replica from its current node to a designated target node. The operation involves copying data, synchronizing, and potentially decommissioning the source replica. 
+*/ +type Replicate struct { + Context *middleware.Context + Handler ReplicateHandler +} + +func (o *Replicate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewReplicateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replicate_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replicate_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..bc94b4bc11b78ad0ce6b422666d3cb81fdf4b489 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replicate_parameters.go @@ -0,0 +1,95 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewReplicateParams creates a new ReplicateParams object +// +// There are no default values defined in the spec. +func NewReplicateParams() ReplicateParams { + + return ReplicateParams{} +} + +// ReplicateParams contains all the bound params for the replicate operation +// typically these are obtained from a http.Request +// +// swagger:parameters replicate +type ReplicateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.ReplicationReplicateReplicaRequest +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewReplicateParams() beforehand. 
+func (o *ReplicateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.ReplicationReplicateReplicaRequest + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replicate_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replicate_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..83ba58d11837a684c71b89796d77dd5452bb466a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replicate_responses.go @@ -0,0 +1,320 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ReplicateOKCode is the HTTP code returned for type ReplicateOK +const ReplicateOKCode int = 200 + +/* +ReplicateOK Replication operation registered successfully. ID of the operation is returned. + +swagger:response replicateOK +*/ +type ReplicateOK struct { + + /* + In: Body + */ + Payload *models.ReplicationReplicateReplicaResponse `json:"body,omitempty"` +} + +// NewReplicateOK creates ReplicateOK with default headers values +func NewReplicateOK() *ReplicateOK { + + return &ReplicateOK{} +} + +// WithPayload adds the payload to the replicate o k response +func (o *ReplicateOK) WithPayload(payload *models.ReplicationReplicateReplicaResponse) *ReplicateOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the replicate o k response +func (o *ReplicateOK) SetPayload(payload *models.ReplicationReplicateReplicaResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ReplicateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ReplicateBadRequestCode is the HTTP code returned for type ReplicateBadRequest +const ReplicateBadRequestCode int = 400 + +/* +ReplicateBadRequest Malformed request. 
+ +swagger:response replicateBadRequest +*/ +type ReplicateBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewReplicateBadRequest creates ReplicateBadRequest with default headers values +func NewReplicateBadRequest() *ReplicateBadRequest { + + return &ReplicateBadRequest{} +} + +// WithPayload adds the payload to the replicate bad request response +func (o *ReplicateBadRequest) WithPayload(payload *models.ErrorResponse) *ReplicateBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the replicate bad request response +func (o *ReplicateBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ReplicateBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ReplicateUnauthorizedCode is the HTTP code returned for type ReplicateUnauthorized +const ReplicateUnauthorizedCode int = 401 + +/* +ReplicateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response replicateUnauthorized +*/ +type ReplicateUnauthorized struct { +} + +// NewReplicateUnauthorized creates ReplicateUnauthorized with default headers values +func NewReplicateUnauthorized() *ReplicateUnauthorized { + + return &ReplicateUnauthorized{} +} + +// WriteResponse to the client +func (o *ReplicateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ReplicateForbiddenCode is the HTTP code returned for type ReplicateForbidden +const ReplicateForbiddenCode int = 403 + +/* +ReplicateForbidden Forbidden + +swagger:response replicateForbidden +*/ +type ReplicateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewReplicateForbidden creates ReplicateForbidden with default headers values +func NewReplicateForbidden() *ReplicateForbidden { + + return &ReplicateForbidden{} +} + +// WithPayload adds the payload to the replicate forbidden response +func (o *ReplicateForbidden) WithPayload(payload *models.ErrorResponse) *ReplicateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the replicate forbidden response +func (o *ReplicateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ReplicateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ReplicateUnprocessableEntityCode is the HTTP code returned for type ReplicateUnprocessableEntity +const ReplicateUnprocessableEntityCode int = 422 + +/* +ReplicateUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
+ +swagger:response replicateUnprocessableEntity +*/ +type ReplicateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewReplicateUnprocessableEntity creates ReplicateUnprocessableEntity with default headers values +func NewReplicateUnprocessableEntity() *ReplicateUnprocessableEntity { + + return &ReplicateUnprocessableEntity{} +} + +// WithPayload adds the payload to the replicate unprocessable entity response +func (o *ReplicateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ReplicateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the replicate unprocessable entity response +func (o *ReplicateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ReplicateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ReplicateInternalServerErrorCode is the HTTP code returned for type ReplicateInternalServerError +const ReplicateInternalServerErrorCode int = 500 + +/* +ReplicateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response replicateInternalServerError +*/ +type ReplicateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewReplicateInternalServerError creates ReplicateInternalServerError with default headers values +func NewReplicateInternalServerError() *ReplicateInternalServerError { + + return &ReplicateInternalServerError{} +} + +// WithPayload adds the payload to the replicate internal server error response +func (o *ReplicateInternalServerError) WithPayload(payload *models.ErrorResponse) *ReplicateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the replicate internal server error response +func (o *ReplicateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ReplicateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ReplicateNotImplementedCode is the HTTP code returned for type ReplicateNotImplemented +const ReplicateNotImplementedCode int = 501 + +/* +ReplicateNotImplemented Replica movement operations are disabled. 
+ +swagger:response replicateNotImplemented +*/ +type ReplicateNotImplemented struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewReplicateNotImplemented creates ReplicateNotImplemented with default headers values +func NewReplicateNotImplemented() *ReplicateNotImplemented { + + return &ReplicateNotImplemented{} +} + +// WithPayload adds the payload to the replicate not implemented response +func (o *ReplicateNotImplemented) WithPayload(payload *models.ErrorResponse) *ReplicateNotImplemented { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the replicate not implemented response +func (o *ReplicateNotImplemented) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ReplicateNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(501) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replicate_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replicate_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..589d5856124b95261888f73e3d00cd3a294e98bd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replicate_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// ReplicateURL generates an URL for the replicate operation +type ReplicateURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ReplicateURL) WithBasePath(bp string) *ReplicateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ReplicateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ReplicateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/replication/replicate" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ReplicateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ReplicateURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ReplicateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ReplicateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ReplicateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + 
base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ReplicateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replication_details.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replication_details.go new file mode 100644 index 0000000000000000000000000000000000000000..f45dd0844a262c6b7bdda040407dfedc4176edf5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replication_details.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ReplicationDetailsHandlerFunc turns a function with the right signature into a replication details handler +type ReplicationDetailsHandlerFunc func(ReplicationDetailsParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ReplicationDetailsHandlerFunc) Handle(params ReplicationDetailsParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ReplicationDetailsHandler interface for that can handle valid replication details params +type ReplicationDetailsHandler interface { + Handle(ReplicationDetailsParams, *models.Principal) middleware.Responder +} + +// NewReplicationDetails creates a new http.Handler for the replication details operation +func NewReplicationDetails(ctx *middleware.Context, handler ReplicationDetailsHandler) *ReplicationDetails { + return &ReplicationDetails{Context: ctx, Handler: handler} +} + +/* + ReplicationDetails swagger:route GET /replication/replicate/{id} replication replicationDetails + +# Retrieve a replication operation + +Fetches the current status and detailed information for a specific replication operation, identified by its unique ID. Optionally includes historical data of the operation's progress if requested. 
+*/ +type ReplicationDetails struct { + Context *middleware.Context + Handler ReplicationDetailsHandler +} + +func (o *ReplicationDetails) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewReplicationDetailsParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replication_details_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replication_details_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..a5ecf5c4ab048492f5c023b36d1b467498ba3633 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replication_details_parameters.go @@ -0,0 +1,138 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// NewReplicationDetailsParams creates a new ReplicationDetailsParams object +// +// There are no default values defined in the spec. +func NewReplicationDetailsParams() ReplicationDetailsParams { + + return ReplicationDetailsParams{} +} + +// ReplicationDetailsParams contains all the bound params for the replication details operation +// typically these are obtained from a http.Request +// +// swagger:parameters replicationDetails +type ReplicationDetailsParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*The ID of the replication operation to get details for. + Required: true + In: path + */ + ID strfmt.UUID + /*Whether to include the history of the replication operation. + In: query + */ + IncludeHistory *bool +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewReplicationDetailsParams() beforehand. +func (o *ReplicationDetailsParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + rID, rhkID, _ := route.Params.GetOK("id") + if err := o.bindID(rID, rhkID, route.Formats); err != nil { + res = append(res, err) + } + + qIncludeHistory, qhkIncludeHistory, _ := qs.GetOK("includeHistory") + if err := o.bindIncludeHistory(qIncludeHistory, qhkIncludeHistory, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// bindID binds and validates parameter ID from path. +func (o *ReplicationDetailsParams) bindID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("id", "path", "strfmt.UUID", raw) + } + o.ID = *(value.(*strfmt.UUID)) + + if err := o.validateID(formats); err != nil { + return err + } + + return nil +} + +// validateID carries on validations for parameter ID +func (o *ReplicationDetailsParams) validateID(formats strfmt.Registry) error { + + if err := validate.FormatOf("id", "path", "uuid", o.ID.String(), formats); err != nil { + return err + } + return nil +} + +// bindIncludeHistory binds and validates parameter IncludeHistory from query. +func (o *ReplicationDetailsParams) bindIncludeHistory(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + + value, err := swag.ConvertBool(raw) + if err != nil { + return errors.InvalidType("includeHistory", "query", "bool", raw) + } + o.IncludeHistory = &value + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replication_details_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replication_details_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..5c3234c795c6be760cebbaaa397929871097411e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replication_details_responses.go @@ -0,0 +1,300 @@ +// _ _ +// __ _____ __ ___ ___ __ 
_| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ReplicationDetailsOKCode is the HTTP code returned for type ReplicationDetailsOK +const ReplicationDetailsOKCode int = 200 + +/* +ReplicationDetailsOK The details of the replication operation. + +swagger:response replicationDetailsOK +*/ +type ReplicationDetailsOK struct { + + /* + In: Body + */ + Payload *models.ReplicationReplicateDetailsReplicaResponse `json:"body,omitempty"` +} + +// NewReplicationDetailsOK creates ReplicationDetailsOK with default headers values +func NewReplicationDetailsOK() *ReplicationDetailsOK { + + return &ReplicationDetailsOK{} +} + +// WithPayload adds the payload to the replication details o k response +func (o *ReplicationDetailsOK) WithPayload(payload *models.ReplicationReplicateDetailsReplicaResponse) *ReplicationDetailsOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the replication details o k response +func (o *ReplicationDetailsOK) SetPayload(payload *models.ReplicationReplicateDetailsReplicaResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ReplicationDetailsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ReplicationDetailsUnauthorizedCode is the HTTP code returned for type 
ReplicationDetailsUnauthorized +const ReplicationDetailsUnauthorizedCode int = 401 + +/* +ReplicationDetailsUnauthorized Unauthorized or invalid credentials. + +swagger:response replicationDetailsUnauthorized +*/ +type ReplicationDetailsUnauthorized struct { +} + +// NewReplicationDetailsUnauthorized creates ReplicationDetailsUnauthorized with default headers values +func NewReplicationDetailsUnauthorized() *ReplicationDetailsUnauthorized { + + return &ReplicationDetailsUnauthorized{} +} + +// WriteResponse to the client +func (o *ReplicationDetailsUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ReplicationDetailsForbiddenCode is the HTTP code returned for type ReplicationDetailsForbidden +const ReplicationDetailsForbiddenCode int = 403 + +/* +ReplicationDetailsForbidden Forbidden. + +swagger:response replicationDetailsForbidden +*/ +type ReplicationDetailsForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewReplicationDetailsForbidden creates ReplicationDetailsForbidden with default headers values +func NewReplicationDetailsForbidden() *ReplicationDetailsForbidden { + + return &ReplicationDetailsForbidden{} +} + +// WithPayload adds the payload to the replication details forbidden response +func (o *ReplicationDetailsForbidden) WithPayload(payload *models.ErrorResponse) *ReplicationDetailsForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the replication details forbidden response +func (o *ReplicationDetailsForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ReplicationDetailsForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := 
producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ReplicationDetailsNotFoundCode is the HTTP code returned for type ReplicationDetailsNotFound +const ReplicationDetailsNotFoundCode int = 404 + +/* +ReplicationDetailsNotFound Shard replica operation not found. + +swagger:response replicationDetailsNotFound +*/ +type ReplicationDetailsNotFound struct { +} + +// NewReplicationDetailsNotFound creates ReplicationDetailsNotFound with default headers values +func NewReplicationDetailsNotFound() *ReplicationDetailsNotFound { + + return &ReplicationDetailsNotFound{} +} + +// WriteResponse to the client +func (o *ReplicationDetailsNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ReplicationDetailsUnprocessableEntityCode is the HTTP code returned for type ReplicationDetailsUnprocessableEntity +const ReplicationDetailsUnprocessableEntityCode int = 422 + +/* +ReplicationDetailsUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
+ +swagger:response replicationDetailsUnprocessableEntity +*/ +type ReplicationDetailsUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewReplicationDetailsUnprocessableEntity creates ReplicationDetailsUnprocessableEntity with default headers values +func NewReplicationDetailsUnprocessableEntity() *ReplicationDetailsUnprocessableEntity { + + return &ReplicationDetailsUnprocessableEntity{} +} + +// WithPayload adds the payload to the replication details unprocessable entity response +func (o *ReplicationDetailsUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ReplicationDetailsUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the replication details unprocessable entity response +func (o *ReplicationDetailsUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ReplicationDetailsUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ReplicationDetailsInternalServerErrorCode is the HTTP code returned for type ReplicationDetailsInternalServerError +const ReplicationDetailsInternalServerErrorCode int = 500 + +/* +ReplicationDetailsInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response replicationDetailsInternalServerError +*/ +type ReplicationDetailsInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewReplicationDetailsInternalServerError creates ReplicationDetailsInternalServerError with default headers values +func NewReplicationDetailsInternalServerError() *ReplicationDetailsInternalServerError { + + return &ReplicationDetailsInternalServerError{} +} + +// WithPayload adds the payload to the replication details internal server error response +func (o *ReplicationDetailsInternalServerError) WithPayload(payload *models.ErrorResponse) *ReplicationDetailsInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the replication details internal server error response +func (o *ReplicationDetailsInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ReplicationDetailsInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ReplicationDetailsNotImplementedCode is the HTTP code returned for type ReplicationDetailsNotImplemented +const ReplicationDetailsNotImplementedCode int = 501 + +/* +ReplicationDetailsNotImplemented Replica movement operations are disabled. 
+ +swagger:response replicationDetailsNotImplemented +*/ +type ReplicationDetailsNotImplemented struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewReplicationDetailsNotImplemented creates ReplicationDetailsNotImplemented with default headers values +func NewReplicationDetailsNotImplemented() *ReplicationDetailsNotImplemented { + + return &ReplicationDetailsNotImplemented{} +} + +// WithPayload adds the payload to the replication details not implemented response +func (o *ReplicationDetailsNotImplemented) WithPayload(payload *models.ErrorResponse) *ReplicationDetailsNotImplemented { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the replication details not implemented response +func (o *ReplicationDetailsNotImplemented) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ReplicationDetailsNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(501) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replication_details_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replication_details_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..0dbf7515ff53cf59b95594a8fc785142247e1784 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/replication/replication_details_urlbuilder.go @@ -0,0 +1,127 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package replication + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ReplicationDetailsURL generates an URL for the replication details operation +type ReplicationDetailsURL struct { + ID strfmt.UUID + + IncludeHistory *bool + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ReplicationDetailsURL) WithBasePath(bp string) *ReplicationDetailsURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ReplicationDetailsURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ReplicationDetailsURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/replication/replicate/{id}" + + id := o.ID.String() + if id != "" { + _path = strings.Replace(_path, "{id}", id, -1) + } else { + return nil, errors.New("id is required on ReplicationDetailsURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var includeHistoryQ string + if o.IncludeHistory != nil { + includeHistoryQ = swag.FormatBool(*o.IncludeHistory) + } + if includeHistoryQ != "" { + qs.Set("includeHistory", includeHistoryQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ReplicationDetailsURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ReplicationDetailsURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ReplicationDetailsURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ReplicationDetailsURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ReplicationDetailsURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ReplicationDetailsURL) StringFull(scheme, host string) string { + return 
o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_create.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_create.go new file mode 100644 index 0000000000000000000000000000000000000000..6452e9af09943eafbe8a7700b0709fff7f6b72fd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_create.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesCreateHandlerFunc turns a function with the right signature into a aliases create handler +type AliasesCreateHandlerFunc func(AliasesCreateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn AliasesCreateHandlerFunc) Handle(params AliasesCreateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// AliasesCreateHandler interface for that can handle valid aliases create params +type AliasesCreateHandler interface { + Handle(AliasesCreateParams, *models.Principal) middleware.Responder +} + +// NewAliasesCreate creates a new http.Handler for the aliases create operation +func NewAliasesCreate(ctx *middleware.Context, handler AliasesCreateHandler) *AliasesCreate { + return &AliasesCreate{Context: ctx, Handler: handler} +} + +/* + AliasesCreate swagger:route POST 
/aliases schema aliasesCreate + +# Create a new alias + +Create a new alias mapping between an alias name and a collection (class). The alias acts as an alternative name for accessing the collection. +*/ +type AliasesCreate struct { + Context *middleware.Context + Handler AliasesCreateHandler +} + +func (o *AliasesCreate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewAliasesCreateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_create_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..342fbbef44a8754762456b7374d6eda1141aa48f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_create_parameters.go @@ -0,0 +1,95 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewAliasesCreateParams creates a new AliasesCreateParams object +// +// There are no default values defined in the spec. +func NewAliasesCreateParams() AliasesCreateParams { + + return AliasesCreateParams{} +} + +// AliasesCreateParams contains all the bound params for the aliases create operation +// typically these are obtained from a http.Request +// +// swagger:parameters aliases.create +type AliasesCreateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.Alias +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewAliasesCreateParams() beforehand. 
+func (o *AliasesCreateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.Alias + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_create_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..43e5a083fecc471cbc9f9aaa62bbcb5d5bd95614 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_create_responses.go @@ -0,0 +1,230 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesCreateOKCode is the HTTP code returned for type AliasesCreateOK +const AliasesCreateOKCode int = 200 + +/* +AliasesCreateOK Successfully created a new alias for the specified collection (class) + +swagger:response aliasesCreateOK +*/ +type AliasesCreateOK struct { + + /* + In: Body + */ + Payload *models.Alias `json:"body,omitempty"` +} + +// NewAliasesCreateOK creates AliasesCreateOK with default headers values +func NewAliasesCreateOK() *AliasesCreateOK { + + return &AliasesCreateOK{} +} + +// WithPayload adds the payload to the aliases create o k response +func (o *AliasesCreateOK) WithPayload(payload *models.Alias) *AliasesCreateOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases create o k response +func (o *AliasesCreateOK) SetPayload(payload *models.Alias) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesCreateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesCreateUnauthorizedCode is the HTTP code returned for type AliasesCreateUnauthorized +const AliasesCreateUnauthorizedCode int = 401 + +/* +AliasesCreateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response aliasesCreateUnauthorized +*/ +type AliasesCreateUnauthorized struct { +} + +// NewAliasesCreateUnauthorized creates AliasesCreateUnauthorized with default headers values +func NewAliasesCreateUnauthorized() *AliasesCreateUnauthorized { + + return &AliasesCreateUnauthorized{} +} + +// WriteResponse to the client +func (o *AliasesCreateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// AliasesCreateForbiddenCode is the HTTP code returned for type AliasesCreateForbidden +const AliasesCreateForbiddenCode int = 403 + +/* +AliasesCreateForbidden Forbidden + +swagger:response aliasesCreateForbidden +*/ +type AliasesCreateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesCreateForbidden creates AliasesCreateForbidden with default headers values +func NewAliasesCreateForbidden() *AliasesCreateForbidden { + + return &AliasesCreateForbidden{} +} + +// WithPayload adds the payload to the aliases create forbidden response +func (o *AliasesCreateForbidden) WithPayload(payload *models.ErrorResponse) *AliasesCreateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases create forbidden response +func (o *AliasesCreateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesCreateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesCreateUnprocessableEntityCode is the HTTP code returned for type AliasesCreateUnprocessableEntity +const AliasesCreateUnprocessableEntityCode int = 422 + +/* 
+AliasesCreateUnprocessableEntity Invalid create alias request. + +swagger:response aliasesCreateUnprocessableEntity +*/ +type AliasesCreateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesCreateUnprocessableEntity creates AliasesCreateUnprocessableEntity with default headers values +func NewAliasesCreateUnprocessableEntity() *AliasesCreateUnprocessableEntity { + + return &AliasesCreateUnprocessableEntity{} +} + +// WithPayload adds the payload to the aliases create unprocessable entity response +func (o *AliasesCreateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *AliasesCreateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases create unprocessable entity response +func (o *AliasesCreateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesCreateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesCreateInternalServerErrorCode is the HTTP code returned for type AliasesCreateInternalServerError +const AliasesCreateInternalServerErrorCode int = 500 + +/* +AliasesCreateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response aliasesCreateInternalServerError +*/ +type AliasesCreateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesCreateInternalServerError creates AliasesCreateInternalServerError with default headers values +func NewAliasesCreateInternalServerError() *AliasesCreateInternalServerError { + + return &AliasesCreateInternalServerError{} +} + +// WithPayload adds the payload to the aliases create internal server error response +func (o *AliasesCreateInternalServerError) WithPayload(payload *models.ErrorResponse) *AliasesCreateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases create internal server error response +func (o *AliasesCreateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesCreateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_create_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_create_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..17dce7bc1de56b72e64dc787fa730043271fef5e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_create_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// AliasesCreateURL generates an URL for the aliases create operation +type AliasesCreateURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *AliasesCreateURL) WithBasePath(bp string) *AliasesCreateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *AliasesCreateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *AliasesCreateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/aliases" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *AliasesCreateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *AliasesCreateURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *AliasesCreateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on AliasesCreateURL") + } + if host == 
"" { + return nil, errors.New("host is required for a full url on AliasesCreateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *AliasesCreateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_delete.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..1eb7264924c2cd8653b7aecb97dec0408640ccd5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_delete.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesDeleteHandlerFunc turns a function with the right signature into a aliases delete handler +type AliasesDeleteHandlerFunc func(AliasesDeleteParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn AliasesDeleteHandlerFunc) Handle(params AliasesDeleteParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// AliasesDeleteHandler interface for that can handle valid aliases delete params +type AliasesDeleteHandler interface { + Handle(AliasesDeleteParams, *models.Principal) middleware.Responder +} + +// NewAliasesDelete creates a new http.Handler for the aliases delete operation +func NewAliasesDelete(ctx *middleware.Context, handler AliasesDeleteHandler) *AliasesDelete { + return &AliasesDelete{Context: ctx, Handler: handler} +} + +/* + AliasesDelete swagger:route DELETE /aliases/{aliasName} schema aliasesDelete + +# Delete an alias + +Remove an existing alias from the system. This will delete the alias mapping but will not affect the underlying collection (class). 
+*/ +type AliasesDelete struct { + Context *middleware.Context + Handler AliasesDeleteHandler +} + +func (o *AliasesDelete) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewAliasesDeleteParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_delete_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..76f383c5355e0c1ff2354773f7b2a8d4b98d5d4d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_delete_parameters.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewAliasesDeleteParams creates a new AliasesDeleteParams object +// +// There are no default values defined in the spec. +func NewAliasesDeleteParams() AliasesDeleteParams { + + return AliasesDeleteParams{} +} + +// AliasesDeleteParams contains all the bound params for the aliases delete operation +// typically these are obtained from a http.Request +// +// swagger:parameters aliases.delete +type AliasesDeleteParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + AliasName string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewAliasesDeleteParams() beforehand. +func (o *AliasesDeleteParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rAliasName, rhkAliasName, _ := route.Params.GetOK("aliasName") + if err := o.bindAliasName(rAliasName, rhkAliasName, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindAliasName binds and validates parameter AliasName from path. 
+func (o *AliasesDeleteParams) bindAliasName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.AliasName = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_delete_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..6c9e184757e3e62365d32afdb7673458b132bff6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_delete_responses.go @@ -0,0 +1,255 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesDeleteNoContentCode is the HTTP code returned for type AliasesDeleteNoContent +const AliasesDeleteNoContentCode int = 204 + +/* +AliasesDeleteNoContent Successfully deleted the alias. 
+ +swagger:response aliasesDeleteNoContent +*/ +type AliasesDeleteNoContent struct { +} + +// NewAliasesDeleteNoContent creates AliasesDeleteNoContent with default headers values +func NewAliasesDeleteNoContent() *AliasesDeleteNoContent { + + return &AliasesDeleteNoContent{} +} + +// WriteResponse to the client +func (o *AliasesDeleteNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// AliasesDeleteUnauthorizedCode is the HTTP code returned for type AliasesDeleteUnauthorized +const AliasesDeleteUnauthorizedCode int = 401 + +/* +AliasesDeleteUnauthorized Unauthorized or invalid credentials. + +swagger:response aliasesDeleteUnauthorized +*/ +type AliasesDeleteUnauthorized struct { +} + +// NewAliasesDeleteUnauthorized creates AliasesDeleteUnauthorized with default headers values +func NewAliasesDeleteUnauthorized() *AliasesDeleteUnauthorized { + + return &AliasesDeleteUnauthorized{} +} + +// WriteResponse to the client +func (o *AliasesDeleteUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// AliasesDeleteForbiddenCode is the HTTP code returned for type AliasesDeleteForbidden +const AliasesDeleteForbiddenCode int = 403 + +/* +AliasesDeleteForbidden Forbidden + +swagger:response aliasesDeleteForbidden +*/ +type AliasesDeleteForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesDeleteForbidden creates AliasesDeleteForbidden with default headers values +func NewAliasesDeleteForbidden() *AliasesDeleteForbidden { + + return &AliasesDeleteForbidden{} +} + +// WithPayload adds the payload to the aliases delete forbidden response +func (o *AliasesDeleteForbidden) WithPayload(payload *models.ErrorResponse) 
*AliasesDeleteForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases delete forbidden response +func (o *AliasesDeleteForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesDeleteForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesDeleteNotFoundCode is the HTTP code returned for type AliasesDeleteNotFound +const AliasesDeleteNotFoundCode int = 404 + +/* +AliasesDeleteNotFound Not Found - Alias does not exist + +swagger:response aliasesDeleteNotFound +*/ +type AliasesDeleteNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesDeleteNotFound creates AliasesDeleteNotFound with default headers values +func NewAliasesDeleteNotFound() *AliasesDeleteNotFound { + + return &AliasesDeleteNotFound{} +} + +// WithPayload adds the payload to the aliases delete not found response +func (o *AliasesDeleteNotFound) WithPayload(payload *models.ErrorResponse) *AliasesDeleteNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases delete not found response +func (o *AliasesDeleteNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesDeleteNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesDeleteUnprocessableEntityCode is the HTTP code returned for type AliasesDeleteUnprocessableEntity +const AliasesDeleteUnprocessableEntityCode int = 
422 + +/* +AliasesDeleteUnprocessableEntity Invalid delete alias request. + +swagger:response aliasesDeleteUnprocessableEntity +*/ +type AliasesDeleteUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesDeleteUnprocessableEntity creates AliasesDeleteUnprocessableEntity with default headers values +func NewAliasesDeleteUnprocessableEntity() *AliasesDeleteUnprocessableEntity { + + return &AliasesDeleteUnprocessableEntity{} +} + +// WithPayload adds the payload to the aliases delete unprocessable entity response +func (o *AliasesDeleteUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *AliasesDeleteUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases delete unprocessable entity response +func (o *AliasesDeleteUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesDeleteUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesDeleteInternalServerErrorCode is the HTTP code returned for type AliasesDeleteInternalServerError +const AliasesDeleteInternalServerErrorCode int = 500 + +/* +AliasesDeleteInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response aliasesDeleteInternalServerError +*/ +type AliasesDeleteInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesDeleteInternalServerError creates AliasesDeleteInternalServerError with default headers values +func NewAliasesDeleteInternalServerError() *AliasesDeleteInternalServerError { + + return &AliasesDeleteInternalServerError{} +} + +// WithPayload adds the payload to the aliases delete internal server error response +func (o *AliasesDeleteInternalServerError) WithPayload(payload *models.ErrorResponse) *AliasesDeleteInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases delete internal server error response +func (o *AliasesDeleteInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesDeleteInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_delete_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_delete_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..f6136ac102165cafd5ff8b93d3bf3ebef690eb43 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_delete_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// AliasesDeleteURL generates an URL for the aliases delete operation +type AliasesDeleteURL struct { + AliasName string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *AliasesDeleteURL) WithBasePath(bp string) *AliasesDeleteURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *AliasesDeleteURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *AliasesDeleteURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/aliases/{aliasName}" + + aliasName := o.AliasName + if aliasName != "" { + _path = strings.Replace(_path, "{aliasName}", aliasName, -1) + } else { + return nil, errors.New("aliasName is required on AliasesDeleteURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *AliasesDeleteURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *AliasesDeleteURL) String() string { + return 
o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *AliasesDeleteURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on AliasesDeleteURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on AliasesDeleteURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *AliasesDeleteURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get.go new file mode 100644 index 0000000000000000000000000000000000000000..9ccf854a3e1a0f0ebc598debe5512a1a889cb0c7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesGetHandlerFunc turns a function with the right signature into a aliases get handler +type AliasesGetHandlerFunc func(AliasesGetParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn AliasesGetHandlerFunc) Handle(params AliasesGetParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// AliasesGetHandler interface for that can handle valid aliases get params +type AliasesGetHandler interface { + Handle(AliasesGetParams, *models.Principal) middleware.Responder +} + +// NewAliasesGet creates a new http.Handler for the aliases get operation +func NewAliasesGet(ctx *middleware.Context, handler AliasesGetHandler) *AliasesGet { + return &AliasesGet{Context: ctx, Handler: handler} +} + +/* + AliasesGet swagger:route GET /aliases schema aliasesGet + +# List aliases + +Retrieve a list of all aliases in the system. Results can be filtered by specifying a collection (class) name to get aliases for a specific collection only. 
+*/ +type AliasesGet struct { + Context *middleware.Context + Handler AliasesGetHandler +} + +func (o *AliasesGet) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewAliasesGetParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_alias.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_alias.go new file mode 100644 index 0000000000000000000000000000000000000000..c61d04fa2d90e0cfa409ebb2f64fcc9fb2d6a8b1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_alias.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesGetAliasHandlerFunc turns a function with the right signature into a aliases get alias handler +type AliasesGetAliasHandlerFunc func(AliasesGetAliasParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn AliasesGetAliasHandlerFunc) Handle(params AliasesGetAliasParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// AliasesGetAliasHandler interface for that can handle valid aliases get alias params +type AliasesGetAliasHandler interface { + Handle(AliasesGetAliasParams, *models.Principal) middleware.Responder +} + +// NewAliasesGetAlias creates a new http.Handler for the aliases get alias operation +func NewAliasesGetAlias(ctx *middleware.Context, handler AliasesGetAliasHandler) *AliasesGetAlias { + return &AliasesGetAlias{Context: ctx, Handler: handler} +} + +/* + AliasesGetAlias swagger:route GET /aliases/{aliasName} schema aliasesGetAlias + +# Get an alias + +Retrieve details about a specific alias by its name, including which collection (class) it points to. 
+*/ +type AliasesGetAlias struct { + Context *middleware.Context + Handler AliasesGetAliasHandler +} + +func (o *AliasesGetAlias) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewAliasesGetAliasParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_alias_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_alias_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..161a4d7ac8c7f80b51eca8e90721bb62b1876a9a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_alias_parameters.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewAliasesGetAliasParams creates a new AliasesGetAliasParams object +// +// There are no default values defined in the spec. +func NewAliasesGetAliasParams() AliasesGetAliasParams { + + return AliasesGetAliasParams{} +} + +// AliasesGetAliasParams contains all the bound params for the aliases get alias operation +// typically these are obtained from a http.Request +// +// swagger:parameters aliases.get.alias +type AliasesGetAliasParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + AliasName string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewAliasesGetAliasParams() beforehand. +func (o *AliasesGetAliasParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rAliasName, rhkAliasName, _ := route.Params.GetOK("aliasName") + if err := o.bindAliasName(rAliasName, rhkAliasName, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindAliasName binds and validates parameter AliasName from path. 
+func (o *AliasesGetAliasParams) bindAliasName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.AliasName = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_alias_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_alias_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..a982691606c66395dd27bea4d3e5c26273d9f0c4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_alias_responses.go @@ -0,0 +1,275 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesGetAliasOKCode is the HTTP code returned for type AliasesGetAliasOK +const AliasesGetAliasOKCode int = 200 + +/* +AliasesGetAliasOK Successfully retrieved the alias details. 
+ +swagger:response aliasesGetAliasOK +*/ +type AliasesGetAliasOK struct { + + /* + In: Body + */ + Payload *models.Alias `json:"body,omitempty"` +} + +// NewAliasesGetAliasOK creates AliasesGetAliasOK with default headers values +func NewAliasesGetAliasOK() *AliasesGetAliasOK { + + return &AliasesGetAliasOK{} +} + +// WithPayload adds the payload to the aliases get alias o k response +func (o *AliasesGetAliasOK) WithPayload(payload *models.Alias) *AliasesGetAliasOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases get alias o k response +func (o *AliasesGetAliasOK) SetPayload(payload *models.Alias) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesGetAliasOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesGetAliasUnauthorizedCode is the HTTP code returned for type AliasesGetAliasUnauthorized +const AliasesGetAliasUnauthorizedCode int = 401 + +/* +AliasesGetAliasUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response aliasesGetAliasUnauthorized +*/ +type AliasesGetAliasUnauthorized struct { +} + +// NewAliasesGetAliasUnauthorized creates AliasesGetAliasUnauthorized with default headers values +func NewAliasesGetAliasUnauthorized() *AliasesGetAliasUnauthorized { + + return &AliasesGetAliasUnauthorized{} +} + +// WriteResponse to the client +func (o *AliasesGetAliasUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// AliasesGetAliasForbiddenCode is the HTTP code returned for type AliasesGetAliasForbidden +const AliasesGetAliasForbiddenCode int = 403 + +/* +AliasesGetAliasForbidden Forbidden + +swagger:response aliasesGetAliasForbidden +*/ +type AliasesGetAliasForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesGetAliasForbidden creates AliasesGetAliasForbidden with default headers values +func NewAliasesGetAliasForbidden() *AliasesGetAliasForbidden { + + return &AliasesGetAliasForbidden{} +} + +// WithPayload adds the payload to the aliases get alias forbidden response +func (o *AliasesGetAliasForbidden) WithPayload(payload *models.ErrorResponse) *AliasesGetAliasForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases get alias forbidden response +func (o *AliasesGetAliasForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesGetAliasForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesGetAliasNotFoundCode is the HTTP code returned for type AliasesGetAliasNotFound +const AliasesGetAliasNotFoundCode int = 
404 + +/* +AliasesGetAliasNotFound Not Found - Alias does not exist + +swagger:response aliasesGetAliasNotFound +*/ +type AliasesGetAliasNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesGetAliasNotFound creates AliasesGetAliasNotFound with default headers values +func NewAliasesGetAliasNotFound() *AliasesGetAliasNotFound { + + return &AliasesGetAliasNotFound{} +} + +// WithPayload adds the payload to the aliases get alias not found response +func (o *AliasesGetAliasNotFound) WithPayload(payload *models.ErrorResponse) *AliasesGetAliasNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases get alias not found response +func (o *AliasesGetAliasNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesGetAliasNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesGetAliasUnprocessableEntityCode is the HTTP code returned for type AliasesGetAliasUnprocessableEntity +const AliasesGetAliasUnprocessableEntityCode int = 422 + +/* +AliasesGetAliasUnprocessableEntity Invalid alias name provided. 
+ +swagger:response aliasesGetAliasUnprocessableEntity +*/ +type AliasesGetAliasUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesGetAliasUnprocessableEntity creates AliasesGetAliasUnprocessableEntity with default headers values +func NewAliasesGetAliasUnprocessableEntity() *AliasesGetAliasUnprocessableEntity { + + return &AliasesGetAliasUnprocessableEntity{} +} + +// WithPayload adds the payload to the aliases get alias unprocessable entity response +func (o *AliasesGetAliasUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *AliasesGetAliasUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases get alias unprocessable entity response +func (o *AliasesGetAliasUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesGetAliasUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesGetAliasInternalServerErrorCode is the HTTP code returned for type AliasesGetAliasInternalServerError +const AliasesGetAliasInternalServerErrorCode int = 500 + +/* +AliasesGetAliasInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response aliasesGetAliasInternalServerError +*/ +type AliasesGetAliasInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesGetAliasInternalServerError creates AliasesGetAliasInternalServerError with default headers values +func NewAliasesGetAliasInternalServerError() *AliasesGetAliasInternalServerError { + + return &AliasesGetAliasInternalServerError{} +} + +// WithPayload adds the payload to the aliases get alias internal server error response +func (o *AliasesGetAliasInternalServerError) WithPayload(payload *models.ErrorResponse) *AliasesGetAliasInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases get alias internal server error response +func (o *AliasesGetAliasInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesGetAliasInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_alias_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_alias_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..b84ee8e208296e79f6e92fa5533e7a3f70a3925d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_alias_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// AliasesGetAliasURL generates an URL for the aliases get alias operation +type AliasesGetAliasURL struct { + AliasName string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *AliasesGetAliasURL) WithBasePath(bp string) *AliasesGetAliasURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *AliasesGetAliasURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *AliasesGetAliasURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/aliases/{aliasName}" + + aliasName := o.AliasName + if aliasName != "" { + _path = strings.Replace(_path, "{aliasName}", aliasName, -1) + } else { + return nil, errors.New("aliasName is required on AliasesGetAliasURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *AliasesGetAliasURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *AliasesGetAliasURL) String() string { + 
return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *AliasesGetAliasURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on AliasesGetAliasURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on AliasesGetAliasURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *AliasesGetAliasURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..bf38c95c8ca5d1bfd50a208d22df21a9c4a7b3c6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_parameters.go @@ -0,0 +1,88 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewAliasesGetParams creates a new AliasesGetParams object +// +// There are no default values defined in the spec. 
+func NewAliasesGetParams() AliasesGetParams { + + return AliasesGetParams{} +} + +// AliasesGetParams contains all the bound params for the aliases get operation +// typically these are obtained from a http.Request +// +// swagger:parameters aliases.get +type AliasesGetParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*Optional filter to retrieve aliases for a specific collection (class) only. If not provided, returns all aliases. + In: query + */ + Class *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewAliasesGetParams() beforehand. +func (o *AliasesGetParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + qClass, qhkClass, _ := qs.GetOK("class") + if err := o.bindClass(qClass, qhkClass, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClass binds and validates parameter Class from query. 
+func (o *AliasesGetParams) bindClass(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Class = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..79c1f77034aa8ca1fc17723c2e9aa34eb6d3f91b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_responses.go @@ -0,0 +1,230 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesGetOKCode is the HTTP code returned for type AliasesGetOK +const AliasesGetOKCode int = 200 + +/* +AliasesGetOK Successfully retrieved the list of aliases + +swagger:response aliasesGetOK +*/ +type AliasesGetOK struct { + + /* + In: Body + */ + Payload *models.AliasResponse `json:"body,omitempty"` +} + +// NewAliasesGetOK creates AliasesGetOK with default headers values +func NewAliasesGetOK() *AliasesGetOK { + + return &AliasesGetOK{} +} + +// WithPayload adds the payload to the aliases get o k response +func (o *AliasesGetOK) WithPayload(payload *models.AliasResponse) *AliasesGetOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases get o k response +func (o *AliasesGetOK) SetPayload(payload *models.AliasResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesGetUnauthorizedCode is the HTTP code returned for type AliasesGetUnauthorized +const AliasesGetUnauthorizedCode int = 401 + +/* +AliasesGetUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response aliasesGetUnauthorized +*/ +type AliasesGetUnauthorized struct { +} + +// NewAliasesGetUnauthorized creates AliasesGetUnauthorized with default headers values +func NewAliasesGetUnauthorized() *AliasesGetUnauthorized { + + return &AliasesGetUnauthorized{} +} + +// WriteResponse to the client +func (o *AliasesGetUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// AliasesGetForbiddenCode is the HTTP code returned for type AliasesGetForbidden +const AliasesGetForbiddenCode int = 403 + +/* +AliasesGetForbidden Forbidden + +swagger:response aliasesGetForbidden +*/ +type AliasesGetForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesGetForbidden creates AliasesGetForbidden with default headers values +func NewAliasesGetForbidden() *AliasesGetForbidden { + + return &AliasesGetForbidden{} +} + +// WithPayload adds the payload to the aliases get forbidden response +func (o *AliasesGetForbidden) WithPayload(payload *models.ErrorResponse) *AliasesGetForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases get forbidden response +func (o *AliasesGetForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesGetForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesGetUnprocessableEntityCode is the HTTP code returned for type AliasesGetUnprocessableEntity +const AliasesGetUnprocessableEntityCode int = 422 + +/* +AliasesGetUnprocessableEntity Invalid collection (class) parameter provided + +swagger:response 
aliasesGetUnprocessableEntity +*/ +type AliasesGetUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesGetUnprocessableEntity creates AliasesGetUnprocessableEntity with default headers values +func NewAliasesGetUnprocessableEntity() *AliasesGetUnprocessableEntity { + + return &AliasesGetUnprocessableEntity{} +} + +// WithPayload adds the payload to the aliases get unprocessable entity response +func (o *AliasesGetUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *AliasesGetUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases get unprocessable entity response +func (o *AliasesGetUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesGetUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesGetInternalServerErrorCode is the HTTP code returned for type AliasesGetInternalServerError +const AliasesGetInternalServerErrorCode int = 500 + +/* +AliasesGetInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response aliasesGetInternalServerError +*/ +type AliasesGetInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesGetInternalServerError creates AliasesGetInternalServerError with default headers values +func NewAliasesGetInternalServerError() *AliasesGetInternalServerError { + + return &AliasesGetInternalServerError{} +} + +// WithPayload adds the payload to the aliases get internal server error response +func (o *AliasesGetInternalServerError) WithPayload(payload *models.ErrorResponse) *AliasesGetInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases get internal server error response +func (o *AliasesGetInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesGetInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..1f35aef87f45bc5c5399077e30ac421d09dd6c00 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_get_urlbuilder.go @@ -0,0 +1,114 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// AliasesGetURL generates an URL for the aliases get operation +type AliasesGetURL struct { + Class *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *AliasesGetURL) WithBasePath(bp string) *AliasesGetURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *AliasesGetURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *AliasesGetURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/aliases" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var classQ string + if o.Class != nil { + classQ = *o.Class + } + if classQ != "" { + qs.Set("class", classQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *AliasesGetURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *AliasesGetURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *AliasesGetURL) BuildFull(scheme, host string) (*url.URL, error) { 
+ if scheme == "" { + return nil, errors.New("scheme is required for a full url on AliasesGetURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on AliasesGetURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *AliasesGetURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_update.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_update.go new file mode 100644 index 0000000000000000000000000000000000000000..f0a4485b1e09b8c4b23dbfa3c200046175881906 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_update.go @@ -0,0 +1,124 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesUpdateHandlerFunc turns a function with the right signature into a aliases update handler +type AliasesUpdateHandlerFunc func(AliasesUpdateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn AliasesUpdateHandlerFunc) Handle(params AliasesUpdateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// AliasesUpdateHandler interface for that can handle valid aliases update params +type AliasesUpdateHandler interface { + Handle(AliasesUpdateParams, *models.Principal) middleware.Responder +} + +// NewAliasesUpdate creates a new http.Handler for the aliases update operation +func NewAliasesUpdate(ctx *middleware.Context, handler AliasesUpdateHandler) *AliasesUpdate { + return &AliasesUpdate{Context: ctx, Handler: handler} +} + +/* + AliasesUpdate swagger:route PUT /aliases/{aliasName} schema aliasesUpdate + +# Update an alias + +Update an existing alias to point to a different collection (class). This allows you to redirect an alias from one collection to another without changing the alias name. 
+*/ +type AliasesUpdate struct { + Context *middleware.Context + Handler AliasesUpdateHandler +} + +func (o *AliasesUpdate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewAliasesUpdateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// AliasesUpdateBody aliases update body +// +// swagger:model AliasesUpdateBody +type AliasesUpdateBody struct { + + // The new collection (class) that the alias should point to. 
+ Class string `json:"class,omitempty" yaml:"class,omitempty"` +} + +// Validate validates this aliases update body +func (o *AliasesUpdateBody) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this aliases update body based on context it is used +func (o *AliasesUpdateBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *AliasesUpdateBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *AliasesUpdateBody) UnmarshalBinary(b []byte) error { + var res AliasesUpdateBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_update_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_update_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..08701bfa826744e39c61dd5ce8dd51c17edfd38b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_update_parameters.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewAliasesUpdateParams creates a new AliasesUpdateParams object +// +// There are no default values defined in the spec. +func NewAliasesUpdateParams() AliasesUpdateParams { + + return AliasesUpdateParams{} +} + +// AliasesUpdateParams contains all the bound params for the aliases update operation +// typically these are obtained from a http.Request +// +// swagger:parameters aliases.update +type AliasesUpdateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + AliasName string + /* + Required: true + In: body + */ + Body AliasesUpdateBody +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewAliasesUpdateParams() beforehand. 
+func (o *AliasesUpdateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rAliasName, rhkAliasName, _ := route.Params.GetOK("aliasName") + if err := o.bindAliasName(rAliasName, rhkAliasName, route.Formats); err != nil { + res = append(res, err) + } + + if runtime.HasBody(r) { + defer r.Body.Close() + var body AliasesUpdateBody + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindAliasName binds and validates parameter AliasName from path. 
+func (o *AliasesUpdateParams) bindAliasName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.AliasName = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_update_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_update_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..d5915a287ab2e870335be20e5eed690a8badb4e0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_update_responses.go @@ -0,0 +1,275 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// AliasesUpdateOKCode is the HTTP code returned for type AliasesUpdateOK +const AliasesUpdateOKCode int = 200 + +/* +AliasesUpdateOK Successfully updated the alias to point to the new collection (class). 
+ +swagger:response aliasesUpdateOK +*/ +type AliasesUpdateOK struct { + + /* + In: Body + */ + Payload *models.Alias `json:"body,omitempty"` +} + +// NewAliasesUpdateOK creates AliasesUpdateOK with default headers values +func NewAliasesUpdateOK() *AliasesUpdateOK { + + return &AliasesUpdateOK{} +} + +// WithPayload adds the payload to the aliases update o k response +func (o *AliasesUpdateOK) WithPayload(payload *models.Alias) *AliasesUpdateOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases update o k response +func (o *AliasesUpdateOK) SetPayload(payload *models.Alias) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesUpdateUnauthorizedCode is the HTTP code returned for type AliasesUpdateUnauthorized +const AliasesUpdateUnauthorizedCode int = 401 + +/* +AliasesUpdateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response aliasesUpdateUnauthorized +*/ +type AliasesUpdateUnauthorized struct { +} + +// NewAliasesUpdateUnauthorized creates AliasesUpdateUnauthorized with default headers values +func NewAliasesUpdateUnauthorized() *AliasesUpdateUnauthorized { + + return &AliasesUpdateUnauthorized{} +} + +// WriteResponse to the client +func (o *AliasesUpdateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// AliasesUpdateForbiddenCode is the HTTP code returned for type AliasesUpdateForbidden +const AliasesUpdateForbiddenCode int = 403 + +/* +AliasesUpdateForbidden Forbidden + +swagger:response aliasesUpdateForbidden +*/ +type AliasesUpdateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesUpdateForbidden creates AliasesUpdateForbidden with default headers values +func NewAliasesUpdateForbidden() *AliasesUpdateForbidden { + + return &AliasesUpdateForbidden{} +} + +// WithPayload adds the payload to the aliases update forbidden response +func (o *AliasesUpdateForbidden) WithPayload(payload *models.ErrorResponse) *AliasesUpdateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases update forbidden response +func (o *AliasesUpdateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesUpdateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesUpdateNotFoundCode is the HTTP code returned for type AliasesUpdateNotFound +const AliasesUpdateNotFoundCode int = 404 + +/* +AliasesUpdateNotFound Not Found - Alias does not 
exist + +swagger:response aliasesUpdateNotFound +*/ +type AliasesUpdateNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesUpdateNotFound creates AliasesUpdateNotFound with default headers values +func NewAliasesUpdateNotFound() *AliasesUpdateNotFound { + + return &AliasesUpdateNotFound{} +} + +// WithPayload adds the payload to the aliases update not found response +func (o *AliasesUpdateNotFound) WithPayload(payload *models.ErrorResponse) *AliasesUpdateNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases update not found response +func (o *AliasesUpdateNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesUpdateNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesUpdateUnprocessableEntityCode is the HTTP code returned for type AliasesUpdateUnprocessableEntity +const AliasesUpdateUnprocessableEntityCode int = 422 + +/* +AliasesUpdateUnprocessableEntity Invalid update alias request. 
+ +swagger:response aliasesUpdateUnprocessableEntity +*/ +type AliasesUpdateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesUpdateUnprocessableEntity creates AliasesUpdateUnprocessableEntity with default headers values +func NewAliasesUpdateUnprocessableEntity() *AliasesUpdateUnprocessableEntity { + + return &AliasesUpdateUnprocessableEntity{} +} + +// WithPayload adds the payload to the aliases update unprocessable entity response +func (o *AliasesUpdateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *AliasesUpdateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases update unprocessable entity response +func (o *AliasesUpdateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesUpdateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// AliasesUpdateInternalServerErrorCode is the HTTP code returned for type AliasesUpdateInternalServerError +const AliasesUpdateInternalServerErrorCode int = 500 + +/* +AliasesUpdateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response aliasesUpdateInternalServerError +*/ +type AliasesUpdateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewAliasesUpdateInternalServerError creates AliasesUpdateInternalServerError with default headers values +func NewAliasesUpdateInternalServerError() *AliasesUpdateInternalServerError { + + return &AliasesUpdateInternalServerError{} +} + +// WithPayload adds the payload to the aliases update internal server error response +func (o *AliasesUpdateInternalServerError) WithPayload(payload *models.ErrorResponse) *AliasesUpdateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the aliases update internal server error response +func (o *AliasesUpdateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *AliasesUpdateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_update_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_update_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..ac68e4577cf98fe5c5c4d5dab2c670e4a73532cf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/aliases_update_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// AliasesUpdateURL generates an URL for the aliases update operation +type AliasesUpdateURL struct { + AliasName string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *AliasesUpdateURL) WithBasePath(bp string) *AliasesUpdateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *AliasesUpdateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *AliasesUpdateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/aliases/{aliasName}" + + aliasName := o.AliasName + if aliasName != "" { + _path = strings.Replace(_path, "{aliasName}", aliasName, -1) + } else { + return nil, errors.New("aliasName is required on AliasesUpdateURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *AliasesUpdateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *AliasesUpdateURL) String() string { + return 
o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *AliasesUpdateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on AliasesUpdateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on AliasesUpdateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *AliasesUpdateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_dump.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_dump.go new file mode 100644 index 0000000000000000000000000000000000000000..7d232248ff07ac9c64174b1065927787c9c8615e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_dump.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaDumpHandlerFunc turns a function with the right signature into a schema dump handler +type SchemaDumpHandlerFunc func(SchemaDumpParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn SchemaDumpHandlerFunc) Handle(params SchemaDumpParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// SchemaDumpHandler interface for that can handle valid schema dump params +type SchemaDumpHandler interface { + Handle(SchemaDumpParams, *models.Principal) middleware.Responder +} + +// NewSchemaDump creates a new http.Handler for the schema dump operation +func NewSchemaDump(ctx *middleware.Context, handler SchemaDumpHandler) *SchemaDump { + return &SchemaDump{Context: ctx, Handler: handler} +} + +/* + SchemaDump swagger:route GET /schema schema schemaDump + +Dump the current the database schema. + +Fetch an array of all collection definitions from the schema. 
+*/ +type SchemaDump struct { + Context *middleware.Context + Handler SchemaDumpHandler +} + +func (o *SchemaDump) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewSchemaDumpParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_dump_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_dump_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..33b93809f8b4d51a27964405a369b4b8f6deaf1a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_dump_parameters.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewSchemaDumpParams creates a new SchemaDumpParams object +// with the default values initialized. +func NewSchemaDumpParams() SchemaDumpParams { + + var ( + // initialize parameters with default values + + consistencyDefault = bool(true) + ) + + return SchemaDumpParams{ + Consistency: &consistencyDefault, + } +} + +// SchemaDumpParams contains all the bound params for the schema dump operation +// typically these are obtained from a http.Request +// +// swagger:parameters schema.dump +type SchemaDumpParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*If consistency is true, the request will be proxied to the leader to ensure strong schema consistency + In: header + Default: true + */ + Consistency *bool +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewSchemaDumpParams() beforehand. +func (o *SchemaDumpParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if err := o.bindConsistency(r.Header[http.CanonicalHeaderKey("consistency")], true, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindConsistency binds and validates parameter Consistency from header. 
+func (o *SchemaDumpParams) bindConsistency(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + + if raw == "" { // empty values pass all other validations + // Default values have been previously initialized by NewSchemaDumpParams() + return nil + } + + value, err := swag.ConvertBool(raw) + if err != nil { + return errors.InvalidType("consistency", "header", "bool", raw) + } + o.Consistency = &value + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_dump_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_dump_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..680606734648ecb1ae781b9e1d1e55931617a68a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_dump_responses.go @@ -0,0 +1,185 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaDumpOKCode is the HTTP code returned for type SchemaDumpOK +const SchemaDumpOKCode int = 200 + +/* +SchemaDumpOK Successfully dumped the database schema. 
+ +swagger:response schemaDumpOK +*/ +type SchemaDumpOK struct { + + /* + In: Body + */ + Payload *models.Schema `json:"body,omitempty"` +} + +// NewSchemaDumpOK creates SchemaDumpOK with default headers values +func NewSchemaDumpOK() *SchemaDumpOK { + + return &SchemaDumpOK{} +} + +// WithPayload adds the payload to the schema dump o k response +func (o *SchemaDumpOK) WithPayload(payload *models.Schema) *SchemaDumpOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema dump o k response +func (o *SchemaDumpOK) SetPayload(payload *models.Schema) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaDumpOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaDumpUnauthorizedCode is the HTTP code returned for type SchemaDumpUnauthorized +const SchemaDumpUnauthorizedCode int = 401 + +/* +SchemaDumpUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response schemaDumpUnauthorized +*/ +type SchemaDumpUnauthorized struct { +} + +// NewSchemaDumpUnauthorized creates SchemaDumpUnauthorized with default headers values +func NewSchemaDumpUnauthorized() *SchemaDumpUnauthorized { + + return &SchemaDumpUnauthorized{} +} + +// WriteResponse to the client +func (o *SchemaDumpUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// SchemaDumpForbiddenCode is the HTTP code returned for type SchemaDumpForbidden +const SchemaDumpForbiddenCode int = 403 + +/* +SchemaDumpForbidden Forbidden + +swagger:response schemaDumpForbidden +*/ +type SchemaDumpForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaDumpForbidden creates SchemaDumpForbidden with default headers values +func NewSchemaDumpForbidden() *SchemaDumpForbidden { + + return &SchemaDumpForbidden{} +} + +// WithPayload adds the payload to the schema dump forbidden response +func (o *SchemaDumpForbidden) WithPayload(payload *models.ErrorResponse) *SchemaDumpForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema dump forbidden response +func (o *SchemaDumpForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaDumpForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaDumpInternalServerErrorCode is the HTTP code returned for type SchemaDumpInternalServerError +const SchemaDumpInternalServerErrorCode int = 500 + +/* +SchemaDumpInternalServerError An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error. + +swagger:response schemaDumpInternalServerError +*/ +type SchemaDumpInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaDumpInternalServerError creates SchemaDumpInternalServerError with default headers values +func NewSchemaDumpInternalServerError() *SchemaDumpInternalServerError { + + return &SchemaDumpInternalServerError{} +} + +// WithPayload adds the payload to the schema dump internal server error response +func (o *SchemaDumpInternalServerError) WithPayload(payload *models.ErrorResponse) *SchemaDumpInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema dump internal server error response +func (o *SchemaDumpInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaDumpInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_dump_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_dump_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..2b3c2de296886fae3cba62c012a4ba1bb1b38b8c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_dump_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// SchemaDumpURL generates an URL for the schema dump operation +type SchemaDumpURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *SchemaDumpURL) WithBasePath(bp string) *SchemaDumpURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *SchemaDumpURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *SchemaDumpURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/schema" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *SchemaDumpURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *SchemaDumpURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *SchemaDumpURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on SchemaDumpURL") + } + if host == "" { + return nil, 
errors.New("host is required for a full url on SchemaDumpURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *SchemaDumpURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_create.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_create.go new file mode 100644 index 0000000000000000000000000000000000000000..450395c9997f403b3751116c23f21526ab64979c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_create.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsCreateHandlerFunc turns a function with the right signature into a schema objects create handler +type SchemaObjectsCreateHandlerFunc func(SchemaObjectsCreateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn SchemaObjectsCreateHandlerFunc) Handle(params SchemaObjectsCreateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// SchemaObjectsCreateHandler interface for that can handle valid schema objects create params +type SchemaObjectsCreateHandler interface { + Handle(SchemaObjectsCreateParams, *models.Principal) middleware.Responder +} + +// NewSchemaObjectsCreate creates a new http.Handler for the schema objects create operation +func NewSchemaObjectsCreate(ctx *middleware.Context, handler SchemaObjectsCreateHandler) *SchemaObjectsCreate { + return &SchemaObjectsCreate{Context: ctx, Handler: handler} +} + +/* + SchemaObjectsCreate swagger:route POST /schema schema schemaObjectsCreate + +Create a new Object class in the schema. + +Create a new data object collection.

    If AutoSchema is enabled, Weaviate will attempt to infer the schema from the data at import time. However, manual schema definition is recommended for production environments. +*/ +type SchemaObjectsCreate struct { + Context *middleware.Context + Handler SchemaObjectsCreateHandler +} + +func (o *SchemaObjectsCreate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewSchemaObjectsCreateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_create_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..e88b98d6bd9f48fc5b6696bcff2e713089e8db2e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_create_parameters.go @@ -0,0 +1,95 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewSchemaObjectsCreateParams creates a new SchemaObjectsCreateParams object +// +// There are no default values defined in the spec. +func NewSchemaObjectsCreateParams() SchemaObjectsCreateParams { + + return SchemaObjectsCreateParams{} +} + +// SchemaObjectsCreateParams contains all the bound params for the schema objects create operation +// typically these are obtained from a http.Request +// +// swagger:parameters schema.objects.create +type SchemaObjectsCreateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + ObjectClass *models.Class +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewSchemaObjectsCreateParams() beforehand. 
+func (o *SchemaObjectsCreateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.Class + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("objectClass", "body", "")) + } else { + res = append(res, errors.NewParseError("objectClass", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.ObjectClass = &body + } + } + } else { + res = append(res, errors.Required("objectClass", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_create_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..2b2216c1a931275ccb88441035db8ff9d16d5c2a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_create_responses.go @@ -0,0 +1,230 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsCreateOKCode is the HTTP code returned for type SchemaObjectsCreateOK +const SchemaObjectsCreateOKCode int = 200 + +/* +SchemaObjectsCreateOK Added the new Object class to the schema. + +swagger:response schemaObjectsCreateOK +*/ +type SchemaObjectsCreateOK struct { + + /* + In: Body + */ + Payload *models.Class `json:"body,omitempty"` +} + +// NewSchemaObjectsCreateOK creates SchemaObjectsCreateOK with default headers values +func NewSchemaObjectsCreateOK() *SchemaObjectsCreateOK { + + return &SchemaObjectsCreateOK{} +} + +// WithPayload adds the payload to the schema objects create o k response +func (o *SchemaObjectsCreateOK) WithPayload(payload *models.Class) *SchemaObjectsCreateOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects create o k response +func (o *SchemaObjectsCreateOK) SetPayload(payload *models.Class) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsCreateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsCreateUnauthorizedCode is the HTTP code returned for type SchemaObjectsCreateUnauthorized +const SchemaObjectsCreateUnauthorizedCode int = 401 + +/* +SchemaObjectsCreateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response schemaObjectsCreateUnauthorized +*/ +type SchemaObjectsCreateUnauthorized struct { +} + +// NewSchemaObjectsCreateUnauthorized creates SchemaObjectsCreateUnauthorized with default headers values +func NewSchemaObjectsCreateUnauthorized() *SchemaObjectsCreateUnauthorized { + + return &SchemaObjectsCreateUnauthorized{} +} + +// WriteResponse to the client +func (o *SchemaObjectsCreateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// SchemaObjectsCreateForbiddenCode is the HTTP code returned for type SchemaObjectsCreateForbidden +const SchemaObjectsCreateForbiddenCode int = 403 + +/* +SchemaObjectsCreateForbidden Forbidden + +swagger:response schemaObjectsCreateForbidden +*/ +type SchemaObjectsCreateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsCreateForbidden creates SchemaObjectsCreateForbidden with default headers values +func NewSchemaObjectsCreateForbidden() *SchemaObjectsCreateForbidden { + + return &SchemaObjectsCreateForbidden{} +} + +// WithPayload adds the payload to the schema objects create forbidden response +func (o *SchemaObjectsCreateForbidden) WithPayload(payload *models.ErrorResponse) *SchemaObjectsCreateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects create forbidden response +func (o *SchemaObjectsCreateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsCreateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// 
SchemaObjectsCreateUnprocessableEntityCode is the HTTP code returned for type SchemaObjectsCreateUnprocessableEntity +const SchemaObjectsCreateUnprocessableEntityCode int = 422 + +/* +SchemaObjectsCreateUnprocessableEntity Invalid Object class + +swagger:response schemaObjectsCreateUnprocessableEntity +*/ +type SchemaObjectsCreateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsCreateUnprocessableEntity creates SchemaObjectsCreateUnprocessableEntity with default headers values +func NewSchemaObjectsCreateUnprocessableEntity() *SchemaObjectsCreateUnprocessableEntity { + + return &SchemaObjectsCreateUnprocessableEntity{} +} + +// WithPayload adds the payload to the schema objects create unprocessable entity response +func (o *SchemaObjectsCreateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *SchemaObjectsCreateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects create unprocessable entity response +func (o *SchemaObjectsCreateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsCreateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsCreateInternalServerErrorCode is the HTTP code returned for type SchemaObjectsCreateInternalServerError +const SchemaObjectsCreateInternalServerErrorCode int = 500 + +/* +SchemaObjectsCreateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response schemaObjectsCreateInternalServerError +*/ +type SchemaObjectsCreateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsCreateInternalServerError creates SchemaObjectsCreateInternalServerError with default headers values +func NewSchemaObjectsCreateInternalServerError() *SchemaObjectsCreateInternalServerError { + + return &SchemaObjectsCreateInternalServerError{} +} + +// WithPayload adds the payload to the schema objects create internal server error response +func (o *SchemaObjectsCreateInternalServerError) WithPayload(payload *models.ErrorResponse) *SchemaObjectsCreateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects create internal server error response +func (o *SchemaObjectsCreateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsCreateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_create_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_create_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..43482417b50ed12f274a1c81a05de01fbfc4403a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_create_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright 
© 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// SchemaObjectsCreateURL generates an URL for the schema objects create operation +type SchemaObjectsCreateURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *SchemaObjectsCreateURL) WithBasePath(bp string) *SchemaObjectsCreateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *SchemaObjectsCreateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *SchemaObjectsCreateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/schema" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *SchemaObjectsCreateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *SchemaObjectsCreateURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *SchemaObjectsCreateURL) BuildFull(scheme, host string) (*url.URL, error) { + if 
scheme == "" { + return nil, errors.New("scheme is required for a full url on SchemaObjectsCreateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on SchemaObjectsCreateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *SchemaObjectsCreateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_delete.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..d3b57eac4f4f25554aa5caf62f29ca75ce90f4c2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_delete.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsDeleteHandlerFunc turns a function with the right signature into a schema objects delete handler +type SchemaObjectsDeleteHandlerFunc func(SchemaObjectsDeleteParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn SchemaObjectsDeleteHandlerFunc) Handle(params SchemaObjectsDeleteParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// SchemaObjectsDeleteHandler interface for that can handle valid schema objects delete params +type SchemaObjectsDeleteHandler interface { + Handle(SchemaObjectsDeleteParams, *models.Principal) middleware.Responder +} + +// NewSchemaObjectsDelete creates a new http.Handler for the schema objects delete operation +func NewSchemaObjectsDelete(ctx *middleware.Context, handler SchemaObjectsDeleteHandler) *SchemaObjectsDelete { + return &SchemaObjectsDelete{Context: ctx, Handler: handler} +} + +/* + SchemaObjectsDelete swagger:route DELETE /schema/{className} schema schemaObjectsDelete + +Remove an Object class (and all data in the instances) from the schema. + +Remove a collection from the schema. This will also delete all the objects in the collection. 
+*/ +type SchemaObjectsDelete struct { + Context *middleware.Context + Handler SchemaObjectsDeleteHandler +} + +func (o *SchemaObjectsDelete) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewSchemaObjectsDeleteParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_delete_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..7bef54d43e72abef177e0bc170b0c97a018453c6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_delete_parameters.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewSchemaObjectsDeleteParams creates a new SchemaObjectsDeleteParams object +// +// There are no default values defined in the spec. +func NewSchemaObjectsDeleteParams() SchemaObjectsDeleteParams { + + return SchemaObjectsDeleteParams{} +} + +// SchemaObjectsDeleteParams contains all the bound params for the schema objects delete operation +// typically these are obtained from a http.Request +// +// swagger:parameters schema.objects.delete +type SchemaObjectsDeleteParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClassName string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewSchemaObjectsDeleteParams() beforehand. +func (o *SchemaObjectsDeleteParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. 
+func (o *SchemaObjectsDeleteParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_delete_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..bcc136eb4414862762ff74ca3e37520ab8944f3a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_delete_responses.go @@ -0,0 +1,210 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsDeleteOKCode is the HTTP code returned for type SchemaObjectsDeleteOK +const SchemaObjectsDeleteOKCode int = 200 + +/* +SchemaObjectsDeleteOK Removed the Object class from the schema. 
+ +swagger:response schemaObjectsDeleteOK +*/ +type SchemaObjectsDeleteOK struct { +} + +// NewSchemaObjectsDeleteOK creates SchemaObjectsDeleteOK with default headers values +func NewSchemaObjectsDeleteOK() *SchemaObjectsDeleteOK { + + return &SchemaObjectsDeleteOK{} +} + +// WriteResponse to the client +func (o *SchemaObjectsDeleteOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// SchemaObjectsDeleteBadRequestCode is the HTTP code returned for type SchemaObjectsDeleteBadRequest +const SchemaObjectsDeleteBadRequestCode int = 400 + +/* +SchemaObjectsDeleteBadRequest Could not delete the Object class. + +swagger:response schemaObjectsDeleteBadRequest +*/ +type SchemaObjectsDeleteBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsDeleteBadRequest creates SchemaObjectsDeleteBadRequest with default headers values +func NewSchemaObjectsDeleteBadRequest() *SchemaObjectsDeleteBadRequest { + + return &SchemaObjectsDeleteBadRequest{} +} + +// WithPayload adds the payload to the schema objects delete bad request response +func (o *SchemaObjectsDeleteBadRequest) WithPayload(payload *models.ErrorResponse) *SchemaObjectsDeleteBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects delete bad request response +func (o *SchemaObjectsDeleteBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsDeleteBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsDeleteUnauthorizedCode is the HTTP code returned for 
type SchemaObjectsDeleteUnauthorized +const SchemaObjectsDeleteUnauthorizedCode int = 401 + +/* +SchemaObjectsDeleteUnauthorized Unauthorized or invalid credentials. + +swagger:response schemaObjectsDeleteUnauthorized +*/ +type SchemaObjectsDeleteUnauthorized struct { +} + +// NewSchemaObjectsDeleteUnauthorized creates SchemaObjectsDeleteUnauthorized with default headers values +func NewSchemaObjectsDeleteUnauthorized() *SchemaObjectsDeleteUnauthorized { + + return &SchemaObjectsDeleteUnauthorized{} +} + +// WriteResponse to the client +func (o *SchemaObjectsDeleteUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// SchemaObjectsDeleteForbiddenCode is the HTTP code returned for type SchemaObjectsDeleteForbidden +const SchemaObjectsDeleteForbiddenCode int = 403 + +/* +SchemaObjectsDeleteForbidden Forbidden + +swagger:response schemaObjectsDeleteForbidden +*/ +type SchemaObjectsDeleteForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsDeleteForbidden creates SchemaObjectsDeleteForbidden with default headers values +func NewSchemaObjectsDeleteForbidden() *SchemaObjectsDeleteForbidden { + + return &SchemaObjectsDeleteForbidden{} +} + +// WithPayload adds the payload to the schema objects delete forbidden response +func (o *SchemaObjectsDeleteForbidden) WithPayload(payload *models.ErrorResponse) *SchemaObjectsDeleteForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects delete forbidden response +func (o *SchemaObjectsDeleteForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsDeleteForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := 
o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsDeleteInternalServerErrorCode is the HTTP code returned for type SchemaObjectsDeleteInternalServerError +const SchemaObjectsDeleteInternalServerErrorCode int = 500 + +/* +SchemaObjectsDeleteInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response schemaObjectsDeleteInternalServerError +*/ +type SchemaObjectsDeleteInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsDeleteInternalServerError creates SchemaObjectsDeleteInternalServerError with default headers values +func NewSchemaObjectsDeleteInternalServerError() *SchemaObjectsDeleteInternalServerError { + + return &SchemaObjectsDeleteInternalServerError{} +} + +// WithPayload adds the payload to the schema objects delete internal server error response +func (o *SchemaObjectsDeleteInternalServerError) WithPayload(payload *models.ErrorResponse) *SchemaObjectsDeleteInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects delete internal server error response +func (o *SchemaObjectsDeleteInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsDeleteInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_delete_urlbuilder.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_delete_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..39c9b05fdfb13ca06362937d57f5ad9ef74aa3fd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_delete_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// SchemaObjectsDeleteURL generates an URL for the schema objects delete operation +type SchemaObjectsDeleteURL struct { + ClassName string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *SchemaObjectsDeleteURL) WithBasePath(bp string) *SchemaObjectsDeleteURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *SchemaObjectsDeleteURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *SchemaObjectsDeleteURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/schema/{className}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on SchemaObjectsDeleteURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *SchemaObjectsDeleteURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *SchemaObjectsDeleteURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *SchemaObjectsDeleteURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on SchemaObjectsDeleteURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on SchemaObjectsDeleteURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *SchemaObjectsDeleteURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_get.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_get.go new file mode 100644 index 0000000000000000000000000000000000000000..766ddcc2087708db2e39c843a7a6b6170dfded09 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_get.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsGetHandlerFunc turns a function with the right signature into a schema objects get handler +type SchemaObjectsGetHandlerFunc func(SchemaObjectsGetParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn SchemaObjectsGetHandlerFunc) Handle(params SchemaObjectsGetParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// SchemaObjectsGetHandler interface for that can handle valid schema objects get params +type SchemaObjectsGetHandler interface { + Handle(SchemaObjectsGetParams, *models.Principal) middleware.Responder +} + +// NewSchemaObjectsGet creates a new http.Handler for the schema objects get operation +func NewSchemaObjectsGet(ctx *middleware.Context, handler SchemaObjectsGetHandler) *SchemaObjectsGet { + return &SchemaObjectsGet{Context: ctx, Handler: handler} +} + +/* + SchemaObjectsGet swagger:route GET /schema/{className} schema schemaObjectsGet + +Get a single class from the schema +*/ +type 
SchemaObjectsGet struct { + Context *middleware.Context + Handler SchemaObjectsGetHandler +} + +func (o *SchemaObjectsGet) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewSchemaObjectsGetParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_get_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..1b5991c17ac95fa0dbd21d85f731a1988ee4d006 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_get_parameters.go @@ -0,0 +1,122 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewSchemaObjectsGetParams creates a new SchemaObjectsGetParams object +// with the default values initialized. +func NewSchemaObjectsGetParams() SchemaObjectsGetParams { + + var ( + // initialize parameters with default values + + consistencyDefault = bool(true) + ) + + return SchemaObjectsGetParams{ + Consistency: &consistencyDefault, + } +} + +// SchemaObjectsGetParams contains all the bound params for the schema objects get operation +// typically these are obtained from a http.Request +// +// swagger:parameters schema.objects.get +type SchemaObjectsGetParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClassName string + /*If consistency is true, the request will be proxied to the leader to ensure strong schema consistency + In: header + Default: true + */ + Consistency *bool +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewSchemaObjectsGetParams() beforehand. +func (o *SchemaObjectsGetParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + if err := o.bindConsistency(r.Header[http.CanonicalHeaderKey("consistency")], true, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. +func (o *SchemaObjectsGetParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindConsistency binds and validates parameter Consistency from header. +func (o *SchemaObjectsGetParams) bindConsistency(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + + if raw == "" { // empty values pass all other validations + // Default values have been previously initialized by NewSchemaObjectsGetParams() + return nil + } + + value, err := swag.ConvertBool(raw) + if err != nil { + return errors.InvalidType("consistency", "header", "bool", raw) + } + o.Consistency = &value + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_get_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..5575f37cb33f412fde33f9bc7b919d9e3a5b88d7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_get_responses.go @@ -0,0 +1,210 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsGetOKCode is the HTTP code returned for type SchemaObjectsGetOK +const SchemaObjectsGetOKCode int = 200 + +/* +SchemaObjectsGetOK Found the Class, returned as body + +swagger:response schemaObjectsGetOK +*/ +type SchemaObjectsGetOK struct { + + /* + In: Body + */ + Payload *models.Class `json:"body,omitempty"` +} + +// NewSchemaObjectsGetOK creates SchemaObjectsGetOK with default headers values +func NewSchemaObjectsGetOK() *SchemaObjectsGetOK { + + return &SchemaObjectsGetOK{} +} + +// WithPayload adds the payload to the schema objects get o k response +func (o *SchemaObjectsGetOK) WithPayload(payload *models.Class) *SchemaObjectsGetOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects get o k response +func (o *SchemaObjectsGetOK) SetPayload(payload *models.Class) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsGetUnauthorizedCode is the HTTP code returned for type SchemaObjectsGetUnauthorized +const SchemaObjectsGetUnauthorizedCode int = 401 + +/* +SchemaObjectsGetUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response schemaObjectsGetUnauthorized +*/ +type SchemaObjectsGetUnauthorized struct { +} + +// NewSchemaObjectsGetUnauthorized creates SchemaObjectsGetUnauthorized with default headers values +func NewSchemaObjectsGetUnauthorized() *SchemaObjectsGetUnauthorized { + + return &SchemaObjectsGetUnauthorized{} +} + +// WriteResponse to the client +func (o *SchemaObjectsGetUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// SchemaObjectsGetForbiddenCode is the HTTP code returned for type SchemaObjectsGetForbidden +const SchemaObjectsGetForbiddenCode int = 403 + +/* +SchemaObjectsGetForbidden Forbidden + +swagger:response schemaObjectsGetForbidden +*/ +type SchemaObjectsGetForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsGetForbidden creates SchemaObjectsGetForbidden with default headers values +func NewSchemaObjectsGetForbidden() *SchemaObjectsGetForbidden { + + return &SchemaObjectsGetForbidden{} +} + +// WithPayload adds the payload to the schema objects get forbidden response +func (o *SchemaObjectsGetForbidden) WithPayload(payload *models.ErrorResponse) *SchemaObjectsGetForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects get forbidden response +func (o *SchemaObjectsGetForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsGetForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsGetNotFoundCode is the HTTP code returned for type SchemaObjectsGetNotFound +const 
SchemaObjectsGetNotFoundCode int = 404 + +/* +SchemaObjectsGetNotFound This class does not exist + +swagger:response schemaObjectsGetNotFound +*/ +type SchemaObjectsGetNotFound struct { +} + +// NewSchemaObjectsGetNotFound creates SchemaObjectsGetNotFound with default headers values +func NewSchemaObjectsGetNotFound() *SchemaObjectsGetNotFound { + + return &SchemaObjectsGetNotFound{} +} + +// WriteResponse to the client +func (o *SchemaObjectsGetNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// SchemaObjectsGetInternalServerErrorCode is the HTTP code returned for type SchemaObjectsGetInternalServerError +const SchemaObjectsGetInternalServerErrorCode int = 500 + +/* +SchemaObjectsGetInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response schemaObjectsGetInternalServerError +*/ +type SchemaObjectsGetInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsGetInternalServerError creates SchemaObjectsGetInternalServerError with default headers values +func NewSchemaObjectsGetInternalServerError() *SchemaObjectsGetInternalServerError { + + return &SchemaObjectsGetInternalServerError{} +} + +// WithPayload adds the payload to the schema objects get internal server error response +func (o *SchemaObjectsGetInternalServerError) WithPayload(payload *models.ErrorResponse) *SchemaObjectsGetInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects get internal server error response +func (o *SchemaObjectsGetInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsGetInternalServerError) 
WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_get_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_get_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..94aa5ee343a0c4d5f920b0dab316046e92bf0ebc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_get_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// SchemaObjectsGetURL generates an URL for the schema objects get operation +type SchemaObjectsGetURL struct { + ClassName string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *SchemaObjectsGetURL) WithBasePath(bp string) *SchemaObjectsGetURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *SchemaObjectsGetURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *SchemaObjectsGetURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/schema/{className}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on SchemaObjectsGetURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *SchemaObjectsGetURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *SchemaObjectsGetURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *SchemaObjectsGetURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on SchemaObjectsGetURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on SchemaObjectsGetURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *SchemaObjectsGetURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_properties_add.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_properties_add.go new file mode 100644 index 0000000000000000000000000000000000000000..54485711aac6faeb9e27976df7ddcd6f0596ef03 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_properties_add.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsPropertiesAddHandlerFunc turns a function with the right signature into a schema objects properties add handler +type SchemaObjectsPropertiesAddHandlerFunc func(SchemaObjectsPropertiesAddParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn SchemaObjectsPropertiesAddHandlerFunc) Handle(params SchemaObjectsPropertiesAddParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// SchemaObjectsPropertiesAddHandler interface for that can handle valid schema objects properties add params +type SchemaObjectsPropertiesAddHandler interface { + Handle(SchemaObjectsPropertiesAddParams, *models.Principal) middleware.Responder +} + +// NewSchemaObjectsPropertiesAdd creates a new http.Handler for the schema objects properties add operation +func NewSchemaObjectsPropertiesAdd(ctx *middleware.Context, handler SchemaObjectsPropertiesAddHandler) *SchemaObjectsPropertiesAdd { + return 
&SchemaObjectsPropertiesAdd{Context: ctx, Handler: handler} +} + +/* + SchemaObjectsPropertiesAdd swagger:route POST /schema/{className}/properties schema schemaObjectsPropertiesAdd + +Add a property to an Object class. +*/ +type SchemaObjectsPropertiesAdd struct { + Context *middleware.Context + Handler SchemaObjectsPropertiesAddHandler +} + +func (o *SchemaObjectsPropertiesAdd) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewSchemaObjectsPropertiesAddParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_properties_add_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_properties_add_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..3dde0a933c3eda654760ee52d85493f123578020 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_properties_add_parameters.go @@ -0,0 +1,120 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewSchemaObjectsPropertiesAddParams creates a new SchemaObjectsPropertiesAddParams object +// +// There are no default values defined in the spec. +func NewSchemaObjectsPropertiesAddParams() SchemaObjectsPropertiesAddParams { + + return SchemaObjectsPropertiesAddParams{} +} + +// SchemaObjectsPropertiesAddParams contains all the bound params for the schema objects properties add operation +// typically these are obtained from a http.Request +// +// swagger:parameters schema.objects.properties.add +type SchemaObjectsPropertiesAddParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.Property + /* + Required: true + In: path + */ + ClassName string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewSchemaObjectsPropertiesAddParams() beforehand. 
+func (o *SchemaObjectsPropertiesAddParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.Property + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. 
+func (o *SchemaObjectsPropertiesAddParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_properties_add_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_properties_add_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..9b342343f6c5e97e1136a3ad07003a29c480b728 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_properties_add_responses.go @@ -0,0 +1,230 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsPropertiesAddOKCode is the HTTP code returned for type SchemaObjectsPropertiesAddOK +const SchemaObjectsPropertiesAddOKCode int = 200 + +/* +SchemaObjectsPropertiesAddOK Added the property. 
+ +swagger:response schemaObjectsPropertiesAddOK +*/ +type SchemaObjectsPropertiesAddOK struct { + + /* + In: Body + */ + Payload *models.Property `json:"body,omitempty"` +} + +// NewSchemaObjectsPropertiesAddOK creates SchemaObjectsPropertiesAddOK with default headers values +func NewSchemaObjectsPropertiesAddOK() *SchemaObjectsPropertiesAddOK { + + return &SchemaObjectsPropertiesAddOK{} +} + +// WithPayload adds the payload to the schema objects properties add o k response +func (o *SchemaObjectsPropertiesAddOK) WithPayload(payload *models.Property) *SchemaObjectsPropertiesAddOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects properties add o k response +func (o *SchemaObjectsPropertiesAddOK) SetPayload(payload *models.Property) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsPropertiesAddOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsPropertiesAddUnauthorizedCode is the HTTP code returned for type SchemaObjectsPropertiesAddUnauthorized +const SchemaObjectsPropertiesAddUnauthorizedCode int = 401 + +/* +SchemaObjectsPropertiesAddUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response schemaObjectsPropertiesAddUnauthorized +*/ +type SchemaObjectsPropertiesAddUnauthorized struct { +} + +// NewSchemaObjectsPropertiesAddUnauthorized creates SchemaObjectsPropertiesAddUnauthorized with default headers values +func NewSchemaObjectsPropertiesAddUnauthorized() *SchemaObjectsPropertiesAddUnauthorized { + + return &SchemaObjectsPropertiesAddUnauthorized{} +} + +// WriteResponse to the client +func (o *SchemaObjectsPropertiesAddUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// SchemaObjectsPropertiesAddForbiddenCode is the HTTP code returned for type SchemaObjectsPropertiesAddForbidden +const SchemaObjectsPropertiesAddForbiddenCode int = 403 + +/* +SchemaObjectsPropertiesAddForbidden Forbidden + +swagger:response schemaObjectsPropertiesAddForbidden +*/ +type SchemaObjectsPropertiesAddForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsPropertiesAddForbidden creates SchemaObjectsPropertiesAddForbidden with default headers values +func NewSchemaObjectsPropertiesAddForbidden() *SchemaObjectsPropertiesAddForbidden { + + return &SchemaObjectsPropertiesAddForbidden{} +} + +// WithPayload adds the payload to the schema objects properties add forbidden response +func (o *SchemaObjectsPropertiesAddForbidden) WithPayload(payload *models.ErrorResponse) *SchemaObjectsPropertiesAddForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects properties add forbidden response +func (o *SchemaObjectsPropertiesAddForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsPropertiesAddForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + 
payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsPropertiesAddUnprocessableEntityCode is the HTTP code returned for type SchemaObjectsPropertiesAddUnprocessableEntity +const SchemaObjectsPropertiesAddUnprocessableEntityCode int = 422 + +/* +SchemaObjectsPropertiesAddUnprocessableEntity Invalid property. + +swagger:response schemaObjectsPropertiesAddUnprocessableEntity +*/ +type SchemaObjectsPropertiesAddUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsPropertiesAddUnprocessableEntity creates SchemaObjectsPropertiesAddUnprocessableEntity with default headers values +func NewSchemaObjectsPropertiesAddUnprocessableEntity() *SchemaObjectsPropertiesAddUnprocessableEntity { + + return &SchemaObjectsPropertiesAddUnprocessableEntity{} +} + +// WithPayload adds the payload to the schema objects properties add unprocessable entity response +func (o *SchemaObjectsPropertiesAddUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *SchemaObjectsPropertiesAddUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects properties add unprocessable entity response +func (o *SchemaObjectsPropertiesAddUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsPropertiesAddUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsPropertiesAddInternalServerErrorCode is the HTTP code returned for type SchemaObjectsPropertiesAddInternalServerError +const 
SchemaObjectsPropertiesAddInternalServerErrorCode int = 500 + +/* +SchemaObjectsPropertiesAddInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response schemaObjectsPropertiesAddInternalServerError +*/ +type SchemaObjectsPropertiesAddInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsPropertiesAddInternalServerError creates SchemaObjectsPropertiesAddInternalServerError with default headers values +func NewSchemaObjectsPropertiesAddInternalServerError() *SchemaObjectsPropertiesAddInternalServerError { + + return &SchemaObjectsPropertiesAddInternalServerError{} +} + +// WithPayload adds the payload to the schema objects properties add internal server error response +func (o *SchemaObjectsPropertiesAddInternalServerError) WithPayload(payload *models.ErrorResponse) *SchemaObjectsPropertiesAddInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects properties add internal server error response +func (o *SchemaObjectsPropertiesAddInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsPropertiesAddInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_properties_add_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_properties_add_urlbuilder.go new file mode 100644 index 
0000000000000000000000000000000000000000..e3ae4f038f42086d37d9f6910773b8bc7e9a5de5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_properties_add_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// SchemaObjectsPropertiesAddURL generates an URL for the schema objects properties add operation +type SchemaObjectsPropertiesAddURL struct { + ClassName string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *SchemaObjectsPropertiesAddURL) WithBasePath(bp string) *SchemaObjectsPropertiesAddURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *SchemaObjectsPropertiesAddURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *SchemaObjectsPropertiesAddURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/schema/{className}/properties" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on SchemaObjectsPropertiesAddURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *SchemaObjectsPropertiesAddURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *SchemaObjectsPropertiesAddURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *SchemaObjectsPropertiesAddURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on SchemaObjectsPropertiesAddURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on SchemaObjectsPropertiesAddURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *SchemaObjectsPropertiesAddURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_get.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_get.go new file mode 100644 index 0000000000000000000000000000000000000000..5a975c5505094916fd1656cd5fd4eaae7c50556e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_get.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsShardsGetHandlerFunc turns a function with the right signature into a schema objects shards get handler +type SchemaObjectsShardsGetHandlerFunc func(SchemaObjectsShardsGetParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn SchemaObjectsShardsGetHandlerFunc) Handle(params SchemaObjectsShardsGetParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// SchemaObjectsShardsGetHandler interface for that can handle valid schema objects shards get params +type SchemaObjectsShardsGetHandler interface { + Handle(SchemaObjectsShardsGetParams, *models.Principal) middleware.Responder +} + +// NewSchemaObjectsShardsGet creates a new http.Handler for the schema objects shards get operation +func NewSchemaObjectsShardsGet(ctx *middleware.Context, handler SchemaObjectsShardsGetHandler) *SchemaObjectsShardsGet { + return &SchemaObjectsShardsGet{Context: ctx, Handler: handler} +} + +/* + 
SchemaObjectsShardsGet swagger:route GET /schema/{className}/shards schema schemaObjectsShardsGet + +# Get the shards status of an Object class + +Get the status of every shard in the cluster. +*/ +type SchemaObjectsShardsGet struct { + Context *middleware.Context + Handler SchemaObjectsShardsGetHandler +} + +func (o *SchemaObjectsShardsGet) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewSchemaObjectsShardsGetParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_get_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..ec1d11c2a95c0a6c720b7133fd27513986bebfdb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_get_parameters.go @@ -0,0 +1,112 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewSchemaObjectsShardsGetParams creates a new SchemaObjectsShardsGetParams object +// +// There are no default values defined in the spec. +func NewSchemaObjectsShardsGetParams() SchemaObjectsShardsGetParams { + + return SchemaObjectsShardsGetParams{} +} + +// SchemaObjectsShardsGetParams contains all the bound params for the schema objects shards get operation +// typically these are obtained from a http.Request +// +// swagger:parameters schema.objects.shards.get +type SchemaObjectsShardsGetParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClassName string + /* + In: query + */ + Tenant *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewSchemaObjectsShardsGetParams() beforehand. +func (o *SchemaObjectsShardsGetParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + qTenant, qhkTenant, _ := qs.GetOK("tenant") + if err := o.bindTenant(qTenant, qhkTenant, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. +func (o *SchemaObjectsShardsGetParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindTenant binds and validates parameter Tenant from query. +func (o *SchemaObjectsShardsGetParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + return nil + } + o.Tenant = &raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_get_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..daf843c10064884bbf1dd1c53ae14943cb0d77bf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_get_responses.go @@ -0,0 +1,233 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsShardsGetOKCode is the HTTP code returned for type SchemaObjectsShardsGetOK +const SchemaObjectsShardsGetOKCode int = 200 + +/* +SchemaObjectsShardsGetOK Found the status of the shards, returned as body + +swagger:response schemaObjectsShardsGetOK +*/ +type SchemaObjectsShardsGetOK struct { + + /* + In: Body + */ + Payload models.ShardStatusList `json:"body,omitempty"` +} + +// NewSchemaObjectsShardsGetOK creates SchemaObjectsShardsGetOK with default headers values +func NewSchemaObjectsShardsGetOK() *SchemaObjectsShardsGetOK { + + return &SchemaObjectsShardsGetOK{} +} + +// WithPayload adds the payload to the schema objects shards get o k response +func (o *SchemaObjectsShardsGetOK) WithPayload(payload models.ShardStatusList) *SchemaObjectsShardsGetOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects shards get o k response +func (o *SchemaObjectsShardsGetOK) SetPayload(payload models.ShardStatusList) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsShardsGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = models.ShardStatusList{} + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// SchemaObjectsShardsGetUnauthorizedCode is the HTTP code returned for type SchemaObjectsShardsGetUnauthorized +const SchemaObjectsShardsGetUnauthorizedCode int = 401 + +/* +SchemaObjectsShardsGetUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response schemaObjectsShardsGetUnauthorized +*/ +type SchemaObjectsShardsGetUnauthorized struct { +} + +// NewSchemaObjectsShardsGetUnauthorized creates SchemaObjectsShardsGetUnauthorized with default headers values +func NewSchemaObjectsShardsGetUnauthorized() *SchemaObjectsShardsGetUnauthorized { + + return &SchemaObjectsShardsGetUnauthorized{} +} + +// WriteResponse to the client +func (o *SchemaObjectsShardsGetUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// SchemaObjectsShardsGetForbiddenCode is the HTTP code returned for type SchemaObjectsShardsGetForbidden +const SchemaObjectsShardsGetForbiddenCode int = 403 + +/* +SchemaObjectsShardsGetForbidden Forbidden + +swagger:response schemaObjectsShardsGetForbidden +*/ +type SchemaObjectsShardsGetForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsShardsGetForbidden creates SchemaObjectsShardsGetForbidden with default headers values +func NewSchemaObjectsShardsGetForbidden() *SchemaObjectsShardsGetForbidden { + + return &SchemaObjectsShardsGetForbidden{} +} + +// WithPayload adds the payload to the schema objects shards get forbidden response +func (o *SchemaObjectsShardsGetForbidden) WithPayload(payload *models.ErrorResponse) *SchemaObjectsShardsGetForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects shards get forbidden response +func (o *SchemaObjectsShardsGetForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsShardsGetForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the 
recovery middleware deal with this + } + } +} + +// SchemaObjectsShardsGetNotFoundCode is the HTTP code returned for type SchemaObjectsShardsGetNotFound +const SchemaObjectsShardsGetNotFoundCode int = 404 + +/* +SchemaObjectsShardsGetNotFound This class does not exist + +swagger:response schemaObjectsShardsGetNotFound +*/ +type SchemaObjectsShardsGetNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsShardsGetNotFound creates SchemaObjectsShardsGetNotFound with default headers values +func NewSchemaObjectsShardsGetNotFound() *SchemaObjectsShardsGetNotFound { + + return &SchemaObjectsShardsGetNotFound{} +} + +// WithPayload adds the payload to the schema objects shards get not found response +func (o *SchemaObjectsShardsGetNotFound) WithPayload(payload *models.ErrorResponse) *SchemaObjectsShardsGetNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects shards get not found response +func (o *SchemaObjectsShardsGetNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsShardsGetNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsShardsGetInternalServerErrorCode is the HTTP code returned for type SchemaObjectsShardsGetInternalServerError +const SchemaObjectsShardsGetInternalServerErrorCode int = 500 + +/* +SchemaObjectsShardsGetInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response schemaObjectsShardsGetInternalServerError +*/ +type SchemaObjectsShardsGetInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsShardsGetInternalServerError creates SchemaObjectsShardsGetInternalServerError with default headers values +func NewSchemaObjectsShardsGetInternalServerError() *SchemaObjectsShardsGetInternalServerError { + + return &SchemaObjectsShardsGetInternalServerError{} +} + +// WithPayload adds the payload to the schema objects shards get internal server error response +func (o *SchemaObjectsShardsGetInternalServerError) WithPayload(payload *models.ErrorResponse) *SchemaObjectsShardsGetInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects shards get internal server error response +func (o *SchemaObjectsShardsGetInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsShardsGetInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_get_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_get_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..672cdb3feed63efbce405aeff04e102d021e176a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_get_urlbuilder.go @@ -0,0 +1,124 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// 
\_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// SchemaObjectsShardsGetURL generates an URL for the schema objects shards get operation +type SchemaObjectsShardsGetURL struct { + ClassName string + + Tenant *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *SchemaObjectsShardsGetURL) WithBasePath(bp string) *SchemaObjectsShardsGetURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *SchemaObjectsShardsGetURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *SchemaObjectsShardsGetURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/schema/{className}/shards" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on SchemaObjectsShardsGetURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var tenantQ string + if o.Tenant != nil { + tenantQ = *o.Tenant + } + if tenantQ != "" { + qs.Set("tenant", tenantQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *SchemaObjectsShardsGetURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *SchemaObjectsShardsGetURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *SchemaObjectsShardsGetURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on SchemaObjectsShardsGetURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on SchemaObjectsShardsGetURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *SchemaObjectsShardsGetURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, 
host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_update.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_update.go new file mode 100644 index 0000000000000000000000000000000000000000..55f5cc0527a4fa55fd41235df2572b0e6ee8a704 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_update.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsShardsUpdateHandlerFunc turns a function with the right signature into a schema objects shards update handler +type SchemaObjectsShardsUpdateHandlerFunc func(SchemaObjectsShardsUpdateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn SchemaObjectsShardsUpdateHandlerFunc) Handle(params SchemaObjectsShardsUpdateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// SchemaObjectsShardsUpdateHandler interface for that can handle valid schema objects shards update params +type SchemaObjectsShardsUpdateHandler interface { + Handle(SchemaObjectsShardsUpdateParams, *models.Principal) middleware.Responder +} + +// NewSchemaObjectsShardsUpdate creates a new http.Handler for the schema objects shards update operation +func 
NewSchemaObjectsShardsUpdate(ctx *middleware.Context, handler SchemaObjectsShardsUpdateHandler) *SchemaObjectsShardsUpdate { + return &SchemaObjectsShardsUpdate{Context: ctx, Handler: handler} +} + +/* + SchemaObjectsShardsUpdate swagger:route PUT /schema/{className}/shards/{shardName} schema schemaObjectsShardsUpdate + +Update a shard status. + +Update a shard status for a collection. For example, a shard may have been marked as `READONLY` because its disk was full. After providing more disk space, use this endpoint to set the shard status to `READY` again. There is also a convenience function in each client to set the status of all shards of a collection. +*/ +type SchemaObjectsShardsUpdate struct { + Context *middleware.Context + Handler SchemaObjectsShardsUpdateHandler +} + +func (o *SchemaObjectsShardsUpdate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewSchemaObjectsShardsUpdateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_update_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_update_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..e82d5f783a4f3f543e2cf0cd99cf68e2b6d8c3b3 --- 
/dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_update_parameters.go @@ -0,0 +1,144 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewSchemaObjectsShardsUpdateParams creates a new SchemaObjectsShardsUpdateParams object +// +// There are no default values defined in the spec. +func NewSchemaObjectsShardsUpdateParams() SchemaObjectsShardsUpdateParams { + + return SchemaObjectsShardsUpdateParams{} +} + +// SchemaObjectsShardsUpdateParams contains all the bound params for the schema objects shards update operation +// typically these are obtained from a http.Request +// +// swagger:parameters schema.objects.shards.update +type SchemaObjectsShardsUpdateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body *models.ShardStatus + /* + Required: true + In: path + */ + ClassName string + /* + Required: true + In: path + */ + ShardName string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. 
+// +// To ensure default values, the struct must have been initialized with NewSchemaObjectsShardsUpdateParams() beforehand. +func (o *SchemaObjectsShardsUpdateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.ShardStatus + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + rShardName, rhkShardName, _ := route.Params.GetOK("shardName") + if err := o.bindShardName(rShardName, rhkShardName, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. +func (o *SchemaObjectsShardsUpdateParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindShardName binds and validates parameter ShardName from path. 
+func (o *SchemaObjectsShardsUpdateParams) bindShardName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ShardName = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_update_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_update_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..1f23a317a7e7aaa7c387ab9f21a9d88ece53e384 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_update_responses.go @@ -0,0 +1,275 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsShardsUpdateOKCode is the HTTP code returned for type SchemaObjectsShardsUpdateOK +const SchemaObjectsShardsUpdateOKCode int = 200 + +/* +SchemaObjectsShardsUpdateOK Shard status was updated successfully + +swagger:response schemaObjectsShardsUpdateOK +*/ +type SchemaObjectsShardsUpdateOK struct { + + /* + In: Body + */ + Payload *models.ShardStatus `json:"body,omitempty"` +} + +// NewSchemaObjectsShardsUpdateOK creates SchemaObjectsShardsUpdateOK with default headers values +func NewSchemaObjectsShardsUpdateOK() *SchemaObjectsShardsUpdateOK { + + return &SchemaObjectsShardsUpdateOK{} +} + +// WithPayload adds the payload to the schema objects shards update o k response +func (o *SchemaObjectsShardsUpdateOK) WithPayload(payload *models.ShardStatus) *SchemaObjectsShardsUpdateOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects shards update o k response +func (o *SchemaObjectsShardsUpdateOK) SetPayload(payload *models.ShardStatus) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsShardsUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsShardsUpdateUnauthorizedCode is the HTTP code returned for type SchemaObjectsShardsUpdateUnauthorized +const SchemaObjectsShardsUpdateUnauthorizedCode int = 401 + +/* +SchemaObjectsShardsUpdateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response schemaObjectsShardsUpdateUnauthorized +*/ +type SchemaObjectsShardsUpdateUnauthorized struct { +} + +// NewSchemaObjectsShardsUpdateUnauthorized creates SchemaObjectsShardsUpdateUnauthorized with default headers values +func NewSchemaObjectsShardsUpdateUnauthorized() *SchemaObjectsShardsUpdateUnauthorized { + + return &SchemaObjectsShardsUpdateUnauthorized{} +} + +// WriteResponse to the client +func (o *SchemaObjectsShardsUpdateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// SchemaObjectsShardsUpdateForbiddenCode is the HTTP code returned for type SchemaObjectsShardsUpdateForbidden +const SchemaObjectsShardsUpdateForbiddenCode int = 403 + +/* +SchemaObjectsShardsUpdateForbidden Forbidden + +swagger:response schemaObjectsShardsUpdateForbidden +*/ +type SchemaObjectsShardsUpdateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsShardsUpdateForbidden creates SchemaObjectsShardsUpdateForbidden with default headers values +func NewSchemaObjectsShardsUpdateForbidden() *SchemaObjectsShardsUpdateForbidden { + + return &SchemaObjectsShardsUpdateForbidden{} +} + +// WithPayload adds the payload to the schema objects shards update forbidden response +func (o *SchemaObjectsShardsUpdateForbidden) WithPayload(payload *models.ErrorResponse) *SchemaObjectsShardsUpdateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects shards update forbidden response +func (o *SchemaObjectsShardsUpdateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsShardsUpdateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err 
:= producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsShardsUpdateNotFoundCode is the HTTP code returned for type SchemaObjectsShardsUpdateNotFound +const SchemaObjectsShardsUpdateNotFoundCode int = 404 + +/* +SchemaObjectsShardsUpdateNotFound Shard to be updated does not exist + +swagger:response schemaObjectsShardsUpdateNotFound +*/ +type SchemaObjectsShardsUpdateNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsShardsUpdateNotFound creates SchemaObjectsShardsUpdateNotFound with default headers values +func NewSchemaObjectsShardsUpdateNotFound() *SchemaObjectsShardsUpdateNotFound { + + return &SchemaObjectsShardsUpdateNotFound{} +} + +// WithPayload adds the payload to the schema objects shards update not found response +func (o *SchemaObjectsShardsUpdateNotFound) WithPayload(payload *models.ErrorResponse) *SchemaObjectsShardsUpdateNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects shards update not found response +func (o *SchemaObjectsShardsUpdateNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsShardsUpdateNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsShardsUpdateUnprocessableEntityCode is the HTTP code returned for type SchemaObjectsShardsUpdateUnprocessableEntity +const SchemaObjectsShardsUpdateUnprocessableEntityCode int = 422 + +/* +SchemaObjectsShardsUpdateUnprocessableEntity Invalid update attempt + +swagger:response schemaObjectsShardsUpdateUnprocessableEntity +*/ +type SchemaObjectsShardsUpdateUnprocessableEntity struct { + 
+ /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsShardsUpdateUnprocessableEntity creates SchemaObjectsShardsUpdateUnprocessableEntity with default headers values +func NewSchemaObjectsShardsUpdateUnprocessableEntity() *SchemaObjectsShardsUpdateUnprocessableEntity { + + return &SchemaObjectsShardsUpdateUnprocessableEntity{} +} + +// WithPayload adds the payload to the schema objects shards update unprocessable entity response +func (o *SchemaObjectsShardsUpdateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *SchemaObjectsShardsUpdateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects shards update unprocessable entity response +func (o *SchemaObjectsShardsUpdateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsShardsUpdateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsShardsUpdateInternalServerErrorCode is the HTTP code returned for type SchemaObjectsShardsUpdateInternalServerError +const SchemaObjectsShardsUpdateInternalServerErrorCode int = 500 + +/* +SchemaObjectsShardsUpdateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response schemaObjectsShardsUpdateInternalServerError +*/ +type SchemaObjectsShardsUpdateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsShardsUpdateInternalServerError creates SchemaObjectsShardsUpdateInternalServerError with default headers values +func NewSchemaObjectsShardsUpdateInternalServerError() *SchemaObjectsShardsUpdateInternalServerError { + + return &SchemaObjectsShardsUpdateInternalServerError{} +} + +// WithPayload adds the payload to the schema objects shards update internal server error response +func (o *SchemaObjectsShardsUpdateInternalServerError) WithPayload(payload *models.ErrorResponse) *SchemaObjectsShardsUpdateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects shards update internal server error response +func (o *SchemaObjectsShardsUpdateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsShardsUpdateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_update_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_update_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..d3d5d65017618ff2740ee938d830c3dd465c9da8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_shards_update_urlbuilder.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// 
\ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// SchemaObjectsShardsUpdateURL generates an URL for the schema objects shards update operation +type SchemaObjectsShardsUpdateURL struct { + ClassName string + ShardName string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *SchemaObjectsShardsUpdateURL) WithBasePath(bp string) *SchemaObjectsShardsUpdateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *SchemaObjectsShardsUpdateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *SchemaObjectsShardsUpdateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/schema/{className}/shards/{shardName}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on SchemaObjectsShardsUpdateURL") + } + + shardName := o.ShardName + if shardName != "" { + _path = strings.Replace(_path, "{shardName}", shardName, -1) + } else { + return nil, errors.New("shardName is required on SchemaObjectsShardsUpdateURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *SchemaObjectsShardsUpdateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *SchemaObjectsShardsUpdateURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *SchemaObjectsShardsUpdateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on SchemaObjectsShardsUpdateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on SchemaObjectsShardsUpdateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *SchemaObjectsShardsUpdateURL) 
StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_update.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_update.go new file mode 100644 index 0000000000000000000000000000000000000000..c6d52d27541623545bdb95e63a6fac706c828e48 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_update.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsUpdateHandlerFunc turns a function with the right signature into a schema objects update handler +type SchemaObjectsUpdateHandlerFunc func(SchemaObjectsUpdateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn SchemaObjectsUpdateHandlerFunc) Handle(params SchemaObjectsUpdateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// SchemaObjectsUpdateHandler interface for that can handle valid schema objects update params +type SchemaObjectsUpdateHandler interface { + Handle(SchemaObjectsUpdateParams, *models.Principal) middleware.Responder +} + +// NewSchemaObjectsUpdate creates a new http.Handler for the schema objects update operation +func NewSchemaObjectsUpdate(ctx 
*middleware.Context, handler SchemaObjectsUpdateHandler) *SchemaObjectsUpdate { + return &SchemaObjectsUpdate{Context: ctx, Handler: handler} +} + +/* + SchemaObjectsUpdate swagger:route PUT /schema/{className} schema schemaObjectsUpdate + +# Update settings of an existing schema class + +Add a property to an existing collection. +*/ +type SchemaObjectsUpdate struct { + Context *middleware.Context + Handler SchemaObjectsUpdateHandler +} + +func (o *SchemaObjectsUpdate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewSchemaObjectsUpdateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_update_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_update_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..c066be1084a193936cc710fbb44e7decc694b169 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_update_parameters.go @@ -0,0 +1,120 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate 
B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewSchemaObjectsUpdateParams creates a new SchemaObjectsUpdateParams object +// +// There are no default values defined in the spec. +func NewSchemaObjectsUpdateParams() SchemaObjectsUpdateParams { + + return SchemaObjectsUpdateParams{} +} + +// SchemaObjectsUpdateParams contains all the bound params for the schema objects update operation +// typically these are obtained from a http.Request +// +// swagger:parameters schema.objects.update +type SchemaObjectsUpdateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClassName string + /* + Required: true + In: body + */ + ObjectClass *models.Class +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewSchemaObjectsUpdateParams() beforehand. 
+func (o *SchemaObjectsUpdateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.Class + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("objectClass", "body", "")) + } else { + res = append(res, errors.NewParseError("objectClass", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.ObjectClass = &body + } + } + } else { + res = append(res, errors.Required("objectClass", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. 
+func (o *SchemaObjectsUpdateParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_update_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_update_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..42410b2457cae44729deb5171ed2c918fb29539a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_update_responses.go @@ -0,0 +1,275 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// SchemaObjectsUpdateOKCode is the HTTP code returned for type SchemaObjectsUpdateOK +const SchemaObjectsUpdateOKCode int = 200 + +/* +SchemaObjectsUpdateOK Class was updated successfully + +swagger:response schemaObjectsUpdateOK +*/ +type SchemaObjectsUpdateOK struct { + + /* + In: Body + */ + Payload *models.Class `json:"body,omitempty"` +} + +// NewSchemaObjectsUpdateOK creates SchemaObjectsUpdateOK with default headers values +func NewSchemaObjectsUpdateOK() *SchemaObjectsUpdateOK { + + return &SchemaObjectsUpdateOK{} +} + +// WithPayload adds the payload to the schema objects update o k response +func (o *SchemaObjectsUpdateOK) WithPayload(payload *models.Class) *SchemaObjectsUpdateOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects update o k response +func (o *SchemaObjectsUpdateOK) SetPayload(payload *models.Class) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsUpdateUnauthorizedCode is the HTTP code returned for type SchemaObjectsUpdateUnauthorized +const SchemaObjectsUpdateUnauthorizedCode int = 401 + +/* +SchemaObjectsUpdateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response schemaObjectsUpdateUnauthorized +*/ +type SchemaObjectsUpdateUnauthorized struct { +} + +// NewSchemaObjectsUpdateUnauthorized creates SchemaObjectsUpdateUnauthorized with default headers values +func NewSchemaObjectsUpdateUnauthorized() *SchemaObjectsUpdateUnauthorized { + + return &SchemaObjectsUpdateUnauthorized{} +} + +// WriteResponse to the client +func (o *SchemaObjectsUpdateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// SchemaObjectsUpdateForbiddenCode is the HTTP code returned for type SchemaObjectsUpdateForbidden +const SchemaObjectsUpdateForbiddenCode int = 403 + +/* +SchemaObjectsUpdateForbidden Forbidden + +swagger:response schemaObjectsUpdateForbidden +*/ +type SchemaObjectsUpdateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsUpdateForbidden creates SchemaObjectsUpdateForbidden with default headers values +func NewSchemaObjectsUpdateForbidden() *SchemaObjectsUpdateForbidden { + + return &SchemaObjectsUpdateForbidden{} +} + +// WithPayload adds the payload to the schema objects update forbidden response +func (o *SchemaObjectsUpdateForbidden) WithPayload(payload *models.ErrorResponse) *SchemaObjectsUpdateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects update forbidden response +func (o *SchemaObjectsUpdateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsUpdateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// 
SchemaObjectsUpdateNotFoundCode is the HTTP code returned for type SchemaObjectsUpdateNotFound +const SchemaObjectsUpdateNotFoundCode int = 404 + +/* +SchemaObjectsUpdateNotFound Class to be updated does not exist + +swagger:response schemaObjectsUpdateNotFound +*/ +type SchemaObjectsUpdateNotFound struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsUpdateNotFound creates SchemaObjectsUpdateNotFound with default headers values +func NewSchemaObjectsUpdateNotFound() *SchemaObjectsUpdateNotFound { + + return &SchemaObjectsUpdateNotFound{} +} + +// WithPayload adds the payload to the schema objects update not found response +func (o *SchemaObjectsUpdateNotFound) WithPayload(payload *models.ErrorResponse) *SchemaObjectsUpdateNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects update not found response +func (o *SchemaObjectsUpdateNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsUpdateNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsUpdateUnprocessableEntityCode is the HTTP code returned for type SchemaObjectsUpdateUnprocessableEntity +const SchemaObjectsUpdateUnprocessableEntityCode int = 422 + +/* +SchemaObjectsUpdateUnprocessableEntity Invalid update attempt + +swagger:response schemaObjectsUpdateUnprocessableEntity +*/ +type SchemaObjectsUpdateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsUpdateUnprocessableEntity creates SchemaObjectsUpdateUnprocessableEntity with default headers values +func NewSchemaObjectsUpdateUnprocessableEntity() 
*SchemaObjectsUpdateUnprocessableEntity { + + return &SchemaObjectsUpdateUnprocessableEntity{} +} + +// WithPayload adds the payload to the schema objects update unprocessable entity response +func (o *SchemaObjectsUpdateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *SchemaObjectsUpdateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects update unprocessable entity response +func (o *SchemaObjectsUpdateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsUpdateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// SchemaObjectsUpdateInternalServerErrorCode is the HTTP code returned for type SchemaObjectsUpdateInternalServerError +const SchemaObjectsUpdateInternalServerErrorCode int = 500 + +/* +SchemaObjectsUpdateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response schemaObjectsUpdateInternalServerError +*/ +type SchemaObjectsUpdateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewSchemaObjectsUpdateInternalServerError creates SchemaObjectsUpdateInternalServerError with default headers values +func NewSchemaObjectsUpdateInternalServerError() *SchemaObjectsUpdateInternalServerError { + + return &SchemaObjectsUpdateInternalServerError{} +} + +// WithPayload adds the payload to the schema objects update internal server error response +func (o *SchemaObjectsUpdateInternalServerError) WithPayload(payload *models.ErrorResponse) *SchemaObjectsUpdateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the schema objects update internal server error response +func (o *SchemaObjectsUpdateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SchemaObjectsUpdateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_update_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_update_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..0740a3ae31186f3d4881258f3dfa8feb81087736 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/schema_objects_update_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright 
© 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// SchemaObjectsUpdateURL generates an URL for the schema objects update operation +type SchemaObjectsUpdateURL struct { + ClassName string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *SchemaObjectsUpdateURL) WithBasePath(bp string) *SchemaObjectsUpdateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *SchemaObjectsUpdateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *SchemaObjectsUpdateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/schema/{className}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on SchemaObjectsUpdateURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *SchemaObjectsUpdateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *SchemaObjectsUpdateURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *SchemaObjectsUpdateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on SchemaObjectsUpdateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on SchemaObjectsUpdateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *SchemaObjectsUpdateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenant_exists.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenant_exists.go new file mode 100644 index 0000000000000000000000000000000000000000..727fcf370867d23dae5cfc2ee4415835ad09a059 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenant_exists.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantExistsHandlerFunc turns a function with the right signature into a tenant exists handler +type TenantExistsHandlerFunc func(TenantExistsParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn TenantExistsHandlerFunc) Handle(params TenantExistsParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// TenantExistsHandler interface for that can handle valid tenant exists params +type TenantExistsHandler interface { + Handle(TenantExistsParams, *models.Principal) middleware.Responder +} + +// NewTenantExists creates a new http.Handler for the tenant exists operation +func NewTenantExists(ctx *middleware.Context, handler TenantExistsHandler) *TenantExists { + return &TenantExists{Context: ctx, Handler: handler} +} + +/* + TenantExists swagger:route HEAD /schema/{className}/tenants/{tenantName} schema tenantExists + +# Check whether a tenant exists + +Check if a tenant exists for a specific class +*/ +type TenantExists struct { 
+ Context *middleware.Context + Handler TenantExistsHandler +} + +func (o *TenantExists) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewTenantExistsParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenant_exists_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenant_exists_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..fcab534bc51bcdb9f6b2ac7aced62278fb929b4a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenant_exists_parameters.go @@ -0,0 +1,146 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewTenantExistsParams creates a new TenantExistsParams object +// with the default values initialized. +func NewTenantExistsParams() TenantExistsParams { + + var ( + // initialize parameters with default values + + consistencyDefault = bool(true) + ) + + return TenantExistsParams{ + Consistency: &consistencyDefault, + } +} + +// TenantExistsParams contains all the bound params for the tenant exists operation +// typically these are obtained from a http.Request +// +// swagger:parameters tenant.exists +type TenantExistsParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClassName string + /*If consistency is true, the request will be proxied to the leader to ensure strong schema consistency + In: header + Default: true + */ + Consistency *bool + /* + Required: true + In: path + */ + TenantName string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewTenantExistsParams() beforehand. 
+func (o *TenantExistsParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + if err := o.bindConsistency(r.Header[http.CanonicalHeaderKey("consistency")], true, route.Formats); err != nil { + res = append(res, err) + } + + rTenantName, rhkTenantName, _ := route.Params.GetOK("tenantName") + if err := o.bindTenantName(rTenantName, rhkTenantName, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. +func (o *TenantExistsParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindConsistency binds and validates parameter Consistency from header. +func (o *TenantExistsParams) bindConsistency(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + + if raw == "" { // empty values pass all other validations + // Default values have been previously initialized by NewTenantExistsParams() + return nil + } + + value, err := swag.ConvertBool(raw) + if err != nil { + return errors.InvalidType("consistency", "header", "bool", raw) + } + o.Consistency = &value + + return nil +} + +// bindTenantName binds and validates parameter TenantName from path. 
+func (o *TenantExistsParams) bindTenantName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.TenantName = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenant_exists_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenant_exists_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..d0b01b1b5cce61f0ac489bd314ebfbcd8af08ff3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenant_exists_responses.go @@ -0,0 +1,235 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantExistsOKCode is the HTTP code returned for type TenantExistsOK +const TenantExistsOKCode int = 200 + +/* +TenantExistsOK The tenant exists in the specified class + +swagger:response tenantExistsOK +*/ +type TenantExistsOK struct { +} + +// NewTenantExistsOK creates TenantExistsOK with default headers values +func NewTenantExistsOK() *TenantExistsOK { + + return &TenantExistsOK{} +} + +// WriteResponse to the client +func (o *TenantExistsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// TenantExistsUnauthorizedCode is the HTTP code returned for type TenantExistsUnauthorized +const TenantExistsUnauthorizedCode int = 401 + +/* +TenantExistsUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response tenantExistsUnauthorized +*/ +type TenantExistsUnauthorized struct { +} + +// NewTenantExistsUnauthorized creates TenantExistsUnauthorized with default headers values +func NewTenantExistsUnauthorized() *TenantExistsUnauthorized { + + return &TenantExistsUnauthorized{} +} + +// WriteResponse to the client +func (o *TenantExistsUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// TenantExistsForbiddenCode is the HTTP code returned for type TenantExistsForbidden +const TenantExistsForbiddenCode int = 403 + +/* +TenantExistsForbidden Forbidden + +swagger:response tenantExistsForbidden +*/ +type TenantExistsForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantExistsForbidden creates TenantExistsForbidden with default headers values +func NewTenantExistsForbidden() *TenantExistsForbidden { + + return &TenantExistsForbidden{} +} + +// WithPayload adds the payload to the tenant exists forbidden response +func (o *TenantExistsForbidden) WithPayload(payload *models.ErrorResponse) *TenantExistsForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenant exists forbidden response +func (o *TenantExistsForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantExistsForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// TenantExistsNotFoundCode is the HTTP code returned for type TenantExistsNotFound +const TenantExistsNotFoundCode int = 404 + +/* +TenantExistsNotFound The tenant not found + +swagger:response 
tenantExistsNotFound +*/ +type TenantExistsNotFound struct { +} + +// NewTenantExistsNotFound creates TenantExistsNotFound with default headers values +func NewTenantExistsNotFound() *TenantExistsNotFound { + + return &TenantExistsNotFound{} +} + +// WriteResponse to the client +func (o *TenantExistsNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// TenantExistsUnprocessableEntityCode is the HTTP code returned for type TenantExistsUnprocessableEntity +const TenantExistsUnprocessableEntityCode int = 422 + +/* +TenantExistsUnprocessableEntity Invalid Tenant class + +swagger:response tenantExistsUnprocessableEntity +*/ +type TenantExistsUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantExistsUnprocessableEntity creates TenantExistsUnprocessableEntity with default headers values +func NewTenantExistsUnprocessableEntity() *TenantExistsUnprocessableEntity { + + return &TenantExistsUnprocessableEntity{} +} + +// WithPayload adds the payload to the tenant exists unprocessable entity response +func (o *TenantExistsUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *TenantExistsUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenant exists unprocessable entity response +func (o *TenantExistsUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantExistsUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// TenantExistsInternalServerErrorCode is the HTTP code returned for type 
TenantExistsInternalServerError +const TenantExistsInternalServerErrorCode int = 500 + +/* +TenantExistsInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response tenantExistsInternalServerError +*/ +type TenantExistsInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantExistsInternalServerError creates TenantExistsInternalServerError with default headers values +func NewTenantExistsInternalServerError() *TenantExistsInternalServerError { + + return &TenantExistsInternalServerError{} +} + +// WithPayload adds the payload to the tenant exists internal server error response +func (o *TenantExistsInternalServerError) WithPayload(payload *models.ErrorResponse) *TenantExistsInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenant exists internal server error response +func (o *TenantExistsInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantExistsInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenant_exists_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenant_exists_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..0c51e3a935abaa4f0d2797706e2a3fd27a70cebe --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenant_exists_urlbuilder.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ 
/\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// TenantExistsURL generates an URL for the tenant exists operation +type TenantExistsURL struct { + ClassName string + TenantName string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *TenantExistsURL) WithBasePath(bp string) *TenantExistsURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *TenantExistsURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *TenantExistsURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/schema/{className}/tenants/{tenantName}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on TenantExistsURL") + } + + tenantName := o.TenantName + if tenantName != "" { + _path = strings.Replace(_path, "{tenantName}", tenantName, -1) + } else { + return nil, errors.New("tenantName is required on TenantExistsURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *TenantExistsURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *TenantExistsURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *TenantExistsURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on TenantExistsURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on TenantExistsURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *TenantExistsURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_create.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_create.go new file mode 100644 index 0000000000000000000000000000000000000000..ab46c1ab838fcd61149b341783afa610cf3a8fef --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_create.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsCreateHandlerFunc turns a function with the right signature into a tenants create handler +type TenantsCreateHandlerFunc func(TenantsCreateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn TenantsCreateHandlerFunc) Handle(params TenantsCreateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// TenantsCreateHandler interface for that can handle valid tenants create params +type TenantsCreateHandler interface { + Handle(TenantsCreateParams, *models.Principal) middleware.Responder +} + +// NewTenantsCreate creates a new http.Handler for the tenants create operation +func NewTenantsCreate(ctx *middleware.Context, handler TenantsCreateHandler) *TenantsCreate { + return &TenantsCreate{Context: ctx, Handler: handler} +} + +/* + TenantsCreate swagger:route POST /schema/{className}/tenants schema tenantsCreate + +# Create 
a new tenant + +Create a new tenant for a collection. Multi-tenancy must be enabled in the collection definition. +*/ +type TenantsCreate struct { + Context *middleware.Context + Handler TenantsCreateHandler +} + +func (o *TenantsCreate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewTenantsCreateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_create_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_create_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..37a6d3e3f2b27f83b27c2c24f0a46f8a38d33335 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_create_parameters.go @@ -0,0 +1,121 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewTenantsCreateParams creates a new TenantsCreateParams object +// +// There are no default values defined in the spec. +func NewTenantsCreateParams() TenantsCreateParams { + + return TenantsCreateParams{} +} + +// TenantsCreateParams contains all the bound params for the tenants create operation +// typically these are obtained from a http.Request +// +// swagger:parameters tenants.create +type TenantsCreateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body []*models.Tenant + /* + Required: true + In: path + */ + ClassName string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewTenantsCreateParams() beforehand. 
+func (o *TenantsCreateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body []*models.Tenant + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + + // validate array of body objects + for i := range body { + if body[i] == nil { + continue + } + if err := body[i].Validate(route.Formats); err != nil { + res = append(res, err) + break + } + } + + if len(res) == 0 { + o.Body = body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. 
+func (o *TenantsCreateParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_create_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_create_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..f7098ed659412476f48da34bf330fd0baaa0bdcb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_create_responses.go @@ -0,0 +1,233 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsCreateOKCode is the HTTP code returned for type TenantsCreateOK +const TenantsCreateOKCode int = 200 + +/* +TenantsCreateOK Added new tenants to the specified class + +swagger:response tenantsCreateOK +*/ +type TenantsCreateOK struct { + + /* + In: Body + */ + Payload []*models.Tenant `json:"body,omitempty"` +} + +// NewTenantsCreateOK creates TenantsCreateOK with default headers values +func NewTenantsCreateOK() *TenantsCreateOK { + + return &TenantsCreateOK{} +} + +// WithPayload adds the payload to the tenants create o k response +func (o *TenantsCreateOK) WithPayload(payload []*models.Tenant) *TenantsCreateOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants create o k response +func (o *TenantsCreateOK) SetPayload(payload []*models.Tenant) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsCreateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = make([]*models.Tenant, 0, 50) + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// TenantsCreateUnauthorizedCode is the HTTP code returned for type TenantsCreateUnauthorized +const TenantsCreateUnauthorizedCode int = 401 + +/* +TenantsCreateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response tenantsCreateUnauthorized +*/ +type TenantsCreateUnauthorized struct { +} + +// NewTenantsCreateUnauthorized creates TenantsCreateUnauthorized with default headers values +func NewTenantsCreateUnauthorized() *TenantsCreateUnauthorized { + + return &TenantsCreateUnauthorized{} +} + +// WriteResponse to the client +func (o *TenantsCreateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// TenantsCreateForbiddenCode is the HTTP code returned for type TenantsCreateForbidden +const TenantsCreateForbiddenCode int = 403 + +/* +TenantsCreateForbidden Forbidden + +swagger:response tenantsCreateForbidden +*/ +type TenantsCreateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsCreateForbidden creates TenantsCreateForbidden with default headers values +func NewTenantsCreateForbidden() *TenantsCreateForbidden { + + return &TenantsCreateForbidden{} +} + +// WithPayload adds the payload to the tenants create forbidden response +func (o *TenantsCreateForbidden) WithPayload(payload *models.ErrorResponse) *TenantsCreateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants create forbidden response +func (o *TenantsCreateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsCreateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// TenantsCreateUnprocessableEntityCode is the HTTP code returned for type TenantsCreateUnprocessableEntity +const TenantsCreateUnprocessableEntityCode int = 422 + +/* 
+TenantsCreateUnprocessableEntity Invalid Tenant class + +swagger:response tenantsCreateUnprocessableEntity +*/ +type TenantsCreateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsCreateUnprocessableEntity creates TenantsCreateUnprocessableEntity with default headers values +func NewTenantsCreateUnprocessableEntity() *TenantsCreateUnprocessableEntity { + + return &TenantsCreateUnprocessableEntity{} +} + +// WithPayload adds the payload to the tenants create unprocessable entity response +func (o *TenantsCreateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *TenantsCreateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants create unprocessable entity response +func (o *TenantsCreateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsCreateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// TenantsCreateInternalServerErrorCode is the HTTP code returned for type TenantsCreateInternalServerError +const TenantsCreateInternalServerErrorCode int = 500 + +/* +TenantsCreateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response tenantsCreateInternalServerError +*/ +type TenantsCreateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsCreateInternalServerError creates TenantsCreateInternalServerError with default headers values +func NewTenantsCreateInternalServerError() *TenantsCreateInternalServerError { + + return &TenantsCreateInternalServerError{} +} + +// WithPayload adds the payload to the tenants create internal server error response +func (o *TenantsCreateInternalServerError) WithPayload(payload *models.ErrorResponse) *TenantsCreateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants create internal server error response +func (o *TenantsCreateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsCreateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_create_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_create_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..f3d6e9f5bc6a55b9e624eeac2c8883d7762374ac --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_create_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// TenantsCreateURL generates an URL for the tenants create operation +type TenantsCreateURL struct { + ClassName string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *TenantsCreateURL) WithBasePath(bp string) *TenantsCreateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *TenantsCreateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *TenantsCreateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/schema/{className}/tenants" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on TenantsCreateURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *TenantsCreateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *TenantsCreateURL) String() string { + return 
o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *TenantsCreateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on TenantsCreateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on TenantsCreateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *TenantsCreateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_delete.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..d209d347e6debefd333b1744389c00bdcd91e281 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_delete.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsDeleteHandlerFunc turns a function with the right signature into a tenants delete handler +type TenantsDeleteHandlerFunc func(TenantsDeleteParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn TenantsDeleteHandlerFunc) Handle(params TenantsDeleteParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// TenantsDeleteHandler interface for that can handle valid tenants delete params +type TenantsDeleteHandler interface { + Handle(TenantsDeleteParams, *models.Principal) middleware.Responder +} + +// NewTenantsDelete creates a new http.Handler for the tenants delete operation +func NewTenantsDelete(ctx *middleware.Context, handler TenantsDeleteHandler) *TenantsDelete { + return &TenantsDelete{Context: ctx, Handler: handler} +} + +/* + TenantsDelete swagger:route DELETE /schema/{className}/tenants schema tenantsDelete + +delete tenants from a specific class +*/ +type TenantsDelete struct { + Context *middleware.Context + Handler TenantsDeleteHandler +} + +func (o *TenantsDelete) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewTenantsDeleteParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, 
principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_delete_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_delete_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..199d80889ca12203d6ff8816de3630b27c0609f9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_delete_parameters.go @@ -0,0 +1,106 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewTenantsDeleteParams creates a new TenantsDeleteParams object +// +// There are no default values defined in the spec. 
+func NewTenantsDeleteParams() TenantsDeleteParams { + + return TenantsDeleteParams{} +} + +// TenantsDeleteParams contains all the bound params for the tenants delete operation +// typically these are obtained from a http.Request +// +// swagger:parameters tenants.delete +type TenantsDeleteParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClassName string + /* + Required: true + In: body + */ + Tenants []string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewTenantsDeleteParams() beforehand. +func (o *TenantsDeleteParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + if runtime.HasBody(r) { + defer r.Body.Close() + var body []string + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("tenants", "body", "")) + } else { + res = append(res, errors.NewParseError("tenants", "body", "", err)) + } + } else { + // no validation required on inline body + o.Tenants = body + } + } else { + res = append(res, errors.Required("tenants", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. 
+func (o *TenantsDeleteParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_delete_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_delete_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..fb355bfa13f8e820fc35cc183b04299273c8cb54 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_delete_responses.go @@ -0,0 +1,210 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsDeleteOKCode is the HTTP code returned for type TenantsDeleteOK +const TenantsDeleteOKCode int = 200 + +/* +TenantsDeleteOK Deleted tenants from specified class. 
+ +swagger:response tenantsDeleteOK +*/ +type TenantsDeleteOK struct { +} + +// NewTenantsDeleteOK creates TenantsDeleteOK with default headers values +func NewTenantsDeleteOK() *TenantsDeleteOK { + + return &TenantsDeleteOK{} +} + +// WriteResponse to the client +func (o *TenantsDeleteOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// TenantsDeleteUnauthorizedCode is the HTTP code returned for type TenantsDeleteUnauthorized +const TenantsDeleteUnauthorizedCode int = 401 + +/* +TenantsDeleteUnauthorized Unauthorized or invalid credentials. + +swagger:response tenantsDeleteUnauthorized +*/ +type TenantsDeleteUnauthorized struct { +} + +// NewTenantsDeleteUnauthorized creates TenantsDeleteUnauthorized with default headers values +func NewTenantsDeleteUnauthorized() *TenantsDeleteUnauthorized { + + return &TenantsDeleteUnauthorized{} +} + +// WriteResponse to the client +func (o *TenantsDeleteUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// TenantsDeleteForbiddenCode is the HTTP code returned for type TenantsDeleteForbidden +const TenantsDeleteForbiddenCode int = 403 + +/* +TenantsDeleteForbidden Forbidden + +swagger:response tenantsDeleteForbidden +*/ +type TenantsDeleteForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsDeleteForbidden creates TenantsDeleteForbidden with default headers values +func NewTenantsDeleteForbidden() *TenantsDeleteForbidden { + + return &TenantsDeleteForbidden{} +} + +// WithPayload adds the payload to the tenants delete forbidden response +func (o *TenantsDeleteForbidden) WithPayload(payload *models.ErrorResponse) *TenantsDeleteForbidden { + o.Payload = payload + return o +} + +// 
SetPayload sets the payload to the tenants delete forbidden response +func (o *TenantsDeleteForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsDeleteForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// TenantsDeleteUnprocessableEntityCode is the HTTP code returned for type TenantsDeleteUnprocessableEntity +const TenantsDeleteUnprocessableEntityCode int = 422 + +/* +TenantsDeleteUnprocessableEntity Invalid Tenant class + +swagger:response tenantsDeleteUnprocessableEntity +*/ +type TenantsDeleteUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsDeleteUnprocessableEntity creates TenantsDeleteUnprocessableEntity with default headers values +func NewTenantsDeleteUnprocessableEntity() *TenantsDeleteUnprocessableEntity { + + return &TenantsDeleteUnprocessableEntity{} +} + +// WithPayload adds the payload to the tenants delete unprocessable entity response +func (o *TenantsDeleteUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *TenantsDeleteUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants delete unprocessable entity response +func (o *TenantsDeleteUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsDeleteUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// TenantsDeleteInternalServerErrorCode is the HTTP 
code returned for type TenantsDeleteInternalServerError +const TenantsDeleteInternalServerErrorCode int = 500 + +/* +TenantsDeleteInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response tenantsDeleteInternalServerError +*/ +type TenantsDeleteInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsDeleteInternalServerError creates TenantsDeleteInternalServerError with default headers values +func NewTenantsDeleteInternalServerError() *TenantsDeleteInternalServerError { + + return &TenantsDeleteInternalServerError{} +} + +// WithPayload adds the payload to the tenants delete internal server error response +func (o *TenantsDeleteInternalServerError) WithPayload(payload *models.ErrorResponse) *TenantsDeleteInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants delete internal server error response +func (o *TenantsDeleteInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsDeleteInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_delete_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_delete_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..cb1f8252b28402d1a64d21746161836b3b42c9b2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_delete_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// 
__ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// TenantsDeleteURL generates an URL for the tenants delete operation +type TenantsDeleteURL struct { + ClassName string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *TenantsDeleteURL) WithBasePath(bp string) *TenantsDeleteURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *TenantsDeleteURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *TenantsDeleteURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/schema/{className}/tenants" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on TenantsDeleteURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *TenantsDeleteURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *TenantsDeleteURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *TenantsDeleteURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on TenantsDeleteURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on TenantsDeleteURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *TenantsDeleteURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get.go new file mode 100644 index 
0000000000000000000000000000000000000000..fef7ae0f9296ad40dd1df45cf963f1b42d071a86 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsGetHandlerFunc turns a function with the right signature into a tenants get handler +type TenantsGetHandlerFunc func(TenantsGetParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn TenantsGetHandlerFunc) Handle(params TenantsGetParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// TenantsGetHandler interface for that can handle valid tenants get params +type TenantsGetHandler interface { + Handle(TenantsGetParams, *models.Principal) middleware.Responder +} + +// NewTenantsGet creates a new http.Handler for the tenants get operation +func NewTenantsGet(ctx *middleware.Context, handler TenantsGetHandler) *TenantsGet { + return &TenantsGet{Context: ctx, Handler: handler} +} + +/* + TenantsGet swagger:route GET /schema/{className}/tenants schema tenantsGet + +Get the list of tenants. 
+ +get all tenants from a specific class +*/ +type TenantsGet struct { + Context *middleware.Context + Handler TenantsGetHandler +} + +func (o *TenantsGet) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewTenantsGetParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_one.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_one.go new file mode 100644 index 0000000000000000000000000000000000000000..bc6ffd61f5db37c00f8ffd4578ae8136eb2d528b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_one.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsGetOneHandlerFunc turns a function with the right signature into a tenants get one handler +type TenantsGetOneHandlerFunc func(TenantsGetOneParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn TenantsGetOneHandlerFunc) Handle(params TenantsGetOneParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// TenantsGetOneHandler interface for that can handle valid tenants get one params +type TenantsGetOneHandler interface { + Handle(TenantsGetOneParams, *models.Principal) middleware.Responder +} + +// NewTenantsGetOne creates a new http.Handler for the tenants get one operation +func NewTenantsGetOne(ctx *middleware.Context, handler TenantsGetOneHandler) *TenantsGetOne { + return &TenantsGetOne{Context: ctx, Handler: handler} +} + +/* + TenantsGetOne swagger:route GET /schema/{className}/tenants/{tenantName} schema tenantsGetOne + +# Get a specific tenant + +get a specific tenant for the given class +*/ +type TenantsGetOne struct { + Context *middleware.Context + Handler TenantsGetOneHandler +} + +func (o *TenantsGetOne) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewTenantsGetOneParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + 
return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_one_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_one_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..e21a773ac32d392b73a1f055d436789a5fdec563 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_one_parameters.go @@ -0,0 +1,146 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewTenantsGetOneParams creates a new TenantsGetOneParams object +// with the default values initialized. 
+func NewTenantsGetOneParams() TenantsGetOneParams { + + var ( + // initialize parameters with default values + + consistencyDefault = bool(true) + ) + + return TenantsGetOneParams{ + Consistency: &consistencyDefault, + } +} + +// TenantsGetOneParams contains all the bound params for the tenants get one operation +// typically these are obtained from a http.Request +// +// swagger:parameters tenants.get.one +type TenantsGetOneParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClassName string + /*If consistency is true, the request will be proxied to the leader to ensure strong schema consistency + In: header + Default: true + */ + Consistency *bool + /* + Required: true + In: path + */ + TenantName string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewTenantsGetOneParams() beforehand. +func (o *TenantsGetOneParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + if err := o.bindConsistency(r.Header[http.CanonicalHeaderKey("consistency")], true, route.Formats); err != nil { + res = append(res, err) + } + + rTenantName, rhkTenantName, _ := route.Params.GetOK("tenantName") + if err := o.bindTenantName(rTenantName, rhkTenantName, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. 
+func (o *TenantsGetOneParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindConsistency binds and validates parameter Consistency from header. +func (o *TenantsGetOneParams) bindConsistency(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + + if raw == "" { // empty values pass all other validations + // Default values have been previously initialized by NewTenantsGetOneParams() + return nil + } + + value, err := swag.ConvertBool(raw) + if err != nil { + return errors.InvalidType("consistency", "header", "bool", raw) + } + o.Consistency = &value + + return nil +} + +// bindTenantName binds and validates parameter TenantName from path. +func (o *TenantsGetOneParams) bindTenantName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.TenantName = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_one_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_one_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..d4387494b2ddbc1d50b79bf383753a0e7c178e34 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_one_responses.go @@ -0,0 +1,255 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 
Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsGetOneOKCode is the HTTP code returned for type TenantsGetOneOK +const TenantsGetOneOKCode int = 200 + +/* +TenantsGetOneOK load the tenant given the specified class + +swagger:response tenantsGetOneOK +*/ +type TenantsGetOneOK struct { + + /* + In: Body + */ + Payload *models.Tenant `json:"body,omitempty"` +} + +// NewTenantsGetOneOK creates TenantsGetOneOK with default headers values +func NewTenantsGetOneOK() *TenantsGetOneOK { + + return &TenantsGetOneOK{} +} + +// WithPayload adds the payload to the tenants get one o k response +func (o *TenantsGetOneOK) WithPayload(payload *models.Tenant) *TenantsGetOneOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants get one o k response +func (o *TenantsGetOneOK) SetPayload(payload *models.Tenant) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsGetOneOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// TenantsGetOneUnauthorizedCode is the HTTP code returned for type TenantsGetOneUnauthorized +const TenantsGetOneUnauthorizedCode int = 401 + +/* +TenantsGetOneUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response tenantsGetOneUnauthorized +*/ +type TenantsGetOneUnauthorized struct { +} + +// NewTenantsGetOneUnauthorized creates TenantsGetOneUnauthorized with default headers values +func NewTenantsGetOneUnauthorized() *TenantsGetOneUnauthorized { + + return &TenantsGetOneUnauthorized{} +} + +// WriteResponse to the client +func (o *TenantsGetOneUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// TenantsGetOneForbiddenCode is the HTTP code returned for type TenantsGetOneForbidden +const TenantsGetOneForbiddenCode int = 403 + +/* +TenantsGetOneForbidden Forbidden + +swagger:response tenantsGetOneForbidden +*/ +type TenantsGetOneForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsGetOneForbidden creates TenantsGetOneForbidden with default headers values +func NewTenantsGetOneForbidden() *TenantsGetOneForbidden { + + return &TenantsGetOneForbidden{} +} + +// WithPayload adds the payload to the tenants get one forbidden response +func (o *TenantsGetOneForbidden) WithPayload(payload *models.ErrorResponse) *TenantsGetOneForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants get one forbidden response +func (o *TenantsGetOneForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsGetOneForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// TenantsGetOneNotFoundCode is the HTTP code returned for type TenantsGetOneNotFound +const TenantsGetOneNotFoundCode int = 404 + +/* +TenantsGetOneNotFound Tenant not found + 
+swagger:response tenantsGetOneNotFound +*/ +type TenantsGetOneNotFound struct { +} + +// NewTenantsGetOneNotFound creates TenantsGetOneNotFound with default headers values +func NewTenantsGetOneNotFound() *TenantsGetOneNotFound { + + return &TenantsGetOneNotFound{} +} + +// WriteResponse to the client +func (o *TenantsGetOneNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// TenantsGetOneUnprocessableEntityCode is the HTTP code returned for type TenantsGetOneUnprocessableEntity +const TenantsGetOneUnprocessableEntityCode int = 422 + +/* +TenantsGetOneUnprocessableEntity Invalid tenant or class + +swagger:response tenantsGetOneUnprocessableEntity +*/ +type TenantsGetOneUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsGetOneUnprocessableEntity creates TenantsGetOneUnprocessableEntity with default headers values +func NewTenantsGetOneUnprocessableEntity() *TenantsGetOneUnprocessableEntity { + + return &TenantsGetOneUnprocessableEntity{} +} + +// WithPayload adds the payload to the tenants get one unprocessable entity response +func (o *TenantsGetOneUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *TenantsGetOneUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants get one unprocessable entity response +func (o *TenantsGetOneUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsGetOneUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// 
TenantsGetOneInternalServerErrorCode is the HTTP code returned for type TenantsGetOneInternalServerError +const TenantsGetOneInternalServerErrorCode int = 500 + +/* +TenantsGetOneInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response tenantsGetOneInternalServerError +*/ +type TenantsGetOneInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsGetOneInternalServerError creates TenantsGetOneInternalServerError with default headers values +func NewTenantsGetOneInternalServerError() *TenantsGetOneInternalServerError { + + return &TenantsGetOneInternalServerError{} +} + +// WithPayload adds the payload to the tenants get one internal server error response +func (o *TenantsGetOneInternalServerError) WithPayload(payload *models.ErrorResponse) *TenantsGetOneInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants get one internal server error response +func (o *TenantsGetOneInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsGetOneInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_one_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_one_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..66ccef052da7196e891185c69b3ec1b33c789bff --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_one_urlbuilder.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// TenantsGetOneURL generates an URL for the tenants get one operation +type TenantsGetOneURL struct { + ClassName string + TenantName string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *TenantsGetOneURL) WithBasePath(bp string) *TenantsGetOneURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *TenantsGetOneURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *TenantsGetOneURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/schema/{className}/tenants/{tenantName}" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on TenantsGetOneURL") + } + + tenantName := o.TenantName + if tenantName != "" { + _path = strings.Replace(_path, "{tenantName}", tenantName, -1) + } else { + return nil, errors.New("tenantName is required on TenantsGetOneURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *TenantsGetOneURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *TenantsGetOneURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *TenantsGetOneURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on TenantsGetOneURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on TenantsGetOneURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *TenantsGetOneURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..bf98e08c6ebc85cc7b63a5d4ef549825b426efdc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_parameters.go @@ -0,0 +1,122 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewTenantsGetParams creates a new TenantsGetParams object +// with the default values initialized. 
+func NewTenantsGetParams() TenantsGetParams { + + var ( + // initialize parameters with default values + + consistencyDefault = bool(true) + ) + + return TenantsGetParams{ + Consistency: &consistencyDefault, + } +} + +// TenantsGetParams contains all the bound params for the tenants get operation +// typically these are obtained from a http.Request +// +// swagger:parameters tenants.get +type TenantsGetParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClassName string + /*If consistency is true, the request will be proxied to the leader to ensure strong schema consistency + In: header + Default: true + */ + Consistency *bool +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewTenantsGetParams() beforehand. +func (o *TenantsGetParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + + if err := o.bindConsistency(r.Header[http.CanonicalHeaderKey("consistency")], true, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. 
+func (o *TenantsGetParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} + +// bindConsistency binds and validates parameter Consistency from header. +func (o *TenantsGetParams) bindConsistency(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + + if raw == "" { // empty values pass all other validations + // Default values have been previously initialized by NewTenantsGetParams() + return nil + } + + value, err := swag.ConvertBool(raw) + if err != nil { + return errors.InvalidType("consistency", "header", "bool", raw) + } + o.Consistency = &value + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..cacba1df6e00d748ff38c0e73aa5a8e901d15a45 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_responses.go @@ -0,0 +1,233 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsGetOKCode is the HTTP code returned for type TenantsGetOK +const TenantsGetOKCode int = 200 + +/* +TenantsGetOK tenants from specified class. + +swagger:response tenantsGetOK +*/ +type TenantsGetOK struct { + + /* + In: Body + */ + Payload []*models.Tenant `json:"body,omitempty"` +} + +// NewTenantsGetOK creates TenantsGetOK with default headers values +func NewTenantsGetOK() *TenantsGetOK { + + return &TenantsGetOK{} +} + +// WithPayload adds the payload to the tenants get o k response +func (o *TenantsGetOK) WithPayload(payload []*models.Tenant) *TenantsGetOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants get o k response +func (o *TenantsGetOK) SetPayload(payload []*models.Tenant) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = make([]*models.Tenant, 0, 50) + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// TenantsGetUnauthorizedCode is the HTTP code returned for type TenantsGetUnauthorized +const TenantsGetUnauthorizedCode int = 401 + +/* +TenantsGetUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response tenantsGetUnauthorized +*/ +type TenantsGetUnauthorized struct { +} + +// NewTenantsGetUnauthorized creates TenantsGetUnauthorized with default headers values +func NewTenantsGetUnauthorized() *TenantsGetUnauthorized { + + return &TenantsGetUnauthorized{} +} + +// WriteResponse to the client +func (o *TenantsGetUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// TenantsGetForbiddenCode is the HTTP code returned for type TenantsGetForbidden +const TenantsGetForbiddenCode int = 403 + +/* +TenantsGetForbidden Forbidden + +swagger:response tenantsGetForbidden +*/ +type TenantsGetForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsGetForbidden creates TenantsGetForbidden with default headers values +func NewTenantsGetForbidden() *TenantsGetForbidden { + + return &TenantsGetForbidden{} +} + +// WithPayload adds the payload to the tenants get forbidden response +func (o *TenantsGetForbidden) WithPayload(payload *models.ErrorResponse) *TenantsGetForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants get forbidden response +func (o *TenantsGetForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsGetForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// TenantsGetUnprocessableEntityCode is the HTTP code returned for type TenantsGetUnprocessableEntity +const TenantsGetUnprocessableEntityCode int = 422 + +/* +TenantsGetUnprocessableEntity Invalid Tenant class + +swagger:response tenantsGetUnprocessableEntity 
+*/ +type TenantsGetUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsGetUnprocessableEntity creates TenantsGetUnprocessableEntity with default headers values +func NewTenantsGetUnprocessableEntity() *TenantsGetUnprocessableEntity { + + return &TenantsGetUnprocessableEntity{} +} + +// WithPayload adds the payload to the tenants get unprocessable entity response +func (o *TenantsGetUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *TenantsGetUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants get unprocessable entity response +func (o *TenantsGetUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsGetUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// TenantsGetInternalServerErrorCode is the HTTP code returned for type TenantsGetInternalServerError +const TenantsGetInternalServerErrorCode int = 500 + +/* +TenantsGetInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response tenantsGetInternalServerError +*/ +type TenantsGetInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsGetInternalServerError creates TenantsGetInternalServerError with default headers values +func NewTenantsGetInternalServerError() *TenantsGetInternalServerError { + + return &TenantsGetInternalServerError{} +} + +// WithPayload adds the payload to the tenants get internal server error response +func (o *TenantsGetInternalServerError) WithPayload(payload *models.ErrorResponse) *TenantsGetInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants get internal server error response +func (o *TenantsGetInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsGetInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..36e85d7ef428a918ef6dfb8139adbc8cf81953e5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_get_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// TenantsGetURL generates an URL for the tenants get operation +type TenantsGetURL struct { + ClassName string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *TenantsGetURL) WithBasePath(bp string) *TenantsGetURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *TenantsGetURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *TenantsGetURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/schema/{className}/tenants" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on TenantsGetURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *TenantsGetURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *TenantsGetURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *TenantsGetURL) 
BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on TenantsGetURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on TenantsGetURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *TenantsGetURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_update.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_update.go new file mode 100644 index 0000000000000000000000000000000000000000..e9560dfd52093784c158725277064ff25634a9ae --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_update.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsUpdateHandlerFunc turns a function with the right signature into a tenants update handler +type TenantsUpdateHandlerFunc func(TenantsUpdateParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn TenantsUpdateHandlerFunc) Handle(params TenantsUpdateParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// TenantsUpdateHandler interface for that can handle valid tenants update params +type TenantsUpdateHandler interface { + Handle(TenantsUpdateParams, *models.Principal) middleware.Responder +} + +// NewTenantsUpdate creates a new http.Handler for the tenants update operation +func NewTenantsUpdate(ctx *middleware.Context, handler TenantsUpdateHandler) *TenantsUpdate { + return &TenantsUpdate{Context: ctx, Handler: handler} +} + +/* + TenantsUpdate swagger:route PUT /schema/{className}/tenants schema tenantsUpdate + +Update a tenant. 
+ +Update tenant of a specific class +*/ +type TenantsUpdate struct { + Context *middleware.Context + Handler TenantsUpdateHandler +} + +func (o *TenantsUpdate) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewTenantsUpdateParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_update_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_update_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..719649a57cc82f4550af9ae33f4b759dcc4d331b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_update_parameters.go @@ -0,0 +1,121 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/models" +) + +// NewTenantsUpdateParams creates a new TenantsUpdateParams object +// +// There are no default values defined in the spec. +func NewTenantsUpdateParams() TenantsUpdateParams { + + return TenantsUpdateParams{} +} + +// TenantsUpdateParams contains all the bound params for the tenants update operation +// typically these are obtained from a http.Request +// +// swagger:parameters tenants.update +type TenantsUpdateParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + Body []*models.Tenant + /* + Required: true + In: path + */ + ClassName string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewTenantsUpdateParams() beforehand. 
+func (o *TenantsUpdateParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body []*models.Tenant + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("body", "body", "")) + } else { + res = append(res, errors.NewParseError("body", "body", "", err)) + } + } else { + + // validate array of body objects + for i := range body { + if body[i] == nil { + continue + } + if err := body[i].Validate(route.Formats); err != nil { + res = append(res, err) + break + } + } + + if len(res) == 0 { + o.Body = body + } + } + } else { + res = append(res, errors.Required("body", "body", "")) + } + + rClassName, rhkClassName, _ := route.Params.GetOK("className") + if err := o.bindClassName(rClassName, rhkClassName, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClassName binds and validates parameter ClassName from path. 
+func (o *TenantsUpdateParams) bindClassName(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.ClassName = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_update_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_update_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..f0f2e86a55875f10172e3e6bbe4331289dc577c2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_update_responses.go @@ -0,0 +1,233 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// TenantsUpdateOKCode is the HTTP code returned for type TenantsUpdateOK +const TenantsUpdateOKCode int = 200 + +/* +TenantsUpdateOK Updated tenants of the specified class + +swagger:response tenantsUpdateOK +*/ +type TenantsUpdateOK struct { + + /* + In: Body + */ + Payload []*models.Tenant `json:"body,omitempty"` +} + +// NewTenantsUpdateOK creates TenantsUpdateOK with default headers values +func NewTenantsUpdateOK() *TenantsUpdateOK { + + return &TenantsUpdateOK{} +} + +// WithPayload adds the payload to the tenants update o k response +func (o *TenantsUpdateOK) WithPayload(payload []*models.Tenant) *TenantsUpdateOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants update o k response +func (o *TenantsUpdateOK) SetPayload(payload []*models.Tenant) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = make([]*models.Tenant, 0, 50) + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// TenantsUpdateUnauthorizedCode is the HTTP code returned for type TenantsUpdateUnauthorized +const TenantsUpdateUnauthorizedCode int = 401 + +/* +TenantsUpdateUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response tenantsUpdateUnauthorized +*/ +type TenantsUpdateUnauthorized struct { +} + +// NewTenantsUpdateUnauthorized creates TenantsUpdateUnauthorized with default headers values +func NewTenantsUpdateUnauthorized() *TenantsUpdateUnauthorized { + + return &TenantsUpdateUnauthorized{} +} + +// WriteResponse to the client +func (o *TenantsUpdateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// TenantsUpdateForbiddenCode is the HTTP code returned for type TenantsUpdateForbidden +const TenantsUpdateForbiddenCode int = 403 + +/* +TenantsUpdateForbidden Forbidden + +swagger:response tenantsUpdateForbidden +*/ +type TenantsUpdateForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsUpdateForbidden creates TenantsUpdateForbidden with default headers values +func NewTenantsUpdateForbidden() *TenantsUpdateForbidden { + + return &TenantsUpdateForbidden{} +} + +// WithPayload adds the payload to the tenants update forbidden response +func (o *TenantsUpdateForbidden) WithPayload(payload *models.ErrorResponse) *TenantsUpdateForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants update forbidden response +func (o *TenantsUpdateForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsUpdateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// TenantsUpdateUnprocessableEntityCode is the HTTP code returned for type TenantsUpdateUnprocessableEntity +const TenantsUpdateUnprocessableEntityCode int = 422 + +/* 
+TenantsUpdateUnprocessableEntity Invalid Tenant class + +swagger:response tenantsUpdateUnprocessableEntity +*/ +type TenantsUpdateUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsUpdateUnprocessableEntity creates TenantsUpdateUnprocessableEntity with default headers values +func NewTenantsUpdateUnprocessableEntity() *TenantsUpdateUnprocessableEntity { + + return &TenantsUpdateUnprocessableEntity{} +} + +// WithPayload adds the payload to the tenants update unprocessable entity response +func (o *TenantsUpdateUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *TenantsUpdateUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants update unprocessable entity response +func (o *TenantsUpdateUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsUpdateUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// TenantsUpdateInternalServerErrorCode is the HTTP code returned for type TenantsUpdateInternalServerError +const TenantsUpdateInternalServerErrorCode int = 500 + +/* +TenantsUpdateInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response tenantsUpdateInternalServerError +*/ +type TenantsUpdateInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewTenantsUpdateInternalServerError creates TenantsUpdateInternalServerError with default headers values +func NewTenantsUpdateInternalServerError() *TenantsUpdateInternalServerError { + + return &TenantsUpdateInternalServerError{} +} + +// WithPayload adds the payload to the tenants update internal server error response +func (o *TenantsUpdateInternalServerError) WithPayload(payload *models.ErrorResponse) *TenantsUpdateInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the tenants update internal server error response +func (o *TenantsUpdateInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *TenantsUpdateInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_update_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_update_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..b6ef13e47eec1475807ee0c0daef414671e92fb3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/schema/tenants_update_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package schema + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// TenantsUpdateURL generates an URL for the tenants update operation +type TenantsUpdateURL struct { + ClassName string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *TenantsUpdateURL) WithBasePath(bp string) *TenantsUpdateURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *TenantsUpdateURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *TenantsUpdateURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/schema/{className}/tenants" + + className := o.ClassName + if className != "" { + _path = strings.Replace(_path, "{className}", className, -1) + } else { + return nil, errors.New("className is required on TenantsUpdateURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *TenantsUpdateURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *TenantsUpdateURL) String() string { + return 
o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *TenantsUpdateURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on TenantsUpdateURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on TenantsUpdateURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *TenantsUpdateURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/activate_user.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/activate_user.go new file mode 100644 index 0000000000000000000000000000000000000000..60fb4b4ec0483fdc5baa237dd66ec3934a037c75 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/activate_user.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ActivateUserHandlerFunc turns a function with the right signature into a activate user handler +type ActivateUserHandlerFunc func(ActivateUserParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ActivateUserHandlerFunc) Handle(params ActivateUserParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ActivateUserHandler interface for that can handle valid activate user params +type ActivateUserHandler interface { + Handle(ActivateUserParams, *models.Principal) middleware.Responder +} + +// NewActivateUser creates a new http.Handler for the activate user operation +func NewActivateUser(ctx *middleware.Context, handler ActivateUserHandler) *ActivateUser { + return &ActivateUser{Context: ctx, Handler: handler} +} + +/* + ActivateUser swagger:route POST /users/db/{user_id}/activate users activateUser + +activate a deactivated user +*/ +type ActivateUser struct { + Context *middleware.Context + Handler ActivateUserHandler +} + +func (o *ActivateUser) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewActivateUserParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the 
request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/activate_user_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/activate_user_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..8c1f43f7ec9c0a347405e2b91a56854c2215ec06 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/activate_user_parameters.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewActivateUserParams creates a new ActivateUserParams object +// +// There are no default values defined in the spec. +func NewActivateUserParams() ActivateUserParams { + + return ActivateUserParams{} +} + +// ActivateUserParams contains all the bound params for the activate user operation +// typically these are obtained from a http.Request +// +// swagger:parameters activateUser +type ActivateUserParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*user id + Required: true + In: path + */ + UserID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. 
+// +// To ensure default values, the struct must have been initialized with NewActivateUserParams() beforehand. +func (o *ActivateUserParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rUserID, rhkUserID, _ := route.Params.GetOK("user_id") + if err := o.bindUserID(rUserID, rhkUserID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindUserID binds and validates parameter UserID from path. +func (o *ActivateUserParams) bindUserID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.UserID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/activate_user_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/activate_user_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..74a46fa8872125b76e09d81dd50d1e1547734eab --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/activate_user_responses.go @@ -0,0 +1,305 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ActivateUserOKCode is the HTTP code returned for type ActivateUserOK +const ActivateUserOKCode int = 200 + +/* +ActivateUserOK User successfully activated + +swagger:response activateUserOK +*/ +type ActivateUserOK struct { +} + +// NewActivateUserOK creates ActivateUserOK with default headers values +func NewActivateUserOK() *ActivateUserOK { + + return &ActivateUserOK{} +} + +// WriteResponse to the client +func (o *ActivateUserOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// ActivateUserBadRequestCode is the HTTP code returned for type ActivateUserBadRequest +const ActivateUserBadRequestCode int = 400 + +/* +ActivateUserBadRequest Malformed request. 
+ +swagger:response activateUserBadRequest +*/ +type ActivateUserBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewActivateUserBadRequest creates ActivateUserBadRequest with default headers values +func NewActivateUserBadRequest() *ActivateUserBadRequest { + + return &ActivateUserBadRequest{} +} + +// WithPayload adds the payload to the activate user bad request response +func (o *ActivateUserBadRequest) WithPayload(payload *models.ErrorResponse) *ActivateUserBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the activate user bad request response +func (o *ActivateUserBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ActivateUserBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ActivateUserUnauthorizedCode is the HTTP code returned for type ActivateUserUnauthorized +const ActivateUserUnauthorizedCode int = 401 + +/* +ActivateUserUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response activateUserUnauthorized +*/ +type ActivateUserUnauthorized struct { +} + +// NewActivateUserUnauthorized creates ActivateUserUnauthorized with default headers values +func NewActivateUserUnauthorized() *ActivateUserUnauthorized { + + return &ActivateUserUnauthorized{} +} + +// WriteResponse to the client +func (o *ActivateUserUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ActivateUserForbiddenCode is the HTTP code returned for type ActivateUserForbidden +const ActivateUserForbiddenCode int = 403 + +/* +ActivateUserForbidden Forbidden + +swagger:response activateUserForbidden +*/ +type ActivateUserForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewActivateUserForbidden creates ActivateUserForbidden with default headers values +func NewActivateUserForbidden() *ActivateUserForbidden { + + return &ActivateUserForbidden{} +} + +// WithPayload adds the payload to the activate user forbidden response +func (o *ActivateUserForbidden) WithPayload(payload *models.ErrorResponse) *ActivateUserForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the activate user forbidden response +func (o *ActivateUserForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ActivateUserForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ActivateUserNotFoundCode is the HTTP code returned for type ActivateUserNotFound +const ActivateUserNotFoundCode int = 404 + +/* +ActivateUserNotFound user not found + +swagger:response activateUserNotFound 
+*/ +type ActivateUserNotFound struct { +} + +// NewActivateUserNotFound creates ActivateUserNotFound with default headers values +func NewActivateUserNotFound() *ActivateUserNotFound { + + return &ActivateUserNotFound{} +} + +// WriteResponse to the client +func (o *ActivateUserNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ActivateUserConflictCode is the HTTP code returned for type ActivateUserConflict +const ActivateUserConflictCode int = 409 + +/* +ActivateUserConflict user already activated + +swagger:response activateUserConflict +*/ +type ActivateUserConflict struct { +} + +// NewActivateUserConflict creates ActivateUserConflict with default headers values +func NewActivateUserConflict() *ActivateUserConflict { + + return &ActivateUserConflict{} +} + +// WriteResponse to the client +func (o *ActivateUserConflict) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(409) +} + +// ActivateUserUnprocessableEntityCode is the HTTP code returned for type ActivateUserUnprocessableEntity +const ActivateUserUnprocessableEntityCode int = 422 + +/* +ActivateUserUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
+ +swagger:response activateUserUnprocessableEntity +*/ +type ActivateUserUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewActivateUserUnprocessableEntity creates ActivateUserUnprocessableEntity with default headers values +func NewActivateUserUnprocessableEntity() *ActivateUserUnprocessableEntity { + + return &ActivateUserUnprocessableEntity{} +} + +// WithPayload adds the payload to the activate user unprocessable entity response +func (o *ActivateUserUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *ActivateUserUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the activate user unprocessable entity response +func (o *ActivateUserUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ActivateUserUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ActivateUserInternalServerErrorCode is the HTTP code returned for type ActivateUserInternalServerError +const ActivateUserInternalServerErrorCode int = 500 + +/* +ActivateUserInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response activateUserInternalServerError +*/ +type ActivateUserInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewActivateUserInternalServerError creates ActivateUserInternalServerError with default headers values +func NewActivateUserInternalServerError() *ActivateUserInternalServerError { + + return &ActivateUserInternalServerError{} +} + +// WithPayload adds the payload to the activate user internal server error response +func (o *ActivateUserInternalServerError) WithPayload(payload *models.ErrorResponse) *ActivateUserInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the activate user internal server error response +func (o *ActivateUserInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ActivateUserInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/activate_user_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/activate_user_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..98a782d7f62e13af1bdce1a88ac826dd7e9452a9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/activate_user_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// ActivateUserURL generates an URL for the activate user operation +type ActivateUserURL struct { + UserID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ActivateUserURL) WithBasePath(bp string) *ActivateUserURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ActivateUserURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ActivateUserURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/users/db/{user_id}/activate" + + userID := o.UserID + if userID != "" { + _path = strings.Replace(_path, "{user_id}", userID, -1) + } else { + return nil, errors.New("userId is required on ActivateUserURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ActivateUserURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ActivateUserURL) String() string { + return o.Must(o.Build()).String() +} + +// 
BuildFull builds a full url with scheme, host, path and query string +func (o *ActivateUserURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ActivateUserURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ActivateUserURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ActivateUserURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/create_user.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/create_user.go new file mode 100644 index 0000000000000000000000000000000000000000..3f039f579f2527d03d184c626d2b330ea2d4d0d2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/create_user.go @@ -0,0 +1,149 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" + + "github.com/weaviate/weaviate/entities/models" +) + +// CreateUserHandlerFunc turns a function with the right signature into a create user handler +type CreateUserHandlerFunc func(CreateUserParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn CreateUserHandlerFunc) Handle(params CreateUserParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// CreateUserHandler interface for that can handle valid create user params +type CreateUserHandler interface { + Handle(CreateUserParams, *models.Principal) middleware.Responder +} + +// NewCreateUser creates a new http.Handler for the create user operation +func NewCreateUser(ctx *middleware.Context, handler CreateUserHandler) *CreateUser { + return &CreateUser{Context: ctx, Handler: handler} +} + +/* + CreateUser swagger:route POST /users/db/{user_id} users createUser + +create new user +*/ +type CreateUser struct { + Context *middleware.Context + Handler CreateUserHandler +} + +func (o *CreateUser) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewCreateUserParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + 
return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// CreateUserBody create user body +// +// swagger:model CreateUserBody +type CreateUserBody struct { + + // EXPERIMENTAL, DONT USE. THIS WILL BE REMOVED AGAIN. - set the given time as creation time + // Format: date-time + CreateTime strfmt.DateTime `json:"createTime,omitempty" yaml:"createTime,omitempty"` + + // EXPERIMENTAL, DONT USE. THIS WILL BE REMOVED AGAIN. - import api key from static user + Import *bool `json:"import,omitempty" yaml:"import,omitempty"` +} + +// Validate validates this create user body +func (o *CreateUserBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateCreateTime(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *CreateUserBody) validateCreateTime(formats strfmt.Registry) error { + if swag.IsZero(o.CreateTime) { // not required + return nil + } + + if err := validate.FormatOf("body"+"."+"createTime", "body", "date-time", o.CreateTime.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this create user body based on context it is used +func (o *CreateUserBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *CreateUserBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *CreateUserBody) UnmarshalBinary(b []byte) error { + var res CreateUserBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/create_user_parameters.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/create_user_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..3947746583d18585b46a3a0cfaf938d070e73c49 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/create_user_parameters.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewCreateUserParams creates a new CreateUserParams object +// +// There are no default values defined in the spec. +func NewCreateUserParams() CreateUserParams { + + return CreateUserParams{} +} + +// CreateUserParams contains all the bound params for the create user operation +// typically these are obtained from a http.Request +// +// swagger:parameters createUser +type CreateUserParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + In: body + */ + Body CreateUserBody + /*user id + Required: true + In: path + */ + UserID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewCreateUserParams() beforehand. 
+func (o *CreateUserParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body CreateUserBody + if err := route.Consumer.Consume(r.Body, &body); err != nil { + res = append(res, errors.NewParseError("body", "body", "", err)) + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = body + } + } + } + + rUserID, rhkUserID, _ := route.Params.GetOK("user_id") + if err := o.bindUserID(rUserID, rhkUserID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindUserID binds and validates parameter UserID from path. +func (o *CreateUserParams) bindUserID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.UserID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/create_user_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/create_user_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..e0b9ceefa295eba6a24b61d3346381f914e2318c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/create_user_responses.go @@ -0,0 +1,365 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// CreateUserCreatedCode is the HTTP code returned for type CreateUserCreated +const CreateUserCreatedCode int = 201 + +/* +CreateUserCreated User created successfully + +swagger:response createUserCreated +*/ +type CreateUserCreated struct { + + /* + In: Body + */ + Payload *models.UserAPIKey `json:"body,omitempty"` +} + +// NewCreateUserCreated creates CreateUserCreated with default headers values +func NewCreateUserCreated() *CreateUserCreated { + + return &CreateUserCreated{} +} + +// WithPayload adds the payload to the create user created response +func (o *CreateUserCreated) WithPayload(payload *models.UserAPIKey) *CreateUserCreated { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the create user created response +func (o *CreateUserCreated) SetPayload(payload *models.UserAPIKey) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CreateUserCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(201) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CreateUserBadRequestCode is the HTTP code returned for type CreateUserBadRequest +const CreateUserBadRequestCode int = 400 + +/* +CreateUserBadRequest Malformed request. 
+ +swagger:response createUserBadRequest +*/ +type CreateUserBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCreateUserBadRequest creates CreateUserBadRequest with default headers values +func NewCreateUserBadRequest() *CreateUserBadRequest { + + return &CreateUserBadRequest{} +} + +// WithPayload adds the payload to the create user bad request response +func (o *CreateUserBadRequest) WithPayload(payload *models.ErrorResponse) *CreateUserBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the create user bad request response +func (o *CreateUserBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CreateUserBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CreateUserUnauthorizedCode is the HTTP code returned for type CreateUserUnauthorized +const CreateUserUnauthorizedCode int = 401 + +/* +CreateUserUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response createUserUnauthorized +*/ +type CreateUserUnauthorized struct { +} + +// NewCreateUserUnauthorized creates CreateUserUnauthorized with default headers values +func NewCreateUserUnauthorized() *CreateUserUnauthorized { + + return &CreateUserUnauthorized{} +} + +// WriteResponse to the client +func (o *CreateUserUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// CreateUserForbiddenCode is the HTTP code returned for type CreateUserForbidden +const CreateUserForbiddenCode int = 403 + +/* +CreateUserForbidden Forbidden + +swagger:response createUserForbidden +*/ +type CreateUserForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCreateUserForbidden creates CreateUserForbidden with default headers values +func NewCreateUserForbidden() *CreateUserForbidden { + + return &CreateUserForbidden{} +} + +// WithPayload adds the payload to the create user forbidden response +func (o *CreateUserForbidden) WithPayload(payload *models.ErrorResponse) *CreateUserForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the create user forbidden response +func (o *CreateUserForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CreateUserForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CreateUserNotFoundCode is the HTTP code returned for type CreateUserNotFound +const CreateUserNotFoundCode int = 404 + +/* +CreateUserNotFound user not found + +swagger:response createUserNotFound +*/ +type CreateUserNotFound struct { + + /* + In: Body + */ 
+ Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCreateUserNotFound creates CreateUserNotFound with default headers values +func NewCreateUserNotFound() *CreateUserNotFound { + + return &CreateUserNotFound{} +} + +// WithPayload adds the payload to the create user not found response +func (o *CreateUserNotFound) WithPayload(payload *models.ErrorResponse) *CreateUserNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the create user not found response +func (o *CreateUserNotFound) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CreateUserNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CreateUserConflictCode is the HTTP code returned for type CreateUserConflict +const CreateUserConflictCode int = 409 + +/* +CreateUserConflict User already exists + +swagger:response createUserConflict +*/ +type CreateUserConflict struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCreateUserConflict creates CreateUserConflict with default headers values +func NewCreateUserConflict() *CreateUserConflict { + + return &CreateUserConflict{} +} + +// WithPayload adds the payload to the create user conflict response +func (o *CreateUserConflict) WithPayload(payload *models.ErrorResponse) *CreateUserConflict { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the create user conflict response +func (o *CreateUserConflict) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CreateUserConflict) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(409) + if o.Payload != nil { + payload := 
o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CreateUserUnprocessableEntityCode is the HTTP code returned for type CreateUserUnprocessableEntity +const CreateUserUnprocessableEntityCode int = 422 + +/* +CreateUserUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. + +swagger:response createUserUnprocessableEntity +*/ +type CreateUserUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCreateUserUnprocessableEntity creates CreateUserUnprocessableEntity with default headers values +func NewCreateUserUnprocessableEntity() *CreateUserUnprocessableEntity { + + return &CreateUserUnprocessableEntity{} +} + +// WithPayload adds the payload to the create user unprocessable entity response +func (o *CreateUserUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *CreateUserUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the create user unprocessable entity response +func (o *CreateUserUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CreateUserUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CreateUserInternalServerErrorCode is the HTTP code returned for type CreateUserInternalServerError +const CreateUserInternalServerErrorCode int = 500 + +/* +CreateUserInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response createUserInternalServerError +*/ +type CreateUserInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewCreateUserInternalServerError creates CreateUserInternalServerError with default headers values +func NewCreateUserInternalServerError() *CreateUserInternalServerError { + + return &CreateUserInternalServerError{} +} + +// WithPayload adds the payload to the create user internal server error response +func (o *CreateUserInternalServerError) WithPayload(payload *models.ErrorResponse) *CreateUserInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the create user internal server error response +func (o *CreateUserInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CreateUserInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/create_user_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/create_user_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..d406cda6ec46b610d8a835f59c4bb0f8e903deea --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/create_user_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// CreateUserURL generates an URL for the create user operation +type CreateUserURL struct { + UserID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *CreateUserURL) WithBasePath(bp string) *CreateUserURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *CreateUserURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *CreateUserURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/users/db/{user_id}" + + userID := o.UserID + if userID != "" { + _path = strings.Replace(_path, "{user_id}", userID, -1) + } else { + return nil, errors.New("userId is required on CreateUserURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *CreateUserURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *CreateUserURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *CreateUserURL) BuildFull(scheme, host 
string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on CreateUserURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on CreateUserURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *CreateUserURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/deactivate_user.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/deactivate_user.go new file mode 100644 index 0000000000000000000000000000000000000000..66c97d8eebd57cf878b18129b09e713b848be78e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/deactivate_user.go @@ -0,0 +1,122 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeactivateUserHandlerFunc turns a function with the right signature into a deactivate user handler +type DeactivateUserHandlerFunc func(DeactivateUserParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn DeactivateUserHandlerFunc) Handle(params DeactivateUserParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// DeactivateUserHandler interface for that can handle valid deactivate user params +type DeactivateUserHandler interface { + Handle(DeactivateUserParams, *models.Principal) middleware.Responder +} + +// NewDeactivateUser creates a new http.Handler for the deactivate user operation +func NewDeactivateUser(ctx *middleware.Context, handler DeactivateUserHandler) *DeactivateUser { + return &DeactivateUser{Context: ctx, Handler: handler} +} + +/* + DeactivateUser swagger:route POST /users/db/{user_id}/deactivate users deactivateUser + +deactivate a user +*/ +type DeactivateUser struct { + Context *middleware.Context + Handler DeactivateUserHandler +} + +func (o *DeactivateUser) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewDeactivateUserParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, 
r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// DeactivateUserBody deactivate user body +// +// swagger:model DeactivateUserBody +type DeactivateUserBody struct { + + // if the key should be revoked when deactivating the user + RevokeKey *bool `json:"revoke_key,omitempty" yaml:"revoke_key,omitempty"` +} + +// Validate validates this deactivate user body +func (o *DeactivateUserBody) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this deactivate user body based on context it is used +func (o *DeactivateUserBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *DeactivateUserBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *DeactivateUserBody) UnmarshalBinary(b []byte) error { + var res DeactivateUserBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/deactivate_user_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/deactivate_user_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..f1b459f099b16c698d9ae3bc69305207cdfe30bd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/deactivate_user_parameters.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewDeactivateUserParams creates a new DeactivateUserParams object +// +// There are no default values defined in the spec. +func NewDeactivateUserParams() DeactivateUserParams { + + return DeactivateUserParams{} +} + +// DeactivateUserParams contains all the bound params for the deactivate user operation +// typically these are obtained from a http.Request +// +// swagger:parameters deactivateUser +type DeactivateUserParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + In: body + */ + Body DeactivateUserBody + /*user id + Required: true + In: path + */ + UserID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewDeactivateUserParams() beforehand. 
+func (o *DeactivateUserParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body DeactivateUserBody + if err := route.Consumer.Consume(r.Body, &body); err != nil { + res = append(res, errors.NewParseError("body", "body", "", err)) + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = body + } + } + } + + rUserID, rhkUserID, _ := route.Params.GetOK("user_id") + if err := o.bindUserID(rUserID, rhkUserID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindUserID binds and validates parameter UserID from path. +func (o *DeactivateUserParams) bindUserID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.UserID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/deactivate_user_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/deactivate_user_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..82ce407c0b9c3cacd9a299fa3de59093cc67808c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/deactivate_user_responses.go @@ -0,0 +1,305 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeactivateUserOKCode is the HTTP code returned for type DeactivateUserOK +const DeactivateUserOKCode int = 200 + +/* +DeactivateUserOK users successfully deactivated + +swagger:response deactivateUserOK +*/ +type DeactivateUserOK struct { +} + +// NewDeactivateUserOK creates DeactivateUserOK with default headers values +func NewDeactivateUserOK() *DeactivateUserOK { + + return &DeactivateUserOK{} +} + +// WriteResponse to the client +func (o *DeactivateUserOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// DeactivateUserBadRequestCode is the HTTP code returned for type DeactivateUserBadRequest +const DeactivateUserBadRequestCode int = 400 + +/* +DeactivateUserBadRequest Malformed request. 
+ +swagger:response deactivateUserBadRequest +*/ +type DeactivateUserBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeactivateUserBadRequest creates DeactivateUserBadRequest with default headers values +func NewDeactivateUserBadRequest() *DeactivateUserBadRequest { + + return &DeactivateUserBadRequest{} +} + +// WithPayload adds the payload to the deactivate user bad request response +func (o *DeactivateUserBadRequest) WithPayload(payload *models.ErrorResponse) *DeactivateUserBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the deactivate user bad request response +func (o *DeactivateUserBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeactivateUserBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeactivateUserUnauthorizedCode is the HTTP code returned for type DeactivateUserUnauthorized +const DeactivateUserUnauthorizedCode int = 401 + +/* +DeactivateUserUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response deactivateUserUnauthorized +*/ +type DeactivateUserUnauthorized struct { +} + +// NewDeactivateUserUnauthorized creates DeactivateUserUnauthorized with default headers values +func NewDeactivateUserUnauthorized() *DeactivateUserUnauthorized { + + return &DeactivateUserUnauthorized{} +} + +// WriteResponse to the client +func (o *DeactivateUserUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// DeactivateUserForbiddenCode is the HTTP code returned for type DeactivateUserForbidden +const DeactivateUserForbiddenCode int = 403 + +/* +DeactivateUserForbidden Forbidden + +swagger:response deactivateUserForbidden +*/ +type DeactivateUserForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeactivateUserForbidden creates DeactivateUserForbidden with default headers values +func NewDeactivateUserForbidden() *DeactivateUserForbidden { + + return &DeactivateUserForbidden{} +} + +// WithPayload adds the payload to the deactivate user forbidden response +func (o *DeactivateUserForbidden) WithPayload(payload *models.ErrorResponse) *DeactivateUserForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the deactivate user forbidden response +func (o *DeactivateUserForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeactivateUserForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeactivateUserNotFoundCode is the HTTP code returned for type DeactivateUserNotFound +const DeactivateUserNotFoundCode int = 404 + +/* 
+DeactivateUserNotFound user not found + +swagger:response deactivateUserNotFound +*/ +type DeactivateUserNotFound struct { +} + +// NewDeactivateUserNotFound creates DeactivateUserNotFound with default headers values +func NewDeactivateUserNotFound() *DeactivateUserNotFound { + + return &DeactivateUserNotFound{} +} + +// WriteResponse to the client +func (o *DeactivateUserNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// DeactivateUserConflictCode is the HTTP code returned for type DeactivateUserConflict +const DeactivateUserConflictCode int = 409 + +/* +DeactivateUserConflict user already deactivated + +swagger:response deactivateUserConflict +*/ +type DeactivateUserConflict struct { +} + +// NewDeactivateUserConflict creates DeactivateUserConflict with default headers values +func NewDeactivateUserConflict() *DeactivateUserConflict { + + return &DeactivateUserConflict{} +} + +// WriteResponse to the client +func (o *DeactivateUserConflict) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(409) +} + +// DeactivateUserUnprocessableEntityCode is the HTTP code returned for type DeactivateUserUnprocessableEntity +const DeactivateUserUnprocessableEntityCode int = 422 + +/* +DeactivateUserUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file? 
+ +swagger:response deactivateUserUnprocessableEntity +*/ +type DeactivateUserUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeactivateUserUnprocessableEntity creates DeactivateUserUnprocessableEntity with default headers values +func NewDeactivateUserUnprocessableEntity() *DeactivateUserUnprocessableEntity { + + return &DeactivateUserUnprocessableEntity{} +} + +// WithPayload adds the payload to the deactivate user unprocessable entity response +func (o *DeactivateUserUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *DeactivateUserUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the deactivate user unprocessable entity response +func (o *DeactivateUserUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeactivateUserUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeactivateUserInternalServerErrorCode is the HTTP code returned for type DeactivateUserInternalServerError +const DeactivateUserInternalServerErrorCode int = 500 + +/* +DeactivateUserInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response deactivateUserInternalServerError +*/ +type DeactivateUserInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeactivateUserInternalServerError creates DeactivateUserInternalServerError with default headers values +func NewDeactivateUserInternalServerError() *DeactivateUserInternalServerError { + + return &DeactivateUserInternalServerError{} +} + +// WithPayload adds the payload to the deactivate user internal server error response +func (o *DeactivateUserInternalServerError) WithPayload(payload *models.ErrorResponse) *DeactivateUserInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the deactivate user internal server error response +func (o *DeactivateUserInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeactivateUserInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/deactivate_user_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/deactivate_user_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..262db65ab08eff968192fa9704e22a363471a6e9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/deactivate_user_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// DeactivateUserURL generates an URL for the deactivate user operation +type DeactivateUserURL struct { + UserID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *DeactivateUserURL) WithBasePath(bp string) *DeactivateUserURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *DeactivateUserURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *DeactivateUserURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/users/db/{user_id}/deactivate" + + userID := o.UserID + if userID != "" { + _path = strings.Replace(_path, "{user_id}", userID, -1) + } else { + return nil, errors.New("userId is required on DeactivateUserURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *DeactivateUserURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *DeactivateUserURL) String() string { + return 
o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *DeactivateUserURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on DeactivateUserURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on DeactivateUserURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *DeactivateUserURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/delete_user.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/delete_user.go new file mode 100644 index 0000000000000000000000000000000000000000..211918f5c669e61382969ba4ea0fd43f36910800 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/delete_user.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeleteUserHandlerFunc turns a function with the right signature into a delete user handler +type DeleteUserHandlerFunc func(DeleteUserParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn DeleteUserHandlerFunc) Handle(params DeleteUserParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// DeleteUserHandler interface for that can handle valid delete user params +type DeleteUserHandler interface { + Handle(DeleteUserParams, *models.Principal) middleware.Responder +} + +// NewDeleteUser creates a new http.Handler for the delete user operation +func NewDeleteUser(ctx *middleware.Context, handler DeleteUserHandler) *DeleteUser { + return &DeleteUser{Context: ctx, Handler: handler} +} + +/* + DeleteUser swagger:route DELETE /users/db/{user_id} users deleteUser + +Delete User +*/ +type DeleteUser struct { + Context *middleware.Context + Handler DeleteUserHandler +} + +func (o *DeleteUser) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewDeleteUserParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + 
+} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/delete_user_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/delete_user_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..50ce9f0c6ac2f994f922b6993eb90e0e6200adcd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/delete_user_parameters.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewDeleteUserParams creates a new DeleteUserParams object +// +// There are no default values defined in the spec. +func NewDeleteUserParams() DeleteUserParams { + + return DeleteUserParams{} +} + +// DeleteUserParams contains all the bound params for the delete user operation +// typically these are obtained from a http.Request +// +// swagger:parameters deleteUser +type DeleteUserParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*user name + Required: true + In: path + */ + UserID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewDeleteUserParams() beforehand. 
+func (o *DeleteUserParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rUserID, rhkUserID, _ := route.Params.GetOK("user_id") + if err := o.bindUserID(rUserID, rhkUserID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindUserID binds and validates parameter UserID from path. +func (o *DeleteUserParams) bindUserID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.UserID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/delete_user_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/delete_user_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..2430d56527d8734f5f8c209b0745bc933acf77d6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/delete_user_responses.go @@ -0,0 +1,280 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// DeleteUserNoContentCode is the HTTP code returned for type DeleteUserNoContent +const DeleteUserNoContentCode int = 204 + +/* +DeleteUserNoContent Successfully deleted. 
+ +swagger:response deleteUserNoContent +*/ +type DeleteUserNoContent struct { +} + +// NewDeleteUserNoContent creates DeleteUserNoContent with default headers values +func NewDeleteUserNoContent() *DeleteUserNoContent { + + return &DeleteUserNoContent{} +} + +// WriteResponse to the client +func (o *DeleteUserNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(204) +} + +// DeleteUserBadRequestCode is the HTTP code returned for type DeleteUserBadRequest +const DeleteUserBadRequestCode int = 400 + +/* +DeleteUserBadRequest Malformed request. + +swagger:response deleteUserBadRequest +*/ +type DeleteUserBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteUserBadRequest creates DeleteUserBadRequest with default headers values +func NewDeleteUserBadRequest() *DeleteUserBadRequest { + + return &DeleteUserBadRequest{} +} + +// WithPayload adds the payload to the delete user bad request response +func (o *DeleteUserBadRequest) WithPayload(payload *models.ErrorResponse) *DeleteUserBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete user bad request response +func (o *DeleteUserBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteUserBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteUserUnauthorizedCode is the HTTP code returned for type DeleteUserUnauthorized +const DeleteUserUnauthorizedCode int = 401 + +/* +DeleteUserUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response deleteUserUnauthorized +*/ +type DeleteUserUnauthorized struct { +} + +// NewDeleteUserUnauthorized creates DeleteUserUnauthorized with default headers values +func NewDeleteUserUnauthorized() *DeleteUserUnauthorized { + + return &DeleteUserUnauthorized{} +} + +// WriteResponse to the client +func (o *DeleteUserUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// DeleteUserForbiddenCode is the HTTP code returned for type DeleteUserForbidden +const DeleteUserForbiddenCode int = 403 + +/* +DeleteUserForbidden Forbidden + +swagger:response deleteUserForbidden +*/ +type DeleteUserForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteUserForbidden creates DeleteUserForbidden with default headers values +func NewDeleteUserForbidden() *DeleteUserForbidden { + + return &DeleteUserForbidden{} +} + +// WithPayload adds the payload to the delete user forbidden response +func (o *DeleteUserForbidden) WithPayload(payload *models.ErrorResponse) *DeleteUserForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete user forbidden response +func (o *DeleteUserForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteUserForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteUserNotFoundCode is the HTTP code returned for type DeleteUserNotFound +const DeleteUserNotFoundCode int = 404 + +/* +DeleteUserNotFound user not found + +swagger:response deleteUserNotFound +*/ +type DeleteUserNotFound struct { +} + +// 
NewDeleteUserNotFound creates DeleteUserNotFound with default headers values +func NewDeleteUserNotFound() *DeleteUserNotFound { + + return &DeleteUserNotFound{} +} + +// WriteResponse to the client +func (o *DeleteUserNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// DeleteUserUnprocessableEntityCode is the HTTP code returned for type DeleteUserUnprocessableEntity +const DeleteUserUnprocessableEntityCode int = 422 + +/* +DeleteUserUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. + +swagger:response deleteUserUnprocessableEntity +*/ +type DeleteUserUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteUserUnprocessableEntity creates DeleteUserUnprocessableEntity with default headers values +func NewDeleteUserUnprocessableEntity() *DeleteUserUnprocessableEntity { + + return &DeleteUserUnprocessableEntity{} +} + +// WithPayload adds the payload to the delete user unprocessable entity response +func (o *DeleteUserUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *DeleteUserUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete user unprocessable entity response +func (o *DeleteUserUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteUserUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DeleteUserInternalServerErrorCode is the HTTP code returned for type DeleteUserInternalServerError +const 
DeleteUserInternalServerErrorCode int = 500 + +/* +DeleteUserInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response deleteUserInternalServerError +*/ +type DeleteUserInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewDeleteUserInternalServerError creates DeleteUserInternalServerError with default headers values +func NewDeleteUserInternalServerError() *DeleteUserInternalServerError { + + return &DeleteUserInternalServerError{} +} + +// WithPayload adds the payload to the delete user internal server error response +func (o *DeleteUserInternalServerError) WithPayload(payload *models.ErrorResponse) *DeleteUserInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the delete user internal server error response +func (o *DeleteUserInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DeleteUserInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/delete_user_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/delete_user_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..d0e287140b710ec46496d55a638ae4d65febb319 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/delete_user_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || 
__/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// DeleteUserURL generates an URL for the delete user operation +type DeleteUserURL struct { + UserID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *DeleteUserURL) WithBasePath(bp string) *DeleteUserURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *DeleteUserURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *DeleteUserURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/users/db/{user_id}" + + userID := o.UserID + if userID != "" { + _path = strings.Replace(_path, "{user_id}", userID, -1) + } else { + return nil, errors.New("userId is required on DeleteUserURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *DeleteUserURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *DeleteUserURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *DeleteUserURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on DeleteUserURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on DeleteUserURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *DeleteUserURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_own_info.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_own_info.go new file mode 100644 index 
0000000000000000000000000000000000000000..ddd627027aa0f0722db413a87f50aaf5f0d2dc66 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_own_info.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetOwnInfoHandlerFunc turns a function with the right signature into a get own info handler +type GetOwnInfoHandlerFunc func(GetOwnInfoParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetOwnInfoHandlerFunc) Handle(params GetOwnInfoParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GetOwnInfoHandler interface for that can handle valid get own info params +type GetOwnInfoHandler interface { + Handle(GetOwnInfoParams, *models.Principal) middleware.Responder +} + +// NewGetOwnInfo creates a new http.Handler for the get own info operation +func NewGetOwnInfo(ctx *middleware.Context, handler GetOwnInfoHandler) *GetOwnInfo { + return &GetOwnInfo{Context: ctx, Handler: handler} +} + +/* + GetOwnInfo swagger:route GET /users/own-info users getOwnInfo + +get info relevant to own user, e.g. 
username, roles +*/ +type GetOwnInfo struct { + Context *middleware.Context + Handler GetOwnInfoHandler +} + +func (o *GetOwnInfo) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGetOwnInfoParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_own_info_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_own_info_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..323aa03990dabe9a119062d29824eda50881c804 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_own_info_parameters.go @@ -0,0 +1,57 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" +) + +// NewGetOwnInfoParams creates a new GetOwnInfoParams object +// +// There are no default values defined in the spec. +func NewGetOwnInfoParams() GetOwnInfoParams { + + return GetOwnInfoParams{} +} + +// GetOwnInfoParams contains all the bound params for the get own info operation +// typically these are obtained from a http.Request +// +// swagger:parameters getOwnInfo +type GetOwnInfoParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetOwnInfoParams() beforehand. +func (o *GetOwnInfoParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_own_info_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_own_info_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..6c9c6193fd28d584808965117a362d5a802d3c68 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_own_info_responses.go @@ -0,0 +1,185 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetOwnInfoOKCode is the HTTP code returned for type GetOwnInfoOK +const GetOwnInfoOKCode int = 200 + +/* +GetOwnInfoOK Info about the user + +swagger:response getOwnInfoOK +*/ +type GetOwnInfoOK struct { + + /* + In: Body + */ + Payload *models.UserOwnInfo `json:"body,omitempty"` +} + +// NewGetOwnInfoOK creates GetOwnInfoOK with default headers values +func NewGetOwnInfoOK() *GetOwnInfoOK { + + return &GetOwnInfoOK{} +} + +// WithPayload adds the payload to the get own info o k response +func (o *GetOwnInfoOK) WithPayload(payload *models.UserOwnInfo) *GetOwnInfoOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get own info o k response +func (o *GetOwnInfoOK) SetPayload(payload *models.UserOwnInfo) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetOwnInfoOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetOwnInfoUnauthorizedCode is the HTTP code returned for type GetOwnInfoUnauthorized +const GetOwnInfoUnauthorizedCode int = 401 + +/* +GetOwnInfoUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response getOwnInfoUnauthorized +*/ +type GetOwnInfoUnauthorized struct { +} + +// NewGetOwnInfoUnauthorized creates GetOwnInfoUnauthorized with default headers values +func NewGetOwnInfoUnauthorized() *GetOwnInfoUnauthorized { + + return &GetOwnInfoUnauthorized{} +} + +// WriteResponse to the client +func (o *GetOwnInfoUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// GetOwnInfoInternalServerErrorCode is the HTTP code returned for type GetOwnInfoInternalServerError +const GetOwnInfoInternalServerErrorCode int = 500 + +/* +GetOwnInfoInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response getOwnInfoInternalServerError +*/ +type GetOwnInfoInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetOwnInfoInternalServerError creates GetOwnInfoInternalServerError with default headers values +func NewGetOwnInfoInternalServerError() *GetOwnInfoInternalServerError { + + return &GetOwnInfoInternalServerError{} +} + +// WithPayload adds the payload to the get own info internal server error response +func (o *GetOwnInfoInternalServerError) WithPayload(payload *models.ErrorResponse) *GetOwnInfoInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get own info internal server error response +func (o *GetOwnInfoInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetOwnInfoInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the 
recovery middleware deal with this + } + } +} + +// GetOwnInfoNotImplementedCode is the HTTP code returned for type GetOwnInfoNotImplemented +const GetOwnInfoNotImplementedCode int = 501 + +/* +GetOwnInfoNotImplemented Replica movement operations are disabled. + +swagger:response getOwnInfoNotImplemented +*/ +type GetOwnInfoNotImplemented struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetOwnInfoNotImplemented creates GetOwnInfoNotImplemented with default headers values +func NewGetOwnInfoNotImplemented() *GetOwnInfoNotImplemented { + + return &GetOwnInfoNotImplemented{} +} + +// WithPayload adds the payload to the get own info not implemented response +func (o *GetOwnInfoNotImplemented) WithPayload(payload *models.ErrorResponse) *GetOwnInfoNotImplemented { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get own info not implemented response +func (o *GetOwnInfoNotImplemented) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetOwnInfoNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(501) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_own_info_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_own_info_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..beb9e3403735eb30ba61ddb946954f31c769ffe5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_own_info_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ 
\___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// GetOwnInfoURL generates an URL for the get own info operation +type GetOwnInfoURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetOwnInfoURL) WithBasePath(bp string) *GetOwnInfoURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetOwnInfoURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetOwnInfoURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/users/own-info" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetOwnInfoURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetOwnInfoURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetOwnInfoURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, 
errors.New("scheme is required for a full url on GetOwnInfoURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetOwnInfoURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetOwnInfoURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_user_info.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_user_info.go new file mode 100644 index 0000000000000000000000000000000000000000..a13dff053865772627b710ce75d0b5374744341d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_user_info.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetUserInfoHandlerFunc turns a function with the right signature into a get user info handler +type GetUserInfoHandlerFunc func(GetUserInfoParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetUserInfoHandlerFunc) Handle(params GetUserInfoParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GetUserInfoHandler interface for that can handle valid get user info params +type GetUserInfoHandler interface { + Handle(GetUserInfoParams, *models.Principal) middleware.Responder +} + +// NewGetUserInfo creates a new http.Handler for the get user info operation +func NewGetUserInfo(ctx *middleware.Context, handler GetUserInfoHandler) *GetUserInfo { + return &GetUserInfo{Context: ctx, Handler: handler} +} + +/* + GetUserInfo swagger:route GET /users/db/{user_id} users getUserInfo + +get info relevant to user, e.g. 
username, roles +*/ +type GetUserInfo struct { + Context *middleware.Context + Handler GetUserInfoHandler +} + +func (o *GetUserInfo) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGetUserInfoParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_user_info_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_user_info_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..4b4c526948561ac8819a0410297108a992836faf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_user_info_parameters.go @@ -0,0 +1,127 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetUserInfoParams creates a new GetUserInfoParams object +// with the default values initialized. +func NewGetUserInfoParams() GetUserInfoParams { + + var ( + // initialize parameters with default values + + includeLastUsedTimeDefault = bool(false) + ) + + return GetUserInfoParams{ + IncludeLastUsedTime: &includeLastUsedTimeDefault, + } +} + +// GetUserInfoParams contains all the bound params for the get user info operation +// typically these are obtained from a http.Request +// +// swagger:parameters getUserInfo +type GetUserInfoParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*Whether to include the last used time of the given user + In: query + Default: false + */ + IncludeLastUsedTime *bool + /*user id + Required: true + In: path + */ + UserID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetUserInfoParams() beforehand. 
+func (o *GetUserInfoParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + qIncludeLastUsedTime, qhkIncludeLastUsedTime, _ := qs.GetOK("includeLastUsedTime") + if err := o.bindIncludeLastUsedTime(qIncludeLastUsedTime, qhkIncludeLastUsedTime, route.Formats); err != nil { + res = append(res, err) + } + + rUserID, rhkUserID, _ := route.Params.GetOK("user_id") + if err := o.bindUserID(rUserID, rhkUserID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindIncludeLastUsedTime binds and validates parameter IncludeLastUsedTime from query. +func (o *GetUserInfoParams) bindIncludeLastUsedTime(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + // Default values have been previously initialized by NewGetUserInfoParams() + return nil + } + + value, err := swag.ConvertBool(raw) + if err != nil { + return errors.InvalidType("includeLastUsedTime", "query", "bool", raw) + } + o.IncludeLastUsedTime = &value + + return nil +} + +// bindUserID binds and validates parameter UserID from path. 
+func (o *GetUserInfoParams) bindUserID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.UserID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_user_info_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_user_info_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..55b0a1ecdef47b6d6c94a4bec77305a3081b675c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_user_info_responses.go @@ -0,0 +1,255 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetUserInfoOKCode is the HTTP code returned for type GetUserInfoOK +const GetUserInfoOKCode int = 200 + +/* +GetUserInfoOK Info about the user + +swagger:response getUserInfoOK +*/ +type GetUserInfoOK struct { + + /* + In: Body + */ + Payload *models.DBUserInfo `json:"body,omitempty"` +} + +// NewGetUserInfoOK creates GetUserInfoOK with default headers values +func NewGetUserInfoOK() *GetUserInfoOK { + + return &GetUserInfoOK{} +} + +// WithPayload adds the payload to the get user info o k response +func (o *GetUserInfoOK) WithPayload(payload *models.DBUserInfo) *GetUserInfoOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get user info o k response +func (o *GetUserInfoOK) SetPayload(payload *models.DBUserInfo) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetUserInfoOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetUserInfoUnauthorizedCode is the HTTP code returned for type GetUserInfoUnauthorized +const GetUserInfoUnauthorizedCode int = 401 + +/* +GetUserInfoUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response getUserInfoUnauthorized +*/ +type GetUserInfoUnauthorized struct { +} + +// NewGetUserInfoUnauthorized creates GetUserInfoUnauthorized with default headers values +func NewGetUserInfoUnauthorized() *GetUserInfoUnauthorized { + + return &GetUserInfoUnauthorized{} +} + +// WriteResponse to the client +func (o *GetUserInfoUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// GetUserInfoForbiddenCode is the HTTP code returned for type GetUserInfoForbidden +const GetUserInfoForbiddenCode int = 403 + +/* +GetUserInfoForbidden Forbidden + +swagger:response getUserInfoForbidden +*/ +type GetUserInfoForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetUserInfoForbidden creates GetUserInfoForbidden with default headers values +func NewGetUserInfoForbidden() *GetUserInfoForbidden { + + return &GetUserInfoForbidden{} +} + +// WithPayload adds the payload to the get user info forbidden response +func (o *GetUserInfoForbidden) WithPayload(payload *models.ErrorResponse) *GetUserInfoForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get user info forbidden response +func (o *GetUserInfoForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetUserInfoForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetUserInfoNotFoundCode is the HTTP code returned for type GetUserInfoNotFound +const GetUserInfoNotFoundCode int = 404 + +/* +GetUserInfoNotFound user not found + +swagger:response getUserInfoNotFound +*/ +type 
GetUserInfoNotFound struct { +} + +// NewGetUserInfoNotFound creates GetUserInfoNotFound with default headers values +func NewGetUserInfoNotFound() *GetUserInfoNotFound { + + return &GetUserInfoNotFound{} +} + +// WriteResponse to the client +func (o *GetUserInfoNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// GetUserInfoUnprocessableEntityCode is the HTTP code returned for type GetUserInfoUnprocessableEntity +const GetUserInfoUnprocessableEntityCode int = 422 + +/* +GetUserInfoUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. + +swagger:response getUserInfoUnprocessableEntity +*/ +type GetUserInfoUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetUserInfoUnprocessableEntity creates GetUserInfoUnprocessableEntity with default headers values +func NewGetUserInfoUnprocessableEntity() *GetUserInfoUnprocessableEntity { + + return &GetUserInfoUnprocessableEntity{} +} + +// WithPayload adds the payload to the get user info unprocessable entity response +func (o *GetUserInfoUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *GetUserInfoUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get user info unprocessable entity response +func (o *GetUserInfoUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetUserInfoUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetUserInfoInternalServerErrorCode is the HTTP code returned 
for type GetUserInfoInternalServerError +const GetUserInfoInternalServerErrorCode int = 500 + +/* +GetUserInfoInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response getUserInfoInternalServerError +*/ +type GetUserInfoInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetUserInfoInternalServerError creates GetUserInfoInternalServerError with default headers values +func NewGetUserInfoInternalServerError() *GetUserInfoInternalServerError { + + return &GetUserInfoInternalServerError{} +} + +// WithPayload adds the payload to the get user info internal server error response +func (o *GetUserInfoInternalServerError) WithPayload(payload *models.ErrorResponse) *GetUserInfoInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get user info internal server error response +func (o *GetUserInfoInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetUserInfoInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_user_info_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_user_info_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..6cc80dbff3900c8c6095dd40ef011320427a97e6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/get_user_info_urlbuilder.go @@ -0,0 +1,126 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ 
\/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/swag" +) + +// GetUserInfoURL generates an URL for the get user info operation +type GetUserInfoURL struct { + UserID string + + IncludeLastUsedTime *bool + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetUserInfoURL) WithBasePath(bp string) *GetUserInfoURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *GetUserInfoURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetUserInfoURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/users/db/{user_id}" + + userID := o.UserID + if userID != "" { + _path = strings.Replace(_path, "{user_id}", userID, -1) + } else { + return nil, errors.New("userId is required on GetUserInfoURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var includeLastUsedTimeQ string + if o.IncludeLastUsedTime != nil { + includeLastUsedTimeQ = swag.FormatBool(*o.IncludeLastUsedTime) + } + if includeLastUsedTimeQ != "" { + qs.Set("includeLastUsedTime", includeLastUsedTimeQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetUserInfoURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetUserInfoURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetUserInfoURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetUserInfoURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetUserInfoURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetUserInfoURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() 
+} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/list_all_users.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/list_all_users.go new file mode 100644 index 0000000000000000000000000000000000000000..d072e5abab5e425af7215b0d97b64e100607db4a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/list_all_users.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// ListAllUsersHandlerFunc turns a function with the right signature into a list all users handler +type ListAllUsersHandlerFunc func(ListAllUsersParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ListAllUsersHandlerFunc) Handle(params ListAllUsersParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// ListAllUsersHandler interface for that can handle valid list all users params +type ListAllUsersHandler interface { + Handle(ListAllUsersParams, *models.Principal) middleware.Responder +} + +// NewListAllUsers creates a new http.Handler for the list all users operation +func NewListAllUsers(ctx *middleware.Context, handler ListAllUsersHandler) *ListAllUsers { + return &ListAllUsers{Context: ctx, Handler: handler} +} + +/* + ListAllUsers swagger:route GET /users/db users listAllUsers + +list all db users +*/ +type 
ListAllUsers struct { + Context *middleware.Context + Handler ListAllUsersHandler +} + +func (o *ListAllUsers) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewListAllUsersParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/list_all_users_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/list_all_users_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..b57e58f59eafffa80f81106949e601462b88c439 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/list_all_users_parameters.go @@ -0,0 +1,103 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewListAllUsersParams creates a new ListAllUsersParams object +// with the default values initialized. +func NewListAllUsersParams() ListAllUsersParams { + + var ( + // initialize parameters with default values + + includeLastUsedTimeDefault = bool(false) + ) + + return ListAllUsersParams{ + IncludeLastUsedTime: &includeLastUsedTimeDefault, + } +} + +// ListAllUsersParams contains all the bound params for the list all users operation +// typically these are obtained from a http.Request +// +// swagger:parameters listAllUsers +type ListAllUsersParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*Whether to include the last used time of the users + In: query + Default: false + */ + IncludeLastUsedTime *bool +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewListAllUsersParams() beforehand. +func (o *ListAllUsersParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + qIncludeLastUsedTime, qhkIncludeLastUsedTime, _ := qs.GetOK("includeLastUsedTime") + if err := o.bindIncludeLastUsedTime(qIncludeLastUsedTime, qhkIncludeLastUsedTime, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindIncludeLastUsedTime binds and validates parameter IncludeLastUsedTime from query. 
+func (o *ListAllUsersParams) bindIncludeLastUsedTime(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + + if raw == "" { // empty values pass all other validations + // Default values have been previously initialized by NewListAllUsersParams() + return nil + } + + value, err := swag.ConvertBool(raw) + if err != nil { + return errors.InvalidType("includeLastUsedTime", "query", "bool", raw) + } + o.IncludeLastUsedTime = &value + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/list_all_users_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/list_all_users_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..c9d8fabe225d2d5b576a9e83e6e512564b687fde --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/list_all_users_responses.go @@ -0,0 +1,188 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// ListAllUsersOKCode is the HTTP code returned for type ListAllUsersOK +const ListAllUsersOKCode int = 200 + +/* +ListAllUsersOK Info about the users + +swagger:response listAllUsersOK +*/ +type ListAllUsersOK struct { + + /* + In: Body + */ + Payload []*models.DBUserInfo `json:"body,omitempty"` +} + +// NewListAllUsersOK creates ListAllUsersOK with default headers values +func NewListAllUsersOK() *ListAllUsersOK { + + return &ListAllUsersOK{} +} + +// WithPayload adds the payload to the list all users o k response +func (o *ListAllUsersOK) WithPayload(payload []*models.DBUserInfo) *ListAllUsersOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the list all users o k response +func (o *ListAllUsersOK) SetPayload(payload []*models.DBUserInfo) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ListAllUsersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = make([]*models.DBUserInfo, 0, 50) + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// ListAllUsersUnauthorizedCode is the HTTP code returned for type ListAllUsersUnauthorized +const ListAllUsersUnauthorizedCode int = 401 + +/* +ListAllUsersUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response listAllUsersUnauthorized +*/ +type ListAllUsersUnauthorized struct { +} + +// NewListAllUsersUnauthorized creates ListAllUsersUnauthorized with default headers values +func NewListAllUsersUnauthorized() *ListAllUsersUnauthorized { + + return &ListAllUsersUnauthorized{} +} + +// WriteResponse to the client +func (o *ListAllUsersUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ListAllUsersForbiddenCode is the HTTP code returned for type ListAllUsersForbidden +const ListAllUsersForbiddenCode int = 403 + +/* +ListAllUsersForbidden Forbidden + +swagger:response listAllUsersForbidden +*/ +type ListAllUsersForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewListAllUsersForbidden creates ListAllUsersForbidden with default headers values +func NewListAllUsersForbidden() *ListAllUsersForbidden { + + return &ListAllUsersForbidden{} +} + +// WithPayload adds the payload to the list all users forbidden response +func (o *ListAllUsersForbidden) WithPayload(payload *models.ErrorResponse) *ListAllUsersForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the list all users forbidden response +func (o *ListAllUsersForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ListAllUsersForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ListAllUsersInternalServerErrorCode is the HTTP code returned for type ListAllUsersInternalServerError +const ListAllUsersInternalServerErrorCode int = 500 + +/* +ListAllUsersInternalServerError An error 
has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response listAllUsersInternalServerError +*/ +type ListAllUsersInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewListAllUsersInternalServerError creates ListAllUsersInternalServerError with default headers values +func NewListAllUsersInternalServerError() *ListAllUsersInternalServerError { + + return &ListAllUsersInternalServerError{} +} + +// WithPayload adds the payload to the list all users internal server error response +func (o *ListAllUsersInternalServerError) WithPayload(payload *models.ErrorResponse) *ListAllUsersInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the list all users internal server error response +func (o *ListAllUsersInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ListAllUsersInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/list_all_users_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/list_all_users_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..9bef72ffdbb07e5916efe49905ad7bcdd623e2d7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/list_all_users_urlbuilder.go @@ -0,0 +1,116 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// 
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + + "github.com/go-openapi/swag" +) + +// ListAllUsersURL generates an URL for the list all users operation +type ListAllUsersURL struct { + IncludeLastUsedTime *bool + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ListAllUsersURL) WithBasePath(bp string) *ListAllUsersURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ListAllUsersURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ListAllUsersURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/users/db" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var includeLastUsedTimeQ string + if o.IncludeLastUsedTime != nil { + includeLastUsedTimeQ = swag.FormatBool(*o.IncludeLastUsedTime) + } + if includeLastUsedTimeQ != "" { + qs.Set("includeLastUsedTime", includeLastUsedTimeQ) + } + + _result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ListAllUsersURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ListAllUsersURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ListAllUsersURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ListAllUsersURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ListAllUsersURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ListAllUsersURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/rotate_user_api_key.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/rotate_user_api_key.go new file mode 100644 index 0000000000000000000000000000000000000000..2b838237635520779bedb65bfacba59db09eeba4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/rotate_user_api_key.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// RotateUserAPIKeyHandlerFunc turns a function with the right signature into a rotate user Api key handler +type RotateUserAPIKeyHandlerFunc func(RotateUserAPIKeyParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn RotateUserAPIKeyHandlerFunc) Handle(params RotateUserAPIKeyParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// RotateUserAPIKeyHandler interface for that can handle valid rotate user Api key params +type RotateUserAPIKeyHandler interface { + Handle(RotateUserAPIKeyParams, *models.Principal) middleware.Responder +} + +// NewRotateUserAPIKey creates a new http.Handler for the rotate user Api key operation +func NewRotateUserAPIKey(ctx *middleware.Context, handler RotateUserAPIKeyHandler) *RotateUserAPIKey { + return &RotateUserAPIKey{Context: ctx, Handler: handler} +} + +/* + RotateUserAPIKey swagger:route POST /users/db/{user_id}/rotate-key users rotateUserApiKey + +rotate user api key +*/ +type 
RotateUserAPIKey struct { + Context *middleware.Context + Handler RotateUserAPIKeyHandler +} + +func (o *RotateUserAPIKey) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewRotateUserAPIKeyParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/rotate_user_api_key_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/rotate_user_api_key_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..fe038567936ee7b84204ada447e5d450ea3b537e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/rotate_user_api_key_parameters.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" +) + +// NewRotateUserAPIKeyParams creates a new RotateUserAPIKeyParams object +// +// There are no default values defined in the spec. +func NewRotateUserAPIKeyParams() RotateUserAPIKeyParams { + + return RotateUserAPIKeyParams{} +} + +// RotateUserAPIKeyParams contains all the bound params for the rotate user Api key operation +// typically these are obtained from a http.Request +// +// swagger:parameters rotateUserApiKey +type RotateUserAPIKeyParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /*user id + Required: true + In: path + */ + UserID string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewRotateUserAPIKeyParams() beforehand. +func (o *RotateUserAPIKeyParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rUserID, rhkUserID, _ := route.Params.GetOK("user_id") + if err := o.bindUserID(rUserID, rhkUserID, route.Formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindUserID binds and validates parameter UserID from path. 
+func (o *RotateUserAPIKeyParams) bindUserID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + o.UserID = raw + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/rotate_user_api_key_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/rotate_user_api_key_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..cdba99c76fa30139d3aad563ecec7ebbc27cb0c1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/rotate_user_api_key_responses.go @@ -0,0 +1,300 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// RotateUserAPIKeyOKCode is the HTTP code returned for type RotateUserAPIKeyOK +const RotateUserAPIKeyOKCode int = 200 + +/* +RotateUserAPIKeyOK ApiKey successfully changed + +swagger:response rotateUserApiKeyOK +*/ +type RotateUserAPIKeyOK struct { + + /* + In: Body + */ + Payload *models.UserAPIKey `json:"body,omitempty"` +} + +// NewRotateUserAPIKeyOK creates RotateUserAPIKeyOK with default headers values +func NewRotateUserAPIKeyOK() *RotateUserAPIKeyOK { + + return &RotateUserAPIKeyOK{} +} + +// WithPayload adds the payload to the rotate user Api key o k response +func (o *RotateUserAPIKeyOK) WithPayload(payload *models.UserAPIKey) *RotateUserAPIKeyOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the rotate user Api key o k response +func (o *RotateUserAPIKeyOK) SetPayload(payload *models.UserAPIKey) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RotateUserAPIKeyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// RotateUserAPIKeyBadRequestCode is the HTTP code returned for type RotateUserAPIKeyBadRequest +const RotateUserAPIKeyBadRequestCode int = 400 + +/* +RotateUserAPIKeyBadRequest Malformed request. 
+ +swagger:response rotateUserApiKeyBadRequest +*/ +type RotateUserAPIKeyBadRequest struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRotateUserAPIKeyBadRequest creates RotateUserAPIKeyBadRequest with default headers values +func NewRotateUserAPIKeyBadRequest() *RotateUserAPIKeyBadRequest { + + return &RotateUserAPIKeyBadRequest{} +} + +// WithPayload adds the payload to the rotate user Api key bad request response +func (o *RotateUserAPIKeyBadRequest) WithPayload(payload *models.ErrorResponse) *RotateUserAPIKeyBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the rotate user Api key bad request response +func (o *RotateUserAPIKeyBadRequest) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RotateUserAPIKeyBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// RotateUserAPIKeyUnauthorizedCode is the HTTP code returned for type RotateUserAPIKeyUnauthorized +const RotateUserAPIKeyUnauthorizedCode int = 401 + +/* +RotateUserAPIKeyUnauthorized Unauthorized or invalid credentials. 
+ +swagger:response rotateUserApiKeyUnauthorized +*/ +type RotateUserAPIKeyUnauthorized struct { +} + +// NewRotateUserAPIKeyUnauthorized creates RotateUserAPIKeyUnauthorized with default headers values +func NewRotateUserAPIKeyUnauthorized() *RotateUserAPIKeyUnauthorized { + + return &RotateUserAPIKeyUnauthorized{} +} + +// WriteResponse to the client +func (o *RotateUserAPIKeyUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// RotateUserAPIKeyForbiddenCode is the HTTP code returned for type RotateUserAPIKeyForbidden +const RotateUserAPIKeyForbiddenCode int = 403 + +/* +RotateUserAPIKeyForbidden Forbidden + +swagger:response rotateUserApiKeyForbidden +*/ +type RotateUserAPIKeyForbidden struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRotateUserAPIKeyForbidden creates RotateUserAPIKeyForbidden with default headers values +func NewRotateUserAPIKeyForbidden() *RotateUserAPIKeyForbidden { + + return &RotateUserAPIKeyForbidden{} +} + +// WithPayload adds the payload to the rotate user Api key forbidden response +func (o *RotateUserAPIKeyForbidden) WithPayload(payload *models.ErrorResponse) *RotateUserAPIKeyForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the rotate user Api key forbidden response +func (o *RotateUserAPIKeyForbidden) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RotateUserAPIKeyForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// RotateUserAPIKeyNotFoundCode is the HTTP code returned for type RotateUserAPIKeyNotFound +const 
RotateUserAPIKeyNotFoundCode int = 404 + +/* +RotateUserAPIKeyNotFound user not found + +swagger:response rotateUserApiKeyNotFound +*/ +type RotateUserAPIKeyNotFound struct { +} + +// NewRotateUserAPIKeyNotFound creates RotateUserAPIKeyNotFound with default headers values +func NewRotateUserAPIKeyNotFound() *RotateUserAPIKeyNotFound { + + return &RotateUserAPIKeyNotFound{} +} + +// WriteResponse to the client +func (o *RotateUserAPIKeyNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// RotateUserAPIKeyUnprocessableEntityCode is the HTTP code returned for type RotateUserAPIKeyUnprocessableEntity +const RotateUserAPIKeyUnprocessableEntityCode int = 422 + +/* +RotateUserAPIKeyUnprocessableEntity Request body is well-formed (i.e., syntactically correct), but semantically erroneous. + +swagger:response rotateUserApiKeyUnprocessableEntity +*/ +type RotateUserAPIKeyUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRotateUserAPIKeyUnprocessableEntity creates RotateUserAPIKeyUnprocessableEntity with default headers values +func NewRotateUserAPIKeyUnprocessableEntity() *RotateUserAPIKeyUnprocessableEntity { + + return &RotateUserAPIKeyUnprocessableEntity{} +} + +// WithPayload adds the payload to the rotate user Api key unprocessable entity response +func (o *RotateUserAPIKeyUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *RotateUserAPIKeyUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the rotate user Api key unprocessable entity response +func (o *RotateUserAPIKeyUnprocessableEntity) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RotateUserAPIKeyUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + 
rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// RotateUserAPIKeyInternalServerErrorCode is the HTTP code returned for type RotateUserAPIKeyInternalServerError +const RotateUserAPIKeyInternalServerErrorCode int = 500 + +/* +RotateUserAPIKeyInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. + +swagger:response rotateUserApiKeyInternalServerError +*/ +type RotateUserAPIKeyInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewRotateUserAPIKeyInternalServerError creates RotateUserAPIKeyInternalServerError with default headers values +func NewRotateUserAPIKeyInternalServerError() *RotateUserAPIKeyInternalServerError { + + return &RotateUserAPIKeyInternalServerError{} +} + +// WithPayload adds the payload to the rotate user Api key internal server error response +func (o *RotateUserAPIKeyInternalServerError) WithPayload(payload *models.ErrorResponse) *RotateUserAPIKeyInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the rotate user Api key internal server error response +func (o *RotateUserAPIKeyInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RotateUserAPIKeyInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/rotate_user_api_key_urlbuilder.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/rotate_user_api_key_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..a233db1639e44ecdbd0cbe4a5bb186db3e07c464 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/users/rotate_user_api_key_urlbuilder.go @@ -0,0 +1,110 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package users + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" +) + +// RotateUserAPIKeyURL generates an URL for the rotate user Api key operation +type RotateUserAPIKeyURL struct { + UserID string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *RotateUserAPIKeyURL) WithBasePath(bp string) *RotateUserAPIKeyURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *RotateUserAPIKeyURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *RotateUserAPIKeyURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/users/db/{user_id}/rotate-key" + + userID := o.UserID + if userID != "" { + _path = strings.Replace(_path, "{user_id}", userID, -1) + } else { + return nil, errors.New("userId is required on RotateUserAPIKeyURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *RotateUserAPIKeyURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *RotateUserAPIKeyURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *RotateUserAPIKeyURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on RotateUserAPIKeyURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on RotateUserAPIKeyURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *RotateUserAPIKeyURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_api.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_api.go new file mode 100644 index 
0000000000000000000000000000000000000000..8639173f54b20e731199ff1c6cda854f5bc54f75 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_api.go @@ -0,0 +1,1496 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "net/http" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/runtime/security" + "github.com/go-openapi/runtime/yamlpc" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/authz" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/backups" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/batch" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/classifications" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/cluster" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/distributed_tasks" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/graphql" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/meta" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/nodes" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/objects" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/replication" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/schema" + 
"github.com/weaviate/weaviate/adapters/handlers/rest/operations/users" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/well_known" + "github.com/weaviate/weaviate/entities/models" +) + +// NewWeaviateAPI creates a new Weaviate instance +func NewWeaviateAPI(spec *loads.Document) *WeaviateAPI { + return &WeaviateAPI{ + handlers: make(map[string]map[string]http.Handler), + formats: strfmt.Default, + defaultConsumes: "application/json", + defaultProduces: "application/json", + customConsumers: make(map[string]runtime.Consumer), + customProducers: make(map[string]runtime.Producer), + PreServerShutdown: func() {}, + ServerShutdown: func() {}, + spec: spec, + useSwaggerUI: false, + ServeError: errors.ServeError, + BasicAuthenticator: security.BasicAuth, + APIKeyAuthenticator: security.APIKeyAuth, + BearerAuthenticator: security.BearerAuth, + + JSONConsumer: runtime.JSONConsumer(), + YamlConsumer: yamlpc.YAMLConsumer(), + + JSONProducer: runtime.JSONProducer(), + + WellKnownGetWellKnownOpenidConfigurationHandler: well_known.GetWellKnownOpenidConfigurationHandlerFunc(func(params well_known.GetWellKnownOpenidConfigurationParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation well_known.GetWellKnownOpenidConfiguration has not yet been implemented") + }), + UsersActivateUserHandler: users.ActivateUserHandlerFunc(func(params users.ActivateUserParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation users.ActivateUser has not yet been implemented") + }), + AuthzAddPermissionsHandler: authz.AddPermissionsHandlerFunc(func(params authz.AddPermissionsParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.AddPermissions has not yet been implemented") + }), + SchemaAliasesCreateHandler: schema.AliasesCreateHandlerFunc(func(params schema.AliasesCreateParams, principal *models.Principal) middleware.Responder { + 
return middleware.NotImplemented("operation schema.AliasesCreate has not yet been implemented") + }), + SchemaAliasesDeleteHandler: schema.AliasesDeleteHandlerFunc(func(params schema.AliasesDeleteParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.AliasesDelete has not yet been implemented") + }), + SchemaAliasesGetHandler: schema.AliasesGetHandlerFunc(func(params schema.AliasesGetParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.AliasesGet has not yet been implemented") + }), + SchemaAliasesGetAliasHandler: schema.AliasesGetAliasHandlerFunc(func(params schema.AliasesGetAliasParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.AliasesGetAlias has not yet been implemented") + }), + SchemaAliasesUpdateHandler: schema.AliasesUpdateHandlerFunc(func(params schema.AliasesUpdateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.AliasesUpdate has not yet been implemented") + }), + AuthzAssignRoleToGroupHandler: authz.AssignRoleToGroupHandlerFunc(func(params authz.AssignRoleToGroupParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.AssignRoleToGroup has not yet been implemented") + }), + AuthzAssignRoleToUserHandler: authz.AssignRoleToUserHandlerFunc(func(params authz.AssignRoleToUserParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.AssignRoleToUser has not yet been implemented") + }), + BackupsBackupsCancelHandler: backups.BackupsCancelHandlerFunc(func(params backups.BackupsCancelParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation backups.BackupsCancel has not yet been implemented") + }), + BackupsBackupsCreateHandler: 
backups.BackupsCreateHandlerFunc(func(params backups.BackupsCreateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation backups.BackupsCreate has not yet been implemented") + }), + BackupsBackupsCreateStatusHandler: backups.BackupsCreateStatusHandlerFunc(func(params backups.BackupsCreateStatusParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation backups.BackupsCreateStatus has not yet been implemented") + }), + BackupsBackupsListHandler: backups.BackupsListHandlerFunc(func(params backups.BackupsListParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation backups.BackupsList has not yet been implemented") + }), + BackupsBackupsRestoreHandler: backups.BackupsRestoreHandlerFunc(func(params backups.BackupsRestoreParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation backups.BackupsRestore has not yet been implemented") + }), + BackupsBackupsRestoreStatusHandler: backups.BackupsRestoreStatusHandlerFunc(func(params backups.BackupsRestoreStatusParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation backups.BackupsRestoreStatus has not yet been implemented") + }), + BatchBatchObjectsCreateHandler: batch.BatchObjectsCreateHandlerFunc(func(params batch.BatchObjectsCreateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation batch.BatchObjectsCreate has not yet been implemented") + }), + BatchBatchObjectsDeleteHandler: batch.BatchObjectsDeleteHandlerFunc(func(params batch.BatchObjectsDeleteParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation batch.BatchObjectsDelete has not yet been implemented") + }), + BatchBatchReferencesCreateHandler: batch.BatchReferencesCreateHandlerFunc(func(params batch.BatchReferencesCreateParams, 
principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation batch.BatchReferencesCreate has not yet been implemented") + }), + ReplicationCancelReplicationHandler: replication.CancelReplicationHandlerFunc(func(params replication.CancelReplicationParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation replication.CancelReplication has not yet been implemented") + }), + ClassificationsClassificationsGetHandler: classifications.ClassificationsGetHandlerFunc(func(params classifications.ClassificationsGetParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation classifications.ClassificationsGet has not yet been implemented") + }), + ClassificationsClassificationsPostHandler: classifications.ClassificationsPostHandlerFunc(func(params classifications.ClassificationsPostParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation classifications.ClassificationsPost has not yet been implemented") + }), + ClusterClusterGetStatisticsHandler: cluster.ClusterGetStatisticsHandlerFunc(func(params cluster.ClusterGetStatisticsParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation cluster.ClusterGetStatistics has not yet been implemented") + }), + AuthzCreateRoleHandler: authz.CreateRoleHandlerFunc(func(params authz.CreateRoleParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.CreateRole has not yet been implemented") + }), + UsersCreateUserHandler: users.CreateUserHandlerFunc(func(params users.CreateUserParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation users.CreateUser has not yet been implemented") + }), + UsersDeactivateUserHandler: users.DeactivateUserHandlerFunc(func(params users.DeactivateUserParams, principal *models.Principal) 
middleware.Responder { + return middleware.NotImplemented("operation users.DeactivateUser has not yet been implemented") + }), + ReplicationDeleteAllReplicationsHandler: replication.DeleteAllReplicationsHandlerFunc(func(params replication.DeleteAllReplicationsParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation replication.DeleteAllReplications has not yet been implemented") + }), + ReplicationDeleteReplicationHandler: replication.DeleteReplicationHandlerFunc(func(params replication.DeleteReplicationParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation replication.DeleteReplication has not yet been implemented") + }), + AuthzDeleteRoleHandler: authz.DeleteRoleHandlerFunc(func(params authz.DeleteRoleParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.DeleteRole has not yet been implemented") + }), + UsersDeleteUserHandler: users.DeleteUserHandlerFunc(func(params users.DeleteUserParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation users.DeleteUser has not yet been implemented") + }), + DistributedTasksDistributedTasksGetHandler: distributed_tasks.DistributedTasksGetHandlerFunc(func(params distributed_tasks.DistributedTasksGetParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation distributed_tasks.DistributedTasksGet has not yet been implemented") + }), + ReplicationForceDeleteReplicationsHandler: replication.ForceDeleteReplicationsHandlerFunc(func(params replication.ForceDeleteReplicationsParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation replication.ForceDeleteReplications has not yet been implemented") + }), + ReplicationGetCollectionShardingStateHandler: replication.GetCollectionShardingStateHandlerFunc(func(params 
replication.GetCollectionShardingStateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation replication.GetCollectionShardingState has not yet been implemented") + }), + AuthzGetGroupsHandler: authz.GetGroupsHandlerFunc(func(params authz.GetGroupsParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.GetGroups has not yet been implemented") + }), + AuthzGetGroupsForRoleHandler: authz.GetGroupsForRoleHandlerFunc(func(params authz.GetGroupsForRoleParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.GetGroupsForRole has not yet been implemented") + }), + UsersGetOwnInfoHandler: users.GetOwnInfoHandlerFunc(func(params users.GetOwnInfoParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation users.GetOwnInfo has not yet been implemented") + }), + AuthzGetRoleHandler: authz.GetRoleHandlerFunc(func(params authz.GetRoleParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.GetRole has not yet been implemented") + }), + AuthzGetRolesHandler: authz.GetRolesHandlerFunc(func(params authz.GetRolesParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.GetRoles has not yet been implemented") + }), + AuthzGetRolesForGroupHandler: authz.GetRolesForGroupHandlerFunc(func(params authz.GetRolesForGroupParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.GetRolesForGroup has not yet been implemented") + }), + AuthzGetRolesForUserHandler: authz.GetRolesForUserHandlerFunc(func(params authz.GetRolesForUserParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.GetRolesForUser has not yet been implemented") + }), + AuthzGetRolesForUserDeprecatedHandler: 
authz.GetRolesForUserDeprecatedHandlerFunc(func(params authz.GetRolesForUserDeprecatedParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.GetRolesForUserDeprecated has not yet been implemented") + }), + UsersGetUserInfoHandler: users.GetUserInfoHandlerFunc(func(params users.GetUserInfoParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation users.GetUserInfo has not yet been implemented") + }), + AuthzGetUsersForRoleHandler: authz.GetUsersForRoleHandlerFunc(func(params authz.GetUsersForRoleParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.GetUsersForRole has not yet been implemented") + }), + AuthzGetUsersForRoleDeprecatedHandler: authz.GetUsersForRoleDeprecatedHandlerFunc(func(params authz.GetUsersForRoleDeprecatedParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.GetUsersForRoleDeprecated has not yet been implemented") + }), + GraphqlGraphqlBatchHandler: graphql.GraphqlBatchHandlerFunc(func(params graphql.GraphqlBatchParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation graphql.GraphqlBatch has not yet been implemented") + }), + GraphqlGraphqlPostHandler: graphql.GraphqlPostHandlerFunc(func(params graphql.GraphqlPostParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation graphql.GraphqlPost has not yet been implemented") + }), + AuthzHasPermissionHandler: authz.HasPermissionHandlerFunc(func(params authz.HasPermissionParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.HasPermission has not yet been implemented") + }), + UsersListAllUsersHandler: users.ListAllUsersHandlerFunc(func(params users.ListAllUsersParams, principal *models.Principal) middleware.Responder { + return 
middleware.NotImplemented("operation users.ListAllUsers has not yet been implemented") + }), + ReplicationListReplicationHandler: replication.ListReplicationHandlerFunc(func(params replication.ListReplicationParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation replication.ListReplication has not yet been implemented") + }), + MetaMetaGetHandler: meta.MetaGetHandlerFunc(func(params meta.MetaGetParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation meta.MetaGet has not yet been implemented") + }), + NodesNodesGetHandler: nodes.NodesGetHandlerFunc(func(params nodes.NodesGetParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation nodes.NodesGet has not yet been implemented") + }), + NodesNodesGetClassHandler: nodes.NodesGetClassHandlerFunc(func(params nodes.NodesGetClassParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation nodes.NodesGetClass has not yet been implemented") + }), + ObjectsObjectsClassDeleteHandler: objects.ObjectsClassDeleteHandlerFunc(func(params objects.ObjectsClassDeleteParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsClassDelete has not yet been implemented") + }), + ObjectsObjectsClassGetHandler: objects.ObjectsClassGetHandlerFunc(func(params objects.ObjectsClassGetParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsClassGet has not yet been implemented") + }), + ObjectsObjectsClassHeadHandler: objects.ObjectsClassHeadHandlerFunc(func(params objects.ObjectsClassHeadParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsClassHead has not yet been implemented") + }), + ObjectsObjectsClassPatchHandler: 
objects.ObjectsClassPatchHandlerFunc(func(params objects.ObjectsClassPatchParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsClassPatch has not yet been implemented") + }), + ObjectsObjectsClassPutHandler: objects.ObjectsClassPutHandlerFunc(func(params objects.ObjectsClassPutParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsClassPut has not yet been implemented") + }), + ObjectsObjectsClassReferencesCreateHandler: objects.ObjectsClassReferencesCreateHandlerFunc(func(params objects.ObjectsClassReferencesCreateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsClassReferencesCreate has not yet been implemented") + }), + ObjectsObjectsClassReferencesDeleteHandler: objects.ObjectsClassReferencesDeleteHandlerFunc(func(params objects.ObjectsClassReferencesDeleteParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsClassReferencesDelete has not yet been implemented") + }), + ObjectsObjectsClassReferencesPutHandler: objects.ObjectsClassReferencesPutHandlerFunc(func(params objects.ObjectsClassReferencesPutParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsClassReferencesPut has not yet been implemented") + }), + ObjectsObjectsCreateHandler: objects.ObjectsCreateHandlerFunc(func(params objects.ObjectsCreateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsCreate has not yet been implemented") + }), + ObjectsObjectsDeleteHandler: objects.ObjectsDeleteHandlerFunc(func(params objects.ObjectsDeleteParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsDelete has not yet been implemented") + }), + 
ObjectsObjectsGetHandler: objects.ObjectsGetHandlerFunc(func(params objects.ObjectsGetParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsGet has not yet been implemented") + }), + ObjectsObjectsHeadHandler: objects.ObjectsHeadHandlerFunc(func(params objects.ObjectsHeadParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsHead has not yet been implemented") + }), + ObjectsObjectsListHandler: objects.ObjectsListHandlerFunc(func(params objects.ObjectsListParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsList has not yet been implemented") + }), + ObjectsObjectsPatchHandler: objects.ObjectsPatchHandlerFunc(func(params objects.ObjectsPatchParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsPatch has not yet been implemented") + }), + ObjectsObjectsReferencesCreateHandler: objects.ObjectsReferencesCreateHandlerFunc(func(params objects.ObjectsReferencesCreateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsReferencesCreate has not yet been implemented") + }), + ObjectsObjectsReferencesDeleteHandler: objects.ObjectsReferencesDeleteHandlerFunc(func(params objects.ObjectsReferencesDeleteParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsReferencesDelete has not yet been implemented") + }), + ObjectsObjectsReferencesUpdateHandler: objects.ObjectsReferencesUpdateHandlerFunc(func(params objects.ObjectsReferencesUpdateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsReferencesUpdate has not yet been implemented") + }), + ObjectsObjectsUpdateHandler: objects.ObjectsUpdateHandlerFunc(func(params 
objects.ObjectsUpdateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsUpdate has not yet been implemented") + }), + ObjectsObjectsValidateHandler: objects.ObjectsValidateHandlerFunc(func(params objects.ObjectsValidateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation objects.ObjectsValidate has not yet been implemented") + }), + AuthzRemovePermissionsHandler: authz.RemovePermissionsHandlerFunc(func(params authz.RemovePermissionsParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.RemovePermissions has not yet been implemented") + }), + ReplicationReplicateHandler: replication.ReplicateHandlerFunc(func(params replication.ReplicateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation replication.Replicate has not yet been implemented") + }), + ReplicationReplicationDetailsHandler: replication.ReplicationDetailsHandlerFunc(func(params replication.ReplicationDetailsParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation replication.ReplicationDetails has not yet been implemented") + }), + AuthzRevokeRoleFromGroupHandler: authz.RevokeRoleFromGroupHandlerFunc(func(params authz.RevokeRoleFromGroupParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.RevokeRoleFromGroup has not yet been implemented") + }), + AuthzRevokeRoleFromUserHandler: authz.RevokeRoleFromUserHandlerFunc(func(params authz.RevokeRoleFromUserParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation authz.RevokeRoleFromUser has not yet been implemented") + }), + UsersRotateUserAPIKeyHandler: users.RotateUserAPIKeyHandlerFunc(func(params users.RotateUserAPIKeyParams, principal *models.Principal) middleware.Responder { + 
return middleware.NotImplemented("operation users.RotateUserAPIKey has not yet been implemented") + }), + SchemaSchemaDumpHandler: schema.SchemaDumpHandlerFunc(func(params schema.SchemaDumpParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.SchemaDump has not yet been implemented") + }), + SchemaSchemaObjectsCreateHandler: schema.SchemaObjectsCreateHandlerFunc(func(params schema.SchemaObjectsCreateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.SchemaObjectsCreate has not yet been implemented") + }), + SchemaSchemaObjectsDeleteHandler: schema.SchemaObjectsDeleteHandlerFunc(func(params schema.SchemaObjectsDeleteParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.SchemaObjectsDelete has not yet been implemented") + }), + SchemaSchemaObjectsGetHandler: schema.SchemaObjectsGetHandlerFunc(func(params schema.SchemaObjectsGetParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.SchemaObjectsGet has not yet been implemented") + }), + SchemaSchemaObjectsPropertiesAddHandler: schema.SchemaObjectsPropertiesAddHandlerFunc(func(params schema.SchemaObjectsPropertiesAddParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.SchemaObjectsPropertiesAdd has not yet been implemented") + }), + SchemaSchemaObjectsShardsGetHandler: schema.SchemaObjectsShardsGetHandlerFunc(func(params schema.SchemaObjectsShardsGetParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.SchemaObjectsShardsGet has not yet been implemented") + }), + SchemaSchemaObjectsShardsUpdateHandler: schema.SchemaObjectsShardsUpdateHandlerFunc(func(params schema.SchemaObjectsShardsUpdateParams, principal *models.Principal) middleware.Responder { + return 
middleware.NotImplemented("operation schema.SchemaObjectsShardsUpdate has not yet been implemented") + }), + SchemaSchemaObjectsUpdateHandler: schema.SchemaObjectsUpdateHandlerFunc(func(params schema.SchemaObjectsUpdateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.SchemaObjectsUpdate has not yet been implemented") + }), + SchemaTenantExistsHandler: schema.TenantExistsHandlerFunc(func(params schema.TenantExistsParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.TenantExists has not yet been implemented") + }), + SchemaTenantsCreateHandler: schema.TenantsCreateHandlerFunc(func(params schema.TenantsCreateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.TenantsCreate has not yet been implemented") + }), + SchemaTenantsDeleteHandler: schema.TenantsDeleteHandlerFunc(func(params schema.TenantsDeleteParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.TenantsDelete has not yet been implemented") + }), + SchemaTenantsGetHandler: schema.TenantsGetHandlerFunc(func(params schema.TenantsGetParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.TenantsGet has not yet been implemented") + }), + SchemaTenantsGetOneHandler: schema.TenantsGetOneHandlerFunc(func(params schema.TenantsGetOneParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.TenantsGetOne has not yet been implemented") + }), + SchemaTenantsUpdateHandler: schema.TenantsUpdateHandlerFunc(func(params schema.TenantsUpdateParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation schema.TenantsUpdate has not yet been implemented") + }), + WeaviateRootHandler: WeaviateRootHandlerFunc(func(params WeaviateRootParams, 
principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation WeaviateRoot has not yet been implemented") + }), + WeaviateWellknownLivenessHandler: WeaviateWellknownLivenessHandlerFunc(func(params WeaviateWellknownLivenessParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation WeaviateWellknownLiveness has not yet been implemented") + }), + WeaviateWellknownReadinessHandler: WeaviateWellknownReadinessHandlerFunc(func(params WeaviateWellknownReadinessParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation WeaviateWellknownReadiness has not yet been implemented") + }), + + OidcAuth: func(token string, scopes []string) (*models.Principal, error) { + return nil, errors.NotImplemented("oauth2 bearer auth (oidc) has not yet been implemented") + }, + // default authorizer is authorized meaning no requests are blocked + APIAuthorizer: security.Authorized(), + } +} + +/* +WeaviateAPI # Introduction + + Weaviate is an open source, AI-native vector database that helps developers create intuitive and reliable AI-powered applications. + ### Base Path + +The base path for the Weaviate server is structured as `[YOUR-WEAVIATE-HOST]:[PORT]/v1`. As an example, if you wish to access the `schema` endpoint on a local instance, you would navigate to `http://localhost:8080/v1/schema`. Ensure you replace `[YOUR-WEAVIATE-HOST]` and `[PORT]` with your actual server host and port number respectively. + + ### Questions? + +If you have any comments or questions, please feel free to reach out to us at the community forum [https://forum.weaviate.io/](https://forum.weaviate.io/). +### Issues? +If you find a bug or want to file a feature request, please open an issue on our GitHub repository for [Weaviate](https://github.com/weaviate/weaviate). +### Want more documentation? 
+For a quickstart, code examples, concepts and more, please visit our [documentation page](https://weaviate.io/developers/weaviate). +*/ +type WeaviateAPI struct { + spec *loads.Document + context *middleware.Context + handlers map[string]map[string]http.Handler + formats strfmt.Registry + customConsumers map[string]runtime.Consumer + customProducers map[string]runtime.Producer + defaultConsumes string + defaultProduces string + Middleware func(middleware.Builder) http.Handler + useSwaggerUI bool + + // BasicAuthenticator generates a runtime.Authenticator from the supplied basic auth function. + // It has a default implementation in the security package, however you can replace it for your particular usage. + BasicAuthenticator func(security.UserPassAuthentication) runtime.Authenticator + + // APIKeyAuthenticator generates a runtime.Authenticator from the supplied token auth function. + // It has a default implementation in the security package, however you can replace it for your particular usage. + APIKeyAuthenticator func(string, string, security.TokenAuthentication) runtime.Authenticator + + // BearerAuthenticator generates a runtime.Authenticator from the supplied bearer token auth function. + // It has a default implementation in the security package, however you can replace it for your particular usage. 
+ BearerAuthenticator func(string, security.ScopedTokenAuthentication) runtime.Authenticator + + // JSONConsumer registers a consumer for the following mime types: + // - application/json + JSONConsumer runtime.Consumer + // YamlConsumer registers a consumer for the following mime types: + // - application/yaml + YamlConsumer runtime.Consumer + + // JSONProducer registers a producer for the following mime types: + // - application/json + JSONProducer runtime.Producer + + // OidcAuth registers a function that takes an access token and a collection of required scopes and returns a principal + // it performs authentication based on an oauth2 bearer token provided in the request + OidcAuth func(string, []string) (*models.Principal, error) + + // APIAuthorizer provides access control (ACL/RBAC/ABAC) by providing access to the request and authenticated principal + APIAuthorizer runtime.Authorizer + + // WellKnownGetWellKnownOpenidConfigurationHandler sets the operation handler for the get well known openid configuration operation + WellKnownGetWellKnownOpenidConfigurationHandler well_known.GetWellKnownOpenidConfigurationHandler + // UsersActivateUserHandler sets the operation handler for the activate user operation + UsersActivateUserHandler users.ActivateUserHandler + // AuthzAddPermissionsHandler sets the operation handler for the add permissions operation + AuthzAddPermissionsHandler authz.AddPermissionsHandler + // SchemaAliasesCreateHandler sets the operation handler for the aliases create operation + SchemaAliasesCreateHandler schema.AliasesCreateHandler + // SchemaAliasesDeleteHandler sets the operation handler for the aliases delete operation + SchemaAliasesDeleteHandler schema.AliasesDeleteHandler + // SchemaAliasesGetHandler sets the operation handler for the aliases get operation + SchemaAliasesGetHandler schema.AliasesGetHandler + // SchemaAliasesGetAliasHandler sets the operation handler for the aliases get alias operation + SchemaAliasesGetAliasHandler 
schema.AliasesGetAliasHandler + // SchemaAliasesUpdateHandler sets the operation handler for the aliases update operation + SchemaAliasesUpdateHandler schema.AliasesUpdateHandler + // AuthzAssignRoleToGroupHandler sets the operation handler for the assign role to group operation + AuthzAssignRoleToGroupHandler authz.AssignRoleToGroupHandler + // AuthzAssignRoleToUserHandler sets the operation handler for the assign role to user operation + AuthzAssignRoleToUserHandler authz.AssignRoleToUserHandler + // BackupsBackupsCancelHandler sets the operation handler for the backups cancel operation + BackupsBackupsCancelHandler backups.BackupsCancelHandler + // BackupsBackupsCreateHandler sets the operation handler for the backups create operation + BackupsBackupsCreateHandler backups.BackupsCreateHandler + // BackupsBackupsCreateStatusHandler sets the operation handler for the backups create status operation + BackupsBackupsCreateStatusHandler backups.BackupsCreateStatusHandler + // BackupsBackupsListHandler sets the operation handler for the backups list operation + BackupsBackupsListHandler backups.BackupsListHandler + // BackupsBackupsRestoreHandler sets the operation handler for the backups restore operation + BackupsBackupsRestoreHandler backups.BackupsRestoreHandler + // BackupsBackupsRestoreStatusHandler sets the operation handler for the backups restore status operation + BackupsBackupsRestoreStatusHandler backups.BackupsRestoreStatusHandler + // BatchBatchObjectsCreateHandler sets the operation handler for the batch objects create operation + BatchBatchObjectsCreateHandler batch.BatchObjectsCreateHandler + // BatchBatchObjectsDeleteHandler sets the operation handler for the batch objects delete operation + BatchBatchObjectsDeleteHandler batch.BatchObjectsDeleteHandler + // BatchBatchReferencesCreateHandler sets the operation handler for the batch references create operation + BatchBatchReferencesCreateHandler batch.BatchReferencesCreateHandler + // 
ReplicationCancelReplicationHandler sets the operation handler for the cancel replication operation + ReplicationCancelReplicationHandler replication.CancelReplicationHandler + // ClassificationsClassificationsGetHandler sets the operation handler for the classifications get operation + ClassificationsClassificationsGetHandler classifications.ClassificationsGetHandler + // ClassificationsClassificationsPostHandler sets the operation handler for the classifications post operation + ClassificationsClassificationsPostHandler classifications.ClassificationsPostHandler + // ClusterClusterGetStatisticsHandler sets the operation handler for the cluster get statistics operation + ClusterClusterGetStatisticsHandler cluster.ClusterGetStatisticsHandler + // AuthzCreateRoleHandler sets the operation handler for the create role operation + AuthzCreateRoleHandler authz.CreateRoleHandler + // UsersCreateUserHandler sets the operation handler for the create user operation + UsersCreateUserHandler users.CreateUserHandler + // UsersDeactivateUserHandler sets the operation handler for the deactivate user operation + UsersDeactivateUserHandler users.DeactivateUserHandler + // ReplicationDeleteAllReplicationsHandler sets the operation handler for the delete all replications operation + ReplicationDeleteAllReplicationsHandler replication.DeleteAllReplicationsHandler + // ReplicationDeleteReplicationHandler sets the operation handler for the delete replication operation + ReplicationDeleteReplicationHandler replication.DeleteReplicationHandler + // AuthzDeleteRoleHandler sets the operation handler for the delete role operation + AuthzDeleteRoleHandler authz.DeleteRoleHandler + // UsersDeleteUserHandler sets the operation handler for the delete user operation + UsersDeleteUserHandler users.DeleteUserHandler + // DistributedTasksDistributedTasksGetHandler sets the operation handler for the distributed tasks get operation + DistributedTasksDistributedTasksGetHandler 
distributed_tasks.DistributedTasksGetHandler + // ReplicationForceDeleteReplicationsHandler sets the operation handler for the force delete replications operation + ReplicationForceDeleteReplicationsHandler replication.ForceDeleteReplicationsHandler + // ReplicationGetCollectionShardingStateHandler sets the operation handler for the get collection sharding state operation + ReplicationGetCollectionShardingStateHandler replication.GetCollectionShardingStateHandler + // AuthzGetGroupsHandler sets the operation handler for the get groups operation + AuthzGetGroupsHandler authz.GetGroupsHandler + // AuthzGetGroupsForRoleHandler sets the operation handler for the get groups for role operation + AuthzGetGroupsForRoleHandler authz.GetGroupsForRoleHandler + // UsersGetOwnInfoHandler sets the operation handler for the get own info operation + UsersGetOwnInfoHandler users.GetOwnInfoHandler + // AuthzGetRoleHandler sets the operation handler for the get role operation + AuthzGetRoleHandler authz.GetRoleHandler + // AuthzGetRolesHandler sets the operation handler for the get roles operation + AuthzGetRolesHandler authz.GetRolesHandler + // AuthzGetRolesForGroupHandler sets the operation handler for the get roles for group operation + AuthzGetRolesForGroupHandler authz.GetRolesForGroupHandler + // AuthzGetRolesForUserHandler sets the operation handler for the get roles for user operation + AuthzGetRolesForUserHandler authz.GetRolesForUserHandler + // AuthzGetRolesForUserDeprecatedHandler sets the operation handler for the get roles for user deprecated operation + AuthzGetRolesForUserDeprecatedHandler authz.GetRolesForUserDeprecatedHandler + // UsersGetUserInfoHandler sets the operation handler for the get user info operation + UsersGetUserInfoHandler users.GetUserInfoHandler + // AuthzGetUsersForRoleHandler sets the operation handler for the get users for role operation + AuthzGetUsersForRoleHandler authz.GetUsersForRoleHandler + // AuthzGetUsersForRoleDeprecatedHandler sets 
the operation handler for the get users for role deprecated operation + AuthzGetUsersForRoleDeprecatedHandler authz.GetUsersForRoleDeprecatedHandler + // GraphqlGraphqlBatchHandler sets the operation handler for the graphql batch operation + GraphqlGraphqlBatchHandler graphql.GraphqlBatchHandler + // GraphqlGraphqlPostHandler sets the operation handler for the graphql post operation + GraphqlGraphqlPostHandler graphql.GraphqlPostHandler + // AuthzHasPermissionHandler sets the operation handler for the has permission operation + AuthzHasPermissionHandler authz.HasPermissionHandler + // UsersListAllUsersHandler sets the operation handler for the list all users operation + UsersListAllUsersHandler users.ListAllUsersHandler + // ReplicationListReplicationHandler sets the operation handler for the list replication operation + ReplicationListReplicationHandler replication.ListReplicationHandler + // MetaMetaGetHandler sets the operation handler for the meta get operation + MetaMetaGetHandler meta.MetaGetHandler + // NodesNodesGetHandler sets the operation handler for the nodes get operation + NodesNodesGetHandler nodes.NodesGetHandler + // NodesNodesGetClassHandler sets the operation handler for the nodes get class operation + NodesNodesGetClassHandler nodes.NodesGetClassHandler + // ObjectsObjectsClassDeleteHandler sets the operation handler for the objects class delete operation + ObjectsObjectsClassDeleteHandler objects.ObjectsClassDeleteHandler + // ObjectsObjectsClassGetHandler sets the operation handler for the objects class get operation + ObjectsObjectsClassGetHandler objects.ObjectsClassGetHandler + // ObjectsObjectsClassHeadHandler sets the operation handler for the objects class head operation + ObjectsObjectsClassHeadHandler objects.ObjectsClassHeadHandler + // ObjectsObjectsClassPatchHandler sets the operation handler for the objects class patch operation + ObjectsObjectsClassPatchHandler objects.ObjectsClassPatchHandler + // ObjectsObjectsClassPutHandler 
sets the operation handler for the objects class put operation + ObjectsObjectsClassPutHandler objects.ObjectsClassPutHandler + // ObjectsObjectsClassReferencesCreateHandler sets the operation handler for the objects class references create operation + ObjectsObjectsClassReferencesCreateHandler objects.ObjectsClassReferencesCreateHandler + // ObjectsObjectsClassReferencesDeleteHandler sets the operation handler for the objects class references delete operation + ObjectsObjectsClassReferencesDeleteHandler objects.ObjectsClassReferencesDeleteHandler + // ObjectsObjectsClassReferencesPutHandler sets the operation handler for the objects class references put operation + ObjectsObjectsClassReferencesPutHandler objects.ObjectsClassReferencesPutHandler + // ObjectsObjectsCreateHandler sets the operation handler for the objects create operation + ObjectsObjectsCreateHandler objects.ObjectsCreateHandler + // ObjectsObjectsDeleteHandler sets the operation handler for the objects delete operation + ObjectsObjectsDeleteHandler objects.ObjectsDeleteHandler + // ObjectsObjectsGetHandler sets the operation handler for the objects get operation + ObjectsObjectsGetHandler objects.ObjectsGetHandler + // ObjectsObjectsHeadHandler sets the operation handler for the objects head operation + ObjectsObjectsHeadHandler objects.ObjectsHeadHandler + // ObjectsObjectsListHandler sets the operation handler for the objects list operation + ObjectsObjectsListHandler objects.ObjectsListHandler + // ObjectsObjectsPatchHandler sets the operation handler for the objects patch operation + ObjectsObjectsPatchHandler objects.ObjectsPatchHandler + // ObjectsObjectsReferencesCreateHandler sets the operation handler for the objects references create operation + ObjectsObjectsReferencesCreateHandler objects.ObjectsReferencesCreateHandler + // ObjectsObjectsReferencesDeleteHandler sets the operation handler for the objects references delete operation + ObjectsObjectsReferencesDeleteHandler 
objects.ObjectsReferencesDeleteHandler + // ObjectsObjectsReferencesUpdateHandler sets the operation handler for the objects references update operation + ObjectsObjectsReferencesUpdateHandler objects.ObjectsReferencesUpdateHandler + // ObjectsObjectsUpdateHandler sets the operation handler for the objects update operation + ObjectsObjectsUpdateHandler objects.ObjectsUpdateHandler + // ObjectsObjectsValidateHandler sets the operation handler for the objects validate operation + ObjectsObjectsValidateHandler objects.ObjectsValidateHandler + // AuthzRemovePermissionsHandler sets the operation handler for the remove permissions operation + AuthzRemovePermissionsHandler authz.RemovePermissionsHandler + // ReplicationReplicateHandler sets the operation handler for the replicate operation + ReplicationReplicateHandler replication.ReplicateHandler + // ReplicationReplicationDetailsHandler sets the operation handler for the replication details operation + ReplicationReplicationDetailsHandler replication.ReplicationDetailsHandler + // AuthzRevokeRoleFromGroupHandler sets the operation handler for the revoke role from group operation + AuthzRevokeRoleFromGroupHandler authz.RevokeRoleFromGroupHandler + // AuthzRevokeRoleFromUserHandler sets the operation handler for the revoke role from user operation + AuthzRevokeRoleFromUserHandler authz.RevokeRoleFromUserHandler + // UsersRotateUserAPIKeyHandler sets the operation handler for the rotate user Api key operation + UsersRotateUserAPIKeyHandler users.RotateUserAPIKeyHandler + // SchemaSchemaDumpHandler sets the operation handler for the schema dump operation + SchemaSchemaDumpHandler schema.SchemaDumpHandler + // SchemaSchemaObjectsCreateHandler sets the operation handler for the schema objects create operation + SchemaSchemaObjectsCreateHandler schema.SchemaObjectsCreateHandler + // SchemaSchemaObjectsDeleteHandler sets the operation handler for the schema objects delete operation + SchemaSchemaObjectsDeleteHandler 
schema.SchemaObjectsDeleteHandler + // SchemaSchemaObjectsGetHandler sets the operation handler for the schema objects get operation + SchemaSchemaObjectsGetHandler schema.SchemaObjectsGetHandler + // SchemaSchemaObjectsPropertiesAddHandler sets the operation handler for the schema objects properties add operation + SchemaSchemaObjectsPropertiesAddHandler schema.SchemaObjectsPropertiesAddHandler + // SchemaSchemaObjectsShardsGetHandler sets the operation handler for the schema objects shards get operation + SchemaSchemaObjectsShardsGetHandler schema.SchemaObjectsShardsGetHandler + // SchemaSchemaObjectsShardsUpdateHandler sets the operation handler for the schema objects shards update operation + SchemaSchemaObjectsShardsUpdateHandler schema.SchemaObjectsShardsUpdateHandler + // SchemaSchemaObjectsUpdateHandler sets the operation handler for the schema objects update operation + SchemaSchemaObjectsUpdateHandler schema.SchemaObjectsUpdateHandler + // SchemaTenantExistsHandler sets the operation handler for the tenant exists operation + SchemaTenantExistsHandler schema.TenantExistsHandler + // SchemaTenantsCreateHandler sets the operation handler for the tenants create operation + SchemaTenantsCreateHandler schema.TenantsCreateHandler + // SchemaTenantsDeleteHandler sets the operation handler for the tenants delete operation + SchemaTenantsDeleteHandler schema.TenantsDeleteHandler + // SchemaTenantsGetHandler sets the operation handler for the tenants get operation + SchemaTenantsGetHandler schema.TenantsGetHandler + // SchemaTenantsGetOneHandler sets the operation handler for the tenants get one operation + SchemaTenantsGetOneHandler schema.TenantsGetOneHandler + // SchemaTenantsUpdateHandler sets the operation handler for the tenants update operation + SchemaTenantsUpdateHandler schema.TenantsUpdateHandler + // WeaviateRootHandler sets the operation handler for the weaviate root operation + WeaviateRootHandler WeaviateRootHandler + // 
WeaviateWellknownLivenessHandler sets the operation handler for the weaviate wellknown liveness operation + WeaviateWellknownLivenessHandler WeaviateWellknownLivenessHandler + // WeaviateWellknownReadinessHandler sets the operation handler for the weaviate wellknown readiness operation + WeaviateWellknownReadinessHandler WeaviateWellknownReadinessHandler + + // ServeError is called when an error is received, there is a default handler + // but you can set your own with this + ServeError func(http.ResponseWriter, *http.Request, error) + + // PreServerShutdown is called before the HTTP(S) server is shutdown + // This allows for custom functions to get executed before the HTTP(S) server stops accepting traffic + PreServerShutdown func() + + // ServerShutdown is called when the HTTP(S) server is shut down and done + // handling all active connections and does not accept connections any more + ServerShutdown func() + + // Custom command line argument groups with their descriptions + CommandLineOptionsGroups []swag.CommandLineOptionsGroup + + // User defined logger function. + Logger func(string, ...interface{}) +} + +// UseRedoc for documentation at /docs +func (o *WeaviateAPI) UseRedoc() { + o.useSwaggerUI = false +} + +// UseSwaggerUI for documentation at /docs +func (o *WeaviateAPI) UseSwaggerUI() { + o.useSwaggerUI = true +} + +// SetDefaultProduces sets the default produces media type +func (o *WeaviateAPI) SetDefaultProduces(mediaType string) { + o.defaultProduces = mediaType +} + +// SetDefaultConsumes returns the default consumes media type +func (o *WeaviateAPI) SetDefaultConsumes(mediaType string) { + o.defaultConsumes = mediaType +} + +// SetSpec sets a spec that will be served for the clients. 
+func (o *WeaviateAPI) SetSpec(spec *loads.Document) { + o.spec = spec +} + +// DefaultProduces returns the default produces media type +func (o *WeaviateAPI) DefaultProduces() string { + return o.defaultProduces +} + +// DefaultConsumes returns the default consumes media type +func (o *WeaviateAPI) DefaultConsumes() string { + return o.defaultConsumes +} + +// Formats returns the registered string formats +func (o *WeaviateAPI) Formats() strfmt.Registry { + return o.formats +} + +// RegisterFormat registers a custom format validator +func (o *WeaviateAPI) RegisterFormat(name string, format strfmt.Format, validator strfmt.Validator) { + o.formats.Add(name, format, validator) +} + +// Validate validates the registrations in the WeaviateAPI +func (o *WeaviateAPI) Validate() error { + var unregistered []string + + if o.JSONConsumer == nil { + unregistered = append(unregistered, "JSONConsumer") + } + if o.YamlConsumer == nil { + unregistered = append(unregistered, "YamlConsumer") + } + + if o.JSONProducer == nil { + unregistered = append(unregistered, "JSONProducer") + } + + if o.OidcAuth == nil { + unregistered = append(unregistered, "OidcAuth") + } + + if o.WellKnownGetWellKnownOpenidConfigurationHandler == nil { + unregistered = append(unregistered, "well_known.GetWellKnownOpenidConfigurationHandler") + } + if o.UsersActivateUserHandler == nil { + unregistered = append(unregistered, "users.ActivateUserHandler") + } + if o.AuthzAddPermissionsHandler == nil { + unregistered = append(unregistered, "authz.AddPermissionsHandler") + } + if o.SchemaAliasesCreateHandler == nil { + unregistered = append(unregistered, "schema.AliasesCreateHandler") + } + if o.SchemaAliasesDeleteHandler == nil { + unregistered = append(unregistered, "schema.AliasesDeleteHandler") + } + if o.SchemaAliasesGetHandler == nil { + unregistered = append(unregistered, "schema.AliasesGetHandler") + } + if o.SchemaAliasesGetAliasHandler == nil { + unregistered = append(unregistered, 
"schema.AliasesGetAliasHandler") + } + if o.SchemaAliasesUpdateHandler == nil { + unregistered = append(unregistered, "schema.AliasesUpdateHandler") + } + if o.AuthzAssignRoleToGroupHandler == nil { + unregistered = append(unregistered, "authz.AssignRoleToGroupHandler") + } + if o.AuthzAssignRoleToUserHandler == nil { + unregistered = append(unregistered, "authz.AssignRoleToUserHandler") + } + if o.BackupsBackupsCancelHandler == nil { + unregistered = append(unregistered, "backups.BackupsCancelHandler") + } + if o.BackupsBackupsCreateHandler == nil { + unregistered = append(unregistered, "backups.BackupsCreateHandler") + } + if o.BackupsBackupsCreateStatusHandler == nil { + unregistered = append(unregistered, "backups.BackupsCreateStatusHandler") + } + if o.BackupsBackupsListHandler == nil { + unregistered = append(unregistered, "backups.BackupsListHandler") + } + if o.BackupsBackupsRestoreHandler == nil { + unregistered = append(unregistered, "backups.BackupsRestoreHandler") + } + if o.BackupsBackupsRestoreStatusHandler == nil { + unregistered = append(unregistered, "backups.BackupsRestoreStatusHandler") + } + if o.BatchBatchObjectsCreateHandler == nil { + unregistered = append(unregistered, "batch.BatchObjectsCreateHandler") + } + if o.BatchBatchObjectsDeleteHandler == nil { + unregistered = append(unregistered, "batch.BatchObjectsDeleteHandler") + } + if o.BatchBatchReferencesCreateHandler == nil { + unregistered = append(unregistered, "batch.BatchReferencesCreateHandler") + } + if o.ReplicationCancelReplicationHandler == nil { + unregistered = append(unregistered, "replication.CancelReplicationHandler") + } + if o.ClassificationsClassificationsGetHandler == nil { + unregistered = append(unregistered, "classifications.ClassificationsGetHandler") + } + if o.ClassificationsClassificationsPostHandler == nil { + unregistered = append(unregistered, "classifications.ClassificationsPostHandler") + } + if o.ClusterClusterGetStatisticsHandler == nil { + unregistered = 
append(unregistered, "cluster.ClusterGetStatisticsHandler") + } + if o.AuthzCreateRoleHandler == nil { + unregistered = append(unregistered, "authz.CreateRoleHandler") + } + if o.UsersCreateUserHandler == nil { + unregistered = append(unregistered, "users.CreateUserHandler") + } + if o.UsersDeactivateUserHandler == nil { + unregistered = append(unregistered, "users.DeactivateUserHandler") + } + if o.ReplicationDeleteAllReplicationsHandler == nil { + unregistered = append(unregistered, "replication.DeleteAllReplicationsHandler") + } + if o.ReplicationDeleteReplicationHandler == nil { + unregistered = append(unregistered, "replication.DeleteReplicationHandler") + } + if o.AuthzDeleteRoleHandler == nil { + unregistered = append(unregistered, "authz.DeleteRoleHandler") + } + if o.UsersDeleteUserHandler == nil { + unregistered = append(unregistered, "users.DeleteUserHandler") + } + if o.DistributedTasksDistributedTasksGetHandler == nil { + unregistered = append(unregistered, "distributed_tasks.DistributedTasksGetHandler") + } + if o.ReplicationForceDeleteReplicationsHandler == nil { + unregistered = append(unregistered, "replication.ForceDeleteReplicationsHandler") + } + if o.ReplicationGetCollectionShardingStateHandler == nil { + unregistered = append(unregistered, "replication.GetCollectionShardingStateHandler") + } + if o.AuthzGetGroupsHandler == nil { + unregistered = append(unregistered, "authz.GetGroupsHandler") + } + if o.AuthzGetGroupsForRoleHandler == nil { + unregistered = append(unregistered, "authz.GetGroupsForRoleHandler") + } + if o.UsersGetOwnInfoHandler == nil { + unregistered = append(unregistered, "users.GetOwnInfoHandler") + } + if o.AuthzGetRoleHandler == nil { + unregistered = append(unregistered, "authz.GetRoleHandler") + } + if o.AuthzGetRolesHandler == nil { + unregistered = append(unregistered, "authz.GetRolesHandler") + } + if o.AuthzGetRolesForGroupHandler == nil { + unregistered = append(unregistered, "authz.GetRolesForGroupHandler") + } + if 
o.AuthzGetRolesForUserHandler == nil { + unregistered = append(unregistered, "authz.GetRolesForUserHandler") + } + if o.AuthzGetRolesForUserDeprecatedHandler == nil { + unregistered = append(unregistered, "authz.GetRolesForUserDeprecatedHandler") + } + if o.UsersGetUserInfoHandler == nil { + unregistered = append(unregistered, "users.GetUserInfoHandler") + } + if o.AuthzGetUsersForRoleHandler == nil { + unregistered = append(unregistered, "authz.GetUsersForRoleHandler") + } + if o.AuthzGetUsersForRoleDeprecatedHandler == nil { + unregistered = append(unregistered, "authz.GetUsersForRoleDeprecatedHandler") + } + if o.GraphqlGraphqlBatchHandler == nil { + unregistered = append(unregistered, "graphql.GraphqlBatchHandler") + } + if o.GraphqlGraphqlPostHandler == nil { + unregistered = append(unregistered, "graphql.GraphqlPostHandler") + } + if o.AuthzHasPermissionHandler == nil { + unregistered = append(unregistered, "authz.HasPermissionHandler") + } + if o.UsersListAllUsersHandler == nil { + unregistered = append(unregistered, "users.ListAllUsersHandler") + } + if o.ReplicationListReplicationHandler == nil { + unregistered = append(unregistered, "replication.ListReplicationHandler") + } + if o.MetaMetaGetHandler == nil { + unregistered = append(unregistered, "meta.MetaGetHandler") + } + if o.NodesNodesGetHandler == nil { + unregistered = append(unregistered, "nodes.NodesGetHandler") + } + if o.NodesNodesGetClassHandler == nil { + unregistered = append(unregistered, "nodes.NodesGetClassHandler") + } + if o.ObjectsObjectsClassDeleteHandler == nil { + unregistered = append(unregistered, "objects.ObjectsClassDeleteHandler") + } + if o.ObjectsObjectsClassGetHandler == nil { + unregistered = append(unregistered, "objects.ObjectsClassGetHandler") + } + if o.ObjectsObjectsClassHeadHandler == nil { + unregistered = append(unregistered, "objects.ObjectsClassHeadHandler") + } + if o.ObjectsObjectsClassPatchHandler == nil { + unregistered = append(unregistered, 
"objects.ObjectsClassPatchHandler") + } + if o.ObjectsObjectsClassPutHandler == nil { + unregistered = append(unregistered, "objects.ObjectsClassPutHandler") + } + if o.ObjectsObjectsClassReferencesCreateHandler == nil { + unregistered = append(unregistered, "objects.ObjectsClassReferencesCreateHandler") + } + if o.ObjectsObjectsClassReferencesDeleteHandler == nil { + unregistered = append(unregistered, "objects.ObjectsClassReferencesDeleteHandler") + } + if o.ObjectsObjectsClassReferencesPutHandler == nil { + unregistered = append(unregistered, "objects.ObjectsClassReferencesPutHandler") + } + if o.ObjectsObjectsCreateHandler == nil { + unregistered = append(unregistered, "objects.ObjectsCreateHandler") + } + if o.ObjectsObjectsDeleteHandler == nil { + unregistered = append(unregistered, "objects.ObjectsDeleteHandler") + } + if o.ObjectsObjectsGetHandler == nil { + unregistered = append(unregistered, "objects.ObjectsGetHandler") + } + if o.ObjectsObjectsHeadHandler == nil { + unregistered = append(unregistered, "objects.ObjectsHeadHandler") + } + if o.ObjectsObjectsListHandler == nil { + unregistered = append(unregistered, "objects.ObjectsListHandler") + } + if o.ObjectsObjectsPatchHandler == nil { + unregistered = append(unregistered, "objects.ObjectsPatchHandler") + } + if o.ObjectsObjectsReferencesCreateHandler == nil { + unregistered = append(unregistered, "objects.ObjectsReferencesCreateHandler") + } + if o.ObjectsObjectsReferencesDeleteHandler == nil { + unregistered = append(unregistered, "objects.ObjectsReferencesDeleteHandler") + } + if o.ObjectsObjectsReferencesUpdateHandler == nil { + unregistered = append(unregistered, "objects.ObjectsReferencesUpdateHandler") + } + if o.ObjectsObjectsUpdateHandler == nil { + unregistered = append(unregistered, "objects.ObjectsUpdateHandler") + } + if o.ObjectsObjectsValidateHandler == nil { + unregistered = append(unregistered, "objects.ObjectsValidateHandler") + } + if o.AuthzRemovePermissionsHandler == nil { + 
unregistered = append(unregistered, "authz.RemovePermissionsHandler") + } + if o.ReplicationReplicateHandler == nil { + unregistered = append(unregistered, "replication.ReplicateHandler") + } + if o.ReplicationReplicationDetailsHandler == nil { + unregistered = append(unregistered, "replication.ReplicationDetailsHandler") + } + if o.AuthzRevokeRoleFromGroupHandler == nil { + unregistered = append(unregistered, "authz.RevokeRoleFromGroupHandler") + } + if o.AuthzRevokeRoleFromUserHandler == nil { + unregistered = append(unregistered, "authz.RevokeRoleFromUserHandler") + } + if o.UsersRotateUserAPIKeyHandler == nil { + unregistered = append(unregistered, "users.RotateUserAPIKeyHandler") + } + if o.SchemaSchemaDumpHandler == nil { + unregistered = append(unregistered, "schema.SchemaDumpHandler") + } + if o.SchemaSchemaObjectsCreateHandler == nil { + unregistered = append(unregistered, "schema.SchemaObjectsCreateHandler") + } + if o.SchemaSchemaObjectsDeleteHandler == nil { + unregistered = append(unregistered, "schema.SchemaObjectsDeleteHandler") + } + if o.SchemaSchemaObjectsGetHandler == nil { + unregistered = append(unregistered, "schema.SchemaObjectsGetHandler") + } + if o.SchemaSchemaObjectsPropertiesAddHandler == nil { + unregistered = append(unregistered, "schema.SchemaObjectsPropertiesAddHandler") + } + if o.SchemaSchemaObjectsShardsGetHandler == nil { + unregistered = append(unregistered, "schema.SchemaObjectsShardsGetHandler") + } + if o.SchemaSchemaObjectsShardsUpdateHandler == nil { + unregistered = append(unregistered, "schema.SchemaObjectsShardsUpdateHandler") + } + if o.SchemaSchemaObjectsUpdateHandler == nil { + unregistered = append(unregistered, "schema.SchemaObjectsUpdateHandler") + } + if o.SchemaTenantExistsHandler == nil { + unregistered = append(unregistered, "schema.TenantExistsHandler") + } + if o.SchemaTenantsCreateHandler == nil { + unregistered = append(unregistered, "schema.TenantsCreateHandler") + } + if o.SchemaTenantsDeleteHandler == 
nil { + unregistered = append(unregistered, "schema.TenantsDeleteHandler") + } + if o.SchemaTenantsGetHandler == nil { + unregistered = append(unregistered, "schema.TenantsGetHandler") + } + if o.SchemaTenantsGetOneHandler == nil { + unregistered = append(unregistered, "schema.TenantsGetOneHandler") + } + if o.SchemaTenantsUpdateHandler == nil { + unregistered = append(unregistered, "schema.TenantsUpdateHandler") + } + if o.WeaviateRootHandler == nil { + unregistered = append(unregistered, "WeaviateRootHandler") + } + if o.WeaviateWellknownLivenessHandler == nil { + unregistered = append(unregistered, "WeaviateWellknownLivenessHandler") + } + if o.WeaviateWellknownReadinessHandler == nil { + unregistered = append(unregistered, "WeaviateWellknownReadinessHandler") + } + + if len(unregistered) > 0 { + return fmt.Errorf("missing registration: %s", strings.Join(unregistered, ", ")) + } + + return nil +} + +// ServeErrorFor gets a error handler for a given operation id +func (o *WeaviateAPI) ServeErrorFor(operationID string) func(http.ResponseWriter, *http.Request, error) { + return o.ServeError +} + +// AuthenticatorsFor gets the authenticators for the specified security schemes +func (o *WeaviateAPI) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { + result := make(map[string]runtime.Authenticator) + for name := range schemes { + switch name { + case "oidc": + result[name] = o.BearerAuthenticator(name, func(token string, scopes []string) (interface{}, error) { + return o.OidcAuth(token, scopes) + }) + + } + } + return result +} + +// Authorizer returns the registered authorizer +func (o *WeaviateAPI) Authorizer() runtime.Authorizer { + return o.APIAuthorizer +} + +// ConsumersFor gets the consumers for the specified media types. +// MIME type parameters are ignored here. 
+func (o *WeaviateAPI) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { + result := make(map[string]runtime.Consumer, len(mediaTypes)) + for _, mt := range mediaTypes { + switch mt { + case "application/json": + result["application/json"] = o.JSONConsumer + case "application/yaml": + result["application/yaml"] = o.YamlConsumer + } + + if c, ok := o.customConsumers[mt]; ok { + result[mt] = c + } + } + return result +} + +// ProducersFor gets the producers for the specified media types. +// MIME type parameters are ignored here. +func (o *WeaviateAPI) ProducersFor(mediaTypes []string) map[string]runtime.Producer { + result := make(map[string]runtime.Producer, len(mediaTypes)) + for _, mt := range mediaTypes { + switch mt { + case "application/json": + result["application/json"] = o.JSONProducer + } + + if p, ok := o.customProducers[mt]; ok { + result[mt] = p + } + } + return result +} + +// HandlerFor gets a http.Handler for the provided operation method and path +func (o *WeaviateAPI) HandlerFor(method, path string) (http.Handler, bool) { + if o.handlers == nil { + return nil, false + } + um := strings.ToUpper(method) + if _, ok := o.handlers[um]; !ok { + return nil, false + } + if path == "/" { + path = "" + } + h, ok := o.handlers[um][path] + return h, ok +} + +// Context returns the middleware context for the weaviate API +func (o *WeaviateAPI) Context() *middleware.Context { + if o.context == nil { + o.context = middleware.NewRoutableContext(o.spec, o, nil) + } + + return o.context +} + +func (o *WeaviateAPI) initHandlerCache() { + o.Context() // don't care about the result, just that the initialization happened + if o.handlers == nil { + o.handlers = make(map[string]map[string]http.Handler) + } + + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/.well-known/openid-configuration"] = well_known.NewGetWellKnownOpenidConfiguration(o.context, o.WellKnownGetWellKnownOpenidConfigurationHandler) 
+ if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/users/db/{user_id}/activate"] = users.NewActivateUser(o.context, o.UsersActivateUserHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/authz/roles/{id}/add-permissions"] = authz.NewAddPermissions(o.context, o.AuthzAddPermissionsHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/aliases"] = schema.NewAliasesCreate(o.context, o.SchemaAliasesCreateHandler) + if o.handlers["DELETE"] == nil { + o.handlers["DELETE"] = make(map[string]http.Handler) + } + o.handlers["DELETE"]["/aliases/{aliasName}"] = schema.NewAliasesDelete(o.context, o.SchemaAliasesDeleteHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/aliases"] = schema.NewAliasesGet(o.context, o.SchemaAliasesGetHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/aliases/{aliasName}"] = schema.NewAliasesGetAlias(o.context, o.SchemaAliasesGetAliasHandler) + if o.handlers["PUT"] == nil { + o.handlers["PUT"] = make(map[string]http.Handler) + } + o.handlers["PUT"]["/aliases/{aliasName}"] = schema.NewAliasesUpdate(o.context, o.SchemaAliasesUpdateHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/authz/groups/{id}/assign"] = authz.NewAssignRoleToGroup(o.context, o.AuthzAssignRoleToGroupHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/authz/users/{id}/assign"] = authz.NewAssignRoleToUser(o.context, o.AuthzAssignRoleToUserHandler) + if o.handlers["DELETE"] == nil { + o.handlers["DELETE"] = make(map[string]http.Handler) + } + o.handlers["DELETE"]["/backups/{backend}/{id}"] = 
backups.NewBackupsCancel(o.context, o.BackupsBackupsCancelHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/backups/{backend}"] = backups.NewBackupsCreate(o.context, o.BackupsBackupsCreateHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/backups/{backend}/{id}"] = backups.NewBackupsCreateStatus(o.context, o.BackupsBackupsCreateStatusHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/backups/{backend}"] = backups.NewBackupsList(o.context, o.BackupsBackupsListHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/backups/{backend}/{id}/restore"] = backups.NewBackupsRestore(o.context, o.BackupsBackupsRestoreHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/backups/{backend}/{id}/restore"] = backups.NewBackupsRestoreStatus(o.context, o.BackupsBackupsRestoreStatusHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/batch/objects"] = batch.NewBatchObjectsCreate(o.context, o.BatchBatchObjectsCreateHandler) + if o.handlers["DELETE"] == nil { + o.handlers["DELETE"] = make(map[string]http.Handler) + } + o.handlers["DELETE"]["/batch/objects"] = batch.NewBatchObjectsDelete(o.context, o.BatchBatchObjectsDeleteHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/batch/references"] = batch.NewBatchReferencesCreate(o.context, o.BatchBatchReferencesCreateHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/replication/replicate/{id}/cancel"] = replication.NewCancelReplication(o.context, o.ReplicationCancelReplicationHandler) + if 
o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/classifications/{id}"] = classifications.NewClassificationsGet(o.context, o.ClassificationsClassificationsGetHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/classifications"] = classifications.NewClassificationsPost(o.context, o.ClassificationsClassificationsPostHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/cluster/statistics"] = cluster.NewClusterGetStatistics(o.context, o.ClusterClusterGetStatisticsHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/authz/roles"] = authz.NewCreateRole(o.context, o.AuthzCreateRoleHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/users/db/{user_id}"] = users.NewCreateUser(o.context, o.UsersCreateUserHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/users/db/{user_id}/deactivate"] = users.NewDeactivateUser(o.context, o.UsersDeactivateUserHandler) + if o.handlers["DELETE"] == nil { + o.handlers["DELETE"] = make(map[string]http.Handler) + } + o.handlers["DELETE"]["/replication/replicate"] = replication.NewDeleteAllReplications(o.context, o.ReplicationDeleteAllReplicationsHandler) + if o.handlers["DELETE"] == nil { + o.handlers["DELETE"] = make(map[string]http.Handler) + } + o.handlers["DELETE"]["/replication/replicate/{id}"] = replication.NewDeleteReplication(o.context, o.ReplicationDeleteReplicationHandler) + if o.handlers["DELETE"] == nil { + o.handlers["DELETE"] = make(map[string]http.Handler) + } + o.handlers["DELETE"]["/authz/roles/{id}"] = authz.NewDeleteRole(o.context, o.AuthzDeleteRoleHandler) + if o.handlers["DELETE"] == nil { + o.handlers["DELETE"] = 
make(map[string]http.Handler) + } + o.handlers["DELETE"]["/users/db/{user_id}"] = users.NewDeleteUser(o.context, o.UsersDeleteUserHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/tasks"] = distributed_tasks.NewDistributedTasksGet(o.context, o.DistributedTasksDistributedTasksGetHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/replication/replicate/force-delete"] = replication.NewForceDeleteReplications(o.context, o.ReplicationForceDeleteReplicationsHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/replication/sharding-state"] = replication.NewGetCollectionShardingState(o.context, o.ReplicationGetCollectionShardingStateHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/authz/groups/{groupType}"] = authz.NewGetGroups(o.context, o.AuthzGetGroupsHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/authz/roles/{id}/group-assignments"] = authz.NewGetGroupsForRole(o.context, o.AuthzGetGroupsForRoleHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/users/own-info"] = users.NewGetOwnInfo(o.context, o.UsersGetOwnInfoHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/authz/roles/{id}"] = authz.NewGetRole(o.context, o.AuthzGetRoleHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/authz/roles"] = authz.NewGetRoles(o.context, o.AuthzGetRolesHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/authz/groups/{id}/roles/{groupType}"] = authz.NewGetRolesForGroup(o.context, 
o.AuthzGetRolesForGroupHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/authz/users/{id}/roles/{userType}"] = authz.NewGetRolesForUser(o.context, o.AuthzGetRolesForUserHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/authz/users/{id}/roles"] = authz.NewGetRolesForUserDeprecated(o.context, o.AuthzGetRolesForUserDeprecatedHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/users/db/{user_id}"] = users.NewGetUserInfo(o.context, o.UsersGetUserInfoHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/authz/roles/{id}/user-assignments"] = authz.NewGetUsersForRole(o.context, o.AuthzGetUsersForRoleHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/authz/roles/{id}/users"] = authz.NewGetUsersForRoleDeprecated(o.context, o.AuthzGetUsersForRoleDeprecatedHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/graphql/batch"] = graphql.NewGraphqlBatch(o.context, o.GraphqlGraphqlBatchHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/graphql"] = graphql.NewGraphqlPost(o.context, o.GraphqlGraphqlPostHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/authz/roles/{id}/has-permission"] = authz.NewHasPermission(o.context, o.AuthzHasPermissionHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/users/db"] = users.NewListAllUsers(o.context, o.UsersListAllUsersHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + 
o.handlers["GET"]["/replication/replicate/list"] = replication.NewListReplication(o.context, o.ReplicationListReplicationHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/meta"] = meta.NewMetaGet(o.context, o.MetaMetaGetHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/nodes"] = nodes.NewNodesGet(o.context, o.NodesNodesGetHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/nodes/{className}"] = nodes.NewNodesGetClass(o.context, o.NodesNodesGetClassHandler) + if o.handlers["DELETE"] == nil { + o.handlers["DELETE"] = make(map[string]http.Handler) + } + o.handlers["DELETE"]["/objects/{className}/{id}"] = objects.NewObjectsClassDelete(o.context, o.ObjectsObjectsClassDeleteHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/objects/{className}/{id}"] = objects.NewObjectsClassGet(o.context, o.ObjectsObjectsClassGetHandler) + if o.handlers["HEAD"] == nil { + o.handlers["HEAD"] = make(map[string]http.Handler) + } + o.handlers["HEAD"]["/objects/{className}/{id}"] = objects.NewObjectsClassHead(o.context, o.ObjectsObjectsClassHeadHandler) + if o.handlers["PATCH"] == nil { + o.handlers["PATCH"] = make(map[string]http.Handler) + } + o.handlers["PATCH"]["/objects/{className}/{id}"] = objects.NewObjectsClassPatch(o.context, o.ObjectsObjectsClassPatchHandler) + if o.handlers["PUT"] == nil { + o.handlers["PUT"] = make(map[string]http.Handler) + } + o.handlers["PUT"]["/objects/{className}/{id}"] = objects.NewObjectsClassPut(o.context, o.ObjectsObjectsClassPutHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/objects/{className}/{id}/references/{propertyName}"] = objects.NewObjectsClassReferencesCreate(o.context, 
o.ObjectsObjectsClassReferencesCreateHandler) + if o.handlers["DELETE"] == nil { + o.handlers["DELETE"] = make(map[string]http.Handler) + } + o.handlers["DELETE"]["/objects/{className}/{id}/references/{propertyName}"] = objects.NewObjectsClassReferencesDelete(o.context, o.ObjectsObjectsClassReferencesDeleteHandler) + if o.handlers["PUT"] == nil { + o.handlers["PUT"] = make(map[string]http.Handler) + } + o.handlers["PUT"]["/objects/{className}/{id}/references/{propertyName}"] = objects.NewObjectsClassReferencesPut(o.context, o.ObjectsObjectsClassReferencesPutHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/objects"] = objects.NewObjectsCreate(o.context, o.ObjectsObjectsCreateHandler) + if o.handlers["DELETE"] == nil { + o.handlers["DELETE"] = make(map[string]http.Handler) + } + o.handlers["DELETE"]["/objects/{id}"] = objects.NewObjectsDelete(o.context, o.ObjectsObjectsDeleteHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/objects/{id}"] = objects.NewObjectsGet(o.context, o.ObjectsObjectsGetHandler) + if o.handlers["HEAD"] == nil { + o.handlers["HEAD"] = make(map[string]http.Handler) + } + o.handlers["HEAD"]["/objects/{id}"] = objects.NewObjectsHead(o.context, o.ObjectsObjectsHeadHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/objects"] = objects.NewObjectsList(o.context, o.ObjectsObjectsListHandler) + if o.handlers["PATCH"] == nil { + o.handlers["PATCH"] = make(map[string]http.Handler) + } + o.handlers["PATCH"]["/objects/{id}"] = objects.NewObjectsPatch(o.context, o.ObjectsObjectsPatchHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/objects/{id}/references/{propertyName}"] = objects.NewObjectsReferencesCreate(o.context, o.ObjectsObjectsReferencesCreateHandler) + if 
o.handlers["DELETE"] == nil { + o.handlers["DELETE"] = make(map[string]http.Handler) + } + o.handlers["DELETE"]["/objects/{id}/references/{propertyName}"] = objects.NewObjectsReferencesDelete(o.context, o.ObjectsObjectsReferencesDeleteHandler) + if o.handlers["PUT"] == nil { + o.handlers["PUT"] = make(map[string]http.Handler) + } + o.handlers["PUT"]["/objects/{id}/references/{propertyName}"] = objects.NewObjectsReferencesUpdate(o.context, o.ObjectsObjectsReferencesUpdateHandler) + if o.handlers["PUT"] == nil { + o.handlers["PUT"] = make(map[string]http.Handler) + } + o.handlers["PUT"]["/objects/{id}"] = objects.NewObjectsUpdate(o.context, o.ObjectsObjectsUpdateHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/objects/validate"] = objects.NewObjectsValidate(o.context, o.ObjectsObjectsValidateHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/authz/roles/{id}/remove-permissions"] = authz.NewRemovePermissions(o.context, o.AuthzRemovePermissionsHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/replication/replicate"] = replication.NewReplicate(o.context, o.ReplicationReplicateHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/replication/replicate/{id}"] = replication.NewReplicationDetails(o.context, o.ReplicationReplicationDetailsHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/authz/groups/{id}/revoke"] = authz.NewRevokeRoleFromGroup(o.context, o.AuthzRevokeRoleFromGroupHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/authz/users/{id}/revoke"] = authz.NewRevokeRoleFromUser(o.context, o.AuthzRevokeRoleFromUserHandler) + if o.handlers["POST"] == 
nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/users/db/{user_id}/rotate-key"] = users.NewRotateUserAPIKey(o.context, o.UsersRotateUserAPIKeyHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/schema"] = schema.NewSchemaDump(o.context, o.SchemaSchemaDumpHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/schema"] = schema.NewSchemaObjectsCreate(o.context, o.SchemaSchemaObjectsCreateHandler) + if o.handlers["DELETE"] == nil { + o.handlers["DELETE"] = make(map[string]http.Handler) + } + o.handlers["DELETE"]["/schema/{className}"] = schema.NewSchemaObjectsDelete(o.context, o.SchemaSchemaObjectsDeleteHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/schema/{className}"] = schema.NewSchemaObjectsGet(o.context, o.SchemaSchemaObjectsGetHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/schema/{className}/properties"] = schema.NewSchemaObjectsPropertiesAdd(o.context, o.SchemaSchemaObjectsPropertiesAddHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/schema/{className}/shards"] = schema.NewSchemaObjectsShardsGet(o.context, o.SchemaSchemaObjectsShardsGetHandler) + if o.handlers["PUT"] == nil { + o.handlers["PUT"] = make(map[string]http.Handler) + } + o.handlers["PUT"]["/schema/{className}/shards/{shardName}"] = schema.NewSchemaObjectsShardsUpdate(o.context, o.SchemaSchemaObjectsShardsUpdateHandler) + if o.handlers["PUT"] == nil { + o.handlers["PUT"] = make(map[string]http.Handler) + } + o.handlers["PUT"]["/schema/{className}"] = schema.NewSchemaObjectsUpdate(o.context, o.SchemaSchemaObjectsUpdateHandler) + if o.handlers["HEAD"] == nil { + o.handlers["HEAD"] = make(map[string]http.Handler) + } + 
o.handlers["HEAD"]["/schema/{className}/tenants/{tenantName}"] = schema.NewTenantExists(o.context, o.SchemaTenantExistsHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/schema/{className}/tenants"] = schema.NewTenantsCreate(o.context, o.SchemaTenantsCreateHandler) + if o.handlers["DELETE"] == nil { + o.handlers["DELETE"] = make(map[string]http.Handler) + } + o.handlers["DELETE"]["/schema/{className}/tenants"] = schema.NewTenantsDelete(o.context, o.SchemaTenantsDeleteHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/schema/{className}/tenants"] = schema.NewTenantsGet(o.context, o.SchemaTenantsGetHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/schema/{className}/tenants/{tenantName}"] = schema.NewTenantsGetOne(o.context, o.SchemaTenantsGetOneHandler) + if o.handlers["PUT"] == nil { + o.handlers["PUT"] = make(map[string]http.Handler) + } + o.handlers["PUT"]["/schema/{className}/tenants"] = schema.NewTenantsUpdate(o.context, o.SchemaTenantsUpdateHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"][""] = NewWeaviateRoot(o.context, o.WeaviateRootHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/.well-known/live"] = NewWeaviateWellknownLiveness(o.context, o.WeaviateWellknownLivenessHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/.well-known/ready"] = NewWeaviateWellknownReadiness(o.context, o.WeaviateWellknownReadinessHandler) +} + +// Serve creates a http handler to serve the API over HTTP +// can be used directly in http.ListenAndServe(":8000", api.Serve(nil)) +func (o *WeaviateAPI) Serve(builder middleware.Builder) http.Handler { + o.Init() + + if o.Middleware 
!= nil { + return o.Middleware(builder) + } + if o.useSwaggerUI { + return o.context.APIHandlerSwaggerUI(builder) + } + return o.context.APIHandler(builder) +} + +// Init allows you to just initialize the handler cache, you can then recompose the middleware as you see fit +func (o *WeaviateAPI) Init() { + if len(o.handlers) == 0 { + o.initHandlerCache() + } +} + +// RegisterConsumer allows you to add (or override) a consumer for a media type. +func (o *WeaviateAPI) RegisterConsumer(mediaType string, consumer runtime.Consumer) { + o.customConsumers[mediaType] = consumer +} + +// RegisterProducer allows you to add (or override) a producer for a media type. +func (o *WeaviateAPI) RegisterProducer(mediaType string, producer runtime.Producer) { + o.customProducers[mediaType] = producer +} + +// AddMiddlewareFor adds a http middleware to existing handler +func (o *WeaviateAPI) AddMiddlewareFor(method, path string, builder middleware.Builder) { + um := strings.ToUpper(method) + if path == "/" { + path = "" + } + o.Init() + if h, ok := o.handlers[um][path]; ok { + o.handlers[method][path] = builder(h) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_root.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_root.go new file mode 100644 index 0000000000000000000000000000000000000000..2341054ae58ec906d16844e13968556c93198cb0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_root.go @@ -0,0 +1,190 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// WeaviateRootHandlerFunc turns a function with the right signature into a weaviate root handler +type WeaviateRootHandlerFunc func(WeaviateRootParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn WeaviateRootHandlerFunc) Handle(params WeaviateRootParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// WeaviateRootHandler interface for that can handle valid weaviate root params +type WeaviateRootHandler interface { + Handle(WeaviateRootParams, *models.Principal) middleware.Responder +} + +// NewWeaviateRoot creates a new http.Handler for the weaviate root operation +func NewWeaviateRoot(ctx *middleware.Context, handler WeaviateRootHandler) *WeaviateRoot { + return &WeaviateRoot{Context: ctx, Handler: handler} +} + +/* + WeaviateRoot swagger:route GET / weaviateRoot + +# List available endpoints + +Get links to other endpoints to help discover the REST API +*/ +type WeaviateRoot struct { + Context *middleware.Context + Handler WeaviateRootHandler +} + +func (o *WeaviateRoot) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewWeaviateRootParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // 
bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// WeaviateRootOKBody weaviate root o k body +// +// swagger:model WeaviateRootOKBody +type WeaviateRootOKBody struct { + + // links + Links []*models.Link `json:"links" yaml:"links"` +} + +// Validate validates this weaviate root o k body +func (o *WeaviateRootOKBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateLinks(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *WeaviateRootOKBody) validateLinks(formats strfmt.Registry) error { + if swag.IsZero(o.Links) { // not required + return nil + } + + for i := 0; i < len(o.Links); i++ { + if swag.IsZero(o.Links[i]) { // not required + continue + } + + if o.Links[i] != nil { + if err := o.Links[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("weaviateRootOK" + "." + "links" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("weaviateRootOK" + "." + "links" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this weaviate root o k body based on the context it is used +func (o *WeaviateRootOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateLinks(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *WeaviateRootOKBody) contextValidateLinks(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(o.Links); i++ { + + if o.Links[i] != nil { + if err := o.Links[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("weaviateRootOK" + "." + "links" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("weaviateRootOK" + "." + "links" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (o *WeaviateRootOKBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *WeaviateRootOKBody) UnmarshalBinary(b []byte) error { + var res WeaviateRootOKBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_root_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_root_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..705f632fde51cf2a2ad618c220d6de65d3a4a732 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_root_parameters.go @@ -0,0 +1,57 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" +) + +// NewWeaviateRootParams creates a new WeaviateRootParams object +// +// There are no default values defined in the spec. +func NewWeaviateRootParams() WeaviateRootParams { + + return WeaviateRootParams{} +} + +// WeaviateRootParams contains all the bound params for the weaviate root operation +// typically these are obtained from a http.Request +// +// swagger:parameters weaviate.root +type WeaviateRootParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewWeaviateRootParams() beforehand. +func (o *WeaviateRootParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_root_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_root_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..88f8ec1fff4fb5788b18aac9aeebee44a7f8e64b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_root_responses.go @@ -0,0 +1,68 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" +) + +// WeaviateRootOKCode is the HTTP code returned for type WeaviateRootOK +const WeaviateRootOKCode int = 200 + +/* +WeaviateRootOK Weaviate is alive and ready to serve content + +swagger:response weaviateRootOK +*/ +type WeaviateRootOK struct { + + /* + In: Body + */ + Payload *WeaviateRootOKBody `json:"body,omitempty"` +} + +// NewWeaviateRootOK creates WeaviateRootOK with default headers values +func NewWeaviateRootOK() *WeaviateRootOK { + + return &WeaviateRootOK{} +} + +// WithPayload adds the payload to the weaviate root o k response +func (o *WeaviateRootOK) WithPayload(payload *WeaviateRootOKBody) *WeaviateRootOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the weaviate root o k response +func (o *WeaviateRootOK) SetPayload(payload *WeaviateRootOKBody) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *WeaviateRootOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_root_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_root_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..e680a20cf0ac54e00f1dab2e65bfafd78dd8f6fb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_root_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / 
|/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// WeaviateRootURL generates an URL for the weaviate root operation +type WeaviateRootURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *WeaviateRootURL) WithBasePath(bp string) *WeaviateRootURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *WeaviateRootURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *WeaviateRootURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *WeaviateRootURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *WeaviateRootURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *WeaviateRootURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on WeaviateRootURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on WeaviateRootURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *WeaviateRootURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_liveness.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_liveness.go new file mode 100644 index 0000000000000000000000000000000000000000..9fb0e721a2758d6804fba156643b54758590d94b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_liveness.go @@ -0,0 +1,84 @@ +// _ _ +// __ 
_____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// WeaviateWellknownLivenessHandlerFunc turns a function with the right signature into a weaviate wellknown liveness handler +type WeaviateWellknownLivenessHandlerFunc func(WeaviateWellknownLivenessParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn WeaviateWellknownLivenessHandlerFunc) Handle(params WeaviateWellknownLivenessParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// WeaviateWellknownLivenessHandler interface for that can handle valid weaviate wellknown liveness params +type WeaviateWellknownLivenessHandler interface { + Handle(WeaviateWellknownLivenessParams, *models.Principal) middleware.Responder +} + +// NewWeaviateWellknownLiveness creates a new http.Handler for the weaviate wellknown liveness operation +func NewWeaviateWellknownLiveness(ctx *middleware.Context, handler WeaviateWellknownLivenessHandler) *WeaviateWellknownLiveness { + return &WeaviateWellknownLiveness{Context: ctx, Handler: handler} +} + +/* + WeaviateWellknownLiveness swagger:route GET /.well-known/live weaviateWellknownLiveness + +Get application liveness. + +Determines whether the application is alive. 
Can be used for kubernetes liveness probe +*/ +type WeaviateWellknownLiveness struct { + Context *middleware.Context + Handler WeaviateWellknownLivenessHandler +} + +func (o *WeaviateWellknownLiveness) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewWeaviateWellknownLivenessParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_liveness_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_liveness_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..5f57c32aae7f1caeefd2a9d8615a11379183ebfa --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_liveness_parameters.go @@ -0,0 +1,57 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" +) + +// NewWeaviateWellknownLivenessParams creates a new WeaviateWellknownLivenessParams object +// +// There are no default values defined in the spec. +func NewWeaviateWellknownLivenessParams() WeaviateWellknownLivenessParams { + + return WeaviateWellknownLivenessParams{} +} + +// WeaviateWellknownLivenessParams contains all the bound params for the weaviate wellknown liveness operation +// typically these are obtained from a http.Request +// +// swagger:parameters weaviate.wellknown.liveness +type WeaviateWellknownLivenessParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewWeaviateWellknownLivenessParams() beforehand. +func (o *WeaviateWellknownLivenessParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_liveness_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_liveness_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..32f098d8ffc2240aa7b0bec8ae0950ac8de2dac1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_liveness_responses.go @@ -0,0 +1,48 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" +) + +// WeaviateWellknownLivenessOKCode is the HTTP code returned for type WeaviateWellknownLivenessOK +const WeaviateWellknownLivenessOKCode int = 200 + +/* +WeaviateWellknownLivenessOK The application is able to respond to HTTP requests + +swagger:response weaviateWellknownLivenessOK +*/ +type WeaviateWellknownLivenessOK struct { +} + +// NewWeaviateWellknownLivenessOK creates WeaviateWellknownLivenessOK with default headers values +func NewWeaviateWellknownLivenessOK() *WeaviateWellknownLivenessOK { + + return &WeaviateWellknownLivenessOK{} +} + +// WriteResponse to the client +func (o *WeaviateWellknownLivenessOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_liveness_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_liveness_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..c89ee331fb12fe88f698e88a4e6f2afda6dbc6ce --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_liveness_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// WeaviateWellknownLivenessURL generates an URL for the weaviate wellknown liveness operation +type WeaviateWellknownLivenessURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *WeaviateWellknownLivenessURL) WithBasePath(bp string) *WeaviateWellknownLivenessURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *WeaviateWellknownLivenessURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *WeaviateWellknownLivenessURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/.well-known/live" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *WeaviateWellknownLivenessURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *WeaviateWellknownLivenessURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *WeaviateWellknownLivenessURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on WeaviateWellknownLivenessURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on WeaviateWellknownLivenessURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *WeaviateWellknownLivenessURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_readiness.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_readiness.go new file mode 100644 index 0000000000000000000000000000000000000000..2314008f5d33d1cdb71ed0e8ce2e334c796eb1a3 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_readiness.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/weaviate/weaviate/entities/models" +) + +// WeaviateWellknownReadinessHandlerFunc turns a function with the right signature into a weaviate wellknown readiness handler +type WeaviateWellknownReadinessHandlerFunc func(WeaviateWellknownReadinessParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn WeaviateWellknownReadinessHandlerFunc) Handle(params WeaviateWellknownReadinessParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// WeaviateWellknownReadinessHandler interface for that can handle valid weaviate wellknown readiness params +type WeaviateWellknownReadinessHandler interface { + Handle(WeaviateWellknownReadinessParams, *models.Principal) middleware.Responder +} + +// NewWeaviateWellknownReadiness creates a new http.Handler for the weaviate wellknown readiness operation +func NewWeaviateWellknownReadiness(ctx *middleware.Context, handler WeaviateWellknownReadinessHandler) *WeaviateWellknownReadiness { + return &WeaviateWellknownReadiness{Context: ctx, Handler: handler} +} + +/* + WeaviateWellknownReadiness swagger:route GET /.well-known/ready weaviateWellknownReadiness + +Get application readiness. + +Determines whether the application is ready to receive traffic. 
Can be used for kubernetes readiness probe. +*/ +type WeaviateWellknownReadiness struct { + Context *middleware.Context + Handler WeaviateWellknownReadinessHandler +} + +func (o *WeaviateWellknownReadiness) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewWeaviateWellknownReadinessParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_readiness_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_readiness_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..255c6a6ccf6844816f788446eaa65499f07f322f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_readiness_parameters.go @@ -0,0 +1,57 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" +) + +// NewWeaviateWellknownReadinessParams creates a new WeaviateWellknownReadinessParams object +// +// There are no default values defined in the spec. +func NewWeaviateWellknownReadinessParams() WeaviateWellknownReadinessParams { + + return WeaviateWellknownReadinessParams{} +} + +// WeaviateWellknownReadinessParams contains all the bound params for the weaviate wellknown readiness operation +// typically these are obtained from a http.Request +// +// swagger:parameters weaviate.wellknown.readiness +type WeaviateWellknownReadinessParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewWeaviateWellknownReadinessParams() beforehand. +func (o *WeaviateWellknownReadinessParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_readiness_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_readiness_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..3358483587b94395aea8c83cdb21b285be6284bd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_readiness_responses.go @@ -0,0 +1,73 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" +) + +// WeaviateWellknownReadinessOKCode is the HTTP code returned for type WeaviateWellknownReadinessOK +const WeaviateWellknownReadinessOKCode int = 200 + +/* +WeaviateWellknownReadinessOK The application has completed its start-up routine and is ready to accept traffic. 
+ +swagger:response weaviateWellknownReadinessOK +*/ +type WeaviateWellknownReadinessOK struct { +} + +// NewWeaviateWellknownReadinessOK creates WeaviateWellknownReadinessOK with default headers values +func NewWeaviateWellknownReadinessOK() *WeaviateWellknownReadinessOK { + + return &WeaviateWellknownReadinessOK{} +} + +// WriteResponse to the client +func (o *WeaviateWellknownReadinessOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// WeaviateWellknownReadinessServiceUnavailableCode is the HTTP code returned for type WeaviateWellknownReadinessServiceUnavailable +const WeaviateWellknownReadinessServiceUnavailableCode int = 503 + +/* +WeaviateWellknownReadinessServiceUnavailable The application is currently not able to serve traffic. If other horizontal replicas of weaviate are available and they are capable of receiving traffic, all traffic should be redirected there instead. 
+ +swagger:response weaviateWellknownReadinessServiceUnavailable +*/ +type WeaviateWellknownReadinessServiceUnavailable struct { +} + +// NewWeaviateWellknownReadinessServiceUnavailable creates WeaviateWellknownReadinessServiceUnavailable with default headers values +func NewWeaviateWellknownReadinessServiceUnavailable() *WeaviateWellknownReadinessServiceUnavailable { + + return &WeaviateWellknownReadinessServiceUnavailable{} +} + +// WriteResponse to the client +func (o *WeaviateWellknownReadinessServiceUnavailable) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(503) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_readiness_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_readiness_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..21a59a62a627160f421722ff68737b6aade4a012 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/weaviate_wellknown_readiness_urlbuilder.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// WeaviateWellknownReadinessURL generates an URL for the weaviate wellknown readiness operation +type WeaviateWellknownReadinessURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *WeaviateWellknownReadinessURL) WithBasePath(bp string) *WeaviateWellknownReadinessURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *WeaviateWellknownReadinessURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *WeaviateWellknownReadinessURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/.well-known/ready" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *WeaviateWellknownReadinessURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *WeaviateWellknownReadinessURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *WeaviateWellknownReadinessURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on WeaviateWellknownReadinessURL") + } + if host == "" 
{ + return nil, errors.New("host is required for a full url on WeaviateWellknownReadinessURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *WeaviateWellknownReadinessURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/well_known/get_well_known_openid_configuration.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/well_known/get_well_known_openid_configuration.go new file mode 100644 index 0000000000000000000000000000000000000000..4161373f4c7eb68abef9a5b0a0cb0a97c17003ca --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/well_known/get_well_known_openid_configuration.go @@ -0,0 +1,130 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package well_known + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetWellKnownOpenidConfigurationHandlerFunc turns a function with the right signature into a get well known openid configuration handler +type GetWellKnownOpenidConfigurationHandlerFunc func(GetWellKnownOpenidConfigurationParams, *models.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetWellKnownOpenidConfigurationHandlerFunc) Handle(params GetWellKnownOpenidConfigurationParams, principal *models.Principal) middleware.Responder { + return fn(params, principal) +} + +// GetWellKnownOpenidConfigurationHandler interface for that can handle valid get well known openid configuration params +type GetWellKnownOpenidConfigurationHandler interface { + Handle(GetWellKnownOpenidConfigurationParams, *models.Principal) middleware.Responder +} + +// NewGetWellKnownOpenidConfiguration creates a new http.Handler for the get well known openid configuration operation +func NewGetWellKnownOpenidConfiguration(ctx *middleware.Context, handler GetWellKnownOpenidConfigurationHandler) *GetWellKnownOpenidConfiguration { + return &GetWellKnownOpenidConfiguration{Context: ctx, Handler: handler} +} + +/* + GetWellKnownOpenidConfiguration swagger:route GET /.well-known/openid-configuration well-known oidc discovery getWellKnownOpenidConfiguration + +# OIDC discovery information if OIDC auth is enabled + +OIDC Discovery page, redirects to the token issuer if one is configured +*/ +type GetWellKnownOpenidConfiguration struct { + Context *middleware.Context + Handler GetWellKnownOpenidConfigurationHandler +} + +func (o *GetWellKnownOpenidConfiguration) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil 
{ + *r = *rCtx + } + var Params = NewGetWellKnownOpenidConfigurationParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *models.Principal + if uprinc != nil { + principal = uprinc.(*models.Principal) // this is really a models.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// GetWellKnownOpenidConfigurationOKBody get well known openid configuration o k body +// +// swagger:model GetWellKnownOpenidConfigurationOKBody +type GetWellKnownOpenidConfigurationOKBody struct { + + // OAuth Client ID + ClientID string `json:"clientId,omitempty" yaml:"clientId,omitempty"` + + // The Location to redirect to + Href string `json:"href,omitempty" yaml:"href,omitempty"` + + // OAuth Scopes + Scopes []string `json:"scopes,omitempty" yaml:"scopes,omitempty"` +} + +// Validate validates this get well known openid configuration o k body +func (o *GetWellKnownOpenidConfigurationOKBody) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this get well known openid configuration o k body based on context it is used +func (o *GetWellKnownOpenidConfigurationOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *GetWellKnownOpenidConfigurationOKBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetWellKnownOpenidConfigurationOKBody) UnmarshalBinary(b []byte) error { + var res GetWellKnownOpenidConfigurationOKBody + if err := 
swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/well_known/get_well_known_openid_configuration_parameters.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/well_known/get_well_known_openid_configuration_parameters.go new file mode 100644 index 0000000000000000000000000000000000000000..36c916fa92325ef2ed8c806df398f401ecc88935 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/well_known/get_well_known_openid_configuration_parameters.go @@ -0,0 +1,57 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package well_known + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" +) + +// NewGetWellKnownOpenidConfigurationParams creates a new GetWellKnownOpenidConfigurationParams object +// +// There are no default values defined in the spec. 
+func NewGetWellKnownOpenidConfigurationParams() GetWellKnownOpenidConfigurationParams { + + return GetWellKnownOpenidConfigurationParams{} +} + +// GetWellKnownOpenidConfigurationParams contains all the bound params for the get well known openid configuration operation +// typically these are obtained from a http.Request +// +// swagger:parameters GetWellKnownOpenidConfiguration +type GetWellKnownOpenidConfigurationParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetWellKnownOpenidConfigurationParams() beforehand. +func (o *GetWellKnownOpenidConfigurationParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/well_known/get_well_known_openid_configuration_responses.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/well_known/get_well_known_openid_configuration_responses.go new file mode 100644 index 0000000000000000000000000000000000000000..21400c73012f8405af45e994efb067c06d01d40e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/well_known/get_well_known_openid_configuration_responses.go @@ -0,0 +1,140 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package well_known + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/weaviate/weaviate/entities/models" +) + +// GetWellKnownOpenidConfigurationOKCode is the HTTP code returned for type GetWellKnownOpenidConfigurationOK +const GetWellKnownOpenidConfigurationOKCode int = 200 + +/* +GetWellKnownOpenidConfigurationOK Successful response, inspect body + +swagger:response getWellKnownOpenidConfigurationOK +*/ +type GetWellKnownOpenidConfigurationOK struct { + + /* + In: Body + */ + Payload *GetWellKnownOpenidConfigurationOKBody `json:"body,omitempty"` +} + +// NewGetWellKnownOpenidConfigurationOK creates GetWellKnownOpenidConfigurationOK with default headers values +func NewGetWellKnownOpenidConfigurationOK() *GetWellKnownOpenidConfigurationOK { + + return &GetWellKnownOpenidConfigurationOK{} +} + +// WithPayload adds the payload to the get well known openid configuration o k response +func (o *GetWellKnownOpenidConfigurationOK) WithPayload(payload *GetWellKnownOpenidConfigurationOKBody) *GetWellKnownOpenidConfigurationOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get well known openid configuration o k response +func (o *GetWellKnownOpenidConfigurationOK) SetPayload(payload *GetWellKnownOpenidConfigurationOKBody) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetWellKnownOpenidConfigurationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetWellKnownOpenidConfigurationNotFoundCode is the HTTP code returned for type GetWellKnownOpenidConfigurationNotFound +const GetWellKnownOpenidConfigurationNotFoundCode int = 404 + +/* 
+GetWellKnownOpenidConfigurationNotFound Not found, no oidc provider present + +swagger:response getWellKnownOpenidConfigurationNotFound +*/ +type GetWellKnownOpenidConfigurationNotFound struct { +} + +// NewGetWellKnownOpenidConfigurationNotFound creates GetWellKnownOpenidConfigurationNotFound with default headers values +func NewGetWellKnownOpenidConfigurationNotFound() *GetWellKnownOpenidConfigurationNotFound { + + return &GetWellKnownOpenidConfigurationNotFound{} +} + +// WriteResponse to the client +func (o *GetWellKnownOpenidConfigurationNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// GetWellKnownOpenidConfigurationInternalServerErrorCode is the HTTP code returned for type GetWellKnownOpenidConfigurationInternalServerError +const GetWellKnownOpenidConfigurationInternalServerErrorCode int = 500 + +/* +GetWellKnownOpenidConfigurationInternalServerError An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error. 
+ +swagger:response getWellKnownOpenidConfigurationInternalServerError +*/ +type GetWellKnownOpenidConfigurationInternalServerError struct { + + /* + In: Body + */ + Payload *models.ErrorResponse `json:"body,omitempty"` +} + +// NewGetWellKnownOpenidConfigurationInternalServerError creates GetWellKnownOpenidConfigurationInternalServerError with default headers values +func NewGetWellKnownOpenidConfigurationInternalServerError() *GetWellKnownOpenidConfigurationInternalServerError { + + return &GetWellKnownOpenidConfigurationInternalServerError{} +} + +// WithPayload adds the payload to the get well known openid configuration internal server error response +func (o *GetWellKnownOpenidConfigurationInternalServerError) WithPayload(payload *models.ErrorResponse) *GetWellKnownOpenidConfigurationInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get well known openid configuration internal server error response +func (o *GetWellKnownOpenidConfigurationInternalServerError) SetPayload(payload *models.ErrorResponse) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetWellKnownOpenidConfigurationInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/well_known/get_well_known_openid_configuration_urlbuilder.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/well_known/get_well_known_openid_configuration_urlbuilder.go new file mode 100644 index 0000000000000000000000000000000000000000..e4d02bf37d1c5fa4558ae2cdfe9f1c7134c46d0a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/operations/well_known/get_well_known_openid_configuration_urlbuilder.go 
@@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package well_known + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// GetWellKnownOpenidConfigurationURL generates an URL for the get well known openid configuration operation +type GetWellKnownOpenidConfigurationURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetWellKnownOpenidConfigurationURL) WithBasePath(bp string) *GetWellKnownOpenidConfigurationURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *GetWellKnownOpenidConfigurationURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetWellKnownOpenidConfigurationURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/.well-known/openid-configuration" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetWellKnownOpenidConfigurationURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetWellKnownOpenidConfigurationURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetWellKnownOpenidConfigurationURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetWellKnownOpenidConfigurationURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetWellKnownOpenidConfigurationURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetWellKnownOpenidConfigurationURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/panics_middleware.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/panics_middleware.go new file mode 100644 index 0000000000000000000000000000000000000000..0f37928c4e6b2e1fd4b0e5e5b468d45630e697d5 --- 
/dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/panics_middleware.go @@ -0,0 +1,104 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "fmt" + "net" + "net/http" + "runtime/debug" + "syscall" + + entsentry "github.com/weaviate/weaviate/entities/sentry" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func makeCatchPanics(logger logrus.FieldLogger, metricRequestsTotal restApiRequestsTotal) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer handlePanics(logger, metricRequestsTotal, r) + next.ServeHTTP(w, r) + }) + } +} + +func handlePanics(logger logrus.FieldLogger, metricRequestsTotal restApiRequestsTotal, r *http.Request) { + recovered := recover() + if recovered == nil { + return + } + + err, ok := recovered.(error) + if !ok { + // not a typed error, we can not handle this error other returning it to + // the user + logger.WithFields(logrus.Fields{ + "error": recovered, + "method": r.Method, + "path": r.URL, + }).Errorf("%v", recovered) + + // This was not expected, so we want to print the stack, this will help us + // find the source of the issue if the user sends their logs + metricRequestsTotal.logServerError("", fmt.Errorf("%v", recovered)) + debug.PrintStack() + return + } + + if errors.Is(err, syscall.EPIPE) { + metricRequestsTotal.logUserError("") + handleBrokenPipe(err, logger, r) + return + } + + var netErr net.Error + if errors.As(err, &netErr) { + if netErr.Timeout() { + metricRequestsTotal.logUserError("") + handleTimeout(netErr, logger, r) + return + } + } + + // typed as error, but none we are currently handling explicitly + 
logger.WithError(err).WithFields(logrus.Fields{ + "method": r.Method, + "path": r.URL, + }).Error(err.Error()) + // This was not expected, so we want to print the stack, this will help us + // find the source of the issue if the user sends their logs + metricRequestsTotal.logServerError("", err) + + entsentry.Recover(r) + + debug.PrintStack() +} + +func handleBrokenPipe(err error, logger logrus.FieldLogger, r *http.Request) { + logger.WithError(err).WithFields(logrus.Fields{ + "method": r.Method, + "path": r.URL, + "description": "A broken pipe error occurs when Weaviate tries to write a response onto a connection that has already been closed or reset by the client. Typically, this is the case when the server was not able to respond within the configured client-side timeout.", + "hint": "Either try increasing the client-side timeout, or sending a computationally cheaper request, for example by reducing a batch size, reducing a limit, using less complex filters, etc.", + }).Errorf("broken pipe") +} + +func handleTimeout(err net.Error, logger logrus.FieldLogger, r *http.Request) { + logger.WithError(err).WithFields(logrus.Fields{ + "method": r.Method, + "path": r.URL, + "description": "An I/O timeout occurs when the request takes longer than the specified server-side timeout.", + "hint": "Either try increasing the server-side timeout using e.g. '--write-timeout=600s' as a command line flag when starting Weaviate, or try sending a computationally cheaper request, for example by reducing a batch size, reducing a limit, using less complex filters, etc. 
Note that this error is only thrown if client-side and server-side timeouts are not in sync, more precisely if the client-side timeout is longer than the server side timeout.", + }).Errorf("i/o timeout") +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/raft/handler.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/raft/handler.go new file mode 100644 index 0000000000000000000000000000000000000000..7bc24eb672b33cf7f38cffbea572248e0aae6173 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/raft/handler.go @@ -0,0 +1,169 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package raft + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + + "github.com/weaviate/weaviate/usecases/schema" +) + +// RaftHandler struct implements all the http endpoints for raft related requests +type RaftHandler struct { + schemaHandler schema.Handler +} + +// JoinNodeRequest defines the needed parameter for a node to join a raft cluster +type JoinNodeRequest struct { + // Node is the ID of the node that will join the cluster. + // It needs the following format NODE_ID[:NODE_PORT] + // If NODE_PORT is not specified, default raft interal port will be used + Node string `json:"node"` + // Voter is whether or not the node wants to join as a voter in the raft cluster + Voter bool `json:"voter"` +} + +// Validate ensures that r is valid. +// If an error is returned the request should not be used. 
+func (r JoinNodeRequest) Validate() error { + if r.Node == "" { + return fmt.Errorf("node parameter is empty") + } + return nil +} + +// JoinNode parses the received request and body, ensures that they are valid and then forwards the parameters to the scheme handler that +// will join the node to the cluster. +// If the request is invalid, returns http.StatusBadRequest +// If an internal error occurs, returns http.StatusInternalServerError +func (h RaftHandler) JoinNode(w http.ResponseWriter, r *http.Request) { + var joinRequest JoinNodeRequest + + // Decode the request body + err := json.NewDecoder(r.Body).Decode(&joinRequest) + if err != nil { + errString := err.Error() + if errors.Is(err, io.EOF) { + // Nicer error message than "EOF" + errString = "request body is empty" + } + http.Error(w, errString, http.StatusBadRequest) + return + } + + // Verify that the request is valid + err = joinRequest.Validate() + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + nodeAddrs := strings.Split(joinRequest.Node, ":") + // This should not happen with the previous validate step, but better safe than sorry + if len(nodeAddrs) < 1 { + http.Error(w, "node parameter is empty", http.StatusBadRequest) + return + } + nodeAddr := nodeAddrs[0] + nodePort := "" + if len(nodeAddrs) >= 2 { + nodePort = nodeAddrs[1] + } + + // Forward to the handler + err = h.schemaHandler.JoinNode(context.Background(), nodeAddr, nodePort, joinRequest.Voter) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// RemoveNodeRequest defines the needed parameter for a node to be removed from the raft cluster. +type RemoveNodeRequest struct { + // Node is the ID of the node that will be removed from the cluster. + Node string `json:"node"` +} + +// Validate ensures that r is valid. +// If an error is returned the request should not be used. 
+func (r RemoveNodeRequest) Validate() error { + if r.Node == "" { + return fmt.Errorf("node parameter is empty") + } + return nil +} + +// RemoveNode parses the received request and body, ensures that they are valid and then forwards the parameters to the scheme handler that +// will remove the node from the cluster. +// If the request is invalid, returns http.StatusBadRequest +// If an internal error occurs, returns http.StatusInternalServerError +func (h RaftHandler) RemoveNode(w http.ResponseWriter, r *http.Request) { + var removeRequest RemoveNodeRequest + + // Decode the request body + err := json.NewDecoder(r.Body).Decode(&removeRequest) + if err != nil { + errString := err.Error() + if errors.Is(err, io.EOF) { + // Nicer error message than "EOF" + errString = "request body is empty" + } + http.Error(w, errString, http.StatusBadRequest) + return + } + + // Verify that the request is valid + err = removeRequest.Validate() + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + err = h.schemaHandler.RemoveNode(context.Background(), removeRequest.Node) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// StoreSchemaV1 migrate from v2 (RAFT) to v1 (Non-RAFT) +func (h RaftHandler) StoreSchemaV1(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "", http.StatusMethodNotAllowed) + return + } + restore := h.schemaHandler.StoreSchemaV1() + w.Header().Set("Content-Type", "application/json") + + err := json.NewEncoder(w).Encode(restore) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +// ClusterRouter returns a *mux.Router that will requests starting with "/v1/cluster". +// The schemaHandler is kept in memory internally to forward request to once parsed. 
+func ClusterRouter(schemaHandler schema.Handler) *http.ServeMux { + raftHandler := RaftHandler{schemaHandler: schemaHandler} + r := http.NewServeMux() + + root := "/v1/cluster" + r.HandleFunc(root+"/join", raftHandler.JoinNode) + r.HandleFunc(root+"/remove", raftHandler.RemoveNode) + r.HandleFunc(root+"/schema-v1", raftHandler.StoreSchemaV1) + + return r +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/replication/handlers_replicate.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/replication/handlers_replicate.go new file mode 100644 index 0000000000000000000000000000000000000000..8003322b15556a9d87a8c23ddc3cd701524bcde6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/replication/handlers_replicate.go @@ -0,0 +1,454 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replication + +import ( + "errors" + "fmt" + + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus" + cerrors "github.com/weaviate/weaviate/adapters/handlers/rest/errors" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/replication" + "github.com/weaviate/weaviate/cluster/proto/api" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +func (h *replicationHandler) replicate(params replication.ReplicateParams, principal *models.Principal) middleware.Responder { + if err := params.Body.Validate(nil /* pass nil as we don't validate formatting here*/); err != nil { + return replication.NewReplicateBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + collection := schema.UppercaseClassName(*params.Body.Collection) + ctx := params.HTTPRequest.Context() + + if err := h.authorizer.Authorize(ctx, principal, authorization.CREATE, authorization.Replications(collection, *params.Body.Shard)); err != nil { + return replication.NewReplicateForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + id, err := uuid.NewRandom() + if err != nil { + return replication.NewReplicateInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("could not generate uuid v4: %w", err))) + } + uuid := strfmt.UUID(id.String()) + + replicationType := models.ReplicationReplicateReplicaRequestTypeCOPY + if params.Body.Type != nil { + replicationType = *params.Body.Type + } + if err := h.replicationManager.ReplicationReplicateReplica(params.HTTPRequest.Context(), uuid, *params.Body.SourceNode, collection, *params.Body.Shard, *params.Body.TargetNode, replicationType); err != nil { + if errors.Is(err, 
replicationTypes.ErrInvalidRequest) { + return replication.NewReplicateUnprocessableEntity().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + return replication.NewReplicateInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + h.logger.WithFields(logrus.Fields{ + "action": "replication_engine", + "op": "replicate", + "id": id, + "collection": *params.Body.Collection, + "shard": *params.Body.Shard, + "sourceNode": *params.Body.SourceNode, + "targetNode": *params.Body.TargetNode, + "type": params.Body.Type, + }).Info("replicate operation registered") + + return h.handleReplicationReplicateResponse(uuid) +} + +func (h *replicationHandler) getReplicationDetailsByReplicationId(params replication.ReplicationDetailsParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + response, err := h.replicationManager.GetReplicationDetailsByReplicationId(params.HTTPRequest.Context(), params.ID) + if errors.Is(err, replicationTypes.ErrReplicationOperationNotFound) { + if err := h.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Replications("*", "*")); err != nil { + return replication.NewReplicationDetailsForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + return h.handleOperationNotFoundError(params.ID, err) + } else if err != nil { + return h.handleInternalServerError(params.ID, err) + } + + if err := h.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Replications(response.Collection, response.ShardId)); err != nil { + return replication.NewReplicationDetailsForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + includeHistory := false + if params.IncludeHistory != nil { + includeHistory = *params.IncludeHistory + } + return h.handleReplicationDetailsResponse(includeHistory, response) +} + +func (h *replicationHandler) handleReplicationReplicateResponse(id strfmt.UUID) *replication.ReplicateOK { + return 
replication.NewReplicateOK().WithPayload(&models.ReplicationReplicateReplicaResponse{ID: &id}) +} + +func (h *replicationHandler) generateReplicationDetailsResponse(withHistory bool, response api.ReplicationDetailsResponse) *models.ReplicationReplicateDetailsReplicaResponse { + // Compute history only if requested + var history []*models.ReplicationReplicateDetailsReplicaStatus + if withHistory { + history = make([]*models.ReplicationReplicateDetailsReplicaStatus, len(response.StatusHistory)) + for i, status := range response.StatusHistory { + errors := make([]*models.ReplicationReplicateDetailsReplicaStatusError, 0, len(status.Errors)) + for _, err := range status.Errors { + errors = append(errors, &models.ReplicationReplicateDetailsReplicaStatusError{ + Message: err.Message, + WhenErroredUnixMs: err.ErroredTimeUnixMs, + }) + } + history[i] = &models.ReplicationReplicateDetailsReplicaStatus{ + State: status.State, + Errors: errors, + WhenStartedUnixMs: status.StartTimeUnixMs, + } + } + } + + errors := make([]*models.ReplicationReplicateDetailsReplicaStatusError, 0, len(response.Status.Errors)) + for _, err := range response.Status.Errors { + errors = append(errors, &models.ReplicationReplicateDetailsReplicaStatusError{ + Message: err.Message, + WhenErroredUnixMs: err.ErroredTimeUnixMs, + }) + } + + return &models.ReplicationReplicateDetailsReplicaResponse{ + Collection: &response.Collection, + ID: &response.Uuid, + Shard: &response.ShardId, + SourceNode: &response.SourceNodeId, + TargetNode: &response.TargetNodeId, + Uncancelable: response.Uncancelable, + ScheduledForCancel: response.ScheduledForCancel, + ScheduledForDelete: response.ScheduledForDelete, + Status: &models.ReplicationReplicateDetailsReplicaStatus{ + State: response.Status.State, + Errors: errors, + WhenStartedUnixMs: response.StartTimeUnixMs, + }, + StatusHistory: history, + Type: &response.TransferType, + WhenStartedUnixMs: response.StartTimeUnixMs, + } +} + +func (h *replicationHandler) 
generateArrayReplicationDetailsResponse(withHistory bool, response []api.ReplicationDetailsResponse) []*models.ReplicationReplicateDetailsReplicaResponse { + responses := make([]*models.ReplicationReplicateDetailsReplicaResponse, len(response)) + for i, r := range response { + responses[i] = h.generateReplicationDetailsResponse(withHistory, r) + } + return responses +} + +func (h *replicationHandler) handleReplicationDetailsResponse(withHistory bool, response api.ReplicationDetailsResponse) *replication.ReplicationDetailsOK { + return replication.NewReplicationDetailsOK().WithPayload(h.generateReplicationDetailsResponse(withHistory, response)) +} + +func (h *replicationHandler) handleOperationNotFoundError(id strfmt.UUID, err error) middleware.Responder { + h.logger.WithFields(logrus.Fields{ + "action": "replication", + "op": "replication_details", + "id": id, + "error": err, + }).Debug("replication operation not found") + + return replication.NewReplicationDetailsNotFound() +} + +func (h *replicationHandler) handleInternalServerError(id strfmt.UUID, err error) middleware.Responder { + h.logger.WithFields(logrus.Fields{ + "action": "replication", + "op": "replication_details", + "id": id, + "error": err, + }).Error("error while retrieving replication operation details") + + return replication.NewReplicationDetailsInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr( + fmt.Errorf("error while retrieving details for replication operation id '%s': %w", id, err))) +} + +func (h *replicationHandler) deleteReplication(params replication.DeleteReplicationParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + response, err := h.replicationManager.GetReplicationDetailsByReplicationId(ctx, params.ID) + if errors.Is(err, replicationTypes.ErrReplicationOperationNotFound) { + return replication.NewDeleteReplicationNoContent() + } else if err != nil { + return 
replication.NewDeleteReplicationInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if err := h.authorizer.Authorize(ctx, principal, authorization.DELETE, authorization.Replications(response.Collection, response.ShardId)); err != nil { + return replication.NewDeleteReplicationForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if err := h.replicationManager.DeleteReplication(ctx, params.ID); err != nil { + if errors.Is(err, replicationTypes.ErrReplicationOperationNotFound) { + return replication.NewDeleteReplicationNoContent() + } + if errors.Is(err, replicationTypes.ErrDeletionImpossible) { + return replication.NewDeleteReplicationConflict().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + return replication.NewDeleteReplicationInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + h.logger.WithFields(logrus.Fields{ + "action": "replication", + "op": "delete_replication", + "id": params.ID, + }).Info("replication operation stopped") + + return replication.NewDeleteReplicationNoContent() +} + +func (h *replicationHandler) deleteAllReplications(params replication.DeleteAllReplicationsParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + if err := h.authorizer.Authorize(ctx, principal, authorization.DELETE, authorization.Replications("*", "*")); err != nil { + return replication.NewDeleteAllReplicationsForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if err := h.replicationManager.DeleteAllReplications(ctx); err != nil { + return replication.NewDeleteAllReplicationsInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + h.logger.WithFields(logrus.Fields{ + "action": "replication", + "op": "delete_all_operations", + }).Info("delete all replication operations") + + return replication.NewDeleteAllReplicationsNoContent() +} + +func (h *replicationHandler) forceDeleteReplications(params 
replication.ForceDeleteReplicationsParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if err := h.authorizer.Authorize(ctx, principal, authorization.DELETE, authorization.Replications("*", "*")); err != nil { + return replication.NewForceDeleteReplicationsForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + all := params.Body == nil || (params.Body.Collection == "" && params.Body.Shard == "" && params.Body.ID == "" && params.Body.Node == "") + byCollection := params.Body != nil && params.Body.Collection != "" + byShard := params.Body != nil && params.Body.Shard != "" + byId := params.Body != nil && params.Body.ID != "" + byNode := params.Body != nil && params.Body.Node != "" + dryRun := params.Body != nil && params.Body.DryRun != nil && *params.Body.DryRun + + var err error + if dryRun { + var details []api.ReplicationDetailsResponse + + if all { + details, err = h.replicationManager.GetAllReplicationDetails(params.HTTPRequest.Context()) + } else if byCollection { + if byShard { + details, err = h.replicationManager.GetReplicationDetailsByCollectionAndShard(params.HTTPRequest.Context(), params.Body.Collection, params.Body.Shard) + } else { + details, err = h.replicationManager.GetReplicationDetailsByCollection(params.HTTPRequest.Context(), params.Body.Collection) + } + } else if byId { + detail, innerErr := h.replicationManager.GetReplicationDetailsByReplicationId(params.HTTPRequest.Context(), params.Body.ID) + if errors.Is(innerErr, replicationTypes.ErrReplicationOperationNotFound) { + return replication.NewForceDeleteReplicationsOK().WithPayload(&models.ReplicationReplicateForceDeleteResponse{ + Deleted: []strfmt.UUID{params.Body.ID}, + DryRun: dryRun, + }) + } + details = []api.ReplicationDetailsResponse{detail} + err = innerErr + } else if byNode { + details, err = h.replicationManager.GetReplicationDetailsByTargetNode(params.HTTPRequest.Context(), params.Body.Node) + } else { + // This can 
happen if the user provides only a shard id without a collection id + return replication.NewForceDeleteReplicationsBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("shard id provided without collection id"))) + } + if err != nil { + return replication.NewForceDeleteReplicationsInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + uuids := make([]strfmt.UUID, len(details)) + for i, detail := range details { + uuids[i] = detail.Uuid + } + + h.logger.WithFields(logrus.Fields{ + "action": "replication", + "op": "force_delete_operations", + }).Info("dry run of force delete replication operations") + + return replication.NewForceDeleteReplicationsOK().WithPayload(&models.ReplicationReplicateForceDeleteResponse{ + Deleted: uuids, + DryRun: true, + }) + } + + if all { + err = h.replicationManager.ForceDeleteAllReplications(params.HTTPRequest.Context()) + } else if byCollection { + if byShard { + err = h.replicationManager.ForceDeleteReplicationsByCollectionAndShard(params.HTTPRequest.Context(), params.Body.Collection, params.Body.Shard) + } else { + err = h.replicationManager.ForceDeleteReplicationsByCollection(params.HTTPRequest.Context(), params.Body.Collection) + } + } else if byId { + innerErr := h.replicationManager.ForceDeleteReplicationByUuid(params.HTTPRequest.Context(), params.Body.ID) + if errors.Is(innerErr, replicationTypes.ErrReplicationOperationNotFound) { + return replication.NewForceDeleteReplicationsOK().WithPayload(&models.ReplicationReplicateForceDeleteResponse{ + Deleted: []strfmt.UUID{params.Body.ID}, + DryRun: dryRun, + }) + } + err = innerErr + } else if byNode { + err = h.replicationManager.ForceDeleteReplicationsByTargetNode(params.HTTPRequest.Context(), params.Body.Node) + } else { + // This can happen if the user provides only a shard id without a collection id + return replication.NewForceDeleteReplicationsBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("shard id provided without 
collection id"))) + } + if err != nil { + return replication.NewForceDeleteReplicationsInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + h.logger.WithFields(logrus.Fields{ + "action": "replication", + "op": "force_delete_operations", + }).Info("force delete replication operations") + + return replication.NewForceDeleteReplicationsOK().WithPayload(&models.ReplicationReplicateForceDeleteResponse{}) +} + +func (h *replicationHandler) cancelReplication(params replication.CancelReplicationParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + response, err := h.replicationManager.GetReplicationDetailsByReplicationId(ctx, params.ID) + if errors.Is(err, replicationTypes.ErrReplicationOperationNotFound) { + return replication.NewCancelReplicationNoContent() + } else if err != nil { + return replication.NewCancelReplicationInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if err := h.authorizer.Authorize(ctx, principal, authorization.UPDATE, authorization.Replications(response.Collection, response.ShardId)); err != nil { + return replication.NewCancelReplicationForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + if err := h.replicationManager.CancelReplication(ctx, params.ID); err != nil { + if errors.Is(err, replicationTypes.ErrReplicationOperationNotFound) { + return replication.NewCancelReplicationNoContent() + } + if errors.Is(err, replicationTypes.ErrCancellationImpossible) { + return replication.NewCancelReplicationConflict().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + return replication.NewCancelReplicationInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + h.logger.WithFields(logrus.Fields{ + "action": "replication", + "op": "cancel_replication", + "id": params.ID, + }).Info("replication operation cancelled") + + return replication.NewCancelReplicationNoContent() +} + +func (h *replicationHandler) 
listReplication(params replication.ListReplicationParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + + if err := h.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Replications("*", "*")); err != nil { + return replication.NewListReplicationForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + var response []api.ReplicationDetailsResponse + var err error + + if params.Collection == nil && params.Shard == nil && params.TargetNode == nil { + response, err = h.replicationManager.GetAllReplicationDetails(params.HTTPRequest.Context()) + } else if params.Collection != nil { + if params.Shard != nil { + response, err = h.replicationManager.GetReplicationDetailsByCollectionAndShard(params.HTTPRequest.Context(), *params.Collection, *params.Shard) + } else { + response, err = h.replicationManager.GetReplicationDetailsByCollection(params.HTTPRequest.Context(), *params.Collection) + } + } else if params.TargetNode != nil { + response, err = h.replicationManager.GetReplicationDetailsByTargetNode(params.HTTPRequest.Context(), *params.TargetNode) + } else { + // This can happen if the user provides only a shard id without a collection id + return replication.NewListReplicationBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("shard id provided without collection id"))) + } + + // Handle error if any + if errors.Is(err, replicationTypes.ErrReplicationOperationNotFound) { + return replication.NewListReplicationOK() // No content is returned if no replication operations are found + } else if err != nil { + return replication.NewListReplicationInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + // Parse response into the correct format and return + includeHistory := false + if params.IncludeHistory != nil { + includeHistory = *params.IncludeHistory + } + return 
replication.NewListReplicationOK().WithPayload(h.generateArrayReplicationDetailsResponse(includeHistory, response)) +} + +func (h *replicationHandler) generateShardingStateResponse(collection string, shards map[string][]string) *models.ReplicationShardingStateResponse { + shardsResponse := make([]*models.ReplicationShardReplicas, 0, len(shards)) + for shard, replicas := range shards { + shardsResponse = append(shardsResponse, &models.ReplicationShardReplicas{ + Shard: shard, + Replicas: replicas, + }) + } + return &models.ReplicationShardingStateResponse{ + ShardingState: &models.ReplicationShardingState{ + Collection: collection, + Shards: shardsResponse, + }, + } +} + +func (h *replicationHandler) getCollectionShardingState(params replication.GetCollectionShardingStateParams, principal *models.Principal) middleware.Responder { + ctx := params.HTTPRequest.Context() + if params.Collection == nil { + return replication.NewGetCollectionShardingStateBadRequest().WithPayload(cerrors.ErrPayloadFromSingleErr(fmt.Errorf("collection is required"))) + } + collection := *params.Collection + + shard := "*" + if params.Shard != nil { + shard = *params.Shard + } + + if err := h.authorizer.Authorize(ctx, principal, authorization.READ, collection, shard); err != nil { + return replication.NewGetCollectionShardingStateForbidden().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) + } + + var shardingState api.ShardingState + var err error + if params.Shard != nil { + shardingState, err = h.replicationManager.QueryShardingStateByCollectionAndShard(params.HTTPRequest.Context(), collection, shard) + } else { + shardingState, err = h.replicationManager.QueryShardingStateByCollection(params.HTTPRequest.Context(), collection) + } + + if errors.Is(err, replicationTypes.ErrNotFound) { + return replication.NewGetCollectionShardingStateNotFound() + } else if err != nil { + return replication.NewGetCollectionShardingStateInternalServerError().WithPayload(cerrors.ErrPayloadFromSingleErr(err)) 
+ } + + return replication.NewGetCollectionShardingStateOK().WithPayload(h.generateShardingStateResponse(shardingState.Collection, shardingState.Shards)) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/replication/handlers_replicate_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/replication/handlers_replicate_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c2538881107c85362d163acba937789fbe8412fc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/replication/handlers_replicate_test.go @@ -0,0 +1,541 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package replication + +import ( + "fmt" + "math/rand" + "net/http" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus" + logrustest "github.com/sirupsen/logrus/hooks/test" + "github.com/weaviate/weaviate/usecases/auth/authorization" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/replication" + "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/models" +) + +func createReplicationHandlerWithMocks(t *testing.T, logger *logrus.Logger) (*replicationHandler, *authorization.MockAuthorizer, *types.MockManager) { + t.Helper() + mockAuthorizer := authorization.NewMockAuthorizer(t) + mockReplicationManager := types.NewMockManager(t) + + handler := &replicationHandler{ + authorizer: mockAuthorizer, + replicationManager: mockReplicationManager, + logger: logger, + } + + return handler, mockAuthorizer, 
mockReplicationManager +} + +func TestReplicationReplicate(t *testing.T) { + t.Run("successful replication", func(t *testing.T) { + // GIVEN + handler, mockAuthorizer, mockReplicationManager := createReplicationHandlerWithMocks(t, createNullLogger(t)) + + collection := fmt.Sprintf("Collection%d", randomInt(10)) + shard := fmt.Sprintf("shard-%d", randomInt(10)) + sourceNode := fmt.Sprintf("node-%d", randomInt(5)*2) + targetNode := fmt.Sprintf("node-%d", randomInt(5)*2+1) + replicationType := randomReplicationType() + params := replication.ReplicateParams{ + HTTPRequest: &http.Request{}, + Body: &models.ReplicationReplicateReplicaRequest{ + Collection: &collection, + TargetNode: &targetNode, + Shard: &shard, + SourceNode: &sourceNode, + Type: &replicationType, + }, + } + + mockAuthorizer.EXPECT().Authorize(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockReplicationManager.EXPECT().ReplicationReplicateReplica(mock.Anything, mock.AnythingOfType("strfmt.UUID"), sourceNode, collection, shard, targetNode, replicationType).Return(nil) + + // WHEN + response := handler.replicate(params, &models.Principal{}) + + // THEN + assert.IsType(t, &replication.ReplicateOK{}, response) + assert.NotNil(t, response.(*replication.ReplicateOK).Payload.ID) + mockAuthorizer.AssertExpectations(t) + mockReplicationManager.AssertExpectations(t) + }) + + t.Run("missing collection in request body", func(t *testing.T) { + // GIVEN + handler, _, _ := createReplicationHandlerWithMocks(t, createNullLogger(t)) + + shard := fmt.Sprintf("shard-%d", randomInt(10)) + sourceNode := fmt.Sprintf("node-%d", randomInt(5)*2) + targetNode := fmt.Sprintf("node-%d", randomInt(5)*2+1) + replicationType := randomReplicationType() + params := replication.ReplicateParams{ + HTTPRequest: &http.Request{}, + Body: &models.ReplicationReplicateReplicaRequest{ + TargetNode: &targetNode, + Shard: &shard, + SourceNode: &sourceNode, + Type: &replicationType, + }, + } + + // WHEN + response := 
handler.replicate(params, &models.Principal{}) + + // THEN + assert.IsType(t, &replication.ReplicateBadRequest{}, response) + }) + + t.Run("missing target node id in request body", func(t *testing.T) { + // GIVEN + handler, _, _ := createReplicationHandlerWithMocks(t, createNullLogger(t)) + + collection := fmt.Sprintf("Collection%d", randomInt(10)) + shard := fmt.Sprintf("shard-%d", randomInt(10)) + sourceNode := fmt.Sprintf("node-%d", randomInt(5)*2) + replicationType := randomReplicationType() + params := replication.ReplicateParams{ + HTTPRequest: &http.Request{}, + Body: &models.ReplicationReplicateReplicaRequest{ + Collection: &collection, + Shard: &shard, + SourceNode: &sourceNode, + Type: &replicationType, + }, + } + + // WHEN + response := handler.replicate(params, &models.Principal{}) + + // THEN + assert.IsType(t, &replication.ReplicateBadRequest{}, response) + }) + + t.Run("missing shard id in request body", func(t *testing.T) { + // GIVEN + handler, _, _ := createReplicationHandlerWithMocks(t, createNullLogger(t)) + + collection := fmt.Sprintf("Collection%d", randomInt(10)) + sourceNode := fmt.Sprintf("node-%d", randomInt(5)*2) + targetNode := fmt.Sprintf("node-%d", randomInt(5)*2+1) + replicationType := randomReplicationType() + params := replication.ReplicateParams{ + HTTPRequest: &http.Request{}, + Body: &models.ReplicationReplicateReplicaRequest{ + Collection: &collection, + TargetNode: &targetNode, + SourceNode: &sourceNode, + Type: &replicationType, + }, + } + + // WHEN + response := handler.replicate(params, &models.Principal{}) + + // THEN + assert.IsType(t, &replication.ReplicateBadRequest{}, response) + }) + + t.Run("missing source node id in request body", func(t *testing.T) { + // GIVEN + handler, _, _ := createReplicationHandlerWithMocks(t, createNullLogger(t)) + + collection := fmt.Sprintf("Collection%d", randomInt(10)) + shard := fmt.Sprintf("shard-%d", randomInt(10)) + targetNode := fmt.Sprintf("node-%d", randomInt(5)*2+1) + 
replicationType := randomReplicationType() + params := replication.ReplicateParams{ + HTTPRequest: &http.Request{}, + Body: &models.ReplicationReplicateReplicaRequest{ + Collection: &collection, + Shard: &shard, + TargetNode: &targetNode, + Type: &replicationType, + }, + } + + // WHEN + response := handler.replicate(params, &models.Principal{}) + + // THEN + assert.IsType(t, &replication.ReplicateBadRequest{}, response) + }) + + t.Run("unprocessable entity error", func(t *testing.T) { + // GIVEN + handler, mockAuthorizer, mockReplicationManager := createReplicationHandlerWithMocks(t, createNullLogger(t)) + + collection := fmt.Sprintf("Collection%d", randomInt(10)) + shard := fmt.Sprintf("shard-%d", randomInt(10)) + sourceNode := fmt.Sprintf("node-%d", randomInt(5)*2) + targetNode := fmt.Sprintf("node-%d", randomInt(5)*2+1) + replicationType := randomReplicationType() + params := replication.ReplicateParams{ + HTTPRequest: &http.Request{}, + Body: &models.ReplicationReplicateReplicaRequest{ + Collection: &collection, + TargetNode: &targetNode, + Shard: &shard, + SourceNode: &sourceNode, + Type: &replicationType, + }, + } + + mockAuthorizer.EXPECT().Authorize(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockReplicationManager.EXPECT().ReplicationReplicateReplica(mock.Anything, mock.AnythingOfType("strfmt.UUID"), sourceNode, collection, shard, targetNode, replicationType).Return(types.ErrInvalidRequest) + + // WHEN + response := handler.replicate(params, &models.Principal{}) + + // THEN + assert.IsType(t, &replication.ReplicateUnprocessableEntity{}, response) + mockAuthorizer.AssertExpectations(t) + mockReplicationManager.AssertExpectations(t) + }) + + t.Run("internal server error", func(t *testing.T) { + // GIVEN + handler, mockAuthorizer, mockReplicationManager := createReplicationHandlerWithMocks(t, createNullLogger(t)) + + collection := fmt.Sprintf("Collection%d", randomInt(10)) + shard := fmt.Sprintf("shard-%d", randomInt(10)) + 
sourceNode := fmt.Sprintf("node-%d", randomInt(5)*2) + targetNode := fmt.Sprintf("node-%d", randomInt(5)*2+1) + replicationType := randomReplicationType() + params := replication.ReplicateParams{ + HTTPRequest: &http.Request{}, + Body: &models.ReplicationReplicateReplicaRequest{ + Collection: &collection, + TargetNode: &targetNode, + Shard: &shard, + SourceNode: &sourceNode, + Type: &replicationType, + }, + } + + mockAuthorizer.EXPECT().Authorize(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockReplicationManager.EXPECT().ReplicationReplicateReplica(mock.Anything, mock.AnythingOfType("strfmt.UUID"), sourceNode, collection, shard, targetNode, replicationType).Return(errors.New("target node does not exist")) + + // WHEN + response := handler.replicate(params, &models.Principal{}) + + // THEN + assert.IsType(t, &replication.ReplicateInternalServerError{}, response) + mockAuthorizer.AssertExpectations(t) + mockReplicationManager.AssertExpectations(t) + }) + + t.Run("authorization error", func(t *testing.T) { + // GIVEN + handler, mockAuthorizer, _ := createReplicationHandlerWithMocks(t, createNullLogger(t)) + + collection := fmt.Sprintf("Collection%d", randomInt(10)) + shard := fmt.Sprintf("shard-%d", randomInt(10)) + sourceNode := fmt.Sprintf("node-%d", randomInt(5)*2) + targetNode := fmt.Sprintf("node-%d", randomInt(5)*2+1) + replicationType := randomReplicationType() + params := replication.ReplicateParams{ + HTTPRequest: &http.Request{}, + Body: &models.ReplicationReplicateReplicaRequest{ + Collection: &collection, + TargetNode: &targetNode, + Shard: &shard, + SourceNode: &sourceNode, + Type: &replicationType, + }, + } + + mockAuthorizer.EXPECT().Authorize(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("authorization error")) + + // WHEN + response := handler.replicate(params, &models.Principal{}) + + // THEN + assert.IsType(t, &replication.ReplicateForbidden{}, response) + 
mockAuthorizer.AssertExpectations(t) + }) +} + +func TestGetReplicationDetailsByReplicationId(t *testing.T) { + t.Run("successful retrieval", func(t *testing.T) { + // GIVEN + handler, mockAuthorizer, mockReplicationManager := createReplicationHandlerWithMocks(t, createNullLogger(t)) + id := uuid4() + params := replication.ReplicationDetailsParams{ + ID: id, + HTTPRequest: &http.Request{}, + } + + collection := fmt.Sprintf("Collection%d", randomInt(10)) + shardId := fmt.Sprintf("shard-%d", randomInt(10)) + sourceNodeId := fmt.Sprintf("node-%d", randomInt(5)*2) + targetNodeId := fmt.Sprintf("node-%d", randomInt(5)*2+1) + statusOptions := []string{ + models.ReplicationReplicateDetailsReplicaStatusStateREGISTERED, + models.ReplicationReplicateDetailsReplicaStatusStateHYDRATING, + models.ReplicationReplicateDetailsReplicaStatusStateFINALIZING, + models.ReplicationReplicateDetailsReplicaStatusStateDEHYDRATING, + models.ReplicationReplicateDetailsReplicaStatusStateREADY, + models.ReplicationReplicateDetailsReplicaStatusStateCANCELLED, + } + status := randomString(statusOptions) + replicationType := randomReplicationType() + + startTime := time.Now().UnixMilli() + expectedResponse := api.ReplicationDetailsResponse{ + Uuid: id, + Collection: collection, + ShardId: shardId, + SourceNodeId: sourceNodeId, + TargetNodeId: targetNodeId, + Status: api.ReplicationDetailsState{ + State: status, + Errors: []api.ReplicationDetailsError{}, + StartTimeUnixMs: startTime, + }, + StatusHistory: []api.ReplicationDetailsState{}, + TransferType: replicationType, + StartTimeUnixMs: startTime, + } + + mockAuthorizer.EXPECT().Authorize(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockReplicationManager.EXPECT().GetReplicationDetailsByReplicationId(mock.Anything, id).Return(expectedResponse, nil) + + // WHEN + response := handler.getReplicationDetailsByReplicationId(params, &models.Principal{}) + + // THEN + assert.IsType(t, &replication.ReplicationDetailsOK{}, 
response) + mockAuthorizer.AssertExpectations(t) + mockReplicationManager.AssertExpectations(t) + + replicationDetails := response.(*replication.ReplicationDetailsOK) + assert.Equal(t, id, *replicationDetails.Payload.ID) + assert.Equal(t, collection, *replicationDetails.Payload.Collection) + assert.Equal(t, shardId, *replicationDetails.Payload.Shard) + assert.Equal(t, sourceNodeId, *replicationDetails.Payload.SourceNode) + assert.Equal(t, targetNodeId, *replicationDetails.Payload.TargetNode) + assert.Equal(t, status, replicationDetails.Payload.Status.State) + assert.Equal(t, 0, len(replicationDetails.Payload.Status.Errors)) + assert.Equal(t, startTime, replicationDetails.Payload.Status.WhenStartedUnixMs) + assert.Equal(t, 0, len(replicationDetails.Payload.StatusHistory)) + assert.Equal(t, replicationType, *replicationDetails.Payload.Type) + assert.Equal(t, startTime, replicationDetails.Payload.WhenStartedUnixMs) + }) + + t.Run("successful retrieval with history", func(t *testing.T) { + // GIVEN + handler, mockAuthorizer, mockReplicationManager := createReplicationHandlerWithMocks(t, createNullLogger(t)) + uuid := uuid4() + id := uint64(randomInt(100)) + params := replication.ReplicationDetailsParams{ + ID: uuid, + HTTPRequest: &http.Request{}, + IncludeHistory: &[]bool{true}[0], + } + + collection := fmt.Sprintf("Collection%d", randomInt(10)) + shardId := fmt.Sprintf("shard-%d", randomInt(10)) + sourceNodeId := fmt.Sprintf("node-%d", randomInt(5)*2) + targetNodeId := fmt.Sprintf("node-%d", randomInt(5)*2+1) + statusOptions := []string{ + models.ReplicationReplicateDetailsReplicaStatusStateREGISTERED, + models.ReplicationReplicateDetailsReplicaStatusStateHYDRATING, + models.ReplicationReplicateDetailsReplicaStatusStateFINALIZING, + models.ReplicationReplicateDetailsReplicaStatusStateDEHYDRATING, + models.ReplicationReplicateDetailsReplicaStatusStateREADY, + models.ReplicationReplicateDetailsReplicaStatusStateCANCELLED, + } + status := randomString(statusOptions) + 
historyStatus := randomString(statusOptions) + + startTime := time.Now().Add(-time.Hour).UnixMilli() + firstErrorTime := time.Now().Add(-time.Hour).UnixMilli() + secondErrorTime := time.Now().Add(-time.Hour).Add(time.Minute).UnixMilli() + + expectedResponse := api.ReplicationDetailsResponse{ + Uuid: uuid, + Id: id, + Collection: collection, + ShardId: shardId, + SourceNodeId: sourceNodeId, + TargetNodeId: targetNodeId, + Status: api.ReplicationDetailsState{ + State: status, + Errors: []api.ReplicationDetailsError{}, + }, + StatusHistory: []api.ReplicationDetailsState{ + { + State: historyStatus, + Errors: []api.ReplicationDetailsError{{Message: "error1", ErroredTimeUnixMs: firstErrorTime}, {Message: "error2", ErroredTimeUnixMs: secondErrorTime}}, + StartTimeUnixMs: startTime, + }, + }, + StartTimeUnixMs: startTime, + } + + mockAuthorizer.EXPECT().Authorize(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockReplicationManager.EXPECT().GetReplicationDetailsByReplicationId(mock.Anything, mock.AnythingOfType("strfmt.UUID")).Return(expectedResponse, nil) + + // WHEN + response := handler.getReplicationDetailsByReplicationId(params, &models.Principal{}) + + // THEN + assert.IsType(t, &replication.ReplicationDetailsOK{}, response) + mockAuthorizer.AssertExpectations(t) + mockReplicationManager.AssertExpectations(t) + + replicationDetails := response.(*replication.ReplicationDetailsOK) + assert.Equal(t, uuid, *replicationDetails.Payload.ID) + assert.Equal(t, collection, *replicationDetails.Payload.Collection) + assert.Equal(t, shardId, *replicationDetails.Payload.Shard) + assert.Equal(t, sourceNodeId, *replicationDetails.Payload.SourceNode) + assert.Equal(t, targetNodeId, *replicationDetails.Payload.TargetNode) + assert.Equal(t, status, replicationDetails.Payload.Status.State) + assert.Equal(t, 0, len(replicationDetails.Payload.Status.Errors)) + assert.Equal(t, startTime, replicationDetails.Payload.Status.WhenStartedUnixMs) + assert.Equal(t, 
historyStatus, replicationDetails.Payload.StatusHistory[0].State) + assert.Equal(t, "error1", replicationDetails.Payload.StatusHistory[0].Errors[0].Message) + assert.Equal(t, "error2", replicationDetails.Payload.StatusHistory[0].Errors[1].Message) + assert.Equal(t, firstErrorTime, replicationDetails.Payload.StatusHistory[0].Errors[0].WhenErroredUnixMs) + assert.Equal(t, secondErrorTime, replicationDetails.Payload.StatusHistory[0].Errors[1].WhenErroredUnixMs) + assert.Equal(t, startTime, replicationDetails.Payload.WhenStartedUnixMs) + }) + + t.Run("request id not found authorized", func(t *testing.T) { + // GIVEN + handler, mockAuthorizer, mockReplicationManager := createReplicationHandlerWithMocks(t, createNullLogger(t)) + id := uuid4() + params := replication.ReplicationDetailsParams{ + ID: id, + HTTPRequest: &http.Request{}, + } + + mockAuthorizer.EXPECT().Authorize(mock.Anything, mock.Anything, authorization.READ, authorization.Replications("*", "*")).Return(nil) + mockReplicationManager.EXPECT().GetReplicationDetailsByReplicationId(mock.Anything, id).Return(api.ReplicationDetailsResponse{}, types.ErrReplicationOperationNotFound) + + // WHEN + response := handler.getReplicationDetailsByReplicationId(params, &models.Principal{}) + + // THEN + assert.IsType(t, &replication.ReplicationDetailsNotFound{}, response) + mockAuthorizer.AssertExpectations(t) + }) + + t.Run("request id not found forbidden", func(t *testing.T) { + // GIVEN + handler, mockAuthorizer, mockReplicationManager := createReplicationHandlerWithMocks(t, createNullLogger(t)) + id := uuid4() + params := replication.ReplicationDetailsParams{ + ID: id, + HTTPRequest: &http.Request{}, + } + + mockAuthorizer.EXPECT().Authorize(mock.Anything, mock.Anything, authorization.READ, authorization.Replications("*", "*")).Return(fmt.Errorf("forbidden access")) + mockReplicationManager.EXPECT().GetReplicationDetailsByReplicationId(mock.Anything, id).Return(api.ReplicationDetailsResponse{}, 
types.ErrReplicationOperationNotFound) + + // WHEN + response := handler.getReplicationDetailsByReplicationId(params, &models.Principal{}) + + // THEN + assert.IsType(t, &replication.ReplicationDetailsForbidden{}, response) + mockAuthorizer.AssertExpectations(t) + }) + + t.Run("internal server error", func(t *testing.T) { + // GIVEN + handler, mockAuthorizer, mockReplicationManager := createReplicationHandlerWithMocks(t, createNullLogger(t)) + id := uuid4() + params := replication.ReplicationDetailsParams{ + ID: id, + HTTPRequest: &http.Request{}, + } + + mockReplicationManager.EXPECT().GetReplicationDetailsByReplicationId(mock.Anything, id).Return(api.ReplicationDetailsResponse{}, errors.New("internal error")) + + // WHEN + response := handler.getReplicationDetailsByReplicationId(params, &models.Principal{}) + + // THEN + assert.IsType(t, &replication.ReplicationDetailsInternalServerError{}, response) + mockAuthorizer.AssertExpectations(t) + }) + + t.Run("authorization error", func(t *testing.T) { + // GIVEN + handler, mockAuthorizer, mockReplicationManager := createReplicationHandlerWithMocks(t, createNullLogger(t)) + id := uuid4() + params := replication.ReplicationDetailsParams{ + ID: id, + HTTPRequest: &http.Request{}, + } + + // Retrieves details first by ID then authorizes on the collection/shard of the replication + mockReplicationManager.EXPECT().GetReplicationDetailsByReplicationId(mock.Anything, id).Return(api.ReplicationDetailsResponse{}, nil) + mockAuthorizer.EXPECT().Authorize(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("forbidden access")) + + // WHEN + response := handler.getReplicationDetailsByReplicationId(params, &models.Principal{}) + + // THEN + assert.IsType(t, &replication.ReplicationDetailsForbidden{}, response) + mockAuthorizer.AssertExpectations(t) + }) +} + +func createNullLogger(t *testing.T) *logrus.Logger { + t.Helper() + logger, _ := logrustest.NewNullLogger() + return logger +} + +func randomInt(max 
int64) int64 { + if max <= 0 { + panic(fmt.Sprintf("max parameter must be positive, received %d", max)) + } + + return rand.Int63n(max) +} + +func uuid4() strfmt.UUID { + id, err := uuid.NewRandom() + if err != nil { + panic(fmt.Sprintf("failed to generate UUID: %v", err)) + } + return strfmt.UUID(id.String()) +} + +func randomString(candidates []string) string { + if len(candidates) == 0 { + panic("candidates slice cannot be empty") + } + + return candidates[randomInt(int64(len(candidates)))] +} + +func randomReplicationType() string { + if rand.Uint64()%2 == 0 { + return api.COPY.String() + } + return api.MOVE.String() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/replication/handlers_setup.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/replication/handlers_setup.go new file mode 100644 index 0000000000000000000000000000000000000000..68f8af7d7956cdfcb8f4ba3268b5e2c0350c6861 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/replication/handlers_setup.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replication + +import ( + "github.com/go-openapi/runtime/middleware" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" + "github.com/weaviate/weaviate/adapters/handlers/rest/operations/replication" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type replicationHandler struct { + authorizer authorization.Authorizer + replicationManager replicationTypes.Manager + + logger logrus.FieldLogger + metrics *monitoring.PrometheusMetrics +} + +func SetupHandlers(enabled bool, api *operations.WeaviateAPI, replicationManager replicationTypes.Manager, metrics *monitoring.PrometheusMetrics, authorizer authorization.Authorizer, logger logrus.FieldLogger, +) { + if !enabled { + setupUnimplementedHandlers(api) + return + } + + h := &replicationHandler{ + authorizer: authorizer, + replicationManager: replicationManager, + logger: logger, + metrics: metrics, + } + api.ReplicationReplicateHandler = replication.ReplicateHandlerFunc(h.replicate) + api.ReplicationReplicationDetailsHandler = replication.ReplicationDetailsHandlerFunc(h.getReplicationDetailsByReplicationId) + api.ReplicationCancelReplicationHandler = replication.CancelReplicationHandlerFunc(h.cancelReplication) + api.ReplicationDeleteReplicationHandler = replication.DeleteReplicationHandlerFunc(h.deleteReplication) + api.ReplicationDeleteAllReplicationsHandler = replication.DeleteAllReplicationsHandlerFunc(h.deleteAllReplications) + api.ReplicationForceDeleteReplicationsHandler = replication.ForceDeleteReplicationsHandlerFunc(h.forceDeleteReplications) + + // Sharding state query handlers + api.ReplicationGetCollectionShardingStateHandler = replication.GetCollectionShardingStateHandlerFunc(h.getCollectionShardingState) + + // 
Replication node details query handlers + api.ReplicationListReplicationHandler = replication.ListReplicationHandlerFunc(h.listReplication) +} + +func setupUnimplementedHandlers(api *operations.WeaviateAPI) { + api.ReplicationReplicateHandler = replication.ReplicateHandlerFunc(func(replication.ReplicateParams, *models.Principal) middleware.Responder { + return replication.NewReplicationDetailsNotImplemented() + }) + api.ReplicationReplicationDetailsHandler = replication.ReplicationDetailsHandlerFunc(func(replication.ReplicationDetailsParams, *models.Principal) middleware.Responder { + return replication.NewReplicationDetailsNotImplemented() + }) + api.ReplicationCancelReplicationHandler = replication.CancelReplicationHandlerFunc(func(replication.CancelReplicationParams, *models.Principal) middleware.Responder { + return replication.NewCancelReplicationNotImplemented() + }) + api.ReplicationDeleteReplicationHandler = replication.DeleteReplicationHandlerFunc(func(replication.DeleteReplicationParams, *models.Principal) middleware.Responder { + return replication.NewDeleteReplicationNotImplemented() + }) + api.ReplicationDeleteAllReplicationsHandler = replication.DeleteAllReplicationsHandlerFunc(func(replication.DeleteAllReplicationsParams, *models.Principal) middleware.Responder { + return replication.NewDeleteAllReplicationsNotImplemented() + }) + api.ReplicationGetCollectionShardingStateHandler = replication.GetCollectionShardingStateHandlerFunc(func(replication.GetCollectionShardingStateParams, *models.Principal) middleware.Responder { + return replication.NewGetCollectionShardingStateNotImplemented() + }) + api.ReplicationListReplicationHandler = replication.ListReplicationHandlerFunc(func(replication.ListReplicationParams, *models.Principal) middleware.Responder { + return replication.NewListReplicationNotImplemented() + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/requests_total_metrics.go 
b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/requests_total_metrics.go new file mode 100644 index 0000000000000000000000000000000000000000..628c01a43709c28c4581c521f68c5be15a9756f8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/requests_total_metrics.go @@ -0,0 +1,123 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type RequestStatus int + +const ( + Ok RequestStatus = iota + UserError + ServerError +) + +func (s RequestStatus) String() string { + switch s { + case Ok: + return "ok" + case UserError: + return "user_error" + case ServerError: + return "server_error" + } + return "unknown" +} + +type requestsTotalMetric struct { + requestsTotal *prometheus.GaugeVec + groupClasses bool + api string +} + +func newRequestsTotalMetric(prom *monitoring.PrometheusMetrics, api string) *requestsTotalMetric { + if prom == nil { + return nil + } + return &requestsTotalMetric{ + requestsTotal: prom.RequestsTotal, + groupClasses: prom.Group, + api: api, + } +} + +func (m *requestsTotalMetric) RequestsTotalInc(status RequestStatus, className, queryType string) { + if m == nil { + return + } + + if m.groupClasses { + className = "n/a" + } + + m.requestsTotal.With(prometheus.Labels{ + "status": status.String(), + "class_name": className, + "api": m.api, + "query_type": queryType, + }).Inc() +} + +type restApiRequestsTotal interface { + logError(className string, err error) + logOk(className string) + logUserError(className string) + logServerError(className string, err error) +} + +type restApiRequestsTotalImpl struct { + metrics 
*requestsTotalMetric + api, queryType string + logger logrus.FieldLogger +} + +func (e *restApiRequestsTotalImpl) logOk(className string) { + if e.metrics != nil { + e.metrics.RequestsTotalInc(Ok, className, e.queryType) + } +} + +func (e *restApiRequestsTotalImpl) logUserError(className string) { + if e.metrics != nil { + e.metrics.RequestsTotalInc(UserError, className, e.queryType) + } +} + +func (e *restApiRequestsTotalImpl) logServerError(className string, err error) { + e.logger.WithFields(logrus.Fields{ + "action": "requests_total", + "api": e.api, + "query_type": e.queryType, + "class_name": className, + }).WithError(err).Error("unexpected error") + if e.metrics != nil { + e.metrics.RequestsTotalInc(ServerError, className, e.queryType) + } +} + +type panicsRequestsTotal struct { + *restApiRequestsTotalImpl +} + +func newPanicsRequestsTotal(metrics *monitoring.PrometheusMetrics, logger logrus.FieldLogger) restApiRequestsTotal { + return &panicsRequestsTotal{ + restApiRequestsTotalImpl: &restApiRequestsTotalImpl{newRequestsTotalMetric(metrics, "rest"), "rest", "", logger}, + } +} + +func (e *panicsRequestsTotal) logError(className string, err error) { + e.logServerError(className, err) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/server.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/server.go new file mode 100644 index 0000000000000000000000000000000000000000..169a86fdc01e962c34bda961f4f696956d1b8dc6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/server.go @@ -0,0 +1,518 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. 
+ +package rest + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "log" + "net" + "net/http" + "os" + "os/signal" + "strconv" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/go-openapi/runtime/flagext" + "github.com/go-openapi/swag" + flags "github.com/jessevdk/go-flags" + "golang.org/x/net/netutil" + + "github.com/weaviate/weaviate/adapters/handlers/rest/operations" +) + +const ( + schemeHTTP = "http" + schemeHTTPS = "https" + schemeUnix = "unix" +) + +var defaultSchemes []string + +func init() { + defaultSchemes = []string{ + schemeHTTPS, + } +} + +// NewServer creates a new api weaviate server but does not configure it +func NewServer(api *operations.WeaviateAPI) *Server { + s := new(Server) + + s.shutdown = make(chan struct{}) + s.api = api + s.interrupt = make(chan os.Signal, 1) + return s +} + +// ConfigureAPI configures the API and handlers. +func (s *Server) ConfigureAPI() { + if s.api != nil { + s.handler = configureAPI(s.api) + } +} + +// ConfigureFlags configures the additional flags defined by the handlers. Needs to be called before the parser.Parse +func (s *Server) ConfigureFlags() { + if s.api != nil { + configureFlags(s.api) + } +} + +// Server for the weaviate API +type Server struct { + EnabledListeners []string `long:"scheme" description:"the listeners to enable, this can be repeated and defaults to the schemes in the swagger spec"` + CleanupTimeout time.Duration `long:"cleanup-timeout" description:"grace period for which to wait before killing idle connections" default:"10s"` + GracefulTimeout time.Duration `long:"graceful-timeout" description:"grace period for which to wait before shutting down the server" default:"15s"` + MaxHeaderSize flagext.ByteSize `long:"max-header-size" description:"controls the maximum number of bytes the server will read parsing the request header's keys and values, including the request line. It does not limit the size of the request body." 
default:"1MiB"` + + SocketPath flags.Filename `long:"socket-path" description:"the unix socket to listen on" default:"/var/run/weaviate.sock"` + domainSocketL net.Listener + + Host string `long:"host" description:"the IP to listen on" default:"localhost" env:"HOST"` + Port int `long:"port" description:"the port to listen on for insecure connections, defaults to a random value" env:"PORT"` + ListenLimit int `long:"listen-limit" description:"limit the number of outstanding requests"` + KeepAlive time.Duration `long:"keep-alive" description:"sets the TCP keep-alive timeouts on accepted connections. It prunes dead TCP connections ( e.g. closing laptop mid-download)" default:"3m"` + ReadTimeout time.Duration `long:"read-timeout" description:"maximum duration before timing out read of the request" default:"30s"` + WriteTimeout time.Duration `long:"write-timeout" description:"maximum duration before timing out write of the response" default:"60s"` + httpServerL net.Listener + + TLSHost string `long:"tls-host" description:"the IP to listen on for tls, when not specified it's the same as --host" env:"TLS_HOST"` + TLSPort int `long:"tls-port" description:"the port to listen on for secure connections, defaults to a random value" env:"TLS_PORT"` + TLSCertificate flags.Filename `long:"tls-certificate" description:"the certificate to use for secure connections" env:"TLS_CERTIFICATE"` + TLSCertificateKey flags.Filename `long:"tls-key" description:"the private key to use for secure connections" env:"TLS_PRIVATE_KEY"` + TLSCACertificate flags.Filename `long:"tls-ca" description:"the certificate authority file to be used with mutual tls auth" env:"TLS_CA_CERTIFICATE"` + TLSListenLimit int `long:"tls-listen-limit" description:"limit the number of outstanding requests"` + TLSKeepAlive time.Duration `long:"tls-keep-alive" description:"sets the TCP keep-alive timeouts on accepted connections. It prunes dead TCP connections ( e.g. 
closing laptop mid-download)"` + TLSReadTimeout time.Duration `long:"tls-read-timeout" description:"maximum duration before timing out read of the request"` + TLSWriteTimeout time.Duration `long:"tls-write-timeout" description:"maximum duration before timing out write of the response"` + httpsServerL net.Listener + + api *operations.WeaviateAPI + handler http.Handler + hasListeners bool + shutdown chan struct{} + shuttingDown int32 + interrupted bool + interrupt chan os.Signal +} + +// Logf logs message either via defined user logger or via system one if no user logger is defined. +func (s *Server) Logf(f string, args ...interface{}) { + if s.api != nil && s.api.Logger != nil { + s.api.Logger(f, args...) + } else { + log.Printf(f, args...) + } +} + +// Fatalf logs message either via defined user logger or via system one if no user logger is defined. +// Exits with non-zero status after printing +func (s *Server) Fatalf(f string, args ...interface{}) { + if s.api != nil && s.api.Logger != nil { + s.api.Logger(f, args...) + os.Exit(1) + } else { + log.Fatalf(f, args...) + } +} + +// SetAPI configures the server with the specified API. 
Needs to be called before Serve +func (s *Server) SetAPI(api *operations.WeaviateAPI) { + if api == nil { + s.api = nil + s.handler = nil + return + } + + s.api = api + s.handler = configureAPI(api) +} + +func (s *Server) hasScheme(scheme string) bool { + schemes := s.EnabledListeners + if len(schemes) == 0 { + schemes = defaultSchemes + } + + for _, v := range schemes { + if v == scheme { + return true + } + } + return false +} + +// Serve the api +func (s *Server) Serve() (err error) { + if !s.hasListeners { + if err = s.Listen(); err != nil { + return err + } + } + + // set default handler, if none is set + if s.handler == nil { + if s.api == nil { + return errors.New("can't create the default handler, as no api is set") + } + + s.SetHandler(s.api.Serve(nil)) + } + + wg := new(sync.WaitGroup) + once := new(sync.Once) + signalNotify(s.interrupt) + go handleInterrupt(once, s) + + servers := []*http.Server{} + + if s.hasScheme(schemeUnix) { + domainSocket := new(http.Server) + domainSocket.MaxHeaderBytes = int(s.MaxHeaderSize) + domainSocket.Handler = s.handler + if int64(s.CleanupTimeout) > 0 { + domainSocket.IdleTimeout = s.CleanupTimeout + } + + configureServer(domainSocket, "unix", string(s.SocketPath)) + + servers = append(servers, domainSocket) + wg.Add(1) + s.Logf("Serving weaviate at unix://%s", s.SocketPath) + go func(l net.Listener) { + defer wg.Done() + if err := domainSocket.Serve(l); err != nil && err != http.ErrServerClosed { + s.Fatalf("%v", err) + } + s.Logf("Stopped serving weaviate at unix://%s", s.SocketPath) + }(s.domainSocketL) + } + + if s.hasScheme(schemeHTTP) { + httpServer := new(http.Server) + httpServer.MaxHeaderBytes = int(s.MaxHeaderSize) + httpServer.ReadTimeout = s.ReadTimeout + httpServer.WriteTimeout = s.WriteTimeout + httpServer.SetKeepAlivesEnabled(int64(s.KeepAlive) > 0) + if s.ListenLimit > 0 { + s.httpServerL = netutil.LimitListener(s.httpServerL, s.ListenLimit) + } + + if int64(s.CleanupTimeout) > 0 { + httpServer.IdleTimeout 
= s.CleanupTimeout + } + + httpServer.Handler = s.handler + + configureServer(httpServer, "http", s.httpServerL.Addr().String()) + + servers = append(servers, httpServer) + wg.Add(1) + s.Logf("Serving weaviate at http://%s", s.httpServerL.Addr()) + go func(l net.Listener) { + defer wg.Done() + if err := httpServer.Serve(l); err != nil && err != http.ErrServerClosed { + s.Fatalf("%v", err) + } + s.Logf("Stopped serving weaviate at http://%s", l.Addr()) + }(s.httpServerL) + } + + if s.hasScheme(schemeHTTPS) { + httpsServer := new(http.Server) + httpsServer.MaxHeaderBytes = int(s.MaxHeaderSize) + httpsServer.ReadTimeout = s.TLSReadTimeout + httpsServer.WriteTimeout = s.TLSWriteTimeout + httpsServer.SetKeepAlivesEnabled(int64(s.TLSKeepAlive) > 0) + if s.TLSListenLimit > 0 { + s.httpsServerL = netutil.LimitListener(s.httpsServerL, s.TLSListenLimit) + } + if int64(s.CleanupTimeout) > 0 { + httpsServer.IdleTimeout = s.CleanupTimeout + } + httpsServer.Handler = s.handler + + // Inspired by https://blog.bracebin.com/achieving-perfect-ssl-labs-score-with-go + httpsServer.TLSConfig = &tls.Config{ + // Causes servers to use Go's default ciphersuite preferences, + // which are tuned to avoid attacks. Does nothing on clients. 
+ PreferServerCipherSuites: true, + // Only use curves which have assembly implementations + // https://github.com/golang/go/tree/master/src/crypto/elliptic + CurvePreferences: []tls.CurveID{tls.CurveP256}, + // Use modern tls mode https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + NextProtos: []string{"h2", "http/1.1"}, + // https://www.owasp.org/index.php/Transport_Layer_Protection_Cheat_Sheet#Rule_-_Only_Support_Strong_Protocols + MinVersion: tls.VersionTLS12, + // These ciphersuites support Forward Secrecy: https://en.wikipedia.org/wiki/Forward_secrecy + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + }, + } + + // build standard config from server options + if s.TLSCertificate != "" && s.TLSCertificateKey != "" { + httpsServer.TLSConfig.Certificates = make([]tls.Certificate, 1) + httpsServer.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(string(s.TLSCertificate), string(s.TLSCertificateKey)) + if err != nil { + return err + } + } + + if s.TLSCACertificate != "" { + // include specified CA certificate + caCert, caCertErr := os.ReadFile(string(s.TLSCACertificate)) + if caCertErr != nil { + return caCertErr + } + caCertPool := x509.NewCertPool() + ok := caCertPool.AppendCertsFromPEM(caCert) + if !ok { + return fmt.Errorf("cannot parse CA certificate") + } + httpsServer.TLSConfig.ClientCAs = caCertPool + httpsServer.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert + } + + // call custom TLS configurator + configureTLS(httpsServer.TLSConfig) + + if len(httpsServer.TLSConfig.Certificates) == 0 && httpsServer.TLSConfig.GetCertificate == nil { + // after standard and custom config are passed, this ends up with no certificate + if s.TLSCertificate == "" { + if s.TLSCertificateKey == "" { + 
s.Fatalf("the required flags `--tls-certificate` and `--tls-key` were not specified") + } + s.Fatalf("the required flag `--tls-certificate` was not specified") + } + if s.TLSCertificateKey == "" { + s.Fatalf("the required flag `--tls-key` was not specified") + } + // this happens with a wrong custom TLS configurator + s.Fatalf("no certificate was configured for TLS") + } + + configureServer(httpsServer, "https", s.httpsServerL.Addr().String()) + + servers = append(servers, httpsServer) + wg.Add(1) + s.Logf("Serving weaviate at https://%s", s.httpsServerL.Addr()) + go func(l net.Listener) { + defer wg.Done() + if err := httpsServer.Serve(l); err != nil && err != http.ErrServerClosed { + s.Fatalf("%v", err) + } + s.Logf("Stopped serving weaviate at https://%s", l.Addr()) + }(tls.NewListener(s.httpsServerL, httpsServer.TLSConfig)) + } + + wg.Add(1) + go s.handleShutdown(wg, &servers) + + wg.Wait() + return nil +} + +// Listen creates the listeners for the server +func (s *Server) Listen() error { + if s.hasListeners { // already done this + return nil + } + + if s.hasScheme(schemeHTTPS) { + // Use http host if https host wasn't defined + if s.TLSHost == "" { + s.TLSHost = s.Host + } + // Use http listen limit if https listen limit wasn't defined + if s.TLSListenLimit == 0 { + s.TLSListenLimit = s.ListenLimit + } + // Use http tcp keep alive if https tcp keep alive wasn't defined + if int64(s.TLSKeepAlive) == 0 { + s.TLSKeepAlive = s.KeepAlive + } + // Use http read timeout if https read timeout wasn't defined + if int64(s.TLSReadTimeout) == 0 { + s.TLSReadTimeout = s.ReadTimeout + } + // Use http write timeout if https write timeout wasn't defined + if int64(s.TLSWriteTimeout) == 0 { + s.TLSWriteTimeout = s.WriteTimeout + } + } + + if s.hasScheme(schemeUnix) { + domSockListener, err := net.Listen("unix", string(s.SocketPath)) + if err != nil { + return err + } + s.domainSocketL = domSockListener + } + + if s.hasScheme(schemeHTTP) { + listener, err := net.Listen("tcp", 
net.JoinHostPort(s.Host, strconv.Itoa(s.Port))) + if err != nil { + return err + } + + h, p, err := swag.SplitHostPort(listener.Addr().String()) + if err != nil { + return err + } + s.Host = h + s.Port = p + s.httpServerL = listener + } + + if s.hasScheme(schemeHTTPS) { + tlsListener, err := net.Listen("tcp", net.JoinHostPort(s.TLSHost, strconv.Itoa(s.TLSPort))) + if err != nil { + return err + } + + sh, sp, err := swag.SplitHostPort(tlsListener.Addr().String()) + if err != nil { + return err + } + s.TLSHost = sh + s.TLSPort = sp + s.httpsServerL = tlsListener + } + + s.hasListeners = true + return nil +} + +// Shutdown server and clean up resources +func (s *Server) Shutdown() error { + if atomic.CompareAndSwapInt32(&s.shuttingDown, 0, 1) { + close(s.shutdown) + } + return nil +} + +func (s *Server) handleShutdown(wg *sync.WaitGroup, serversPtr *[]*http.Server) { + // wg.Done must occur last, after s.api.ServerShutdown() + // (to preserve old behaviour) + defer wg.Done() + + <-s.shutdown + + servers := *serversPtr + + ctx, cancel := context.WithTimeout(context.TODO(), s.GracefulTimeout) + defer cancel() + + // first execute the pre-shutdown hook + s.api.PreServerShutdown() + + shutdownChan := make(chan bool) + for i := range servers { + server := servers[i] + go func() { + var success bool + defer func() { + shutdownChan <- success + }() + if err := server.Shutdown(ctx); err != nil { + // Error from closing listeners, or context timeout: + s.Logf("HTTP server Shutdown: %v", err) + } else { + success = true + } + }() + } + + // Wait until all listeners have successfully shut down before calling ServerShutdown + success := true + for range servers { + success = success && <-shutdownChan + } + if success { + s.api.ServerShutdown() + } +} + +// GetHandler returns a handler useful for testing +func (s *Server) GetHandler() http.Handler { + return s.handler +} + +// SetHandler allows for setting a http handler on this server +func (s *Server) SetHandler(handler 
http.Handler) { + s.handler = handler +} + +// UnixListener returns the domain socket listener +func (s *Server) UnixListener() (net.Listener, error) { + if !s.hasListeners { + if err := s.Listen(); err != nil { + return nil, err + } + } + return s.domainSocketL, nil +} + +// HTTPListener returns the http listener +func (s *Server) HTTPListener() (net.Listener, error) { + if !s.hasListeners { + if err := s.Listen(); err != nil { + return nil, err + } + } + return s.httpServerL, nil +} + +// TLSListener returns the https listener +func (s *Server) TLSListener() (net.Listener, error) { + if !s.hasListeners { + if err := s.Listen(); err != nil { + return nil, err + } + } + return s.httpsServerL, nil +} + +func handleInterrupt(once *sync.Once, s *Server) { + once.Do(func() { + for range s.interrupt { + if s.interrupted { + s.Logf("Server already shutting down") + continue + } + s.interrupted = true + s.Logf("Shutting down... ") + if err := s.Shutdown(); err != nil { + s.Logf("HTTP server Shutdown: %v", err) + } + } + }) +} + +func signalNotify(interrupt chan<- os.Signal) { + signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/source_ip_middleware.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/source_ip_middleware.go new file mode 100644 index 0000000000000000000000000000000000000000..a95093ddc92bf1a06fbbdeebe5a92af5f069decc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/source_ip_middleware.go @@ -0,0 +1,51 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package rest + +import ( + "context" + "net" + "net/http" + "strings" +) + +func addSourceIpToContext(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Your middleware logic here + sourceIp := getRealIP(r) + ctx := context.WithValue(r.Context(), "sourceIp", sourceIp) + r = r.WithContext(ctx) + + next.ServeHTTP(w, r) + }) +} + +func getRealIP(req *http.Request) string { + if ip := req.Header.Get("X-Real-IP"); ip != "" { + return ip + } + + if ip := req.Header.Get("X-Forwarded-For"); ip != "" { + // can contain multiple IPs, take the first one + ips := strings.Split(ip, ",") + if len(ips) > 0 { + return strings.TrimSpace(ips[0]) + } + } + + // Fall back to RemoteAddr + ip, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + return req.RemoteAddr + } + return ip +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/state/state.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/state/state.go new file mode 100644 index 0000000000000000000000000000000000000000..250aedd491f90097b312aa10675fc38ec4b301ba --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/state/state.go @@ -0,0 +1,111 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package state + +import ( + "context" + "net/http" + "sync" + + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac" + + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/adapters/handlers/graphql" + "github.com/weaviate/weaviate/adapters/handlers/rest/tenantactivity" + "github.com/weaviate/weaviate/adapters/handlers/rest/types" + "github.com/weaviate/weaviate/adapters/repos/classifications" + "github.com/weaviate/weaviate/adapters/repos/db" + rCluster "github.com/weaviate/weaviate/cluster" + "github.com/weaviate/weaviate/cluster/distributedtask" + "github.com/weaviate/weaviate/cluster/fsm" + "github.com/weaviate/weaviate/usecases/auth/authentication/anonymous" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + "github.com/weaviate/weaviate/usecases/auth/authentication/oidc" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/backup" + "github.com/weaviate/weaviate/usecases/cluster" + "github.com/weaviate/weaviate/usecases/config" + configRuntime "github.com/weaviate/weaviate/usecases/config/runtime" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/modules" + "github.com/weaviate/weaviate/usecases/monitoring" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/replica" + "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + "github.com/weaviate/weaviate/usecases/traverser" +) + +// State is the only source of application-wide state +// NOTE: This is not true yet, see gh-723 +// TODO: remove dependencies to anything that's not an ent or uc +type State struct { + OIDC *oidc.Client + AnonymousAccess *anonymous.Client + APIKey *apikey.ApiKey + APIKeyRemote *apikey.RemoteApiKey + Authorizer authorization.Authorizer + AuthzController authorization.Controller + AuthzSnapshotter fsm.Snapshotter + RBAC 
*rbac.Manager + + ServerConfig *config.WeaviateConfig + LDIntegration *configRuntime.LDIntegration + Logger *logrus.Logger + gqlMutex sync.Mutex + GraphQL graphql.GraphQL + Modules *modules.Provider + SchemaManager *schema.Manager + Cluster *cluster.State + RemoteIndexIncoming *sharding.RemoteIndexIncoming + RemoteNodeIncoming *sharding.RemoteNodeIncoming + RemoteReplicaIncoming *replica.RemoteReplicaIncoming + Traverser *traverser.Traverser + + ClassificationRepo *classifications.DistributedRepo + Metrics *monitoring.PrometheusMetrics + HTTPServerMetrics *monitoring.HTTPServerMetrics + GRPCServerMetrics *monitoring.GRPCServerMetrics + BackupManager *backup.Handler + DB *db.DB + BatchManager *objects.BatchManager + AutoSchemaManager *objects.AutoSchemaManager + ClusterHttpClient *http.Client + ReindexCtxCancel context.CancelCauseFunc + MemWatch *memwatch.Monitor + + ClusterService *rCluster.Service + TenantActivity *tenantactivity.Handler + InternalServer types.ClusterServer + + DistributedTaskScheduler *distributedtask.Scheduler + Migrator *db.Migrator +} + +// GetGraphQL is the safe way to retrieve GraphQL from the state as it can be +// replaced at runtime. 
Instead of passing appState.GraphQL to your adapters, +// pass appState itself which you can abstract with a local interface such as: +// +// type gqlProvider interface { GetGraphQL graphql.GraphQL } +func (s *State) GetGraphQL() graphql.GraphQL { + s.gqlMutex.Lock() + gql := s.GraphQL + s.gqlMutex.Unlock() + return gql +} + +func (s *State) SetGraphQL(gql graphql.GraphQL) { + s.gqlMutex.Lock() + s.GraphQL = gql + s.gqlMutex.Unlock() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/swagger_middleware/swagger_middleware.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/swagger_middleware/swagger_middleware.go new file mode 100644 index 0000000000000000000000000000000000000000..7666f6835ae44a1ff08d95404febbbadf12ded6b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/swagger_middleware/swagger_middleware.go @@ -0,0 +1,155 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package swagger_middleware + +import ( + "fmt" + "html/template" + "net/http" + "strings" +) + +const swaggerUIVersion = "3.19.4" + +type templateData struct { + Prefix string + APIKey string + APIToken string +} + +func AddMiddleware(swaggerJSON []byte, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.URL.Path, "/v1/swagger.json") && r.Method == http.MethodGet { + w.Header().Set("Content-Type", "application/json") + w.Write(swaggerJSON) + } else if strings.HasPrefix(r.URL.Path, "/v1/swagger") && r.Method == http.MethodGet { + renderSwagger(w, r) + } else { + next.ServeHTTP(w, r) + } + }) +} + +// renderswagger renders the swagger GUI +func renderSwagger(w http.ResponseWriter, r *http.Request) { + w.Header().Set("WWW-Authenticate", `Basic realm="Provide your key and token (as username as password respectively)"`) + + user, password, authOk := r.BasicAuth() + if !authOk { + http.Error(w, "Not authorized", http.StatusUnauthorized) + return + } + + t := template.New("Swagger") + t, err := t.Parse(swaggerTemplate) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // Create result string + d := templateData{ + Prefix: fmt.Sprintf("https://cdn.jsdelivr.net/npm/swagger-ui-dist@%s", swaggerUIVersion), + APIKey: user, + APIToken: password, + } + + err = t.ExecuteTemplate(w, "index", d) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +// tmpl is the page template to render GraphiQL +const swaggerTemplate = ` +{{ define "index" }} + + + + + + Weaviate API + + + + + + + +
    + + + + + + +{{ end }} +` diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/tenantactivity/handler.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/tenantactivity/handler.go new file mode 100644 index 0000000000000000000000000000000000000000..7ce86f794b3a8dea3f9b45a631fa0de68823eed3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/tenantactivity/handler.go @@ -0,0 +1,87 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package tenantactivity + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "sync" + + "github.com/weaviate/weaviate/entities/tenantactivity" +) + +type Handler struct { + mu sync.RWMutex + src ActivitySource +} + +type ActivitySource interface { + LocalTenantActivity(filter tenantactivity.UsageFilter) tenantactivity.ByCollection +} + +func NewHandler() *Handler { + return &Handler{} +} + +func (h *Handler) SetSource(source ActivitySource) { + h.mu.Lock() + defer h.mu.Unlock() + + h.src = source +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if h == nil { + // no tenant handler configured, for example because there is no + // monitoring activated. 
+ http.NotFound(w, r) + return + } + + h.mu.RLock() + defer h.mu.RUnlock() + + if h.src == nil { + w.Header().Add("retry-after", "30") + w.WriteHeader(http.StatusServiceUnavailable) + return + } + + var filter tenantactivity.UsageFilter + // parse ?filter from query params + if filterParam := strings.ToLower(r.URL.Query().Get("filter")); filterParam != "" { + switch filterParam { + case "reads", "read", "r": + filter = tenantactivity.UsageFilterOnlyReads + case "writes", "write", "w": + filter = tenantactivity.UsageFilterOnlyWrites + case "all", "a": + filter = tenantactivity.UsageFilterAll + default: + http.Error(w, fmt.Sprintf("invalid filter: %s", filterParam), http.StatusBadRequest) + return + } + } + + act := h.src.LocalTenantActivity(filter) + + payload, err := json.Marshal(act) + if err != nil { + http.Error(w, fmt.Errorf("encode json: %w", err).Error(), http.StatusInternalServerError) + } + + w.Header().Add("content-type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write(payload) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/tenantactivity/handler_test.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/tenantactivity/handler_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b20e0236403a71afca06abe7bd25729fe4fce7de --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/tenantactivity/handler_test.go @@ -0,0 +1,187 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package tenantactivity + +import ( + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/tenantactivity" +) + +func TestHandler_NilHandler(t *testing.T) { + // this would be the case when the feature is turned off entirely + var h *Handler + r := httptest.NewRequest(http.MethodGet, "/", nil) + w := httptest.NewRecorder() + + h.ServeHTTP(w, r) + + res := w.Result() + defer res.Body.Close() + assert.Equal(t, http.StatusNotFound, res.StatusCode) +} + +func TestHandler_NoSourceSet(t *testing.T) { + // this would be the case when a request comes in before the DB is fully + // loaded. + h := &Handler{} + r := httptest.NewRequest(http.MethodGet, "/", nil) + w := httptest.NewRecorder() + + h.ServeHTTP(w, r) + + res := w.Result() + defer res.Body.Close() + assert.Equal(t, http.StatusServiceUnavailable, res.StatusCode) + assert.Equal(t, "30", res.Header.Get("retry-after")) +} + +func TestHandler_ValidSource(t *testing.T) { + type filterTest struct { + name string + expected tenantactivity.UsageFilter + status int + } + + tests := []filterTest{ + { + name: "", + expected: tenantactivity.UsageFilterAll, + status: http.StatusOK, + }, + { + name: "all", + expected: tenantactivity.UsageFilterAll, + status: http.StatusOK, + }, + { + name: "All", + expected: tenantactivity.UsageFilterAll, + status: http.StatusOK, + }, + { + name: "ALL", + expected: tenantactivity.UsageFilterAll, + status: http.StatusOK, + }, + { + name: "a", + expected: tenantactivity.UsageFilterAll, + status: http.StatusOK, + }, + { + name: "A", + expected: tenantactivity.UsageFilterAll, + status: http.StatusOK, + }, + { + name: "reads", + expected: tenantactivity.UsageFilterOnlyReads, + status: http.StatusOK, + }, + { + name: "read", + expected: tenantactivity.UsageFilterOnlyReads, + status: http.StatusOK, + }, + { + name: 
"r", + expected: tenantactivity.UsageFilterOnlyReads, + status: http.StatusOK, + }, + { + name: "R", + expected: tenantactivity.UsageFilterOnlyReads, + status: http.StatusOK, + }, + { + name: "w", + expected: tenantactivity.UsageFilterOnlyWrites, + status: http.StatusOK, + }, + { + name: "write", + expected: tenantactivity.UsageFilterOnlyWrites, + status: http.StatusOK, + }, + { + name: "WRITES", + expected: tenantactivity.UsageFilterOnlyWrites, + status: http.StatusOK, + }, + { + name: "potatoes", + expected: tenantactivity.UsageFilterOnlyWrites, + status: http.StatusBadRequest, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // normal operation + now := time.Now() + + h := &Handler{} + s := &dummySource{returnVal: tenantactivity.ByCollection{ + "Col1": tenantactivity.ByTenant{ + "t1": now, + }, + }} + h.SetSource(s) + + url := "/" + if tt.name != "" { + url += "?filter=" + tt.name + } + + r := httptest.NewRequest(http.MethodGet, url, nil) + w := httptest.NewRecorder() + + h.ServeHTTP(w, r) + + res := w.Result() + defer res.Body.Close() + assert.Equal(t, tt.status, res.StatusCode) + + if tt.status != http.StatusOK { + return + } + + jsonData, err := io.ReadAll(res.Body) + require.Nil(t, err) + + var act tenantactivity.ByCollection + err = json.Unmarshal(jsonData, &act) + require.Nil(t, err) + + assert.Equal(t, now.Format(time.RFC3339Nano), act["Col1"]["t1"].Format(time.RFC3339Nano)) + assert.Equal(t, s.lastFilter, tt.expected, "filter should match the expected value") + }) + } +} + +type dummySource struct { + returnVal tenantactivity.ByCollection + lastFilter tenantactivity.UsageFilter +} + +func (d *dummySource) LocalTenantActivity(filter tenantactivity.UsageFilter) tenantactivity.ByCollection { + d.lastFilter = filter + return d.returnVal +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/types/cluster.go b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/types/cluster.go new file mode 100644 
index 0000000000000000000000000000000000000000..ba949956700d74c5be4b2c998bbde02850c18581 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/handlers/rest/types/cluster.go @@ -0,0 +1,20 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package types + +import "context" + +// ClusterServer defines the interface for cluster API server operations +type ClusterServer interface { + Serve() error + Close(ctx context.Context) error +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/classifications/distributed_repo.go b/platform/dbops/binaries/weaviate-src/adapters/repos/classifications/distributed_repo.go new file mode 100644 index 0000000000000000000000000000000000000000..75a0e7b1204fdce845b59c0bd564fd881b79bf1f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/classifications/distributed_repo.go @@ -0,0 +1,121 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package classifications + +import ( + "context" + "sync" + "time" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/classification" + "github.com/weaviate/weaviate/usecases/cluster" +) + +const DefaultTxTTL = 60 * time.Second + +type DistributedRepo struct { + sync.RWMutex + txRemote *cluster.TxManager + localRepo localRepo +} + +type localRepo interface { + Get(ctx context.Context, id strfmt.UUID) (*models.Classification, error) + Put(ctx context.Context, classification models.Classification) error +} + +func NewDistributeRepo(remoteClient cluster.Client, + memberLister cluster.MemberLister, localRepo localRepo, + logger logrus.FieldLogger, +) *DistributedRepo { + broadcaster := cluster.NewTxBroadcaster(memberLister, remoteClient, logger) + txRemote := cluster.NewTxManager(broadcaster, &dummyTxPersistence{}, logger) + txRemote.StartAcceptIncoming() + repo := &DistributedRepo{ + txRemote: txRemote, + localRepo: localRepo, + } + + repo.txRemote.SetCommitFn(repo.incomingCommit) + + return repo +} + +func (r *DistributedRepo) Get(ctx context.Context, + id strfmt.UUID, +) (*models.Classification, error) { + r.RLock() + defer r.RUnlock() + + return r.localRepo.Get(ctx, id) +} + +func (r *DistributedRepo) Put(ctx context.Context, + pl models.Classification, +) error { + r.Lock() + defer r.Unlock() + + tx, err := r.txRemote.BeginTransaction(ctx, classification.TransactionPut, + classification.TransactionPutPayload{ + Classification: pl, + }, DefaultTxTTL) + if err != nil { + return errors.Wrap(err, "open cluster-wide transaction") + } + + err = r.txRemote.CommitWriteTransaction(ctx, tx) + if err != nil { + return errors.Wrap(err, "commit cluster-wide transaction") + } + + return r.localRepo.Put(ctx, pl) +} + +func (r *DistributedRepo) incomingCommit(ctx context.Context, + tx *cluster.Transaction, +) 
error { + if tx.Type != classification.TransactionPut { + return errors.Errorf("unrecognized tx type: %s", tx.Type) + } + + return r.localRepo.Put(ctx, tx.Payload.(classification.TransactionPutPayload). + Classification) +} + +func (r *DistributedRepo) TxManager() *cluster.TxManager { + return r.txRemote +} + +// NOTE: classifications do not yet make use of the new durability guarantees +// introduced by the txManager as part of v1.21.3. The reasoning behind this is +// that the classification itself is not crash-safe anyway, so there is no +// point. We need to decide down the line what to do with this? It is a rarely +// used, but not used feature. For now we are not aware of anyone having any +// issues with its stability. +type dummyTxPersistence struct{} + +func (d *dummyTxPersistence) StoreTx(ctx context.Context, tx *cluster.Transaction) error { + return nil +} + +func (d *dummyTxPersistence) DeleteTx(ctx context.Context, txID string) error { + return nil +} + +func (d *dummyTxPersistence) IterateAll(ctx context.Context, cb func(tx *cluster.Transaction)) error { + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/classifications/repo.go b/platform/dbops/binaries/weaviate-src/adapters/repos/classifications/repo.go new file mode 100644 index 0000000000000000000000000000000000000000..e5a9c2a9289b825a07629d02e12b4bd8dc1c675f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/classifications/repo.go @@ -0,0 +1,114 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package classifications + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/classification" + bolt "go.etcd.io/bbolt" +) + +var classificationsBucket = []byte("classifications") + +type Repo struct { + logger logrus.FieldLogger + baseDir string + db *bolt.DB +} + +func NewRepo(baseDir string, logger logrus.FieldLogger) (*Repo, error) { + r := &Repo{ + baseDir: baseDir, + logger: logger, + } + + err := r.init() + return r, err +} + +func (r *Repo) DBPath() string { + return fmt.Sprintf("%s/classifications.db", r.baseDir) +} + +func (r *Repo) keyFromID(id strfmt.UUID) []byte { + return []byte(id) +} + +func (r *Repo) init() error { + if err := os.MkdirAll(r.baseDir, 0o777); err != nil { + return errors.Wrapf(err, "create root path directory at %s", r.baseDir) + } + + boltdb, err := bolt.Open(r.DBPath(), 0o600, nil) + if err != nil { + return errors.Wrapf(err, "open bolt at %s", r.DBPath()) + } + + err = boltdb.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucketIfNotExists(classificationsBucket); err != nil { + return errors.Wrapf(err, "create classifications bucket '%s'", + string(helpers.ObjectsBucket)) + } + return nil + }) + if err != nil { + return errors.Wrapf(err, "create bolt buckets") + } + + r.db = boltdb + + return nil +} + +func (r *Repo) Put(ctx context.Context, classification models.Classification) error { + classificationJSON, err := json.Marshal(classification) + if err != nil { + return errors.Wrap(err, "marshal classification to JSON") + } + + return r.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(classificationsBucket) + return b.Put(r.keyFromID(classification.ID), classificationJSON) + }) +} + +func (r *Repo) Get(ctx context.Context, id 
strfmt.UUID) (*models.Classification, error) { + var classificationJSON []byte + r.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(classificationsBucket) + classificationJSON = b.Get(r.keyFromID(id)) + return nil + }) + + if len(classificationJSON) == 0 { + return nil, nil + } + + var c models.Classification + err := json.Unmarshal(classificationJSON, &c) + if err != nil { + return nil, errors.Wrapf(err, "parse classification from JSON") + } + + return &c, nil +} + +var _ = classification.Repo(&Repo{}) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/classifications/repo_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/classifications/repo_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0ff09ccd027d025613ac6b4121f8c05a046aeffc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/classifications/repo_integration_test.go @@ -0,0 +1,78 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package classifications + +import ( + "context" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" +) + +func Test_ClassificationsRepo(t *testing.T) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + r, err := NewRepo(dirName, logger) + require.Nil(t, err) + _ = r + + t.Run("asking for a non-existing classification", func(t *testing.T) { + res, err := r.Get(context.Background(), "wrong-id") + require.Nil(t, err) + assert.Nil(t, res) + }) + + t.Run("storing classifications", func(t *testing.T) { + err := r.Put(context.Background(), exampleOne()) + require.Nil(t, err) + + err = r.Put(context.Background(), exampleTwo()) + require.Nil(t, err) + }) + + t.Run("retrieving stored classifications", func(t *testing.T) { + expectedOne := exampleOne() + expectedTwo := exampleTwo() + + res, err := r.Get(context.Background(), expectedOne.ID) + require.Nil(t, err) + assert.Equal(t, &expectedOne, res) + + res, err = r.Get(context.Background(), expectedTwo.ID) + require.Nil(t, err) + assert.Equal(t, &expectedTwo, res) + }) +} + +func exampleOne() models.Classification { + return models.Classification{ + ID: "01ed111a-919c-4dd5-ab9e-7b247b11e18c", + Class: "ExampleClassOne", + BasedOnProperties: []string{"prop1"}, + } +} + +func exampleTwo() models.Classification { + return models.Classification{ + ID: "4fbaebf3-41a9-414b-ac1d-433d74d4ef2c", + Class: "ExampleClassTwo", + BasedOnProperties: []string{"prop2"}, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregations_fixtures_for_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregations_fixtures_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2705e24a2476147e60b895e63f0ef117447caf40 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregations_fixtures_for_test.go @@ -0,0 +1,386 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package db + +import ( + "fmt" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +var productClass = &models.Class{ + Class: "AggregationsTestProduct", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, +} + +func boolRef(b bool) *bool { + return &b +} + +var notIndexedClass = &models.Class{ + Class: "NotIndexedClass", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + IndexFilterable: boolRef(false), + IndexInverted: boolRef(false), + }, + }, +} + +var companyClass = &models.Class{ + Class: "AggregationsTestCompany", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "sector", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "location", + DataType: []string{"text"}, + Tokenization: "word", + }, + { + Name: "dividendYield", + DataType: []string{"number"}, + }, + { + Name: "price", + DataType: []string{"int"}, // 
unrealistic for this to be an int, but + // we've already tested another number prop ;-) + }, + { + Name: "listedInIndex", + DataType: []string{"boolean"}, + }, + { + Name: "makesProduct", + DataType: []string{"AggregationsTestProduct"}, + }, + }, +} + +var arrayTypesClass = &models.Class{ + Class: "AggregationsTestArrayTypes", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "strings", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "numbers", + DataType: []string{"number[]"}, + }, + }, +} + +var customerClass = &models.Class{ + Class: "AggregationsTestCustomer", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "internalId", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "timeArrived", + DataType: []string{"date"}, + }, + { + Name: "countryOfOrigin", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, +} + +var products = []map[string]interface{}{ + { + "name": "Superbread", + }, +} + +var productsIds = []strfmt.UUID{ + "1295c052-263d-4aae-99dd-920c5a370d06", +} + +// company objects are imported not just once each, but each is imported +// importFactor times. This should even out shard imbalances a bit better. 
+var importFactor = 10 + +var companies = []map[string]interface{}{ + { + "sector": "Financials", + "location": "New York", + "dividendYield": 1.3, + "price": int64(150), + "listedInIndex": true, + }, + { + "sector": "Financials", + "location": "New York", + "dividendYield": 4.0, + "price": int64(600), + "listedInIndex": true, + }, + { + "sector": "Financials", + "location": "San Francisco", + "dividendYield": 1.3, + "price": int64(47), + "listedInIndex": true, + }, + { + "sector": "Food", + "location": "Atlanta", + "dividendYield": 1.3, + "price": int64(160), + "listedInIndex": true, + }, + { + "sector": "Food", + "location": "Atlanta", + "dividendYield": 2.0, + "price": int64(70), + "listedInIndex": true, + }, + { + "sector": "Food", + "location": "Los Angeles", + "dividendYield": 0.0, + "price": int64(800), + "listedInIndex": false, + }, + { + "sector": "Food", + "location": "Detroit", + "dividendYield": 8.0, + "price": int64(10), + "listedInIndex": true, + "makesProduct": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", productsIds[0])), + }, + }, + }, + { + "sector": "Food", + "location": "San Francisco", + "dividendYield": 0.0, + "price": int64(200), + "listedInIndex": true, + }, + { + "sector": "Food", + "location": "New York", + "dividendYield": 1.1, + "price": int64(70), + "listedInIndex": true, + }, +} + +// Use fixed ids to make the test deterministic. 
The length of this must match +// the len(companies)*importFactor These are somewhat carefully arranged to +// make sure that we prevent the flakiness that was described in +// https://github.com/weaviate/weaviate/issues/1884 +var companyIDs = []strfmt.UUID{ + "9ee7640f-b4fc-45f1-b502-580e79062c99", + "f745485b-f6ef-4785-bd54-574cc4923899", + "0109289b-88d5-48d5-9f3e-1e5edb7e56c1", + "ce7c2930-2af8-44ec-8789-eceee930c37d", + "3f7b1ea2-e6e8-49d4-b44c-30916dbab43a", + "ed3b417f-2b7e-456e-9599-744541728950", + "175c22ea-190a-4b8c-9fd0-fbffa5b1b8be", + "e0165a52-f504-47d3-b947-b3829eca3563", + "19ede86d-86d6-4ea8-9ae3-a10a1607b433", + "034c8d0b-50d7-4753-811c-abbfad44c653", + "99564c6b-8529-4f56-9c3f-f808385f034a", + "adc362ea-105d-4544-b981-dedc100d25b9", + "21e1dc5b-49e1-4d35-acb4-be969e4e3a30", + "cb278f74-c632-4460-ae78-2c2c140c0e12", + "4d552f33-552a-41aa-b732-da3d0f708b79", + "faf509ad-bdb6-4aa1-ae9f-e9f410210419", + "59992957-65cf-44bd-8894-d8b2364f080f", + "529217e6-9ec3-41c6-bb6d-13e8f380960a", + "058ec709-782d-4b58-a38f-01593d97f181", + "d027204b-805c-46ae-8a61-7d35f9c6eaba", + "0fce8fea-6ca7-4c80-bc81-55d26e1fd0bd", + "1f4832c7-d164-441e-b197-aa193b4d128f", + "15ad080d-49fc-4b8b-b621-d47f98aa5fdb", + "cb40966d-963d-4283-94be-7da5de70992e", + "6516f7d9-c505-40b3-94de-a42498eea22d", + "9dbcbd08-1067-4bec-84a4-4f3ad19286d3", + "dabd68eb-27b9-462b-a271-300058c5798b", + "9a33f431-cb28-49ae-b9c9-94e97f752a2a", + "4aa27f5c-f475-444b-9279-94b8a5da14c9", + "e71cf490-9a59-4475-80c1-b6c872f3b33c", + "a8e7e8bf-237b-4d95-99be-6af332bdf942", + "08c239c3-e19e-4d88-b0fd-861853dd5e36", + "0209ab7c-2573-4d66-9917-942450e02750", + "0a71a4da-d9c9-423e-8dd8-ebd5c2a86a2d", + "f312aa16-992e-4505-82aa-04da30ea5fe3", + "5b9f9449-1336-44e3-88f9-979f3c752bd7", + "dcec96ab-9377-4272-8a48-07568c4ed533", + "3f28352d-9960-4251-aa05-ce4c612e1ab7", + "4a08101e-f280-41f9-9e7b-fe12d7216c3c", + "0dd7383f-c71b-483c-9253-e180f8763405", + "cfb83c85-cf8f-49da-952f-5bd954b7e616", + 
"b016bb0f-9e07-4d40-9878-a6bbaa67d866", + "311d7423-552b-4d4c-b7b6-cdcd15e1009b", + "895877d6-9cf3-4d79-989e-4d89f6867e09", + "92bdb79c-6870-44e2-ab71-c3137c78cb2d", + "b16cb9c4-5a6c-444c-bbf5-7b0ef2c1ac12", + "efb09d97-09c4-4976-aa14-abfd520c114d", + "6431f59d-9ed8-4963-9ed7-7336a5795d8b", + "1ad26844-6f6b-4007-834d-09ec8668fe7b", + "6d83b349-6ec8-49bb-b438-7f29a16216d3", + "68156360-1cae-406e-8baf-177178f0815e", + "3c726a50-ec82-4828-8967-f704477dfef7", + "46f702b2-e1c3-4e4a-868e-10ec1e260c75", + "51ff679a-87d8-4bef-830d-327e7b4c8f8e", + "aea6fc5c-8eb0-4cd7-8cfe-1285239d16bd", + "b70bbf68-5ebc-4315-9819-deb65b241f3f", + "6069853f-8822-434f-9d59-c881800e0a27", + "9a287ef6-6920-4d01-a44a-7561d2fb627d", + "fa057d95-9ba8-418a-aeb2-fe4b2acd31b0", + "9b0fb28f-21f1-4df1-a55b-67bf6be79911", + "044403fb-25f6-461f-ad92-8f9533908070", + "35d09151-c469-4901-8092-2114060cb86b", + "85884aa2-5d0b-4dff-80f6-8ca7cbab9fef", + "bd36a31a-f14e-4040-ad11-0ec5b6217b6a", + "fe20620f-c612-4712-9475-d4cfc59e8bba", + "09ba0773-e81d-4cb9-968a-552e1cbaf143", + "7a7378a5-2d05-4457-b2d4-1fe068535167", + "6867d1e5-2d30-4f91-90d2-2b453ffd5cd5", + "2fef1b16-3dd1-4ad5-bc55-cf8f9783b40c", + "0590f51b-7c9f-41a0-b81e-cdbc205ebdd9", + "7ed55b94-86d5-440a-9a8a-5f83dabcb69b", + "2daca92a-c8a6-4ab4-a528-3d99ce6f72f2", + "24187e67-947d-436d-ae7f-d20b03874b56", + "864ff42d-00fe-44a8-8163-8af459dc1c0c", + "0c2cc9a5-089a-4d10-882a-837c154117ea", + "fb256f18-e812-4355-b41a-c69d933f2a61", + "b631c4df-8229-43c0-9e5e-189c5d666ac2", + "8da03018-3272-4bd3-987c-1dd1e807bc1d", + "bf736b76-fccc-4d1b-8d9f-2e78fdb0d972", + "1fc9dffc-da23-4b99-8330-44c5598919db", + "1ed74402-bc81-4245-8275-c2862a4d6a86", + "6a91adb4-23df-43bd-9564-f97e76382a52", + "d1661202-c568-4032-8ec6-99fe4238de84", + "e4b4186d-f02e-47d7-8214-d3854ee475fd", + "664e6157-bfcc-4513-8f04-c095d3ecb2d5", + "b3b9951f-0867-453d-bf20-9f22bdb5a38b", + "52adfeae-ab75-4250-ae45-61af9e231e86", + "e994c378-ac0b-4f08-bd56-462990be36dd", + 
"a012f65a-28a7-4005-95bb-f55783bcdda0", + "0cbc7fb6-843c-4aad-b540-e37ddb7c84c6", +} + +var arrayTypes = []map[string]interface{}{ + { + "strings": []string{"a", "b", "c"}, + "numbers": []float64{1.0, 2.0, 2.0, 3.0, 3.0}, + }, + { + "strings": []string{"a"}, + "numbers": []float64{1.0, 2.0}, + }, +} + +var customers = []map[string]interface{}{ + { + "internalId": "customer 1", + "countryOfOrigin": "US", + "timeArrived": mustStringToTime("2022-06-16T17:30:17.231346Z"), + "isNewCustomer": false, + }, + { + "internalId": "customer 2", + "countryOfOrigin": "US", + "timeArrived": mustStringToTime("2022-06-16T17:30:17.231346Z"), + "isNewCustomer": false, + }, + { + "internalId": "customer 3", + "countryOfOrigin": "US", + "timeArrived": mustStringToTime("2022-06-16T17:30:17.231346Z"), + "isNewCustomer": false, + }, + { + "internalId": "customer 4", + "countryOfOrigin": "US", + "timeArrived": mustStringToTime("2022-06-16T17:30:20.123546Z"), + "isNewCustomer": true, + }, + { + "internalId": "customer 5", + "countryOfOrigin": "US", + "timeArrived": mustStringToTime("2022-06-16T17:30:20.123546Z"), + "isNewCustomer": true, + }, + { + "internalId": "customer 6", + "countryOfOrigin": "US", + "timeArrived": mustStringToTime("2022-06-16T17:30:22.112435Z"), + "isNewCustomer": false, + }, + { + "internalId": "customer 7", + "countryOfOrigin": "US", + "timeArrived": mustStringToTime("2022-06-16T17:30:23.754272Z"), + "isNewCustomer": false, + }, + { + "internalId": "customer 8", + "countryOfOrigin": "US", + "timeArrived": mustStringToTime("2022-06-16T17:30:24.325698Z"), + "isNewCustomer": true, + }, + { + "internalId": "customer 9", + "countryOfOrigin": "US", + "timeArrived": mustStringToTime("2022-06-16T17:30:25.524536Z"), + "isNewCustomer": false, + }, + { + "internalId": "customer 10", + "countryOfOrigin": "US", + "timeArrived": mustStringToTime("2022-06-16T17:30:26.451235Z"), + "isNewCustomer": true, + }, +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregations_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregations_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9784f649f420574893d628c7f8caa6982d38de4d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregations_integration_test.go @@ -0,0 +1,2320 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "testing" + "time" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +func Test_Aggregations(t *testing.T) { + dirName := t.TempDir() + + shardState := singleShardState() + logger := logrus.New() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, 
mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + migrator := NewMigrator(repo, logger, "node1") + + t.Run("prepare test schema and data ", + prepareCompanyTestSchemaAndData(repo, migrator, schemaGetter)) + + t.Run("numerical aggregations with grouping 1", + testNumericalAggregationsWithGrouping(repo, true)) + + t.Run("numerical aggregations without grouping (formerly Meta)", + testNumericalAggregationsWithoutGrouping(repo, true)) + + t.Run("numerical aggregations with filters", + testNumericalAggregationsWithFilters(repo)) + + t.Run("date aggregations with grouping", + 
testDateAggregationsWithGrouping(repo, true)) + + t.Run("date aggregations without grouping", + testDateAggregationsWithoutGrouping(repo, true)) + + t.Run("date aggregations with filters", + testDateAggregationsWithFilters(repo)) + + t.Run("clean up", + cleanupCompanyTestSchemaAndData(repo, migrator)) +} + +func Test_Aggregations_MultiShard(t *testing.T) { + dirName := t.TempDir() + + shardState := fixedMultiShardState() + logger := logrus.New() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, 
&fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + migrator := NewMigrator(repo, logger, mockNodeSelector.LocalName()) + + t.Run("prepare test schema and data ", + prepareCompanyTestSchemaAndData(repo, migrator, schemaGetter)) + + t.Run("numerical aggregations with grouping 2", + testNumericalAggregationsWithGrouping(repo, false)) + + t.Run("numerical aggregations without grouping (formerly Meta)", + testNumericalAggregationsWithoutGrouping(repo, false)) + + t.Run("numerical aggregations with filters", + testNumericalAggregationsWithFilters(repo)) + + t.Run("date aggregations with grouping", + testDateAggregationsWithGrouping(repo, true)) + + t.Run("date aggregations without grouping", + testDateAggregationsWithoutGrouping(repo, true)) + + t.Run("date aggregations with filters", + testDateAggregationsWithFilters(repo)) + + t.Run("clean up", + cleanupCompanyTestSchemaAndData(repo, migrator)) +} + +func prepareCompanyTestSchemaAndData(repo *DB, + migrator *Migrator, schemaGetter *fakeSchemaGetter, +) func(t *testing.T) { + return func(t *testing.T) { + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + productClass, + notIndexedClass, + companyClass, + arrayTypesClass, + customerClass, + }, + }, + } + + schemaGetter.schema = schema + + t.Run("creating the class", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), productClass)) + require.Nil(t, + migrator.AddClass(context.Background(), companyClass)) + require.Nil(t, + migrator.AddClass(context.Background(), arrayTypesClass)) + require.Nil(t, + migrator.AddClass(context.Background(), customerClass)) + require.Nil(t, + migrator.AddClass(context.Background(), notIndexedClass)) + }) + + schemaGetter.schema = schema + + t.Run("import products", func(t *testing.T) { + for i, schema := 
range products { + t.Run(fmt.Sprintf("importing product %d", i), func(t *testing.T) { + fixture := models.Object{ + Class: productClass.Class, + ID: productsIds[i], + Properties: schema, + } + require.Nil(t, + repo.PutObject(context.Background(), &fixture, []float32{0.1, 0.2, 0.01, 0.2}, nil, nil, nil, 0)) + }) + } + }) + + t.Run("import products into notIndexed class", func(t *testing.T) { + for i, schema := range products { + t.Run(fmt.Sprintf("importing product %d", i), func(t *testing.T) { + fixture := models.Object{ + Class: notIndexedClass.Class, + ID: productsIds[i], + Properties: schema, + } + require.Nil(t, + repo.PutObject(context.Background(), &fixture, []float32{0.1, 0.2, 0.01, 0.2}, nil, nil, nil, 0)) + }) + } + }) + + t.Run("import companies", func(t *testing.T) { + for j := 0; j < importFactor; j++ { + for i, schema := range companies { + t.Run(fmt.Sprintf("importing company %d", i), func(t *testing.T) { + fixture := models.Object{ + Class: companyClass.Class, + ID: companyIDs[j*(importFactor-1)+i], + Properties: schema, + } + + require.Nil(t, + repo.PutObject(context.Background(), &fixture, []float32{0.1, 0.1, 0.1, 0.1}, nil, nil, nil, 0)) + }) + } + } + }) + + t.Run("import array types", func(t *testing.T) { + for i, schema := range arrayTypes { + t.Run(fmt.Sprintf("importing array type %d", i), func(t *testing.T) { + fixture := models.Object{ + Class: arrayTypesClass.Class, + ID: strfmt.UUID(uuid.Must(uuid.NewRandom()).String()), + Properties: schema, + } + require.Nil(t, + repo.PutObject(context.Background(), &fixture, []float32{0.1, 0.1, 0.1, 0.1}, nil, nil, nil, 0)) + }) + } + }) + + t.Run("import customers", func(t *testing.T) { + for i, schema := range customers { + t.Run(fmt.Sprintf("importing customer #%d", i), func(t *testing.T) { + fixture := models.Object{ + Class: customerClass.Class, + ID: strfmt.UUID(uuid.Must(uuid.NewRandom()).String()), + Properties: schema, + } + require.Nil(t, + repo.PutObject(context.Background(), &fixture, 
[]float32{0.1, 0.1, 0.1, 0.1}, nil, nil, nil, 0)) + }) + } + }) + } +} + +func cleanupCompanyTestSchemaAndData(repo *DB, + migrator *Migrator, +) func(t *testing.T) { + return func(t *testing.T) { + assert.Nil(t, repo.Shutdown(context.Background())) + } +} + +func testNumericalAggregationsWithGrouping(repo *DB, exact bool) func(t *testing.T) { + return func(t *testing.T) { + epsilon := 0.1 + if !exact { + epsilon = 1.0 + } + + t.Run("single field, single aggregator", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + GroupBy: &filters.Path{ + Class: schema.ClassName(companyClass.Class), + Property: schema.PropertyName("sector"), + }, + IncludeMetaCount: true, + Properties: []aggregation.ParamProperty{ + { + Name: schema.PropertyName("dividendYield"), + Aggregators: []aggregation.Aggregator{aggregation.MeanAggregator}, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + Count: 60, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"sector"}, + Value: "Food", + }, + Properties: map[string]aggregation.Property{ + "dividendYield": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 2.066666666666666, + }, + }, + }, + }, + { + Count: 30, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"sector"}, + Value: "Financials", + }, + Properties: map[string]aggregation.Property{ + "dividendYield": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 2.1999999999999999, + }, + }, + }, + }, + }, + } + + require.Equal(t, len(expectedResult.Groups), len(res.Groups)) + + for i := 0; i <= 1; i++ { + assert.Equal(t, expectedResult.Groups[i].Count, + res.Groups[i].Count) + + expectedDivYield := expectedResult.Groups[i].Properties["dividendYield"] + actualDivYield := 
res.Groups[i].Properties["dividendYield"] + + assert.InEpsilon(t, expectedDivYield.NumericalAggregations["mean"], + actualDivYield.NumericalAggregations["mean"], epsilon) + } + }) + + t.Run("grouping by a non-numerical, non-string prop", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + GroupBy: &filters.Path{ + Class: schema.ClassName(companyClass.Class), + Property: schema.PropertyName("listedInIndex"), + }, + Properties: []aggregation.ParamProperty{ + { + Name: schema.PropertyName("dividendYield"), + Aggregators: []aggregation.Aggregator{aggregation.MeanAggregator}, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + Count: 80, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"listedInIndex"}, + Value: true, + }, + Properties: map[string]aggregation.Property{ + "dividendYield": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 2.375, + }, + }, + }, + }, + { + Count: 10, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"listedInIndex"}, + Value: false, + }, + Properties: map[string]aggregation.Property{ + "dividendYield": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 0.0, + }, + }, + }, + }, + }, + } + + // there is now way to use InEpsilon or InDelta on nested structs with + // testify, so unfortunately we have to do a manual deep equal: + assert.Equal(t, len(res.Groups), len(expectedResult.Groups)) + assert.Equal(t, expectedResult.Groups[0].Count, res.Groups[0].Count) + assert.Equal(t, expectedResult.Groups[0].GroupedBy, res.Groups[0].GroupedBy) + assert.InEpsilon(t, expectedResult.Groups[0].Properties["dividendYield"]. 
+ NumericalAggregations["mean"], + res.Groups[0].Properties["dividendYield"].NumericalAggregations["mean"], + epsilon) + assert.Equal(t, len(res.Groups), len(expectedResult.Groups)) + assert.Equal(t, expectedResult.Groups[1].Count, res.Groups[1].Count) + assert.Equal(t, expectedResult.Groups[1].GroupedBy, res.Groups[1].GroupedBy) + assert.InDelta(t, expectedResult.Groups[1].Properties["dividendYield"]. + NumericalAggregations["mean"], + res.Groups[1].Properties["dividendYield"].NumericalAggregations["mean"], + epsilon) + }) + + t.Run("multiple fields, multiple aggregators, grouped by string", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + GroupBy: &filters.Path{ + Class: schema.ClassName(companyClass.Class), + Property: schema.PropertyName("sector"), + }, + Properties: []aggregation.ParamProperty{ + { + Name: schema.PropertyName("dividendYield"), + Aggregators: []aggregation.Aggregator{ + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + aggregation.ModeAggregator, + aggregation.MedianAggregator, + aggregation.CountAggregator, + aggregation.TypeAggregator, + }, + }, + { + Name: schema.PropertyName("price"), + Aggregators: []aggregation.Aggregator{ + aggregation.TypeAggregator, + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + // aggregation.ModeAggregator, // ignore as there is no most common value + aggregation.MedianAggregator, + aggregation.CountAggregator, + }, + }, + { + Name: schema.PropertyName("listedInIndex"), + Aggregators: []aggregation.Aggregator{ + aggregation.TypeAggregator, + aggregation.PercentageTrueAggregator, + aggregation.PercentageFalseAggregator, + aggregation.TotalTrueAggregator, + aggregation.TotalFalseAggregator, + }, + }, + { + Name: schema.PropertyName("location"), + Aggregators: []aggregation.Aggregator{ + 
aggregation.TypeAggregator, + aggregation.NewTopOccurrencesAggregator(ptInt(5)), + }, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + Count: 60, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"sector"}, + Value: "Food", + }, + Properties: map[string]aggregation.Property{ + "dividendYield": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 2.06667, + "maximum": 8.0, + "minimum": 0.0, + "sum": 124, + "mode": 0., + "median": 1.1, + "count": 60, + }, + }, + "price": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 218.33333, + "maximum": 800., + "minimum": 10., + "sum": 13100., + // "mode": 70, + "median": 115, + "count": 60, + }, + }, + "listedInIndex": { + Type: aggregation.PropertyTypeBoolean, + BooleanAggregation: aggregation.Boolean{ + TotalTrue: 50, + TotalFalse: 10, + PercentageTrue: 0.8333333333333334, + PercentageFalse: 0.16666666666666666, + Count: 60, + }, + }, + "location": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 60, + Items: []aggregation.TextOccurrence{ + { + Value: "Atlanta", + Occurs: 20, + }, + { + Value: "Detroit", + Occurs: 10, + }, + { + Value: "Los Angeles", + Occurs: 10, + }, + { + Value: "New York", + Occurs: 10, + }, + { + Value: "San Francisco", + Occurs: 10, + }, + }, + }, + }, + }, + }, + { + Count: 30, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"sector"}, + Value: "Financials", + }, + Properties: map[string]aggregation.Property{ + "dividendYield": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 2.2, + "maximum": 4.0, + "minimum": 1.3, + "sum": 66., + "mode": 1.3, + "median": 1.3, + "count": 30, + }, + }, + "price": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: 
map[string]interface{}{ + "mean": 265.66667, + "maximum": 600., + "minimum": 47., + "sum": 7970., + // "mode": 47, + "median": 150., + "count": 30., + }, + }, + "listedInIndex": { + Type: aggregation.PropertyTypeBoolean, + BooleanAggregation: aggregation.Boolean{ + TotalTrue: 30, + TotalFalse: 0, + PercentageTrue: 1, + PercentageFalse: 0, + Count: 30, + }, + }, + "location": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 30, + Items: []aggregation.TextOccurrence{ + { + Value: "New York", + Occurs: 20, + }, + { + Value: "San Francisco", + Occurs: 10, + }, + }, + }, + }, + }, + }, + }, + } + + // there is now way to use InEpsilon or InDelta on nested structs with + // testify, so unfortunately we have to do a manual deep equal: + assert.Equal(t, len(res.Groups), len(expectedResult.Groups)) + assert.Equal(t, expectedResult.Groups[0].Count, res.Groups[0].Count) + assert.Equal(t, expectedResult.Groups[0].GroupedBy, res.Groups[0].GroupedBy) + expectedProps := expectedResult.Groups[0].Properties + actualProps := res.Groups[0].Properties + assert.Equal(t, expectedProps["location"].TextAggregation.Count, + actualProps["location"].TextAggregation.Count) + assert.ElementsMatch(t, expectedProps["location"].TextAggregation.Items, + actualProps["location"].TextAggregation.Items) + assert.Equal(t, expectedProps["listedInIndex"], actualProps["listedInIndex"]) + assert.InDeltaMapValues(t, expectedProps["dividendYield"].NumericalAggregations, + actualProps["dividendYield"].NumericalAggregations, epsilon*100) + assert.InDeltaMapValues(t, expectedProps["price"].NumericalAggregations, + actualProps["price"].NumericalAggregations, epsilon*100) + + assert.Equal(t, len(res.Groups), len(expectedResult.Groups)) + assert.Equal(t, expectedResult.Groups[1].Count, res.Groups[1].Count) + assert.Equal(t, expectedResult.Groups[1].GroupedBy, res.Groups[1].GroupedBy) + expectedProps = expectedResult.Groups[1].Properties + actualProps = res.Groups[1].Properties 
+ assert.Equal(t, expectedProps["location"], actualProps["location"]) + assert.Equal(t, expectedProps["listedInIndex"], actualProps["listedInIndex"]) + assert.InDeltaMapValues(t, expectedProps["dividendYield"].NumericalAggregations, + actualProps["dividendYield"].NumericalAggregations, epsilon*100) + assert.InDeltaMapValues(t, expectedProps["price"].NumericalAggregations, + actualProps["price"].NumericalAggregations, epsilon*500) + }) + + t.Run("with filters, grouped by string", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + GroupBy: &filters.Path{ + Class: schema.ClassName(companyClass.Class), + Property: schema.PropertyName("sector"), + }, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorLessThan, + Value: &filters.Value{ + Type: schema.DataTypeInt, + Value: 600, + }, + On: &filters.Path{ + Property: "price", + }, + }, + }, + Properties: []aggregation.ParamProperty{ + { + Name: schema.PropertyName("dividendYield"), + Aggregators: []aggregation.Aggregator{ + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + // aggregation.ModeAggregator, + aggregation.MedianAggregator, + aggregation.CountAggregator, + aggregation.TypeAggregator, + }, + }, + { + Name: schema.PropertyName("price"), + Aggregators: []aggregation.Aggregator{ + aggregation.TypeAggregator, + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + // aggregation.ModeAggregator, // ignore as there is no most common value + aggregation.MedianAggregator, + aggregation.CountAggregator, + }, + }, + { + Name: schema.PropertyName("listedInIndex"), + Aggregators: []aggregation.Aggregator{ + aggregation.TypeAggregator, + aggregation.PercentageTrueAggregator, + aggregation.PercentageFalseAggregator, + aggregation.TotalTrueAggregator, + aggregation.TotalFalseAggregator, + }, + }, 
+ { + Name: schema.PropertyName("location"), + Aggregators: []aggregation.Aggregator{ + aggregation.TypeAggregator, + aggregation.NewTopOccurrencesAggregator(ptInt(5)), + }, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + Count: 50, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"sector"}, + Value: "Food", + }, + Properties: map[string]aggregation.Property{ + "dividendYield": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 2.48, + "maximum": 8.0, + "minimum": 0.0, + "sum": 124., + "median": 1.3, + "count": 50, + }, + }, + "price": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 102., + "maximum": 200., + "minimum": 10., + "sum": 5100., + "median": 70., + "count": 50., + }, + }, + "listedInIndex": { + Type: aggregation.PropertyTypeBoolean, + BooleanAggregation: aggregation.Boolean{ + TotalTrue: 50, + TotalFalse: 0, + PercentageTrue: 1, + PercentageFalse: 0, + Count: 50, + }, + }, + "location": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 50, + Items: []aggregation.TextOccurrence{ + { + Value: "Atlanta", + Occurs: 20, + }, + { + Value: "Detroit", + Occurs: 10, + }, + { + Value: "New York", + Occurs: 10, + }, + { + Value: "San Francisco", + Occurs: 10, + }, + }, + }, + }, + }, + }, + { + Count: 20, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"sector"}, + Value: "Financials", + }, + Properties: map[string]aggregation.Property{ + "dividendYield": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 1.3, + "maximum": 1.3, + "minimum": 1.3, + "sum": 26., + "median": 1.3, + "count": 20., + }, + }, + "price": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 98.5, + 
"maximum": 150., + "minimum": 47., + "sum": 1970., + "median": 98.5, + "count": 20., + }, + }, + "listedInIndex": { + Type: aggregation.PropertyTypeBoolean, + BooleanAggregation: aggregation.Boolean{ + TotalTrue: 20, + TotalFalse: 0, + PercentageTrue: 1, + PercentageFalse: 0, + Count: 20, + }, + }, + "location": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 20, + Items: []aggregation.TextOccurrence{ + { + Value: "New York", + Occurs: 10, + }, + { + Value: "San Francisco", + Occurs: 10, + }, + }, + }, + }, + }, + }, + }, + } + + // there is now way to use InEpsilon or InDelta on nested structs with + // testify, so unfortunately we have to do a manual deep equal: + assert.Equal(t, len(res.Groups), len(expectedResult.Groups)) + assert.Equal(t, expectedResult.Groups[0].Count, res.Groups[0].Count) + assert.Equal(t, expectedResult.Groups[0].GroupedBy, res.Groups[0].GroupedBy) + expectedProps := expectedResult.Groups[0].Properties + actualProps := res.Groups[0].Properties + assert.Equal(t, expectedProps["location"].TextAggregation.Count, + actualProps["location"].TextAggregation.Count) + assert.ElementsMatch(t, expectedProps["location"].TextAggregation.Items, + actualProps["location"].TextAggregation.Items) + assert.Equal(t, expectedProps["listedInIndex"], actualProps["listedInIndex"]) + assert.InDeltaMapValues(t, expectedProps["dividendYield"].NumericalAggregations, + actualProps["dividendYield"].NumericalAggregations, epsilon*100) + assert.InDeltaMapValues(t, expectedProps["price"].NumericalAggregations, + actualProps["price"].NumericalAggregations, epsilon*100) + + assert.Equal(t, len(res.Groups), len(expectedResult.Groups)) + assert.Equal(t, expectedResult.Groups[1].Count, res.Groups[1].Count) + assert.Equal(t, expectedResult.Groups[1].GroupedBy, res.Groups[1].GroupedBy) + expectedProps = expectedResult.Groups[1].Properties + actualProps = res.Groups[1].Properties + assert.Equal(t, 
expectedProps["location"].TextAggregation.Count, + actualProps["location"].TextAggregation.Count) + assert.ElementsMatch(t, expectedProps["location"].TextAggregation.Items, + actualProps["location"].TextAggregation.Items) + assert.Equal(t, expectedProps["listedInIndex"], actualProps["listedInIndex"]) + assert.InDeltaMapValues(t, expectedProps["dividendYield"].NumericalAggregations, + actualProps["dividendYield"].NumericalAggregations, epsilon*100) + assert.InDeltaMapValues(t, expectedProps["price"].NumericalAggregations, + actualProps["price"].NumericalAggregations, epsilon*100) + }) + + t.Run("no filters, grouped by ref prop", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + GroupBy: &filters.Path{ + Class: schema.ClassName(companyClass.Class), + Property: schema.PropertyName("makesProduct"), + }, + Properties: []aggregation.ParamProperty{ + { + Name: schema.PropertyName("dividendYield"), + Aggregators: []aggregation.Aggregator{ + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + // aggregation.ModeAggregator, + aggregation.MedianAggregator, + aggregation.CountAggregator, + aggregation.TypeAggregator, + }, + }, + { + Name: schema.PropertyName("price"), + Aggregators: []aggregation.Aggregator{ + aggregation.TypeAggregator, + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + // aggregation.ModeAggregator, // ignore as there is no most common value + aggregation.MedianAggregator, + aggregation.CountAggregator, + }, + }, + { + Name: schema.PropertyName("listedInIndex"), + Aggregators: []aggregation.Aggregator{ + aggregation.TypeAggregator, + aggregation.PercentageTrueAggregator, + aggregation.PercentageFalseAggregator, + aggregation.TotalTrueAggregator, + aggregation.TotalFalseAggregator, + }, + }, + { + Name: schema.PropertyName("location"), + Aggregators: 
[]aggregation.Aggregator{ + aggregation.TypeAggregator, + aggregation.NewTopOccurrencesAggregator(ptInt(5)), + }, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + Count: 10, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"makesProduct"}, + Value: strfmt.URI("weaviate://localhost/1295c052-263d-4aae-99dd-920c5a370d06"), + }, + Properties: map[string]aggregation.Property{ + "dividendYield": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 8.0, + "maximum": 8.0, + "minimum": 8.0, + "sum": 80.0, + "median": 8.0, + "count": 10., + }, + }, + "price": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 10., + "maximum": 10., + "minimum": 10., + "sum": 100., + "median": 10., + "count": 10., + }, + }, + "listedInIndex": { + Type: aggregation.PropertyTypeBoolean, + BooleanAggregation: aggregation.Boolean{ + TotalTrue: 10, + TotalFalse: 0, + PercentageTrue: 1, + PercentageFalse: 0, + Count: 10, + }, + }, + "location": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 10, + Items: []aggregation.TextOccurrence{ + { + Value: "Detroit", + Occurs: 10, + }, + }, + }, + }, + }, + }, + }, + } + + // there is now way to use InEpsilon or InDelta on nested structs with + // testify, so unfortunately we have to do a manual deep equal: + assert.Equal(t, len(res.Groups), len(expectedResult.Groups)) + assert.Equal(t, expectedResult.Groups[0].Count, res.Groups[0].Count) + assert.Equal(t, expectedResult.Groups[0].GroupedBy, res.Groups[0].GroupedBy) + expectedProps := expectedResult.Groups[0].Properties + actualProps := res.Groups[0].Properties + assert.Equal(t, expectedProps["location"].TextAggregation.Count, + actualProps["location"].TextAggregation.Count) + assert.ElementsMatch(t, 
expectedProps["location"].TextAggregation.Items, + actualProps["location"].TextAggregation.Items) + assert.Equal(t, expectedProps["listedInIndex"], actualProps["listedInIndex"]) + assert.InDeltaMapValues(t, expectedProps["dividendYield"].NumericalAggregations, + actualProps["dividendYield"].NumericalAggregations, epsilon*100) + assert.InDeltaMapValues(t, expectedProps["price"].NumericalAggregations, + actualProps["price"].NumericalAggregations, epsilon*100) + }) + + t.Run("with ref filter, grouped by string", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + GroupBy: &filters.Path{ + Class: schema.ClassName(companyClass.Class), + Property: schema.PropertyName("sector"), + }, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + Value: &filters.Value{ + Type: schema.DataTypeText, + Value: "Superbread", + }, + On: &filters.Path{ + Property: "makesProduct", + Child: &filters.Path{ + Class: "AggregationsTestProduct", + Property: "name", + }, + }, + }, + }, + Properties: []aggregation.ParamProperty{ + { + Name: schema.PropertyName("dividendYield"), + Aggregators: []aggregation.Aggregator{ + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + // aggregation.ModeAggregator, + aggregation.MedianAggregator, + aggregation.CountAggregator, + aggregation.TypeAggregator, + }, + }, + { + Name: schema.PropertyName("price"), + Aggregators: []aggregation.Aggregator{ + aggregation.TypeAggregator, + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + // aggregation.ModeAggregator, // ignore as there is no most common value + aggregation.MedianAggregator, + aggregation.CountAggregator, + }, + }, + { + Name: schema.PropertyName("listedInIndex"), + Aggregators: []aggregation.Aggregator{ + aggregation.TypeAggregator, + 
aggregation.PercentageTrueAggregator, + aggregation.PercentageFalseAggregator, + aggregation.TotalTrueAggregator, + aggregation.TotalFalseAggregator, + }, + }, + { + Name: schema.PropertyName("location"), + Aggregators: []aggregation.Aggregator{ + aggregation.TypeAggregator, + aggregation.NewTopOccurrencesAggregator(ptInt(5)), + }, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + require.NotNil(t, res) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + Count: 10, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"sector"}, + Value: "Food", + }, + Properties: map[string]aggregation.Property{ + "dividendYield": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 8.0, + "maximum": 8.0, + "minimum": 8.0, + "sum": 80., + "median": 8.0, + "count": 10., + }, + }, + "price": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 10., + "maximum": 10., + "minimum": 10., + "sum": 100., + "median": 10., + "count": 10., + }, + }, + "listedInIndex": { + Type: aggregation.PropertyTypeBoolean, + BooleanAggregation: aggregation.Boolean{ + TotalTrue: 10, + TotalFalse: 0, + PercentageTrue: 1, + PercentageFalse: 0, + Count: 10, + }, + }, + "location": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 10, + Items: []aggregation.TextOccurrence{ + { + Value: "Detroit", + Occurs: 10, + }, + }, + }, + }, + }, + }, + }, + } + + // there is now way to use InEpsilon or InDelta on nested structs with + // testify, so unfortunately we have to do a manual deep equal: + assert.Equal(t, len(res.Groups), len(expectedResult.Groups)) + assert.Equal(t, expectedResult.Groups[0].Count, res.Groups[0].Count) + assert.Equal(t, expectedResult.Groups[0].GroupedBy, res.Groups[0].GroupedBy) + expectedProps := expectedResult.Groups[0].Properties + actualProps := res.Groups[0].Properties + 
assert.Equal(t, expectedProps["location"], actualProps["location"]) + assert.Equal(t, expectedProps["listedInIndex"], actualProps["listedInIndex"]) + assert.InDeltaMapValues(t, expectedProps["dividendYield"].NumericalAggregations, + actualProps["dividendYield"].NumericalAggregations, 0.001) + assert.InDeltaMapValues(t, expectedProps["price"].NumericalAggregations, + actualProps["price"].NumericalAggregations, 0.001) + }) + + t.Run("array types, single aggregator strings", func(t *testing.T) { + if !exact { + t.Skip() + } + params := aggregation.Params{ + ClassName: schema.ClassName(arrayTypesClass.Class), + GroupBy: &filters.Path{ + Class: schema.ClassName(arrayTypesClass.Class), + Property: schema.PropertyName("strings"), + }, + IncludeMetaCount: true, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + Count: 2, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"strings"}, + Value: "a", + }, + Properties: map[string]aggregation.Property{}, + }, + { + Count: 1, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"strings"}, + Value: "b", + }, + Properties: map[string]aggregation.Property{}, + }, + { + Count: 1, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"strings"}, + Value: "c", + }, + Properties: map[string]aggregation.Property{}, + }, + }, + } + + assert.ElementsMatch(t, expectedResult.Groups, res.Groups) + }) + + t.Run("array types, single aggregator numbers", func(t *testing.T) { + if !exact { + t.Skip() + } + params := aggregation.Params{ + ClassName: schema.ClassName(arrayTypesClass.Class), + GroupBy: &filters.Path{ + Class: schema.ClassName(arrayTypesClass.Class), + Property: schema.PropertyName("numbers"), + }, + IncludeMetaCount: true, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + Count: 2, + 
GroupedBy: &aggregation.GroupedBy{ + Path: []string{"numbers"}, + Value: float64(1.0), + }, + Properties: map[string]aggregation.Property{}, + }, + { + Count: 2, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"numbers"}, + Value: float64(2.0), + }, + Properties: map[string]aggregation.Property{}, + }, + { + Count: 1, + GroupedBy: &aggregation.GroupedBy{ + Path: []string{"numbers"}, + Value: float64(3.0), + }, + Properties: map[string]aggregation.Property{}, + }, + }, + } + + assert.ElementsMatch(t, expectedResult.Groups, res.Groups) + }) + } +} + +func testDateAggregationsWithFilters(repo *DB) func(t *testing.T) { + return func(t *testing.T) { + t.Run("Aggregations with filter that matches nothing", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(customerClass.Class), + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorGreaterThan, + Value: &filters.Value{ + Type: schema.DataTypeDate, + Value: "0312-06-16T17:30:17.231346Z", // hello roman empire! 
+ }, + On: &filters.Path{ + Property: "timeArrived", + }, + }, + }, + IncludeMetaCount: true, + Properties: []aggregation.ParamProperty{ + { + Name: schema.PropertyName("timeArrived"), + Aggregators: []aggregation.Aggregator{aggregation.MeanAggregator, aggregation.CountAggregator, aggregation.MaximumAggregator, aggregation.MedianAggregator, aggregation.MinimumAggregator, aggregation.ModeAggregator, aggregation.TypeAggregator}, + }, + }, + } + res, err := repo.Aggregate(context.Background(), params, nil) + + // No results match the filter, so only a count of 0 is included + require.Nil(t, err) + require.Equal(t, 1, len(res.Groups)) + require.Equal(t, 1, len(res.Groups[0].Properties)) + require.Equal(t, 1, len(res.Groups[0].Properties["timeArrived"].DateAggregations)) + require.Equal(t, int64(0), res.Groups[0].Properties["timeArrived"].DateAggregations["count"].(int64)) + }) + } +} + +func testNumericalAggregationsWithFilters(repo *DB) func(t *testing.T) { + return func(t *testing.T) { + t.Run("Aggregations with filter that matches nothing", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorLessThan, + Value: &filters.Value{ + Type: schema.DataTypeInt, + Value: -5, // price is positive everywhere + }, + On: &filters.Path{ + Property: "price", + }, + }, + }, + IncludeMetaCount: true, + Properties: []aggregation.ParamProperty{ + { + Name: schema.PropertyName("dividendYield"), + Aggregators: []aggregation.Aggregator{aggregation.MeanAggregator, aggregation.CountAggregator, aggregation.MaximumAggregator, aggregation.MedianAggregator, aggregation.MinimumAggregator, aggregation.ModeAggregator, aggregation.TypeAggregator}, + }, + }, + } + res, err := repo.Aggregate(context.Background(), params, nil) + + // No results match the filter, so only a count of 0 is included + require.Nil(t, err) + require.Equal(t, 1, len(res.Groups)) + 
require.Equal(t, 1, len(res.Groups[0].Properties)) + require.Equal(t, 1, len(res.Groups[0].Properties["dividendYield"].NumericalAggregations)) + require.Equal(t, float64(0), res.Groups[0].Properties["dividendYield"].NumericalAggregations["count"].(float64)) + }) + } +} + +func testNumericalAggregationsWithoutGrouping(repo *DB, + exact bool, +) func(t *testing.T) { + return func(t *testing.T) { + t.Run("only meta count, no other aggregations", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + IncludeMetaCount: true, + GroupBy: nil, // explicitly set to nil + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + GroupedBy: nil, + Count: 90, + }, + }, + } + + require.NotNil(t, res) + assert.Equal(t, expectedResult.Groups, res.Groups) + }) + + t.Run("single field, single aggregator", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + GroupBy: nil, // explicitly set to nil + Properties: []aggregation.ParamProperty{ + { + Name: schema.PropertyName("dividendYield"), + Aggregators: []aggregation.Aggregator{aggregation.MeanAggregator}, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + if exact { + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + GroupedBy: nil, + Properties: map[string]aggregation.Property{ + "dividendYield": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 2.111111111111111, + }, + }, + }, + }, + }, + } + + assert.Equal(t, expectedResult.Groups, res.Groups) + } else { + require.Len(t, res.Groups, 1) + divYield := res.Groups[0].Properties["dividendYield"] + assert.Equal(t, aggregation.PropertyTypeNumerical, divYield.Type) + assert.InDelta(t, 2.1111, divYield.NumericalAggregations["mean"], 2) + } + 
}) + + t.Run("multiple fields, multiple aggregators", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + GroupBy: nil, // explicitly set to nil, + IncludeMetaCount: true, + Properties: []aggregation.ParamProperty{ + { + Name: schema.PropertyName("dividendYield"), + Aggregators: []aggregation.Aggregator{ + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + aggregation.ModeAggregator, + aggregation.MedianAggregator, + aggregation.CountAggregator, + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + { + Name: schema.PropertyName("price"), + Aggregators: []aggregation.Aggregator{ + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + aggregation.ModeAggregator, + aggregation.MedianAggregator, + aggregation.CountAggregator, + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + { + Name: schema.PropertyName("listedInIndex"), + Aggregators: []aggregation.Aggregator{ + aggregation.PercentageTrueAggregator, + aggregation.PercentageFalseAggregator, + aggregation.TotalTrueAggregator, + aggregation.TotalFalseAggregator, + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + { + Name: schema.PropertyName("location"), + Aggregators: []aggregation.Aggregator{ + // limit is so high, it's not really restrictive + aggregation.NewTopOccurrencesAggregator(ptInt(10)), + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + { + Name: schema.PropertyName("sector"), + Aggregators: []aggregation.Aggregator{ + // limit is very restrictive + aggregation.NewTopOccurrencesAggregator(ptInt(1)), + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + // we are not expecting any result from the following agg, as this is + // handled in the usecase. 
However, we at least want to make sure it + // doesn't block or lead to any errors + { + Name: schema.PropertyName("makesProduct"), + Aggregators: []aggregation.Aggregator{ + aggregation.PointingToAggregator, + aggregation.TypeAggregator, + }, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + Count: 90, // because includeMetaCount was set + Properties: map[string]aggregation.Property{ + "dividendYield": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 2.111111111111111, + "maximum": 8.0, + "minimum": 0.0, + "sum": 190., + "mode": 1.3, + "median": 1.3, + "count": 90., + }, + }, + "price": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 234.11111111111111, + "maximum": 800., + "minimum": 10., + "sum": 21070., + "mode": 70., + "median": 150., + "count": 90., + }, + }, + "listedInIndex": { + Type: aggregation.PropertyTypeBoolean, + BooleanAggregation: aggregation.Boolean{ + TotalTrue: 80, + TotalFalse: 10, + PercentageTrue: 0.8888888888888888, + PercentageFalse: 0.1111111111111111, + Count: 90, + }, + }, + "location": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 90, + Items: []aggregation.TextOccurrence{ + { + Value: "New York", + Occurs: 30, + }, + { + Value: "Atlanta", + Occurs: 20, + }, + { + Value: "San Francisco", + Occurs: 20, + }, + { + Value: "Detroit", + Occurs: 10, + }, + { + Value: "Los Angeles", + Occurs: 10, + }, + }, + }, + }, + "sector": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 90, + Items: []aggregation.TextOccurrence{ + { + Value: "Food", + Occurs: 60, + }, + }, + }, + }, + }, + }, + }, + } + + if exact { + assert.Equal(t, expectedResult.Groups, res.Groups) + } else { + t.Run("numerical fields", func(t *testing.T) { + aggs := 
res.Groups[0].Properties["dividendYield"].NumericalAggregations + expextedAggs := expectedResult.Groups[0].Properties["dividendYield"].NumericalAggregations + + // max, min, count, sum are always exact matches, but we need an + // epsilon check because of floating point arithmetics + assert.InEpsilon(t, expextedAggs["maximum"], aggs["maximum"], 0.1) + assert.Equal(t, expextedAggs["minimum"], aggs["minimum"]) // equal because the result == 0 + assert.InEpsilon(t, expextedAggs["count"], aggs["count"], 0.1) + assert.InEpsilon(t, expextedAggs["sum"], aggs["sum"], 0.1) + + // mean, mode, median are always fuzzy + assert.InDelta(t, expextedAggs["mean"], aggs["mean"], 2) + assert.InDelta(t, expextedAggs["mode"], aggs["mode"], 2) + assert.InDelta(t, expextedAggs["median"], aggs["median"], 2) + }) + + t.Run("int fields", func(t *testing.T) { + aggs := res.Groups[0].Properties["price"].NumericalAggregations + expextedAggs := expectedResult.Groups[0].Properties["price"].NumericalAggregations + + // max, min, count, sum are always exact matches, but we need an + // epsilon check because of floating point arithmetics + assert.InEpsilon(t, expextedAggs["maximum"], aggs["maximum"], 0.1) + assert.InEpsilon(t, expextedAggs["minimum"], aggs["minimum"], 0.1) + assert.InEpsilon(t, expextedAggs["count"], aggs["count"], 0.1) + assert.InEpsilon(t, expextedAggs["sum"], aggs["sum"], 0.1) + + // mean, mode, median are always fuzzy + assert.InEpsilon(t, expextedAggs["mean"], aggs["mean"], 0.5, "mean") + assert.InEpsilon(t, expextedAggs["mode"], aggs["mode"], 10, "mode") + assert.InEpsilon(t, expextedAggs["median"], aggs["median"], 0.5, "median") + }) + + t.Run("boolean fields", func(t *testing.T) { + aggs := res.Groups[0].Properties["listedInIndex"].BooleanAggregation + expectedAggs := expectedResult.Groups[0].Properties["listedInIndex"].BooleanAggregation + + assert.InEpsilon(t, expectedAggs.TotalTrue, aggs.TotalTrue, 0.1) + assert.InEpsilon(t, expectedAggs.TotalFalse, aggs.TotalFalse, 0.1) 
+ assert.InEpsilon(t, expectedAggs.PercentageTrue, aggs.PercentageTrue, 0.1) + assert.InEpsilon(t, expectedAggs.PercentageFalse, aggs.PercentageFalse, 0.1) + assert.InEpsilon(t, expectedAggs.Count, aggs.Count, 0.1) + }) + + t.Run("text fields (location)", func(t *testing.T) { + aggs := res.Groups[0].Properties["location"].TextAggregation + expectedAggs := expectedResult.Groups[0].Properties["location"].TextAggregation + + assert.Equal(t, expectedAggs.Count, aggs.Count) + assert.ElementsMatch(t, expectedAggs.Items, aggs.Items) + }) + t.Run("text fields (sector)", func(t *testing.T) { + aggs := res.Groups[0].Properties["sector"].TextAggregation + expectedAggs := expectedResult.Groups[0].Properties["sector"].TextAggregation + + assert.Equal(t, expectedAggs.Count, aggs.Count) + assert.ElementsMatch(t, expectedAggs.Items, aggs.Items) + }) + } + }) + + t.Run("multiple fields, multiple aggregators, single-level filter", func(t *testing.T) { + if !exact { + // filtering is happening inside a shard, so there is no need to test + // this again for multi-sharding. 
This saves us from adapting all the + // assertions to work with fuzzy values + t.Skip() + } + + params := aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + GroupBy: nil, // explicitly set to nil, + Filters: sectorEqualsFoodFilter(), + IncludeMetaCount: true, + Properties: []aggregation.ParamProperty{ + { + Name: schema.PropertyName("dividendYield"), + Aggregators: []aggregation.Aggregator{ + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + aggregation.ModeAggregator, + aggregation.MedianAggregator, + aggregation.CountAggregator, + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + { + Name: schema.PropertyName("price"), + Aggregators: []aggregation.Aggregator{ + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + aggregation.ModeAggregator, + aggregation.MedianAggregator, + aggregation.CountAggregator, + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + { + Name: schema.PropertyName("listedInIndex"), + Aggregators: []aggregation.Aggregator{ + aggregation.PercentageTrueAggregator, + aggregation.PercentageFalseAggregator, + aggregation.TotalTrueAggregator, + aggregation.TotalFalseAggregator, + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + { + Name: schema.PropertyName("location"), + Aggregators: []aggregation.Aggregator{ + // limit is so high, it's not really restrictive + aggregation.NewTopOccurrencesAggregator(ptInt(10)), + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + { + Name: schema.PropertyName("sector"), + Aggregators: []aggregation.Aggregator{ + // limit is very restrictive + aggregation.NewTopOccurrencesAggregator(ptInt(1)), + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + // we are not expecting any result from the following agg, 
as this is + // handled in the usecase. However, we at least want to make sure it + // doesn't block or lead to any errors + { + Name: schema.PropertyName("makesProduct"), + Aggregators: []aggregation.Aggregator{ + aggregation.PointingToAggregator, + aggregation.TypeAggregator, + }, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + actualDivYield := res.Groups[0].Properties["dividendYield"] + delete(res.Groups[0].Properties, "dividendYield") + actualPrice := res.Groups[0].Properties["price"] + delete(res.Groups[0].Properties, "price") + actualMakesProduct := res.Groups[0].Properties["makesProduct"] + delete(res.Groups[0].Properties, "makesProduct") + + expectedDivYield := aggregation.Property{ + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 2.066666666666666, + "maximum": 8.0, + "minimum": 0.0, + "sum": 124, + "mode": 0.0, + "median": 1.2, + "count": 60., + }, + } + + expectedPrice := aggregation.Property{ + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 218.33333333333334, + "maximum": 800., + "minimum": 10., + "sum": 13100., + "mode": 70., + "median": 115., + "count": 60., + }, + } + + expectedMakesProduct := aggregation.Property{ + Type: aggregation.PropertyTypeReference, + ReferenceAggregation: aggregation.Reference{ + PointingTo: []string{"weaviate://localhost/1295c052-263d-4aae-99dd-920c5a370d06"}, + }, + } + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + Count: 60, // because includeMetaCount was set + Properties: map[string]aggregation.Property{ + "listedInIndex": { + Type: aggregation.PropertyTypeBoolean, + BooleanAggregation: aggregation.Boolean{ + TotalTrue: 50, + TotalFalse: 10, + PercentageTrue: 0.8333333333333334, + PercentageFalse: 0.16666666666666666, + Count: 60, + }, + }, + "location": { + Type: aggregation.PropertyTypeText, + TextAggregation: 
aggregation.Text{ + Count: 60, + Items: []aggregation.TextOccurrence{ + { + Value: "Atlanta", + Occurs: 20, + }, + { + Value: "Detroit", + Occurs: 10, + }, + { + Value: "Los Angeles", + Occurs: 10, + }, + { + Value: "New York", + Occurs: 10, + }, + { + Value: "San Francisco", + Occurs: 10, + }, + }, + }, + }, + "sector": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 60, + Items: []aggregation.TextOccurrence{ + { + Value: "Food", + Occurs: 60, + }, + }, + }, + }, + }, + }, + }, + } + + assert.Equal(t, expectedResult.Groups, res.Groups) + + // floating point arithmetic for numerical fields + + assert.InEpsilon(t, expectedDivYield.NumericalAggregations["mean"], + actualDivYield.NumericalAggregations["mean"], 0.1) + assert.InEpsilon(t, expectedPrice.NumericalAggregations["mean"], + actualPrice.NumericalAggregations["mean"], 0.1) + + assert.InEpsilon(t, expectedDivYield.NumericalAggregations["maximum"], + actualDivYield.NumericalAggregations["maximum"], 0.1) + assert.InEpsilon(t, expectedPrice.NumericalAggregations["maximum"], + actualPrice.NumericalAggregations["maximum"], 0.1) + + assert.Equal(t, expectedDivYield.NumericalAggregations["minimum"], + actualDivYield.NumericalAggregations["minimum"]) + assert.Equal(t, expectedPrice.NumericalAggregations["minimum"], + actualPrice.NumericalAggregations["minimum"]) + + assert.Equal(t, expectedDivYield.NumericalAggregations["mode"], + actualDivYield.NumericalAggregations["mode"]) + assert.Equal(t, expectedPrice.NumericalAggregations["mode"], + actualPrice.NumericalAggregations["mode"]) + + assert.InEpsilon(t, expectedDivYield.NumericalAggregations["median"], + actualDivYield.NumericalAggregations["median"], 0.1) + assert.InEpsilon(t, expectedPrice.NumericalAggregations["median"], + actualPrice.NumericalAggregations["median"], 0.1) + + assert.InEpsilon(t, expectedDivYield.NumericalAggregations["count"], + actualDivYield.NumericalAggregations["count"], 0.1) + assert.InEpsilon(t, 
expectedPrice.NumericalAggregations["count"], + actualPrice.NumericalAggregations["count"], 0.1) + + assert.Equal(t, expectedMakesProduct.ReferenceAggregation.PointingTo, + actualMakesProduct.ReferenceAggregation.PointingTo) + }) + + t.Run("multiple fields, multiple aggregators, ref filter", func(t *testing.T) { + if !exact { + // filtering is happening inside a shard, so there is no need to test + // this again for multi-sharding. This saves us from adapting all the + // assertions to work with fuzzy values + t.Skip() + } + + params := aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + GroupBy: nil, // explicitly set to nil, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + Value: &filters.Value{ + Type: schema.DataTypeText, + Value: "Superbread", + }, + On: &filters.Path{ + Property: "makesProduct", + Child: &filters.Path{ + Class: "AggregationsTestProduct", + Property: "name", + }, + }, + }, + }, + IncludeMetaCount: true, + Properties: []aggregation.ParamProperty{ + { + Name: schema.PropertyName("dividendYield"), + Aggregators: []aggregation.Aggregator{ + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + aggregation.ModeAggregator, + aggregation.MedianAggregator, + aggregation.CountAggregator, + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + { + Name: schema.PropertyName("price"), + Aggregators: []aggregation.Aggregator{ + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + aggregation.ModeAggregator, + aggregation.MedianAggregator, + aggregation.CountAggregator, + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + { + Name: schema.PropertyName("listedInIndex"), + Aggregators: []aggregation.Aggregator{ + aggregation.PercentageTrueAggregator, + aggregation.PercentageFalseAggregator, + 
aggregation.TotalTrueAggregator, + aggregation.TotalFalseAggregator, + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + { + Name: schema.PropertyName("location"), + Aggregators: []aggregation.Aggregator{ + // limit is so high, it's not really restrictive + aggregation.NewTopOccurrencesAggregator(ptInt(10)), + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + { + Name: schema.PropertyName("sector"), + Aggregators: []aggregation.Aggregator{ + // limit is very restrictive + aggregation.NewTopOccurrencesAggregator(ptInt(1)), + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + // we are not expecting any result from the following agg, as this is + // handled in the usecase. However, we at least want to make sure it + // doesn't block or lead to any errors + { + Name: schema.PropertyName("makesProduct"), + Aggregators: []aggregation.Aggregator{ + aggregation.PointingToAggregator, + aggregation.TypeAggregator, + }, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + Count: 10, + Properties: map[string]aggregation.Property{ + "makesProduct": { + Type: aggregation.PropertyTypeReference, + ReferenceAggregation: aggregation.Reference{PointingTo: []string{"weaviate://localhost/1295c052-263d-4aae-99dd-920c5a370d06"}}, + }, + "dividendYield": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 8.0, + "maximum": 8.0, + "minimum": 8.0, + "sum": 80., + "mode": 8.0, + "median": 8.0, + "count": 10., + }, + }, + "price": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 10., + "maximum": 10., + "minimum": 10., + "sum": 100., + "mode": 10., + "median": 10., + "count": 10., + }, + }, + "listedInIndex": { + Type: aggregation.PropertyTypeBoolean, + BooleanAggregation: 
aggregation.Boolean{ + TotalTrue: 10, + TotalFalse: 0, + PercentageTrue: 1, + PercentageFalse: 0, + Count: 10, + }, + }, + "location": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 10, + Items: []aggregation.TextOccurrence{ + { + Value: "Detroit", + Occurs: 10, + }, + }, + }, + }, + "sector": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 10, + Items: []aggregation.TextOccurrence{ + { + Value: "Food", + Occurs: 10, + }, + }, + }, + }, + }, + }, + }, + } + + assert.Equal(t, expectedResult.Groups, res.Groups) + }) + + t.Run("array types, only meta count, no other aggregations", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(arrayTypesClass.Class), + IncludeMetaCount: true, + GroupBy: nil, // explicitly set to nil + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + GroupedBy: nil, + Count: 2, + }, + }, + } + + require.NotNil(t, res) + assert.Equal(t, expectedResult.Groups, res.Groups) + }) + + t.Run("array types, single aggregator numbers", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(arrayTypesClass.Class), + GroupBy: nil, // explicitly set to nil + Properties: []aggregation.ParamProperty{ + { + Name: schema.PropertyName("numbers"), + Aggregators: []aggregation.Aggregator{ + aggregation.MeanAggregator, + aggregation.MaximumAggregator, + aggregation.MinimumAggregator, + aggregation.SumAggregator, + aggregation.ModeAggregator, + aggregation.MedianAggregator, + aggregation.CountAggregator, + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + GroupedBy: nil, + Properties: 
map[string]aggregation.Property{ + "numbers": { + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{ + "mean": 2.0, + "maximum": 3.0, + "minimum": 1.0, + "sum": 14.0, + "mode": 2.0, + "median": 2.0, + "count": 7., + }, + }, + }, + }, + }, + } + + assert.Equal(t, expectedResult.Groups, res.Groups) + }) + + t.Run("array types, single aggregator strings", func(t *testing.T) { + if !exact { + t.Skip() + } + params := aggregation.Params{ + ClassName: schema.ClassName(arrayTypesClass.Class), + GroupBy: nil, // explicitly set to nil + Properties: []aggregation.ParamProperty{ + { + Name: schema.PropertyName("strings"), + Aggregators: []aggregation.Aggregator{ + // limit is very restrictive + aggregation.NewTopOccurrencesAggregator(ptInt(1)), + aggregation.TypeAggregator, // ignored in the repo, but can't block + }, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + GroupedBy: nil, + Properties: map[string]aggregation.Property{ + "strings": { + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{ + Count: 4, + Items: []aggregation.TextOccurrence{ + { + Value: "a", + Occurs: 2, + }, + }, + }, + }, + }, + }, + }, + } + + assert.Equal(t, expectedResult.Groups, res.Groups) + }) + } +} + +func testDateAggregationsWithGrouping(repo *DB, exact bool) func(t *testing.T) { + return func(t *testing.T) { + t.Run("group on only unique values", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(customerClass.Class), + IncludeMetaCount: true, + GroupBy: &filters.Path{ + Class: schema.ClassName(customerClass.Class), + // Each customer obj has a unique value for the `internalId` field + Property: schema.PropertyName("internalId"), + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + require.NotNil(t, res) + assert.Len(t, 
res.Groups, len(customers)) + }) + + t.Run("group on only identical values", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(customerClass.Class), + IncludeMetaCount: true, + GroupBy: &filters.Path{ + Class: schema.ClassName(customerClass.Class), + // Each customer obj has the same value for the `countryOfOrigin` field + Property: schema.PropertyName("countryOfOrigin"), + }, + Properties: []aggregation.ParamProperty{ + { + Name: "timeArrived", + Aggregators: []aggregation.Aggregator{ + aggregation.CountAggregator, + aggregation.MinimumAggregator, + aggregation.MaximumAggregator, + aggregation.MedianAggregator, + aggregation.ModeAggregator, + }, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + require.NotNil(t, res) + assert.Len(t, res.Groups, 1) + + expectedProperties := map[string]interface{}{ + "count": int64(10), + "minimum": "2022-06-16T17:30:17.231346Z", + "maximum": "2022-06-16T17:30:26.451235Z", + "median": "2022-06-16T17:30:21.1179905Z", + "mode": "2022-06-16T17:30:17.231346Z", + } + receivedProperties := res.Groups[0].Properties["timeArrived"].DateAggregations + assert.EqualValues(t, expectedProperties, receivedProperties) + }) + + t.Run("group on some unique values", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(customerClass.Class), + IncludeMetaCount: true, + GroupBy: &filters.Path{ + Class: schema.ClassName(customerClass.Class), + // should result in two groups due to bool value + Property: schema.PropertyName("isNewCustomer"), + }, + Properties: []aggregation.ParamProperty{ + { + Name: "timeArrived", + Aggregators: []aggregation.Aggregator{ + aggregation.CountAggregator, + aggregation.MinimumAggregator, + aggregation.MaximumAggregator, + aggregation.MedianAggregator, + aggregation.ModeAggregator, + }, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + require.NotNil(t, 
res) + assert.Len(t, res.Groups, 2) + + expectedResult := []aggregation.Group{ + { + Properties: map[string]aggregation.Property{ + "timeArrived": { + Type: "date", + DateAggregations: map[string]interface{}{ + "count": int64(6), + "maximum": "2022-06-16T17:30:25.524536Z", + "median": "2022-06-16T17:30:19.6718905Z", + "minimum": "2022-06-16T17:30:17.231346Z", + "mode": "2022-06-16T17:30:17.231346Z", + }, + }, + }, + GroupedBy: &aggregation.GroupedBy{ + Value: false, + Path: []string{"isNewCustomer"}, + }, + Count: 6, + }, + { + Properties: map[string]aggregation.Property{ + "timeArrived": { + Type: "date", + DateAggregations: map[string]interface{}{ + "count": int64(4), + "maximum": "2022-06-16T17:30:26.451235Z", + "median": "2022-06-16T17:30:22.224622Z", + "minimum": "2022-06-16T17:30:20.123546Z", + "mode": "2022-06-16T17:30:20.123546Z", + }, + }, + }, + GroupedBy: &aggregation.GroupedBy{ + Value: true, + Path: []string{"isNewCustomer"}, + }, + Count: 4, + }, + } + + assert.EqualValues(t, expectedResult, res.Groups) + }) + } +} + +func testDateAggregationsWithoutGrouping(repo *DB, exact bool) func(t *testing.T) { + return func(t *testing.T) { + t.Run("without grouping", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(customerClass.Class), + GroupBy: nil, + Properties: []aggregation.ParamProperty{ + { + Name: "timeArrived", + Aggregators: []aggregation.Aggregator{ + aggregation.CountAggregator, + aggregation.MinimumAggregator, + aggregation.MaximumAggregator, + aggregation.MedianAggregator, + aggregation.ModeAggregator, + }, + }, + }, + } + + res, err := repo.Aggregate(context.Background(), params, nil) + require.Nil(t, err) + + require.NotNil(t, res) + require.Len(t, res.Groups, 1) + }) + } +} + +func ptInt(in int) *int { + return &in +} + +func sectorEqualsFoodFilter() *filters.LocalFilter { + return &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "Company", + 
Property: "sector", + }, + Value: &filters.Value{ + Value: "Food", + Type: schema.DataTypeText, + }, + }, + } +} + +func mustStringToTime(s string) time.Time { + asTime, err := time.ParseInLocation(time.RFC3339Nano, s, time.UTC) + if err != nil { + panic(fmt.Sprintf("failed to parse time: %s, %s", s, err)) + } + return asTime +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/aggregator.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/aggregator.go new file mode 100644 index 0000000000000000000000000000000000000000..86ab3456b94aacf445f542597ed47d0ad2ba7b79 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/aggregator.go @@ -0,0 +1,146 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/stopwords" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/modules" + schemaUC "github.com/weaviate/weaviate/usecases/schema" +) + +type vectorIndex interface { + SearchByVectorDistance(ctx context.Context, vector []float32, targetDistance float32, maxLimit int64, + allowList helpers.AllowList) ([]uint64, []float32, error) + SearchByVector(ctx context.Context, vector []float32, k int, allowList helpers.AllowList) ([]uint64, 
[]float32, error) +} + +type vectorIndexMulti interface { + SearchByMultiVectorDistance(ctx context.Context, vector [][]float32, targetDistance float32, + maxLimit int64, allowList helpers.AllowList) ([]uint64, []float32, error) + SearchByMultiVector(ctx context.Context, vector [][]float32, k int, allowList helpers.AllowList) ([]uint64, []float32, error) +} + +type Aggregator struct { + logger logrus.FieldLogger + store *lsmkv.Store + params aggregation.Params + getSchema schemaUC.SchemaGetter + classSearcher inverted.ClassSearcher // to support ref-filters + vectorIndex vectorIndex + stopwords stopwords.StopwordDetector + shardVersion uint16 + propLenTracker *inverted.JsonShardMetaData + isFallbackToSearchable inverted.IsFallbackToSearchable + tenant string + nestedCrossRefLimit int64 + bitmapFactory *roaringset.BitmapFactory + modules *modules.Provider + defaultLimit int64 +} + +func New(store *lsmkv.Store, params aggregation.Params, + getSchema schemaUC.SchemaGetter, classSearcher inverted.ClassSearcher, + stopwords stopwords.StopwordDetector, shardVersion uint16, + vectorIndex vectorIndex, logger logrus.FieldLogger, + propLenTracker *inverted.JsonShardMetaData, + isFallbackToSearchable inverted.IsFallbackToSearchable, + tenant string, nestedCrossRefLimit int64, + bitmapFactory *roaringset.BitmapFactory, + modules *modules.Provider, defaultLimit int64, +) *Aggregator { + return &Aggregator{ + logger: logger, + store: store, + params: params, + getSchema: getSchema, + classSearcher: classSearcher, + stopwords: stopwords, + shardVersion: shardVersion, + vectorIndex: vectorIndex, + propLenTracker: propLenTracker, + isFallbackToSearchable: isFallbackToSearchable, + tenant: tenant, + nestedCrossRefLimit: nestedCrossRefLimit, + bitmapFactory: bitmapFactory, + modules: modules, + defaultLimit: defaultLimit, + } +} + +func (a *Aggregator) GetPropertyLengthTracker() *inverted.JsonShardMetaData { + return a.propLenTracker +} + +func (a *Aggregator) Do(ctx context.Context) 
(*aggregation.Result, error) { + if a.params.GroupBy != nil { + return newGroupedAggregator(a).Do(ctx) + } + + isVectorEmpty, err := dto.IsVectorEmpty(a.params.SearchVector) + if err != nil { + return nil, fmt.Errorf("aggregator: %w", err) + } + + if a.params.Filters != nil || !isVectorEmpty || a.params.Hybrid != nil { + return newFilteredAggregator(a).Do(ctx) + } + + return newUnfilteredAggregator(a).Do(ctx) +} + +func (a *Aggregator) aggTypeOfProperty( + name schema.PropertyName, +) (aggregation.PropertyType, schema.DataType, error) { + class := a.getSchema.ReadOnlyClass(a.params.ClassName.String()) + if class == nil { + return "", "", fmt.Errorf("could not find class %s in schema", a.params.ClassName) + } + schemaProp, err := schema.GetPropertyByName(class, name.String()) + if err != nil { + return "", "", errors.Wrapf(err, "property %s", name) + } + + if schema.IsRefDataType(schemaProp.DataType) { + return aggregation.PropertyTypeReference, schema.DataTypeCRef, nil + } + + dt := schema.DataType(schemaProp.DataType[0]) + switch dt { + case schema.DataTypeInt, schema.DataTypeNumber, schema.DataTypeIntArray, + schema.DataTypeNumberArray: + return aggregation.PropertyTypeNumerical, dt, nil + case schema.DataTypeBoolean, schema.DataTypeBooleanArray: + return aggregation.PropertyTypeBoolean, dt, nil + case schema.DataTypeText, schema.DataTypeTextArray: + return aggregation.PropertyTypeText, dt, nil + case schema.DataTypeDate, schema.DataTypeDateArray: + return aggregation.PropertyTypeDate, dt, nil + case schema.DataTypeGeoCoordinates: + return "", "", fmt.Errorf("dataType geoCoordinates can't be aggregated") + case schema.DataTypePhoneNumber: + return "", "", fmt.Errorf("dataType phoneNumber can't be aggregated") + default: + return "", "", fmt.Errorf("unrecoginzed dataType %v", schemaProp.DataType[0]) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/boolean.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/boolean.go new file mode 100644 index 0000000000000000000000000000000000000000..9965ee4909a51864121baf1fa1f7ccf68dd0a5a0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/boolean.go @@ -0,0 +1,78 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "bytes" + "encoding/binary" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/aggregation" +) + +func newBoolAggregator() *boolAggregator { + return &boolAggregator{} +} + +type boolAggregator struct { + countTrue uint64 + countFalse uint64 +} + +func (a *boolAggregator) AddBoolRow(value []byte, count uint64) error { + var valueParsed bool + + if err := binary.Read(bytes.NewReader(value), binary.LittleEndian, + &valueParsed); err != nil { + return errors.Wrap(err, "read bool") + } + + if count == 0 { + // skip + return nil + } + + if valueParsed { + a.countTrue += count + } else { + a.countFalse += count + } + + return nil +} + +func (a *boolAggregator) AddBool(value bool) error { + if value { + a.countTrue++ + } else { + a.countFalse++ + } + + return nil +} + +func (a *boolAggregator) Res() aggregation.Boolean { + out := aggregation.Boolean{} + + count := int(a.countTrue) + int(a.countFalse) + if count == 0 { + return out + } + + out.Count = count + out.TotalFalse = int(a.countFalse) + out.TotalTrue = int(a.countTrue) + out.PercentageTrue = float64(a.countTrue) / float64(count) + out.PercentageFalse = float64(a.countFalse) / float64(count) + + return out +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/date.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/date.go new file mode 
100644 index 0000000000000000000000000000000000000000..94e6a624510461d2039858265f28b88d7b2e3f43 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/date.go @@ -0,0 +1,211 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "fmt" + "math" + "sort" + "time" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/inverted" +) + +func addDateAggregations(prop *aggregation.Property, + aggs []aggregation.Aggregator, agg *dateAggregator, +) { + if prop.DateAggregations == nil { + prop.DateAggregations = map[string]interface{}{} + } + agg.buildPairsFromCounts() + + // if there are no elements to aggregate over because a filter does not match anything, calculating median etc. makes + // no sense. Non-existent entries evaluate to nil with an interface{} map + if agg.count == 0 { + for _, entry := range aggs { + if entry == aggregation.CountAggregator { + prop.DateAggregations["count"] = int64(agg.count) + break + } + } + return + } + + // when combining the results from different shards, we need the raw dates to recompute the mode and median. 
+ // Therefore we add a reference later which needs to be cleared out before returning the results to a user + for _, aProp := range aggs { + switch aProp { + case aggregation.ModeAggregator, aggregation.MedianAggregator: + prop.DateAggregations["_dateAggregator"] = agg + } + } + + for _, aProp := range aggs { + switch aProp { + case aggregation.MinimumAggregator: + prop.DateAggregations[aProp.String()] = agg.Min() + case aggregation.MaximumAggregator: + prop.DateAggregations[aProp.String()] = agg.Max() + case aggregation.ModeAggregator: + prop.DateAggregations[aProp.String()] = agg.Mode() + case aggregation.CountAggregator: + prop.DateAggregations[aProp.String()] = agg.Count() + case aggregation.MedianAggregator: + prop.DateAggregations[aProp.String()] = agg.Median() + + default: + continue + } + } +} + +type dateAggregator struct { + count uint64 + maxCount uint64 + min timestamp + max timestamp + mode timestamp + pairs []timestampCountPair // for row-based median calculation + valueCounter map[timestamp]uint64 // for individual median calculation +} + +func newDateAggregator() *dateAggregator { + return &dateAggregator{ + min: timestamp{epochNano: math.MaxInt64}, + valueCounter: map[timestamp]uint64{}, + pairs: make([]timestampCountPair, 0), + } +} + +// timestamp allows us to contain multiple representations of a datetime +// the nanosecs value is needed for the numerical comparisons, and the +// string value is what the user expects to see +type timestamp struct { + epochNano int64 + rfc3339 string +} + +func newTimestamp(epochNano int64) timestamp { + return timestamp{ + epochNano: epochNano, + rfc3339: time.Unix(0, epochNano).UTC().Format(time.RFC3339Nano), + } +} + +type timestampCountPair struct { + value timestamp + count uint64 +} + +func (a *dateAggregator) AddTimestamp(rfc3339 string) error { + t, err := time.Parse(time.RFC3339Nano, rfc3339) + if err != nil { + return fmt.Errorf("failed to parse timestamp: %w", err) + } + + ts := timestamp{ + 
epochNano: t.UnixNano(), + rfc3339: rfc3339, + } + return a.addRow(ts, 1) +} + +func (a *dateAggregator) AddTimestampRow(b []byte, count uint64) error { + nsec, err := inverted.ParseLexicographicallySortableInt64(b) + if err != nil { + return errors.Wrap(err, "read int64") + } + + ts := newTimestamp(nsec) + + return a.addRow(ts, count) +} + +func (a *dateAggregator) addRow(ts timestamp, count uint64) error { + if count == 0 { + // skip + return nil + } + + a.count += count + if ts.epochNano < a.min.epochNano { + a.min = ts + } + if ts.epochNano > a.max.epochNano { + a.max = ts + } + + currentCount := a.valueCounter[ts] + currentCount += count + a.valueCounter[ts] = currentCount + + return nil +} + +func (a *dateAggregator) Max() string { + return a.max.rfc3339 +} + +func (a *dateAggregator) Min() string { + return a.min.rfc3339 +} + +// Mode does not require preparation if build from rows, but requires a call of +// buildPairsFromCounts() if it was built using individual objects +func (a *dateAggregator) Mode() string { + return a.mode.rfc3339 +} + +func (a *dateAggregator) Count() int64 { + return int64(a.count) +} + +// Median does not require preparation if build from rows, but requires a call of +// buildPairsFromCounts() if it was built using individual objects +// +// Check the numericalAggregator.Median() for details about the calculation +func (a *dateAggregator) Median() string { + middleIndex := a.count / 2 + count := uint64(0) + for index, pair := range a.pairs { + count += pair.count + if a.count%2 == 1 && count > middleIndex { + return pair.value.rfc3339 // case a) + } else if a.count%2 == 0 { + if count == middleIndex { + MedianEpochNano := pair.value.epochNano + (a.pairs[index+1].value.epochNano-pair.value.epochNano)/2 + return time.Unix(0, MedianEpochNano).UTC().Format(time.RFC3339Nano) // case b2) + } else if count > middleIndex { + return pair.value.rfc3339 // case b1) + } + } + } + panic("Couldn't determine median. This should never happen. 
Did you add values and call buildRows before?") +} + +// turns the value counter into a sorted list, as well as identifying the mode +func (a *dateAggregator) buildPairsFromCounts() { + a.pairs = a.pairs[:0] // clear out old values in case this function called more than once + for value, count := range a.valueCounter { + if count > a.maxCount { + a.maxCount = count + a.mode = value + } + a.pairs = append(a.pairs, timestampCountPair{value: value, count: count}) + } + + sort.Slice(a.pairs, func(x, y int) bool { + return a.pairs[x].value.epochNano < a.pairs[y].value.epochNano + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/date_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/date_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ef626d6d5958849e340841491cb31828ae3ba4b1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/date_test.go @@ -0,0 +1,78 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +const ( + DateYearMonthDayHourMinute = "2022-06-16T17:30:" + DateNanoSecondsTimeZone = ".451235Z" +) + +func TestDateAggregator(t *testing.T) { + tests := []struct { + name string + seconds []string + expectedMedian string + expectedMode string + }{ + { + name: "Single value", + seconds: []string{"17"}, + expectedMedian: "17", + expectedMode: "17", + }, + { + name: "Even number of values", + seconds: []string{"18", "18", "20", "25"}, + expectedMedian: "19", + expectedMode: "18", + }, + { + name: "Uneven number of values", + seconds: []string{"18", "18", "19", "20", "25"}, + expectedMedian: "19", + expectedMode: "18", + }, + } + names := []string{"AddTimestamp", "AddRow"} + for _, tt := range tests { + for _, name := range names { // test two ways of adding the value to the aggregator + t.Run(tt.name+" "+name, func(t *testing.T) { + agg := newDateAggregator() + for _, second := range tt.seconds { + fullDate := DateYearMonthDayHourMinute + second + DateNanoSecondsTimeZone + if name == names[0] { + err := agg.AddTimestamp(fullDate) + assert.Nil(t, err) + } else { + timeParsed, err := time.Parse(time.RFC3339, fullDate) + assert.Nil(t, err) + ts := newTimestamp(timeParsed.UnixNano()) + err = agg.addRow(ts, 1) + assert.Nil(t, err) + } + } + agg.buildPairsFromCounts() // needed to populate all required info + assert.Equal(t, DateYearMonthDayHourMinute+tt.expectedMedian+DateNanoSecondsTimeZone, agg.Median()) + if len(tt.expectedMode) > 0 { // if there is no value that appears more often than other values + assert.Equal(t, DateYearMonthDayHourMinute+tt.expectedMode+DateNanoSecondsTimeZone, agg.Mode()) + } + }) + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/filtered.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/filtered.go new file mode 100644 index 
0000000000000000000000000000000000000000..eb08ed85c35d17e107545bd130098f2133ff8278 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/filtered.go @@ -0,0 +1,473 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "context" + "fmt" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/docid" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/propertyspecific" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/traverser" + "github.com/weaviate/weaviate/usecases/traverser/hybrid" +) + +type filteredAggregator struct { + *Aggregator +} + +func newFilteredAggregator(agg *Aggregator) *filteredAggregator { + return &filteredAggregator{Aggregator: agg} +} + +func (fa *filteredAggregator) GetPropertyLengthTracker() *inverted.JsonShardMetaData { + return fa.propLenTracker +} + +func (fa *filteredAggregator) Do(ctx context.Context) (*aggregation.Result, error) { + if fa.params.Hybrid != nil { + return fa.hybrid(ctx) + } + + return fa.filtered(ctx) +} + +func (fa *filteredAggregator) hybrid(ctx context.Context) (*aggregation.Result, error) { + sparseSearch := func() ([]*storobj.Object, []float32, error) { + kw, err := fa.buildHybridKeywordRanking() + if err != nil { + return nil, nil, fmt.Errorf("build hybrid keyword ranking: %w", err) + } + + if 
fa.params.ObjectLimit == nil { + limit := int(fa.defaultLimit) + fa.params.ObjectLimit = &limit + } + + sparse, scores, err := fa.bm25Objects(ctx, kw) + if err != nil { + return nil, nil, fmt.Errorf("aggregate sparse search: %w", err) + } + + return sparse, scores, nil + } + + denseSearch := func(vec models.Vector) ([]*storobj.Object, []float32, error) { + allowList, err := fa.buildAllowList(ctx) + if err != nil { + return nil, nil, err + } + if allowList != nil { + defer allowList.Close() + } + + res, dists, err := fa.objectVectorSearch(ctx, vec, allowList) + if err != nil { + return nil, nil, fmt.Errorf("aggregate dense search: %w", err) + } + + return res, dists, nil + } + + res, err := hybrid.Search(ctx, &hybrid.Params{ + HybridSearch: fa.params.Hybrid, + Class: fa.params.ClassName.String(), + }, fa.logger, sparseSearch, denseSearch, nil, fa.modules, fa.getSchema, traverser.NewTargetParamHelper()) + if err != nil { + return nil, err + } + + ids := make([]uint64, len(res)) + for i, r := range res { + ids[i] = *r.DocID + } + + return fa.prepareResult(ctx, ids) +} + +func (fa *filteredAggregator) filtered(ctx context.Context) (*aggregation.Result, error) { + var foundIDs []uint64 + + allowList, err := fa.buildAllowList(ctx) + if err != nil { + return nil, err + } + if allowList != nil { + defer allowList.Close() + } + + isVectorEmpty, err := dto.IsVectorEmpty(fa.params.SearchVector) + if err != nil { + return nil, fmt.Errorf("aggregate filtered: %w", err) + } + + if !isVectorEmpty { + foundIDs, _, err = fa.vectorSearch(ctx, allowList, fa.params.SearchVector) + if err != nil { + return nil, err + } + } else { + foundIDs = allowList.Slice() + } + + return fa.prepareResult(ctx, foundIDs) +} + +func (fa *filteredAggregator) bm25Objects(ctx context.Context, kw *searchparams.KeywordRanking) ([]*storobj.Object, []float32, error) { + class := fa.getSchema.ReadOnlyClass(fa.params.ClassName.String()) + if class == nil { + return nil, nil, fmt.Errorf("bm25 objects: could not 
find class %s in schema", fa.params.ClassName) + } + cfg := inverted.ConfigFromModel(class.InvertedIndexConfig) + + kw.ChooseSearchableProperties(class) + + objs, scores, err := inverted.NewBM25Searcher(cfg.BM25, fa.store, fa.getSchema.ReadOnlyClass, + propertyspecific.Indices{}, fa.classSearcher, + fa.GetPropertyLengthTracker(), fa.logger, fa.shardVersion, + ).BM25F(ctx, nil, fa.params.ClassName, *fa.params.ObjectLimit, *kw, additional.Properties{}) + if err != nil { + return nil, nil, fmt.Errorf("bm25 objects: %w", err) + } + return objs, scores, nil +} + +func (fa *filteredAggregator) properties(ctx context.Context, + ids []uint64, +) (map[string]aggregation.Property, error) { + propAggs, err := fa.prepareAggregatorsForProps() + if err != nil { + return nil, errors.Wrap(err, "prepare aggregators for props") + } + + scan := func(properties *models.PropertySchema, docID uint64) (bool, error) { + if err := fa.AnalyzeObject(ctx, properties, propAggs); err != nil { + return false, errors.Wrapf(err, "analyze object %d", docID) + } + return true, nil + } + propertyNames := make([]string, 0, len(propAggs)) + for k := range propAggs { + propertyNames = append(propertyNames, k) + } + + err = docid.ScanObjectsLSM(fa.store, ids, scan, propertyNames) + if err != nil { + return nil, errors.Wrap(err, "properties view tx") + } + + return propAggs.results() +} + +func (fa *filteredAggregator) AnalyzeObject(ctx context.Context, + properties *models.PropertySchema, propAggs map[string]propAgg, +) error { + if err := ctx.Err(); err != nil { + return err + } + + if properties == nil { + return nil + } + + for propName, prop := range propAggs { + value, ok := (*properties).(map[string]interface{})[propName] + if !ok { + continue + } + + if err := fa.addPropValue(prop, value); err != nil { + return fmt.Errorf("failed to add prop value: %w", err) + } + } + + return nil +} + +func (fa *filteredAggregator) addPropValue(prop propAgg, value interface{}) error { + switch prop.aggType { + 
case aggregation.PropertyTypeBoolean: + analyzeBool := func(value interface{}) error { + asBool, ok := value.(bool) + if !ok { + return fmt.Errorf("expected property type boolean, received %T", value) + } + if err := prop.boolAgg.AddBool(asBool); err != nil { + return err + } + return nil + } + switch prop.dataType { + case schema.DataTypeBoolean: + if err := analyzeBool(value); err != nil { + return err + } + case schema.DataTypeBooleanArray: + valueStruct, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("expected property type []boolean, received %T", valueStruct) + } + for _, val := range valueStruct { + if err := analyzeBool(val); err != nil { + return err + } + } + default: + // report the aggregation type actually being processed, not PropertyTypeText + return fmt.Errorf("unknown datatype %v for aggregation %v", prop.dataType, aggregation.PropertyTypeBoolean) + } + case aggregation.PropertyTypeNumerical: + analyzeFloat := func(value interface{}) error { + asFloat, ok := value.(float64) + if !ok { + return fmt.Errorf("expected property type float64, received %T", value) + } + if err := prop.numericalAgg.AddFloat64(asFloat); err != nil { + return err + } + return nil + } + switch prop.dataType { + case schema.DataTypeNumber, schema.DataTypeInt: + if err := analyzeFloat(value); err != nil { + return err + } + case schema.DataTypeNumberArray, schema.DataTypeIntArray: + valueStruct, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("expected property type []float* or []int*, received %T", valueStruct) + } + for _, val := range valueStruct { + if err := analyzeFloat(val); err != nil { + return err + } + } + default: + // report the aggregation type actually being processed, not PropertyTypeText + return fmt.Errorf("unknown datatype %v for aggregation %v", prop.dataType, aggregation.PropertyTypeNumerical) + } + case aggregation.PropertyTypeText: + analyzeString := func(value interface{}) error { + asString, ok := value.(string) + if !ok { + return fmt.Errorf("expected property type string, received %T", value) + } + if err := prop.textAgg.AddText(asString); err != nil { + return err + } + return nil + } + 
switch prop.dataType { + case schema.DataTypeText: + if err := analyzeString(value); err != nil { + return err + } + case schema.DataTypeTextArray: + valueStruct, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("expected property type []text or []string, received %T", valueStruct) + } + for _, val := range valueStruct { + if err := analyzeString(val); err != nil { + return err + } + } + default: + return fmt.Errorf("unknown datatype %v for aggregation %v", prop.dataType, aggregation.PropertyTypeText) + } + case aggregation.PropertyTypeDate: + analyzeDate := func(value interface{}) error { + asString, ok := value.(string) + if !ok { + return fmt.Errorf("expected property type date, received %T", value) + } + if err := prop.dateAgg.AddTimestamp(asString); err != nil { + return err + } + return nil + } + switch prop.dataType { + case schema.DataTypeDate: + if err := analyzeDate(value); err != nil { + return err + } + case schema.DataTypeDateArray: + valueStruct, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("expected property type []date, received %T", valueStruct) + } + for _, val := range valueStruct { + if err := analyzeDate(val); err != nil { + return err + } + } + default: + // report the aggregation type actually being processed, not PropertyTypeText + return fmt.Errorf("unknown datatype %v for aggregation %v", prop.dataType, aggregation.PropertyTypeDate) + } + case aggregation.PropertyTypeReference: + if prop.dataType != schema.DataTypeCRef { + return errors.New(string("unknown datatype for aggregation type reference: " + prop.dataType)) + } + + analyzeRef := func(value interface{}) error { + referenceList, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("expected property type reference, received %T", value) + } + if len(referenceList) != 1 { + // %d: len() returns an int; %T would always print "int" instead of the length + return fmt.Errorf("expected list with length 1, got %d", len(referenceList)) + } + refMap, ok := referenceList[0].(map[string]interface{}) + if !ok { + return fmt.Errorf("expected property type reference, received %T", value) + } + + if err := 
prop.refAgg.AddReference(refMap); err != nil { + return err + } + return nil + } + if err := analyzeRef(value); err != nil { + return err + } + default: + return errors.New(string("Unknown aggregation type " + prop.aggType)) + + } + + return nil +} + +func (fa *filteredAggregator) prepareResult(ctx context.Context, foundIDs []uint64) (*aggregation.Result, error) { + var out aggregation.Result + // without grouping there is always exactly one group + out.Groups = make([]aggregation.Group, 1) + + if fa.params.IncludeMetaCount { + out.Groups[0].Count = len(foundIDs) + } + + props, err := fa.properties(ctx, foundIDs) + if err != nil { + return nil, errors.Wrap(err, "aggregate properties") + } + + out.Groups[0].Properties = props + return &out, nil +} + +// a helper type to select the right aggregator for a prop +type propAgg struct { + name schema.PropertyName + + // the user is interested in those specific aggregations + specifiedAggregators []aggregation.Aggregator + + // underlying data type of prop + dataType schema.DataType + + // use aggType to chose with agg to use + aggType aggregation.PropertyType + + boolAgg *boolAggregator + textAgg *textAggregator + numericalAgg *numericalAggregator + dateAgg *dateAggregator + refAgg *refAggregator +} + +// propAggs groups propAgg helpers by prop name +type propAggs map[string]propAgg + +func (pa *propAgg) initAggregator() { + switch pa.aggType { + case aggregation.PropertyTypeText: + limit := extractLimitFromTopOccs(pa.specifiedAggregators) + pa.textAgg = newTextAggregator(limit) + case aggregation.PropertyTypeBoolean: + pa.boolAgg = newBoolAggregator() + case aggregation.PropertyTypeNumerical: + pa.numericalAgg = newNumericalAggregator() + case aggregation.PropertyTypeDate: + pa.dateAgg = newDateAggregator() + case aggregation.PropertyTypeReference: + pa.refAgg = newRefAggregator() + default: + panic("Unknown aggregation type: " + pa.aggType) + } +} + +func (pa propAggs) results() (map[string]aggregation.Property, error) 
{ + out := map[string]aggregation.Property{} + + for _, prop := range pa { + aggProp := aggregation.Property{ + Type: prop.aggType, + } + + switch prop.aggType { + case aggregation.PropertyTypeBoolean: + aggProp.BooleanAggregation = prop.boolAgg.Res() + out[prop.name.String()] = aggProp + case aggregation.PropertyTypeText: + aggProp.TextAggregation = prop.textAgg.Res() + out[prop.name.String()] = aggProp + case aggregation.PropertyTypeNumerical: + addNumericalAggregations(&aggProp, prop.specifiedAggregators, + prop.numericalAgg) + out[prop.name.String()] = aggProp + case aggregation.PropertyTypeDate: + addDateAggregations(&aggProp, prop.specifiedAggregators, + prop.dateAgg) + out[prop.name.String()] = aggProp + case aggregation.PropertyTypeReference: + addReferenceAggregations(&aggProp, prop.specifiedAggregators, + prop.refAgg) + out[prop.name.String()] = aggProp + default: + return nil, errors.New(string("unknown aggregation type " + prop.aggType)) + } + } + + return out, nil +} + +func (fa *filteredAggregator) prepareAggregatorsForProps() (propAggs, error) { + out := propAggs{} + + for _, prop := range fa.params.Properties { + pa := propAgg{ + name: prop.Name, + specifiedAggregators: prop.Aggregators, + } + + at, dt, err := fa.aggTypeOfProperty(prop.Name) + if err != nil { + return nil, errors.Wrapf(err, "property %s", prop.Name) + } + + pa.aggType = at + pa.dataType = dt + pa.initAggregator() + out[prop.Name.String()] = pa + } + + return out, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/grouped.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/grouped.go new file mode 100644 index 0000000000000000000000000000000000000000..5dbcc0be2ed531edf3f3a2bea4518b67ad532de6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/grouped.go @@ -0,0 +1,83 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | 
|| __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "context" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/aggregation" +) + +// groupedAggregator performs aggregation in groups. This is a two-step +// process. First a whole-db scan is performed to identify the groups, then +// the top-n groups are selected (the rest is discarded). Only for those top +// groups an actual aggregation is performed +type groupedAggregator struct { + *Aggregator +} + +func newGroupedAggregator(agg *Aggregator) *groupedAggregator { + return &groupedAggregator{Aggregator: agg} +} + +func (ga *groupedAggregator) Do(ctx context.Context) (*aggregation.Result, error) { + out := aggregation.Result{} + + groups, err := ga.identifyGroups(ctx) + if err != nil { + return nil, errors.Wrap(err, "identify groups") + } + + out.Groups = make([]aggregation.Group, len(groups)) + for i, g := range groups { + res, err := ga.aggregateGroup(ctx, g.res, g.docIDs) + if err != nil { + return nil, errors.Wrapf(err, "aggregate group %d (%v)", i, + g.res.GroupedBy.Value) + } + out.Groups[i] = res + } + + return &out, nil +} + +// group is a helper construct that contains the final aggregation.Group which +// will eventually be served to the user. 
But it also contains the list of +// docIDs in that group, so we can use those to perform the actual aggregation +// (for each group) in a second step +type group struct { + res aggregation.Group + docIDs []uint64 +} + +func (ga *groupedAggregator) identifyGroups(ctx context.Context) ([]group, error) { + limit := 100 // reasonable default in case we get none + if ga.params.Limit != nil { + limit = *ga.params.Limit + } + return newGrouper(ga.Aggregator, limit).Do(ctx) +} + +func (ga *groupedAggregator) aggregateGroup(ctx context.Context, + in aggregation.Group, ids []uint64, +) (aggregation.Group, error) { + out := in + fa := newFilteredAggregator(ga.Aggregator) + props, err := fa.properties(ctx, ids) + if err != nil { + return out, errors.Wrap(err, "aggregate properties") + } + + out.Properties = props + return out, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/grouper.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/grouper.go new file mode 100644 index 0000000000000000000000000000000000000000..845f01f60552f2cc58308d5d3d74a5de5b8ed7f1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/grouper.go @@ -0,0 +1,310 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/docid" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/modules" + "github.com/weaviate/weaviate/usecases/traverser" + "github.com/weaviate/weaviate/usecases/traverser/hybrid" +) + +// grouper is the component which identifies the top-n groups for a specific +// group-by parameter. It is used as part of the grouped aggregator, which then +// additionally performs an aggregation for each group. +type grouper struct { + *Aggregator + values map[interface{}]map[uint64]struct{} // map[value][docID]struct, to keep docIds unique + topGroups []group + limit int +} + +func newGrouper(a *Aggregator, limit int) *grouper { + return &grouper{ + Aggregator: a, + values: map[interface{}]map[uint64]struct{}{}, + limit: limit, + } +} + +func (g *grouper) Do(ctx context.Context) ([]group, error) { + if len(g.params.GroupBy.Slice()) > 1 { + return nil, fmt.Errorf("grouping by cross-refs not supported") + } + isVectorEmpty, err := dto.IsVectorEmpty(g.params.SearchVector) + if err != nil { + return nil, fmt.Errorf("grouper: %w", err) + } + + if g.params.Filters == nil && isVectorEmpty && g.params.Hybrid == nil { + return g.groupAll(ctx) + } else { + return g.groupFiltered(ctx) + } +} + +func (g *grouper) groupAll(ctx context.Context) ([]group, error) { + err := ScanAllLSM(ctx, g.store, func(prop *models.PropertySchema, docID uint64) (bool, error) { + return true, g.addElementById(prop, docID) + }, &storobj.PropertyExtraction{ + PropertyPaths: 
[][]string{{g.params.GroupBy.Property.String()}}, + }) + if err != nil { + return nil, errors.Wrap(err, "group all (unfiltered)") + } + + return g.aggregateAndSelect() +} + +func (g *grouper) groupFiltered(ctx context.Context) ([]group, error) { + ids, err := g.fetchDocIDs(ctx) + if err != nil { + return nil, err + } + + if err := docid.ScanObjectsLSM(g.store, ids, + func(prop *models.PropertySchema, docID uint64) (bool, error) { + return true, g.addElementById(prop, docID) + }, []string{g.params.GroupBy.Property.String()}); err != nil { + return nil, err + } + + return g.aggregateAndSelect() +} + +func (g *grouper) fetchDocIDs(ctx context.Context) (ids []uint64, err error) { + allowList, err := g.buildAllowList(ctx) + if err != nil { + return nil, err + } + if allowList != nil { + defer allowList.Close() + } + + isVectorEmpty, err := dto.IsVectorEmpty(g.params.SearchVector) + if err != nil { + return nil, fmt.Errorf("grouper: fetch doc ids: %w", err) + } + + if !isVectorEmpty { + ids, _, err = g.vectorSearch(ctx, allowList, g.params.SearchVector) + if err != nil { + return nil, fmt.Errorf("failed to perform vector search: %w", err) + } + } else if g.params.Hybrid != nil { + ids, err = g.hybrid(ctx, allowList, g.modules) + if err != nil { + return nil, fmt.Errorf("hybrid search: %w", err) + } + } else { + ids = allowList.Slice() + } + + return +} + +func (g *grouper) hybrid(ctx context.Context, allowList helpers.AllowList, modules *modules.Provider) ([]uint64, error) { + sparseSearch := func() ([]*storobj.Object, []float32, error) { + kw, err := g.buildHybridKeywordRanking() + if err != nil { + return nil, nil, fmt.Errorf("build hybrid keyword ranking: %w", err) + } + + if g.params.ObjectLimit == nil { + limit := int(g.defaultLimit) + g.params.ObjectLimit = &limit + } + + sparse, dists, err := g.bm25Objects(ctx, kw) + if err != nil { + return nil, nil, fmt.Errorf("aggregate sparse search: %w", err) + } + + return sparse, dists, nil + } + + denseSearch := func(vec 
models.Vector) ([]*storobj.Object, []float32, error) { + res, dists, err := g.objectVectorSearch(ctx, vec, allowList) + if err != nil { + return nil, nil, fmt.Errorf("aggregate grouped dense search: %w", err) + } + + return res, dists, nil + } + + res, err := hybrid.Search(ctx, &hybrid.Params{ + HybridSearch: g.params.Hybrid, + Keyword: nil, + Class: g.params.ClassName.String(), + }, g.logger, sparseSearch, denseSearch, nil, modules, g.getSchema, traverser.NewTargetParamHelper()) + if err != nil { + return nil, err + } + + ids := make([]uint64, len(res)) + for i, r := range res { + ids[i] = *r.DocID + } + + return ids, nil +} + +func (g *grouper) addElementById(s *models.PropertySchema, docID uint64) error { + if s == nil { + return nil + } + + item, ok := (*s).(map[string]interface{})[g.params.GroupBy.Property.String()] + if !ok { + return nil + } + + switch val := item.(type) { + case []string: + for i := range val { + g.addItem(val[i], docID) + } + case []float64: + for i := range val { + g.addItem(val[i], docID) + } + case []bool: + for i := range val { + g.addItem(val[i], docID) + } + case []interface{}: + for i := range val { + g.addItem(val[i], docID) + } + case models.MultipleRef: + for i := range val { + g.addItem(val[i].Beacon, docID) + } + default: + g.addItem(val, docID) + } + + return nil +} + +func (g *grouper) addItem(item interface{}, docID uint64) { + idsMap, ok := g.values[item] + if !ok { + idsMap = map[uint64]struct{}{} + } + idsMap[docID] = struct{}{} + g.values[item] = idsMap +} + +func (g *grouper) aggregateAndSelect() ([]group, error) { + for value, idsMap := range g.values { + count := len(idsMap) + ids := make([]uint64, count) + + i := 0 + for id := range idsMap { + ids[i] = id + i++ + } + + g.insertOrdered(group{ + res: aggregation.Group{ + GroupedBy: &aggregation.GroupedBy{ + Path: g.params.GroupBy.Slice(), + Value: value, + }, + Count: count, + }, + docIDs: ids, + }) + } + + return g.topGroups, nil +} + +func (g *grouper) 
insertOrdered(elem group) { + if len(g.topGroups) == 0 { + g.topGroups = []group{elem} + return + } + + added := false + for i, existing := range g.topGroups { + if existing.res.Count > elem.res.Count { + continue + } + + // we have found the first one that's smaller so we must insert before i + g.topGroups = append( + g.topGroups[:i], append( + []group{elem}, + g.topGroups[i:]..., + )..., + ) + + added = true + break + } + + if len(g.topGroups) > g.limit { + g.topGroups = g.topGroups[:len(g.topGroups)-1] + } + + if !added && len(g.topGroups) < g.limit { + g.topGroups = append(g.topGroups, elem) + } +} + +// ScanAllLSM iterates over every row in the object buckets. +// Caller can specify which properties it is interested in to make the scanning more performant, or pass nil to +// decode everything. +func ScanAllLSM(ctx context.Context, store *lsmkv.Store, scan docid.ObjectScanFn, properties *storobj.PropertyExtraction) error { + b := store.Bucket(helpers.ObjectsBucketLSM) + if b == nil { + return fmt.Errorf("objects bucket not found") + } + + c := b.Cursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + elem, err := storobj.FromBinaryOptional(v, additional.Properties{}, properties) + if err != nil { + return errors.Wrapf(err, "unmarshal data object") + } + + // scanAll has no abort, so we can ignore the first arg + properties := elem.Properties() + _, err = scan(&properties, elem.DocID) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/hybrid.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/hybrid.go new file mode 100644 index 0000000000000000000000000000000000000000..302b4a081c3cad18943bfe1477dd2afe9fdf7237 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/hybrid.go @@ -0,0 +1,65 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ 
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "context" + "fmt" + + "github.com/weaviate/weaviate/entities/additional" + + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/propertyspecific" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/storobj" +) + +func (a *Aggregator) buildHybridKeywordRanking() (*searchparams.KeywordRanking, error) { + kw := &searchparams.KeywordRanking{ + Type: "bm25", + Query: a.params.Hybrid.Query, + MinimumOrTokensMatch: a.params.Hybrid.MinimumOrTokensMatch, + SearchOperator: a.params.Hybrid.SearchOperator, + } + + cl := a.getSchema.ReadOnlyClass(a.params.ClassName.String()) + if cl == nil { + return nil, fmt.Errorf("could not find class %s in schema", a.params.ClassName) + } + + for _, v := range cl.Properties { + if v.DataType[0] == "text" || v.DataType[0] == "string" { // TODO: Also the array types? 
+ kw.Properties = append(kw.Properties, v.Name) + } + } + + return kw, nil +} + +func (a *Aggregator) bm25Objects(ctx context.Context, kw *searchparams.KeywordRanking) ([]*storobj.Object, []float32, error) { + class := a.getSchema.ReadOnlyClass(a.params.ClassName.String()) + if class == nil { + return nil, nil, fmt.Errorf("bm25 objects: could not find class %s in schema", a.params.ClassName) + } + cfg := inverted.ConfigFromModel(class.InvertedIndexConfig) + + kw.ChooseSearchableProperties(class) + + objs, dists, err := inverted.NewBM25Searcher(cfg.BM25, a.store, a.getSchema.ReadOnlyClass, + propertyspecific.Indices{}, a.classSearcher, + a.GetPropertyLengthTracker(), a.logger, a.shardVersion, + ).BM25F(ctx, nil, a.params.ClassName, *a.params.ObjectLimit, *kw, additional.Properties{}) + if err != nil { + return nil, nil, fmt.Errorf("bm25 objects: %w", err) + } + return objs, dists, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/numerical.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/numerical.go new file mode 100644 index 0000000000000000000000000000000000000000..7d1ca8d8b2125fdbf31e48359629c415fc93d4b0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/numerical.go @@ -0,0 +1,226 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "math" + "sort" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/inverted" +) + +func addNumericalAggregations(prop *aggregation.Property, + aggs []aggregation.Aggregator, agg *numericalAggregator, +) { + if prop.NumericalAggregations == nil { + prop.NumericalAggregations = map[string]interface{}{} + } + agg.buildPairsFromCounts() + + // if there are no elements to aggregate over because a filter does not match anything, calculating mean etc. makes + // no sense. Non-existent entries evaluate to nil with an interface{} map + if agg.count == 0 { + for _, entry := range aggs { + if entry == aggregation.CountAggregator { + prop.NumericalAggregations["count"] = float64(agg.count) + break + } + } + return + } + + // when combining the results from different shards, we need the raw numbers to recompute the mode, mean and median. + // Therefore we add a reference later which needs to be cleared out before returning the results to a user +loop: + for _, aProp := range aggs { + switch aProp { + case aggregation.ModeAggregator, aggregation.MedianAggregator, aggregation.MeanAggregator: + prop.NumericalAggregations["_numericalAggregator"] = agg + break loop + } + } + + for _, aProp := range aggs { + switch aProp { + case aggregation.MeanAggregator: + prop.NumericalAggregations[aProp.String()] = agg.Mean() + case aggregation.MinimumAggregator: + prop.NumericalAggregations[aProp.String()] = agg.Min() + case aggregation.MaximumAggregator: + prop.NumericalAggregations[aProp.String()] = agg.Max() + case aggregation.MedianAggregator: + prop.NumericalAggregations[aProp.String()] = agg.Median() + case aggregation.ModeAggregator: + prop.NumericalAggregations[aProp.String()] = agg.Mode() + case aggregation.SumAggregator: + prop.NumericalAggregations[aProp.String()] = agg.Sum() + case aggregation.CountAggregator: + 
prop.NumericalAggregations[aProp.String()] = agg.Count() + default: + continue + } + } +} + +func newNumericalAggregator() *numericalAggregator { + return &numericalAggregator{ + min: math.MaxFloat64, + max: -math.MaxFloat64, + valueCounter: map[float64]uint64{}, + pairs: make([]floatCountPair, 0), + } +} + +type numericalAggregator struct { + count uint64 + min float64 + max float64 + sum float64 + maxCount uint64 + mode float64 + pairs []floatCountPair // for row-based median calculation + valueCounter map[float64]uint64 // for individual median calculation +} + +type floatCountPair struct { + value float64 + count uint64 +} + +func (a *numericalAggregator) AddFloat64(value float64) error { + return a.AddNumberRow(value, 1) +} + +// turns the value counter into a sorted list, as well as identifying the mode. Must be called before calling median etc +func (a *numericalAggregator) buildPairsFromCounts() { + a.pairs = a.pairs[:0] // clear out old values in case this function called more than once + a.pairs = append(a.pairs, make([]floatCountPair, 0, len(a.valueCounter))...) 
+ + for value, count := range a.valueCounter { + // get one with higher count or lower value if counts are equal + if count > a.maxCount || (count == a.maxCount && value < a.mode) { + a.maxCount = count + a.mode = value + } + a.pairs = append(a.pairs, floatCountPair{value: value, count: count}) + } + + sort.Slice(a.pairs, func(x, y int) bool { + return a.pairs[x].value < a.pairs[y].value + }) +} + +func (a *numericalAggregator) AddFloat64Row(number []byte, + count uint64, +) error { + numberParsed, err := inverted.ParseLexicographicallySortableFloat64(number) + if err != nil { + return errors.Wrap(err, "read float64") + } + + return a.AddNumberRow(numberParsed, count) +} + +func (a *numericalAggregator) AddInt64Row(number []byte, count uint64) error { + numberParsed, err := inverted.ParseLexicographicallySortableInt64(number) + if err != nil { + return errors.Wrap(err, "read int64") + } + + return a.AddNumberRow(float64(numberParsed), count) +} + +func (a *numericalAggregator) AddNumberRow(number float64, count uint64) error { + if count == 0 { + // skip + return nil + } + + a.count += count + a.sum += number * float64(count) + if number < a.min { + a.min = number + } + if number > a.max { + a.max = number + } + + currentCount := a.valueCounter[number] + currentCount += count + a.valueCounter[number] = currentCount + + return nil +} + +func (a *numericalAggregator) Mean() float64 { + if a.count == 0 { + return 0 + } + return a.sum / float64(a.count) +} + +func (a *numericalAggregator) Max() float64 { + return a.max +} + +func (a *numericalAggregator) Min() float64 { + return a.min +} + +func (a *numericalAggregator) Sum() float64 { + return a.sum +} + +func (a *numericalAggregator) Count() float64 { + return float64(a.count) +} + +// Mode does not require preparation if build from rows, but requires a call of +// buildPairsFromCounts() if it was built using individual objects +func (a *numericalAggregator) Mode() float64 { + return a.mode +} + +// Median does not 
require preparation if build from rows, but requires a call of +// buildPairsFromCounts() if it was built using individual objects. The call will panic +// if called without adding at least one element or without calling buildPairsFromCounts() +// +// since the pairs are read from an inverted index, which is in turn +// lexicographically sorted, we know that our pairs must also be sorted +// +// There are two cases: +// a) There is an uneven number of elements, then the median element is at index N/2 +// b) There is an even number of elements, then the median element is (elem_(N/2) + elem_(N/2+1))/2. +// +// with two sub-cases: +// b1) element N/2 and N/2 + 1 are within the same pair, then the median is the value of this pair +// b2) element N/2 and N/2 are part of different pairs, then the average of these pairs is the median and the +// median value is not part of the collection itself +func (a *numericalAggregator) Median() float64 { + middleIndex := a.count / 2 + count := uint64(0) + for index, pair := range a.pairs { + count += pair.count + if a.count%2 == 1 && count > middleIndex { + return pair.value // case a) + } else if a.count%2 == 0 { + if count == middleIndex { + return (pair.value + a.pairs[index+1].value) / 2 // case b2) + } else if count > middleIndex { + return pair.value // case b1) + } + } + } + panic("Couldn't determine median. This should never happen. 
Did you add values and call buildRows before?") +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/numerical_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/numerical_test.go new file mode 100644 index 0000000000000000000000000000000000000000..397fe8ba70d4c8fa31e90ea8b3ecc6c8b27404a3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/numerical_test.go @@ -0,0 +1,204 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNumericalAggregator_MedianCalculation(t *testing.T) { + tests := []struct { + name string + numbers []float64 + expectedMedian float64 + }{ + { + name: "Uneven number of elements", + numbers: []float64{1, 2, 3, 4, 5, 6, 7}, + expectedMedian: 4, + }, + { + name: "Uneven number of elements with double elements", + numbers: []float64{2, 4, 4, 4, 5, 5, 7}, + expectedMedian: 4, + }, + { + name: "Even number of elements", + numbers: []float64{1, 2, 3, 5, 7, 7}, + expectedMedian: 4, + }, + { + name: "Even number of elements with double elements, median is within double entries", + numbers: []float64{1, 2, 3, 3, 6, 7}, + expectedMedian: 3, + }, + { + name: "Even number of elements with double elements, median is after double entries", + numbers: []float64{3, 3, 3, 5, 5, 7}, + expectedMedian: 4, + }, + { + name: "Single value", + numbers: []float64{42}, + expectedMedian: 42, + }, + } + for _, tt := range tests { + for i := 0; i < 2; i++ { // test two ways of adding the value to the aggregator + t.Run(tt.name, func(t *testing.T) { + agg := newNumericalAggregator() + for _, num := range tt.numbers { + if i == 0 { + agg.AddFloat64(num) + } 
else { + agg.AddNumberRow(num, 1) + } + } + agg.buildPairsFromCounts() // needed to populate all required info + assert.Equal(t, tt.expectedMedian, agg.Median()) + }) + } + } +} + +func TestNumericalAggregator_ModeCalculation(t *testing.T) { + tests := []struct { + name string + numbers []float64 + expectedMode float64 + }{ + { + name: "Different elements (asc)", + numbers: []float64{1, 2, 3, 4, 5, 6, 7}, + expectedMode: 1, + }, + { + name: "Different elements (desc)", + numbers: []float64{7, 6, 5, 4, 3, 2, 1}, + expectedMode: 1, + }, + { + name: "Elements with different number of duplicates", + numbers: []float64{2, 4, 4, 5, 5, 5, 7}, + expectedMode: 5, + }, + { + name: "Elements with same number of duplicates (asc)", + numbers: []float64{1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}, + expectedMode: 1, + }, + { + name: "Elements with same number of duplicates (desc)", + numbers: []float64{7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1}, + expectedMode: 1, + }, + { + name: "Elements with same number of duplicates (mixed oreder)", + numbers: []float64{5, 2, 6, 2, 1, 4, 7, 5, 7, 4, 3, 1, 6, 3}, + expectedMode: 1, + }, + { + name: "Single element", + numbers: []float64{42}, + expectedMode: 42, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + agg := newNumericalAggregator() + for _, num := range tt.numbers { + agg.AddFloat64(num) + } + agg.buildPairsFromCounts() // needed to populate all required info + + assert.Equal(t, tt.expectedMode, agg.Mode()) + }) + + t.Run(tt.name, func(t *testing.T) { + agg := newNumericalAggregator() + for _, num := range tt.numbers { + agg.AddNumberRow(num, 1) + } + agg.buildPairsFromCounts() // needed to populate all required info + + assert.Equal(t, tt.expectedMode, agg.Mode()) + }) + } +} + +func TestNumericalAggregator_MinMaxCalculation(t *testing.T) { + tests := []struct { + name string + numbers []float64 + expectedMin float64 + expectedMax float64 + }{ + { + name: "Different positive elements (asc)", + numbers: 
[]float64{0, 1, 2, 3, 4, 5, 6, 7}, + expectedMin: 0, + expectedMax: 7, + }, + { + name: "Different positive elements (desc)", + numbers: []float64{7, 6, 5, 4, 3, 2, 1, 0}, + expectedMin: 0, + expectedMax: 7, + }, + { + name: "Different negative elements (desc)", + numbers: []float64{-1, -2, -3, -4, -5, -6, -7}, + expectedMin: -7, + expectedMax: -1, + }, + { + name: "Different negative elements (asc)", + numbers: []float64{-7, -6, -5, -4, -3, -2, -1}, + expectedMin: -7, + expectedMax: -1, + }, + { + name: "Different elements (mixed order)", + numbers: []float64{-7, 6, -5, 4, -3, 2, -1, 0, 1, -2, 3, -4, 5, -6, 7}, + expectedMin: -7, + expectedMax: 7, + }, + { + name: "Single element", + numbers: []float64{-42}, + expectedMin: -42, + expectedMax: -42, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + agg := newNumericalAggregator() + for _, num := range tt.numbers { + agg.AddFloat64(num) + } + assert.Equal(t, tt.expectedMin, agg.Min()) + assert.Equal(t, tt.expectedMax, agg.Max()) + }) + + t.Run(tt.name, func(t *testing.T) { + agg := newNumericalAggregator() + for _, num := range tt.numbers { + agg.AddNumberRow(num, 1) + } + assert.Equal(t, tt.expectedMin, agg.Min()) + assert.Equal(t, tt.expectedMax, agg.Max()) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/references.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/references.go new file mode 100644 index 0000000000000000000000000000000000000000..abdfc01299133946f155212644e351867413ed85 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/references.go @@ -0,0 +1,64 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "errors" + + "github.com/weaviate/weaviate/entities/aggregation" +) + +func addReferenceAggregations(prop *aggregation.Property, + aggs []aggregation.Aggregator, agg *refAggregator, +) { + prop.ReferenceAggregation = aggregation.Reference{} + prop.ReferenceAggregation.PointingTo = agg.PointingTo() + + for _, aProp := range aggs { + switch aProp { + case aggregation.PointingToAggregator: + prop.ReferenceAggregation.PointingTo = agg.PointingTo() + default: + continue + } + } +} + +func newRefAggregator() *refAggregator { + return &refAggregator{valueCounter: map[string]uint64{}} +} + +type refAggregator struct { + count uint64 + valueCounter map[string]uint64 +} + +func (a *refAggregator) AddReference(ref map[string]interface{}) error { + a.count++ + + beacon, ok := ref["beacon"].(string) + if !ok { + return errors.New("not a reference" + beacon) + } + count := a.valueCounter[beacon] + count++ + a.valueCounter[beacon] = count + return nil +} + +func (a *refAggregator) PointingTo() []string { + keys := make([]string, 0, len(a.valueCounter)) + for pointingTo := range a.valueCounter { + keys = append(keys, pointingTo) + } + return keys +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/shard_combiner.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/shard_combiner.go new file mode 100644 index 0000000000000000000000000000000000000000..8bfb666925b00c29d9c5c3d3a467e493657cb12d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/shard_combiner.go @@ -0,0 +1,341 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "sort" + "time" + + "github.com/weaviate/weaviate/entities/aggregation" +) + +type ShardCombiner struct{} + +func NewShardCombiner() *ShardCombiner { + return &ShardCombiner{} +} + +func (sc *ShardCombiner) Do(results []*aggregation.Result) *aggregation.Result { + allResultsAreNil := true + firstNonNilRes := 0 + for i, res := range results { + if res == nil || len(res.Groups) < 1 { + continue + } + allResultsAreNil = false + firstNonNilRes = i + } + + if allResultsAreNil { + return &aggregation.Result{} + } + + if results[firstNonNilRes].Groups[0].GroupedBy == nil { + return sc.combineUngrouped(results) + } + + return sc.combineGrouped(results) +} + +func (sc *ShardCombiner) combineUngrouped(results []*aggregation.Result) *aggregation.Result { + combined := aggregation.Result{ + Groups: make([]aggregation.Group, 1), + } + + for _, shard := range results { + if len(shard.Groups) == 0 { // not every shard has results + continue + } + sc.mergeIntoCombinedGroupAtPos(combined.Groups, 0, shard.Groups[0]) + } + + sc.finalizeGroup(&combined.Groups[0]) + return &combined +} + +func (sc *ShardCombiner) combineGrouped(results []*aggregation.Result) *aggregation.Result { + combined := aggregation.Result{} + + for _, shard := range results { + for _, shardGroup := range shard.Groups { + pos := getPosOfGroup(combined.Groups, shardGroup.GroupedBy.Value) + if pos < 0 { + combined.Groups = append(combined.Groups, shardGroup) + } else { + sc.mergeIntoCombinedGroupAtPos(combined.Groups, pos, shardGroup) + } + } + } + + for i := range combined.Groups { + sc.finalizeGroup(&combined.Groups[i]) + } + + sort.Slice(combined.Groups, func(a, b int) bool { + return combined.Groups[a].Count > combined.Groups[b].Count + }) + return &combined +} + +func (sc *ShardCombiner) mergeIntoCombinedGroupAtPos(combinedGroups []aggregation.Group, + pos int, shardGroup aggregation.Group, +) { + combinedGroups[pos].Count += 
shardGroup.Count + + for propName, prop := range shardGroup.Properties { + if combinedGroups[pos].Properties == nil { + combinedGroups[pos].Properties = map[string]aggregation.Property{} + } + + combinedProp := combinedGroups[pos].Properties[propName] + + combinedProp.Type = prop.Type + + switch prop.Type { + case aggregation.PropertyTypeNumerical: + if combinedProp.NumericalAggregations == nil { + combinedProp.NumericalAggregations = map[string]interface{}{} + } + sc.mergeNumericalProp( + combinedProp.NumericalAggregations, prop.NumericalAggregations) + case aggregation.PropertyTypeDate: + if combinedProp.DateAggregations == nil { + combinedProp.DateAggregations = map[string]interface{}{} + } + sc.mergeDateProp( + combinedProp.DateAggregations, prop.DateAggregations) + case aggregation.PropertyTypeBoolean: + sc.mergeBooleanProp( + &combinedProp.BooleanAggregation, &prop.BooleanAggregation) + case aggregation.PropertyTypeText: + sc.mergeTextProp( + &combinedProp.TextAggregation, &prop.TextAggregation) + case aggregation.PropertyTypeReference: + sc.mergeRefProp( + &combinedProp.ReferenceAggregation, &prop.ReferenceAggregation) + default: + panic("unknown prop type: " + prop.Type) + } + combinedGroups[pos].Properties[propName] = combinedProp + + } +} + +func (sc *ShardCombiner) mergeDateProp(first, second map[string]interface{}) { + if len(second) == 0 { + return + } + + // add all values from the second map to the first one. 
This is needed to compute median and mode correctly + for propType := range second { + switch propType { + case "_dateAggregator": + dateAggSource := second[propType].(*dateAggregator) + if dateAggCombined, ok := first[propType]; ok { + dateAggCombinedTyped := dateAggCombined.(*dateAggregator) + for _, pair := range dateAggSource.pairs { + for i := uint64(0); i < pair.count; i++ { + dateAggCombinedTyped.AddTimestamp(pair.value.rfc3339) + } + } + dateAggCombinedTyped.buildPairsFromCounts() + first[propType] = dateAggCombinedTyped + + } else { + first[propType] = second[propType] + } + } + } + + for propType, value := range second { + switch propType { + case "count": + if val, ok := first[propType]; ok { + first[propType] = val.(int64) + value.(int64) + } else { + first[propType] = value + } + case "mode": + dateAggCombined := first["_dateAggregator"].(*dateAggregator) + first[propType] = dateAggCombined.Mode() + case "median": + dateAggCombined := first["_dateAggregator"].(*dateAggregator) + first[propType] = dateAggCombined.Median() + case "minimum": + val, ok := first["minimum"] + if !ok { + first["minimum"] = value + } else { + source1Time, _ := time.Parse(time.RFC3339, val.(string)) + source2Time, _ := time.Parse(time.RFC3339, value.(string)) + if source2Time.Before(source1Time) { + first["minimum"] = value + } + } + case "maximum": + val, ok := first["maximum"] + if !ok { + first["maximum"] = value + } else { + source1Time, _ := time.Parse(time.RFC3339, val.(string)) + source2Time, _ := time.Parse(time.RFC3339, value.(string)) + if source2Time.After(source1Time) { + first["maximum"] = value + } + } + case "_dateAggregator": + continue + default: + panic("unknown map entry: " + propType) + } + } +} + +func (sc *ShardCombiner) mergeNumericalProp(first, second map[string]interface{}) { + if len(second) == 0 { + return + } + + // add all values from the second map to the first one. 
This is needed to compute median, mean and mode correctly + for propType := range second { + switch propType { + case "_numericalAggregator": + numAggSecondTyped := second[propType].(*numericalAggregator) + if numAggFirst, ok := first[propType]; ok { + numAggFirstTyped := numAggFirst.(*numericalAggregator) + for _, pair := range numAggSecondTyped.pairs { + for i := uint64(0); i < pair.count; i++ { + numAggFirstTyped.AddFloat64(pair.value) + } + } + numAggFirstTyped.buildPairsFromCounts() + first[propType] = numAggFirstTyped + } else { + first[propType] = second[propType] + } + } + } + + for propType, value := range second { + switch propType { + case "count", "sum": + if val, ok := first[propType]; ok { + first[propType] = val.(float64) + value.(float64) + } else { + first[propType] = value + } + case "mode": + numAggFirst := first["_numericalAggregator"].(*numericalAggregator) + first[propType] = numAggFirst.Mode() + case "mean": + numAggFirst := first["_numericalAggregator"].(*numericalAggregator) + first[propType] = numAggFirst.Mean() + case "median": + numAggFirst := first["_numericalAggregator"].(*numericalAggregator) + first[propType] = numAggFirst.Median() + case "minimum": + if _, ok := first["minimum"]; !ok || value.(float64) < first["minimum"].(float64) { + first["minimum"] = value + } + case "maximum": + if _, ok := first["maximum"]; !ok || value.(float64) > first["maximum"].(float64) { + first["maximum"] = value + } + case "_numericalAggregator": + continue + default: + panic("unknown map entry: " + propType) + } + } +} + +func (sc *ShardCombiner) finalizeDateProp(combined map[string]interface{}) { + delete(combined, "_dateAggregator") +} + +func (sc *ShardCombiner) finalizeNumerical(combined map[string]interface{}) { + delete(combined, "_numericalAggregator") +} + +func (sc *ShardCombiner) mergeBooleanProp(combined, source *aggregation.Boolean) { + combined.Count += source.Count + combined.TotalFalse += source.TotalFalse + combined.TotalTrue += 
source.TotalTrue +} + +func (sc *ShardCombiner) finalizeBoolean(combined *aggregation.Boolean) { + combined.PercentageFalse = float64(combined.TotalFalse) / float64(combined.Count) + combined.PercentageTrue = float64(combined.TotalTrue) / float64(combined.Count) +} + +func (sc *ShardCombiner) mergeTextProp(first, second *aggregation.Text) { + first.Count += second.Count + + for _, textOcc := range second.Items { + pos := getPosOfTextOcc(first.Items, textOcc.Value) + if pos < 0 { + first.Items = append(first.Items, textOcc) + } else { + first.Items[pos].Occurs += textOcc.Occurs + } + } +} + +func (sc *ShardCombiner) mergeRefProp(first, second *aggregation.Reference) { + first.PointingTo = append(first.PointingTo, second.PointingTo...) +} + +func (sc *ShardCombiner) finalizeText(combined *aggregation.Text) { + sort.Slice(combined.Items, func(a, b int) bool { + return combined.Items[a].Occurs > combined.Items[b].Occurs + }) +} + +func getPosOfTextOcc(haystack []aggregation.TextOccurrence, needle string) int { + for i, elem := range haystack { + if elem.Value == needle { + return i + } + } + + return -1 +} + +func (sc *ShardCombiner) finalizeGroup(group *aggregation.Group) { + for propName, prop := range group.Properties { + switch prop.Type { + case aggregation.PropertyTypeNumerical: + sc.finalizeNumerical(prop.NumericalAggregations) + case aggregation.PropertyTypeBoolean: + sc.finalizeBoolean(&prop.BooleanAggregation) + case aggregation.PropertyTypeText: + sc.finalizeText(&prop.TextAggregation) + case aggregation.PropertyTypeDate: + sc.finalizeDateProp(prop.DateAggregations) + case aggregation.PropertyTypeReference: + continue + default: + panic("Unknown prop type: " + prop.Type) + } + group.Properties[propName] = prop + } +} + +func getPosOfGroup(haystack []aggregation.Group, needle interface{}) int { + for i, elem := range haystack { + if elem.GroupedBy.Value == needle { + return i + } + } + + return -1 +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/shard_combiner_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/shard_combiner_test.go new file mode 100644 index 0000000000000000000000000000000000000000..88f9d9abcd846628de639fdc5313c8272b6377a8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/shard_combiner_test.go @@ -0,0 +1,249 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/aggregation" +) + +const ( + YearMonthDayHourMinute = "2022-06-16T18:30:" + NanoSecondsTimeZone = ".451235Z" +) + +type TestStructDates struct { + name string + dates1 []string + dates2 []string + expectedMedian string + expectedMaximum string + expectedMode string + expectedMinimum string +} + +func TestShardCombinerMergeDates(t *testing.T) { + tests := []TestStructDates{ + { + name: "Many values", + dates1: []string{"55", "26", "10"}, + dates2: []string{"15", "26", "45", "26"}, + expectedMaximum: "55", + expectedMinimum: "10", + expectedMedian: "26", + expectedMode: "26", + }, + { + name: "Struct with single element", + dates1: []string{"45"}, + dates2: []string{"00", "26", "45", "27"}, + expectedMaximum: "45", + expectedMinimum: "00", + expectedMedian: "27", + expectedMode: "45", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testDates(t, tt.dates1, tt.dates2, tt) + testDates(t, tt.dates2, tt.dates1, tt) + }) + } +} + +func testDates(t *testing.T, dates1, dates2 []string, tt TestStructDates) { + sc := NewShardCombiner() + dateMap1 := createDateAgg(dates1) + dateMap2 := createDateAgg(dates2) + + 
sc.mergeDateProp(dateMap1, dateMap2) + sc.finalizeDateProp(dateMap1) + assert.Equal(t, YearMonthDayHourMinute+tt.expectedMinimum+NanoSecondsTimeZone, dateMap1["minimum"]) + assert.Equal(t, YearMonthDayHourMinute+tt.expectedMaximum+NanoSecondsTimeZone, dateMap1["maximum"]) + assert.Equal(t, YearMonthDayHourMinute+tt.expectedMedian+NanoSecondsTimeZone, dateMap1["median"]) + assert.Equal(t, int64(len(tt.dates1)+len(tt.dates2)), dateMap1["count"]) + assert.Equal(t, YearMonthDayHourMinute+tt.expectedMode+NanoSecondsTimeZone, dateMap1["mode"]) +} + +func createDateAgg(dates []string) map[string]interface{} { + agg := newDateAggregator() + for _, date := range dates { + agg.AddTimestamp(YearMonthDayHourMinute + date + NanoSecondsTimeZone) + } + agg.buildPairsFromCounts() // needed to populate all required info + + prop := aggregation.Property{} + aggs := []aggregation.Aggregator{aggregation.MedianAggregator, aggregation.MinimumAggregator, aggregation.MaximumAggregator, aggregation.CountAggregator, aggregation.ModeAggregator} + addDateAggregations(&prop, aggs, agg) + return prop.DateAggregations +} + +type TestStructNumbers struct { + name string + numbers1 []float64 + numbers2 []float64 + testMode bool +} + +func TestShardCombinerMergeNumerical(t *testing.T) { + tests := []TestStructNumbers{ + { + name: "Uneven number of elements for both", + numbers1: []float64{0, 9, 9}, + numbers2: []float64{2}, + testMode: true, + }, + { + name: "Even number of elements for both", + numbers1: []float64{0, 5, 10, 15}, + numbers2: []float64{15, 15}, + testMode: true, + }, + { + name: "Mode is affected by merge", + numbers1: []float64{2.5, 2.5, 10, 15}, + numbers2: []float64{15, 15}, + testMode: true, + }, + { + name: "random", + numbers1: createRandomSlice(), + numbers2: createRandomSlice(), + testMode: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testNumbers(t, tt.numbers1, tt.numbers2, tt.testMode) + testNumbers(t, tt.numbers2, tt.numbers1, 
tt.testMode) + }) + } +} + +func TestShardCombinerMergeNil(t *testing.T) { + tests := []struct { + name string + results []*aggregation.Result + totalResults int + }{ + { + name: "First is nil", + results: []*aggregation.Result{ + { + Groups: []aggregation.Group{}, + }, + { + Groups: []aggregation.Group{{GroupedBy: &aggregation.GroupedBy{Value: 10, Path: []string{"something"}}}}, + }, + }, + totalResults: 1, + }, + { + name: "Second is nil", + results: []*aggregation.Result{ + { + Groups: []aggregation.Group{{GroupedBy: &aggregation.GroupedBy{Value: 10, Path: []string{"something"}}}}, + }, + { + Groups: []aggregation.Group{}, + }, + }, + totalResults: 1, + }, + { + name: "Both are nil", + results: []*aggregation.Result{ + { + Groups: []aggregation.Group{}, + }, + { + Groups: []aggregation.Group{}, + }, + }, + totalResults: 0, + }, + { + name: "Non are nil", + results: []*aggregation.Result{ + { + Groups: []aggregation.Group{{GroupedBy: &aggregation.GroupedBy{Value: 9, Path: []string{"other thing"}}}}, + }, + { + Groups: []aggregation.Group{{GroupedBy: &aggregation.GroupedBy{Value: 10, Path: []string{"something"}}}}, + }, + }, + totalResults: 2, + }, + { + name: "Ungrouped with nil", + results: []*aggregation.Result{ + { + Groups: []aggregation.Group{{Count: 1}}, + }, + { + Groups: []aggregation.Group{}, + }, + }, + totalResults: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + combinedResults := NewShardCombiner().Do(tt.results) + assert.Equal(t, len(combinedResults.Groups), tt.totalResults) + }) + } +} + +func testNumbers(t *testing.T, numbers1, numbers2 []float64, testMode bool) { + sc := NewShardCombiner() + numberMap1 := createNumericalAgg(numbers1) + numberMap2 := createNumericalAgg(numbers2) + + combinedMap := createNumericalAgg(append(numbers1, numbers2...)) + + sc.mergeNumericalProp(numberMap1, numberMap2) + sc.finalizeNumerical(numberMap1) + + assert.Equal(t, len(numbers1)+len(numbers2), int(numberMap1["count"].(float64))) 
+ assert.InDelta(t, combinedMap["mean"], numberMap1["mean"], 0.0001) + assert.InDelta(t, combinedMap["median"], numberMap1["median"], 0.0001) + if testMode { // for random numbers the mode is flaky as there is no guaranteed order if several values have the same count + assert.Equal(t, combinedMap["mode"], numberMap1["mode"]) + } +} + +func createNumericalAgg(numbers []float64) map[string]interface{} { + agg := newNumericalAggregator() + for _, num := range numbers { + agg.AddFloat64(num) + } + agg.buildPairsFromCounts() // needed to populate all required info + + prop := aggregation.Property{} + aggs := []aggregation.Aggregator{aggregation.MedianAggregator, aggregation.MeanAggregator, aggregation.ModeAggregator, aggregation.CountAggregator} + addNumericalAggregations(&prop, aggs, agg) + return prop.NumericalAggregations +} + +func createRandomSlice() []float64 { + size := rand.Intn(100) + 1 // at least one entry + array := make([]float64, size) + for i := 0; i < size; i++ { + array[i] = rand.Float64() * 1000 + } + return array +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/text.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/text.go new file mode 100644 index 0000000000000000000000000000000000000000..09ef4bbbece280052556fce8a52c90bf6cd132a7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/text.go @@ -0,0 +1,149 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "sort" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storobj" +) + +func extractLimitFromTopOccs(aggs []aggregation.Aggregator) int { + for _, agg := range aggs { + if agg.Type == aggregation.TopOccurrencesType && agg.Limit != nil { + return *agg.Limit + } + } + + // we couldn't extract a limit, default to something reasonable + return 5 +} + +func newTextAggregator(limit int) *textAggregator { + return &textAggregator{itemCounter: map[string]int{}, max: limit} +} + +type textAggregator struct { + max int + count uint64 + + itemCounter map[string]int + + // always keep sorted, so we can cut off the last elem, when it grows larger + // than max + topPairs []aggregation.TextOccurrence +} + +func (a *Aggregator) parseAndAddTextRow(agg *textAggregator, + v []byte, propName schema.PropertyName, +) error { + items, ok, err := storobj.ParseAndExtractTextProp(v, propName.String()) + if err != nil { + return errors.Wrap(err, "parse and extract prop") + } + + if !ok { + return nil + } + + for i := range items { + if err := agg.AddText(items[i]); err != nil { + return err + } + } + return nil +} + +func (a *textAggregator) AddText(value string) error { + a.count++ + + itemCount := a.itemCounter[value] + itemCount++ + a.itemCounter[value] = itemCount + return nil +} + +func (a *textAggregator) insertOrdered(elem aggregation.TextOccurrence) { + if len(a.topPairs) == 0 { + a.topPairs = []aggregation.TextOccurrence{elem} + return + } + + added := false + for i, pair := range a.topPairs { + if pair.Occurs > elem.Occurs { + continue + } + // if number of occurrences is the same, + // skip if string is after one in topPairs + if pair.Occurs == elem.Occurs && pair.Value < elem.Value { + continue + } + + // we have found the first one that's smaller so me must insert before i + 
a.topPairs = append( + a.topPairs[:i], append( + []aggregation.TextOccurrence{elem}, + a.topPairs[i:]..., + )..., + ) + + added = true + break + } + + if len(a.topPairs) > a.max { + a.topPairs = a.topPairs[:len(a.topPairs)-1] + } + + if !added && len(a.topPairs) < a.max { + a.topPairs = append(a.topPairs, elem) + } +} + +func (a *textAggregator) Res() aggregation.Text { + out := aggregation.Text{} + if a.count == 0 { + return out + } + + for value, count := range a.itemCounter { + a.insertOrdered(aggregation.TextOccurrence{ + Value: value, + Occurs: count, + }) + } + + out.Items = a.topPairs + sort.SliceStable(out.Items, func(a, b int) bool { + countA := out.Items[a].Occurs + countB := out.Items[b].Occurs + + if countA != countB { + return countA > countB + } + + valueA := out.Items[a].Value + valueB := out.Items[b].Value + if len(valueA) == 0 || len(valueB) == 0 { + return false // order doesn't matter in this case, just prevent a panic + } + + return valueA[0] < valueB[0] + }) + + out.Count = int(a.count) + return out +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/text_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/text_test.go new file mode 100644 index 0000000000000000000000000000000000000000..69b32cfe1c222ec2ec8b2d9277d1d3710c0c1356 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/text_test.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/aggregation" +) + +func TestTextAggregator_TopOccurrencesCalculation(t *testing.T) { + testCases := []struct { + name string + texts []string + expectedCount int + expectedTopOccurrences []aggregation.TextOccurrence + }{ + { + name: "All texts occurring once", + texts: []string{"b_occurs1", "c_occurs1", "g_occurs1", "f_occurs1", "a_occurs1", "d_occurs1", "e_occurs1"}, + expectedCount: 7, + expectedTopOccurrences: []aggregation.TextOccurrence{ + {Value: "a_occurs1", Occurs: 1}, + {Value: "b_occurs1", Occurs: 1}, + {Value: "c_occurs1", Occurs: 1}, + {Value: "d_occurs1", Occurs: 1}, + {Value: "e_occurs1", Occurs: 1}, + }, + }, + { + name: "All texts occurring different number of times", + texts: []string{ + "b_occurs2", "e_occurs5", "d_occurs4", "c_occurs3", "g_occurs7", "e_occurs5", "d_occurs4", + "f_occurs6", "g_occurs7", "c_occurs3", "b_occurs2", "g_occurs7", "f_occurs6", "g_occurs7", "d_occurs4", + "a_occurs1", "f_occurs6", "g_occurs7", "g_occurs7", "f_occurs6", "d_occurs4", "e_occurs5", "g_occurs7", + "c_occurs3", "f_occurs6", "e_occurs5", "f_occurs6", "e_occurs5", + }, + expectedCount: 28, + expectedTopOccurrences: []aggregation.TextOccurrence{ + {Value: "g_occurs7", Occurs: 7}, + {Value: "f_occurs6", Occurs: 6}, + {Value: "e_occurs5", Occurs: 5}, + {Value: "d_occurs4", Occurs: 4}, + {Value: "c_occurs3", Occurs: 3}, + }, + }, + { + name: "Some texts occurring same number of times", + texts: []string{ + "a_occurs4", "b_occurs3", "g_occurs4", "f_occurs3", "a_occurs4", "e_occurs2", "a_occurs4", + "c_occurs2", "g_occurs4", "f_occurs3", "b_occurs3", "c_occurs2", "a_occurs4", "f_occurs3", "g_occurs4", + "b_occurs3", "d_occurs1", "e_occurs2", "g_occurs4", + }, + expectedCount: 19, + expectedTopOccurrences: []aggregation.TextOccurrence{ + {Value: "a_occurs4", Occurs: 4}, + {Value: "g_occurs4", Occurs: 4}, 
+ {Value: "b_occurs3", Occurs: 3}, + {Value: "f_occurs3", Occurs: 3}, + {Value: "c_occurs2", Occurs: 2}, + }, + }, + { + name: "Fewer texts than limit", + texts: []string{"b_occurs3", "d_occurs3", "c_occurs1", "d_occurs3", "b_occurs3", "b_occurs3", "a_occurs1", "d_occurs3"}, + expectedCount: 8, + expectedTopOccurrences: []aggregation.TextOccurrence{ + {Value: "b_occurs3", Occurs: 3}, + {Value: "d_occurs3", Occurs: 3}, + {Value: "a_occurs1", Occurs: 1}, + {Value: "c_occurs1", Occurs: 1}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + agg := newTextAggregator(5) + for _, text := range tc.texts { + agg.AddText(text) + } + + res := agg.Res() + assert.Equal(t, tc.expectedCount, res.Count) + assert.Equal(t, tc.expectedTopOccurrences, res.Items) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/unfiltered.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/unfiltered.go new file mode 100644 index 0000000000000000000000000000000000000000..14aa74a5d11da260b3171c06d5b5169b6836f261 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/unfiltered.go @@ -0,0 +1,145 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package aggregator + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/schema" +) + +// unfilteredAggregator allows for relatively efficient whole-dataset +// aggregations, because it uses the invert index which is already grouped and +// ordered. Numerical aggregations can therefore be relatively efficient. 
+// +// As opposed to reading n objects, the unfiltered aggregator read x rows per +// props. X can be different for each prop. +// +// However, this aggregator does not work with subselections of the dataset, +// such as when grouping or a filter is set. +type unfilteredAggregator struct { + *Aggregator +} + +func newUnfilteredAggregator(agg *Aggregator) *unfilteredAggregator { + return &unfilteredAggregator{Aggregator: agg} +} + +func (ua *unfilteredAggregator) Do(ctx context.Context) (*aggregation.Result, error) { + out := aggregation.Result{} + + // without grouping there is always exactly one group + out.Groups = make([]aggregation.Group, 1) + + if ua.params.IncludeMetaCount { + if err := ua.addMetaCount(ctx, &out); err != nil { + return nil, errors.Wrap(err, "add meta count") + } + } + + props, err := ua.properties(ctx) + if err != nil { + return nil, errors.Wrap(err, "aggregate properties") + } + + out.Groups[0].Properties = props + + return &out, nil +} + +func (ua *unfilteredAggregator) addMetaCount(ctx context.Context, + out *aggregation.Result, +) error { + b := ua.store.Bucket(helpers.ObjectsBucketLSM) + if b == nil { + return errors.Errorf("objects bucket is nil") + } + + out.Groups[0].Count = b.Count() + + return nil +} + +func (ua unfilteredAggregator) properties( + ctx context.Context, +) (map[string]aggregation.Property, error) { + if len(ua.params.Properties) == 0 { + return nil, nil + } + + out := map[string]aggregation.Property{} + + for _, prop := range ua.params.Properties { + if err := ctx.Err(); err != nil { + return nil, errors.Wrapf(err, "start property %s", prop.Name) + } + + analyzed, err := ua.property(ctx, prop) + if err != nil { + return nil, errors.Wrapf(err, "property %s", prop.Name) + } + + if analyzed == nil { + continue + } + + out[prop.Name.String()] = *analyzed + } + + return out, nil +} + +func (ua unfilteredAggregator) property(ctx context.Context, + prop aggregation.ParamProperty, +) (*aggregation.Property, error) { + 
aggType, dt, err := ua.aggTypeOfProperty(prop.Name) + if err != nil { + return nil, err + } + + switch aggType { + case aggregation.PropertyTypeNumerical: + switch dt { + case schema.DataTypeNumber: + return ua.floatProperty(ctx, prop) + case schema.DataTypeNumberArray, schema.DataTypeIntArray: + return ua.numberArrayProperty(ctx, prop) + default: + return ua.intProperty(ctx, prop) + } + case aggregation.PropertyTypeBoolean: + switch dt { + case schema.DataTypeBooleanArray: + return ua.boolArrayProperty(ctx, prop) + default: + return ua.boolProperty(ctx, prop) + } + case aggregation.PropertyTypeText: + return ua.textProperty(ctx, prop) + case aggregation.PropertyTypeDate: + switch dt { + case schema.DataTypeDateArray: + return ua.dateArrayProperty(ctx, prop) + default: + return ua.dateProperty(ctx, prop) + } + case aggregation.PropertyTypeReference: + // ignore, as this is handled outside the repo in the uc + return nil, nil + default: + return nil, fmt.Errorf("aggreation type %s not supported yet", aggType) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/unfiltered_type_specific.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/unfiltered_type_specific.go new file mode 100644 index 0000000000000000000000000000000000000000..e249c89427e427b079e6512adad5bd620d25aa61 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/unfiltered_type_specific.go @@ -0,0 +1,503 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
//
//  CONTACT: hello@weaviate.io
//

package aggregator

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	"github.com/weaviate/sroar"
	"github.com/weaviate/weaviate/adapters/repos/db/helpers"
	"github.com/weaviate/weaviate/adapters/repos/db/lsmkv"
	"github.com/weaviate/weaviate/entities/aggregation"
	"github.com/weaviate/weaviate/entities/schema"
	"github.com/weaviate/weaviate/entities/storobj"
)

// boolProperty aggregates a scalar bool property by scanning its inverted
// index bucket. Each row key is a marshalled bool and the row's cardinality
// is the number of objects with that value.
func (ua unfilteredAggregator) boolProperty(ctx context.Context,
	prop aggregation.ParamProperty,
) (*aggregation.Property, error) {
	out := aggregation.Property{
		Type: aggregation.PropertyTypeBoolean,
	}

	b := ua.store.Bucket(helpers.BucketFromPropNameLSM(prop.Name.String()))
	if b == nil {
		return nil, errors.Errorf("could not find bucket for prop %s", prop.Name)
	}

	agg := newBoolAggregator()

	// bool never has a frequency, so it's either a Set or RoaringSet
	if b.Strategy() == lsmkv.StrategyRoaringSet {
		c := b.CursorRoaringSet()
		defer c.Close()

		for k, v := c.First(); k != nil; k, v = c.Next() {
			err := ua.parseAndAddBoolRowRoaringSet(agg, k, v)
			if err != nil {
				return nil, err
			}
		}
	} else {
		c := b.SetCursor() // bool never has a frequency, so it's always a Set
		defer c.Close()

		for k, v := c.First(); k != nil; k, v = c.Next() {
			err := ua.parseAndAddBoolRowSet(agg, k, v)
			if err != nil {
				return nil, err
			}
		}
	}

	out.BooleanAggregation = agg.Res()

	return &out, nil
}

// boolArrayProperty aggregates a bool-array property. Unlike the scalar
// variant it scans the full objects bucket and extracts the array from each
// stored object, since per-element counts are needed.
func (ua unfilteredAggregator) boolArrayProperty(ctx context.Context,
	prop aggregation.ParamProperty,
) (*aggregation.Property, error) {
	out := aggregation.Property{
		Type: aggregation.PropertyTypeBoolean,
	}

	b := ua.store.Bucket(helpers.ObjectsBucketLSM)
	if b == nil {
		return nil, errors.Errorf("could not find bucket for prop %s", prop.Name)
	}

	agg := newBoolAggregator()

	c := b.Cursor()
	defer c.Close()

	for k, v := c.First(); k != nil; k, v = c.Next() {
		err := ua.parseAndAddBoolArrayRow(agg, v, prop.Name)
		if err != nil {
			return nil, err
		}
	}

	out.BooleanAggregation = agg.Res()

	return &out, nil
}

// parseAndAddBoolRowSet adds one Set-strategy inverted-index row to the
// aggregator; len(v) is the number of objects carrying the value k.
func (ua unfilteredAggregator) parseAndAddBoolRowSet(agg *boolAggregator, k []byte, v [][]byte) error {
	if len(k) != 1 {
		// we expect to see a single byte for a marshalled bool
		return fmt.Errorf("unexpected key length on inverted index, "+
			"expected 1: got %d", len(k))
	}

	if err := agg.AddBoolRow(k, uint64(len(v))); err != nil {
		return err
	}

	return nil
}

// parseAndAddBoolRowRoaringSet adds one RoaringSet-strategy row; the bitmap
// cardinality is the number of objects carrying the value k.
func (ua unfilteredAggregator) parseAndAddBoolRowRoaringSet(agg *boolAggregator, k []byte, v *sroar.Bitmap) error {
	if len(k) != 1 {
		// we expect to see a single byte for a marshalled bool
		return fmt.Errorf("unexpected key length on inverted index, "+
			"expected 1: got %d", len(k))
	}

	if err := agg.AddBoolRow(k, uint64(v.GetCardinality())); err != nil {
		return err
	}

	return nil
}

// parseAndAddBoolArrayRow extracts the bool array for propName from one
// marshalled object and adds every element. Objects without the property
// (ok == false) are silently skipped.
func (ua unfilteredAggregator) parseAndAddBoolArrayRow(agg *boolAggregator,
	v []byte, propName schema.PropertyName,
) error {
	items, ok, err := storobj.ParseAndExtractBoolArrayProp(v, propName.String())
	if err != nil {
		return errors.Wrap(err, "parse and extract prop")
	}

	if !ok {
		return nil
	}

	for i := range items {
		if err := agg.AddBool(items[i]); err != nil {
			return err
		}
	}

	return nil
}
c.First(); k != nil; k, v = c.Next() { + if err := ua.parseAndAddFloatRowRoaringSet(agg, k, v); err != nil { + return nil, err + } + } + } else { + c := b.SetCursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := ua.parseAndAddFloatRowSet(agg, k, v); err != nil { + return nil, err + } + } + } + + addNumericalAggregations(&out, prop.Aggregators, agg) + + return &out, nil +} + +func (ua unfilteredAggregator) intProperty(ctx context.Context, + prop aggregation.ParamProperty, +) (*aggregation.Property, error) { + out := aggregation.Property{ + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{}, + } + + b := ua.store.Bucket(helpers.BucketFromPropNameLSM(prop.Name.String())) + if b == nil { + return nil, errors.Errorf("could not find bucket for prop %s", prop.Name) + } + + agg := newNumericalAggregator() + + // int never has a frequency, so it's either a Set or RoaringSet + if b.Strategy() == lsmkv.StrategyRoaringSet { + c := b.CursorRoaringSet() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := ua.parseAndAddIntRowRoaringSet(agg, k, v); err != nil { + return nil, err + } + } + } else { + + c := b.SetCursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := ua.parseAndAddIntRowSet(agg, k, v); err != nil { + return nil, err + } + } + } + + addNumericalAggregations(&out, prop.Aggregators, agg) + + return &out, nil +} + +func (ua unfilteredAggregator) dateProperty(ctx context.Context, + prop aggregation.ParamProperty, +) (*aggregation.Property, error) { + out := aggregation.Property{ + Type: aggregation.PropertyTypeDate, + DateAggregations: map[string]interface{}{}, + } + + b := ua.store.Bucket(helpers.BucketFromPropNameLSM(prop.Name.String())) + if b == nil { + return nil, errors.Errorf("could not find bucket for prop %s", prop.Name) + } + + agg := newDateAggregator() + + // dates don't have frequency, so it's either a Set or 
RoaringSet + if b.Strategy() == lsmkv.StrategyRoaringSet { + c := b.CursorRoaringSet() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := ua.parseAndAddDateRowRoaringSet(agg, k, v); err != nil { + return nil, err + } + } + } else { + c := b.SetCursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := ua.parseAndAddDateRowSet(agg, k, v); err != nil { + return nil, err + } + } + } + + addDateAggregations(&out, prop.Aggregators, agg) + + return &out, nil +} + +func (ua unfilteredAggregator) parseAndAddDateRowSet(agg *dateAggregator, k []byte, + v [][]byte, +) error { + if len(k) != 8 { + // dates are stored as epoch nanoseconds, we expect to see an int64 + return fmt.Errorf("unexpected key length on inverted index, "+ + "expected 8: got %d", len(k)) + } + + if err := agg.AddTimestampRow(k, uint64(len(v))); err != nil { + return err + } + + return nil +} + +func (ua unfilteredAggregator) parseAndAddDateRowRoaringSet(agg *dateAggregator, k []byte, + v *sroar.Bitmap, +) error { + if len(k) != 8 { + // dates are stored as epoch nanoseconds, we expect to see an int64 + return fmt.Errorf("unexpected key length on inverted index, "+ + "expected 8: got %d", len(k)) + } + + if err := agg.AddTimestampRow(k, uint64(v.GetCardinality())); err != nil { + return err + } + + return nil +} + +func (ua unfilteredAggregator) dateArrayProperty(ctx context.Context, + prop aggregation.ParamProperty, +) (*aggregation.Property, error) { + out := aggregation.Property{ + Type: aggregation.PropertyTypeDate, + DateAggregations: map[string]interface{}{}, + } + + b := ua.store.Bucket(helpers.ObjectsBucketLSM) + if b == nil { + return nil, errors.Errorf("could not find bucket for prop %s", prop.Name) + } + + agg := newDateAggregator() + + c := b.Cursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := ua.parseAndAddDateArrayRow(agg, v, prop.Name); err != nil { + return nil, err + } + } + + 
addDateAggregations(&out, prop.Aggregators, agg) + + return &out, nil +} + +func (ua unfilteredAggregator) parseAndAddDateArrayRow(agg *dateAggregator, + v []byte, propName schema.PropertyName, +) error { + items, ok, err := storobj.ParseAndExtractProperty(v, propName.String()) + if err != nil { + return errors.Wrap(err, "parse and extract prop") + } + + if !ok { + return nil + } + + for i := range items { + if err := agg.AddTimestamp(items[i]); err != nil { + return err + } + } + + return nil +} + +func (ua unfilteredAggregator) parseAndAddFloatRowSet(agg *numericalAggregator, k []byte, + v [][]byte, +) error { + if len(k) != 8 { + // we expect to see either an int64 or a float64, so any non-8 length + // is unexpected + return fmt.Errorf("unexpected key length on inverted index, "+ + "expected 8: got %d", len(k)) + } + + if err := agg.AddFloat64Row(k, uint64(len(v))); err != nil { + return err + } + + return nil +} + +func (ua unfilteredAggregator) parseAndAddFloatRowRoaringSet(agg *numericalAggregator, k []byte, + v *sroar.Bitmap, +) error { + if len(k) != 8 { + // we expect to see either an int64 or a float64, so any non-8 length + // is unexpected + return fmt.Errorf("unexpected key length on inverted index, "+ + "expected 8: got %d", len(k)) + } + + if err := agg.AddFloat64Row(k, uint64(v.GetCardinality())); err != nil { + return err + } + + return nil +} + +func (ua unfilteredAggregator) parseAndAddIntRowSet(agg *numericalAggregator, k []byte, + v [][]byte, +) error { + if len(k) != 8 { + // we expect to see either an int64 or a float64, so any non-8 length + // is unexpected + return fmt.Errorf("unexpected key length on inverted index, "+ + "expected 8: got %d", len(k)) + } + + if err := agg.AddInt64Row(k, uint64(len(v))); err != nil { + return err + } + + return nil +} + +func (ua unfilteredAggregator) parseAndAddIntRowRoaringSet(agg *numericalAggregator, k []byte, + v *sroar.Bitmap, +) error { + if len(k) != 8 { + // we expect to see either an int64 or a 
float64, so any non-8 length + // is unexpected + return fmt.Errorf("unexpected key length on inverted index, "+ + "expected 8: got %d", len(k)) + } + + if err := agg.AddInt64Row(k, uint64(v.GetCardinality())); err != nil { + return err + } + + return nil +} + +func (ua unfilteredAggregator) parseAndAddNumberArrayRow(agg *numericalAggregator, + v []byte, propName schema.PropertyName, +) error { + items, ok, err := storobj.ParseAndExtractNumberArrayProp(v, propName.String()) + if err != nil { + return errors.Wrap(err, "parse and extract prop") + } + + if !ok { + return nil + } + + for i := range items { + err := agg.AddNumberRow(items[i], 1) + if err != nil { + return err + } + } + + return nil +} + +func (ua unfilteredAggregator) textProperty(ctx context.Context, + prop aggregation.ParamProperty, +) (*aggregation.Property, error) { + out := aggregation.Property{ + Type: aggregation.PropertyTypeText, + TextAggregation: aggregation.Text{}, + } + + limit := extractLimitFromTopOccs(prop.Aggregators) + + b := ua.store.Bucket(helpers.ObjectsBucketLSM) + if b == nil { + return nil, errors.Errorf("could not find bucket for prop %s", prop.Name) + } + + agg := newTextAggregator(limit) + + // we're looking at the whole object, so this is neither a Set, nor a Map, but + // a Replace strategy + c := b.Cursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := ua.parseAndAddTextRow(agg, v, prop.Name); err != nil { + return nil, err + } + } + + out.TextAggregation = agg.Res() + + return &out, nil +} + +func (ua unfilteredAggregator) numberArrayProperty(ctx context.Context, + prop aggregation.ParamProperty, +) (*aggregation.Property, error) { + out := aggregation.Property{ + Type: aggregation.PropertyTypeNumerical, + NumericalAggregations: map[string]interface{}{}, + } + + b := ua.store.Bucket(helpers.ObjectsBucketLSM) + if b == nil { + return nil, errors.Errorf("could not find bucket for prop %s", prop.Name) + } + + agg := 
// numberArrayProperty aggregates a number/int array property by scanning the
// full objects bucket and extracting the array from each stored object.
func (ua unfilteredAggregator) numberArrayProperty(ctx context.Context,
	prop aggregation.ParamProperty,
) (*aggregation.Property, error) {
	out := aggregation.Property{
		Type:                  aggregation.PropertyTypeNumerical,
		NumericalAggregations: map[string]interface{}{},
	}

	b := ua.store.Bucket(helpers.ObjectsBucketLSM)
	if b == nil {
		return nil, errors.Errorf("could not find bucket for prop %s", prop.Name)
	}

	agg := newNumericalAggregator()

	c := b.Cursor()
	defer c.Close()

	for k, v := c.First(); k != nil; k, v = c.Next() {
		if err := ua.parseAndAddNumberArrayRow(agg, v, prop.Name); err != nil {
			return nil, err
		}
	}

	addNumericalAggregations(&out, prop.Aggregators, agg)

	return &out, nil
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/vector_search.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/vector_search.go
new file mode 100644
index 0000000000000000000000000000000000000000..cab2f475901e3c5472d0435f354201ea28b5f38c
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/aggregator/vector_search.go
@@ -0,0 +1,138 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package aggregator

import (
	"context"
	"fmt"

	"github.com/weaviate/weaviate/adapters/repos/db/helpers"
	"github.com/weaviate/weaviate/adapters/repos/db/inverted"
	"github.com/weaviate/weaviate/entities/additional"
	"github.com/weaviate/weaviate/entities/models"
	"github.com/weaviate/weaviate/entities/storobj"
)

// vectorSearch finds matching doc IDs and distances for vec. With an
// ObjectLimit it performs a kNN search; otherwise it searches by distance
// derived from the certainty parameter.
func (a *Aggregator) vectorSearch(ctx context.Context, allow helpers.AllowList, vec models.Vector) ([]uint64, []float32, error) {
	if a.params.ObjectLimit != nil {
		return a.searchByVector(ctx, vec, a.params.ObjectLimit, allow)
	}

	return a.searchByVectorDistance(ctx, vec, allow)
}

// searchByVector performs a limited kNN search and, if a certainty is set,
// cuts off results beyond the distance corresponding to that certainty.
func (a *Aggregator) searchByVector(ctx context.Context, searchVector models.Vector, limit *int, ids helpers.AllowList) ([]uint64, []float32, error) {
	idsFound, dists, err := a.performVectorSearch(ctx, searchVector, *limit, ids)
	if err != nil {
		return idsFound, nil, err
	}

	if a.params.Certainty > 0 {
		// translate certainty into a maximum allowed cosine distance
		targetDist := float32(1-a.params.Certainty) * 2

		// assumes dists is sorted ascending: stop at the first one past target
		i := 0
		for _, dist := range dists {
			if dist > targetDist {
				break
			}
			i++
		}

		// NOTE(review): idsFound is truncated to i but dists is returned in
		// full, so the slices may have different lengths here — confirm
		// whether callers rely on this or whether dists[:i] was intended.
		return idsFound[:i], dists, nil

	}
	return idsFound, dists, nil
}

// searchByVectorDistance searches for all matches within the distance
// derived from certainty (no result limit). Requires certainty > 0.
func (a *Aggregator) searchByVectorDistance(ctx context.Context, searchVector models.Vector, ids helpers.AllowList) ([]uint64, []float32, error) {
	if a.params.Certainty <= 0 {
		return nil, nil, fmt.Errorf("must provide certainty or objectLimit with vector search")
	}

	targetDist := float32(1-a.params.Certainty) * 2
	idsFound, dists, err := a.performVectorDistanceSearch(ctx, searchVector, targetDist, -1, ids)
	if err != nil {
		return nil, nil, fmt.Errorf("aggregate search by vector: %w", err)
	}

	return idsFound, dists, nil
}

// objectVectorSearch runs the vector search and resolves the resulting doc
// IDs into full objects from the objects bucket.
func (a *Aggregator) objectVectorSearch(ctx context.Context, searchVector models.Vector,
	allowList helpers.AllowList,
) ([]*storobj.Object, []float32, error) {
	ids, dists, err := a.vectorSearch(ctx, allowList, searchVector)
	if err != nil {
		return nil, nil, err
	}

	bucket := a.store.Bucket(helpers.ObjectsBucketLSM)
	objs, err := storobj.ObjectsByDocID(bucket, ids, additional.Properties{}, nil, a.logger)
	if err != nil {
		return nil, nil, fmt.Errorf("get objects by doc id: %w", err)
	}
	return objs, dists, nil
}

// buildAllowList resolves the aggregation's filters (if any) into an allow
// list of doc IDs; returns a nil allow list when no filters are set.
func (a *Aggregator) buildAllowList(ctx context.Context) (helpers.AllowList, error) {
	var (
		allow helpers.AllowList
		err   error
	)

	if a.params.Filters != nil {
		allow, err = inverted.NewSearcher(a.logger, a.store, a.getSchema.ReadOnlyClass, nil,
			a.classSearcher, a.stopwords, a.shardVersion, a.isFallbackToSearchable,
			a.tenant, a.nestedCrossRefLimit, a.bitmapFactory).
			DocIDs(ctx, a.params.Filters, additional.Properties{},
				a.params.ClassName)
		if err != nil {
			return nil, fmt.Errorf("retrieve doc IDs from searcher: %w", err)
		}
	}

	return allow, nil
}

// performVectorSearch dispatches a limited kNN search to the single- or
// multi-vector index depending on the concrete search-vector type.
func (a *Aggregator) performVectorSearch(ctx context.Context,
	searchVector models.Vector, limit int, ids helpers.AllowList,
) ([]uint64, []float32, error) {
	switch vec := searchVector.(type) {
	case []float32:
		idsFound, dists, err := a.vectorIndex.SearchByVector(ctx, vec, limit, ids)
		if err != nil {
			return idsFound, nil, err
		}
		return idsFound, dists, nil
	case [][]float32:
		idsFound, dists, err := a.vectorIndex.(vectorIndexMulti).SearchByMultiVector(ctx, vec, limit, ids)
		if err != nil {
			return idsFound, nil, err
		}
		return idsFound, dists, nil
	default:
		return nil, nil, fmt.Errorf("perform vector search: unrecognized search vector type: %T", searchVector)
	}
}

// performVectorDistanceSearch dispatches a distance-bounded search to the
// single- or multi-vector index depending on the concrete vector type.
func (a *Aggregator) performVectorDistanceSearch(ctx context.Context,
	searchVector models.Vector, targetDist float32, maxLimit int64, ids helpers.AllowList,
) ([]uint64, []float32, error) {
	switch vec := searchVector.(type) {
	case []float32:
		return a.vectorIndex.SearchByVectorDistance(ctx, vec, targetDist, maxLimit, ids)
	case [][]float32:
		return a.vectorIndex.(vectorIndexMulti).SearchByMultiVectorDistance(ctx, vec, targetDist, maxLimit, ids)
	default:
		return nil, nil, fmt.Errorf("perform vector distance search: unrecognized search vector type: %T", searchVector)
	}
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/backup.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/backup.go
new file mode 100644
index 0000000000000000000000000000000000000000..d583e28e21fd741729aa0f005c6db354da54c225
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/backup.go
@@ -0,0 +1,397 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package db

import (
	"context"
	"encoding/json"
	"fmt"
	"sync"
	"time"

	"github.com/weaviate/weaviate/entities/models"
	"github.com/weaviate/weaviate/usecases/sharding"

	enterrors "github.com/weaviate/weaviate/entities/errors"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/weaviate/weaviate/entities/backup"
	"github.com/weaviate/weaviate/entities/schema"
)

// BackupState tracks the currently active backup (if any) for an index.
type BackupState struct {
	BackupID   string
	InProgress bool
}

// Backupable returns whether all given class can be backed up.
// Note: the comparison is case-sensitive — a casing permutation of an
// existing class name is rejected.
func (db *DB) Backupable(ctx context.Context, classes []string) error {
	for _, c := range classes {
		className := schema.ClassName(c)
		idx := db.GetIndex(className)
		if idx == nil || idx.Config.ClassName != className {
			return fmt.Errorf("class %v doesn't exist", c)
		}
	}
	return nil
}

// ListBackupable returns a list of all classes which can be backed up.
func (db *DB) ListBackupable() []string {
	db.indexLock.RLock()
	defer db.indexLock.RUnlock()

	cs := make([]string, 0, len(db.indices))

	for _, idx := range db.indices {
		cls := string(idx.Config.ClassName)
		cs = append(cs, cls)
	}

	return cs
}

// BackupDescriptors returns a channel of class descriptors.
// Class descriptor records everything needed to restore a class
// If an error happens a descriptor with an error will be written to the channel just before closing it.
func (db *DB) BackupDescriptors(ctx context.Context, bakid string, classes []string,
) <-chan backup.ClassDescriptor {
	ds := make(chan backup.ClassDescriptor, len(classes))
	f := func() {
		for _, c := range classes {
			desc := backup.ClassDescriptor{Name: c}
			idx := db.GetIndex(schema.ClassName(c))
			if idx == nil {
				desc.Error = fmt.Errorf("class %v doesn't exist any more", c)
			} else if err := idx.descriptor(ctx, bakid, &desc); err != nil {
				desc.Error = fmt.Errorf("backup class %v descriptor: %w", c, err)
			}
			ds <- desc
			// stop at the first failing class; its descriptor carries the error
			if desc.Error != nil {
				break
			}
		}
		close(ds)
	}
	enterrors.GoWrapper(f, db.logger)
	return ds
}

// ShardsBackup collects backup metadata (file lists, counters, version) for
// the given shards of one class, halting shard writes while collecting. On
// error the previously acquired backup state is released asynchronously.
func (db *DB) ShardsBackup(
	ctx context.Context, bakID, class string, shards []string,
) (_ backup.ClassDescriptor, err error) {
	cd := backup.ClassDescriptor{Name: class}
	idx := db.GetIndex(schema.ClassName(class))
	if idx == nil {
		return cd, fmt.Errorf("no index for class %q", class)
	}

	if err := idx.initBackup(bakID); err != nil {
		return cd, fmt.Errorf("init backup state for class %q: %w", class, err)
	}

	defer func() {
		if err != nil {
			enterrors.GoWrapper(func() { idx.ReleaseBackup(ctx, bakID) }, db.logger)
		}
	}()

	// resolve all shard names up front so a missing shard fails fast
	sm := make(map[string]ShardLike, len(shards))
	for _, shardName := range shards {
		shard := idx.shards.Load(shardName)
		if shard == nil {
			return cd, fmt.Errorf("no shard %q for class %q", shardName, class)
		}
		sm[shardName] = shard
	}

	// prevent writing into the index during collection of metadata
	idx.shardTransferMutex.Lock()
	defer idx.shardTransferMutex.Unlock()
	for shardName, shard := range sm {
		if err := shard.HaltForTransfer(ctx, false, 0); err != nil {
			return cd, fmt.Errorf("class %q: shard %q: begin backup: %w", class, shardName, err)
		}

		sd := backup.ShardDescriptor{Name: shardName}
		if err := shard.ListBackupFiles(ctx, &sd); err != nil {
			return cd, fmt.Errorf("class %q: shard %q: list backup files: %w", class, shardName, err)
		}

		cd.Shards = append(cd.Shards, &sd)
	}

	return cd, nil
}

// ReleaseBackup release resources acquired by the index during backup
func (db *DB) ReleaseBackup(ctx context.Context, bakID, class string) (err error) {
	fields := logrus.Fields{
		"op":    "release_backup",
		"class": class,
		"id":    bakID,
	}
	db.logger.WithFields(fields).Debug("starting")
	begin := time.Now()
	defer func() {
		l := db.logger.WithFields(fields).WithField("took", time.Since(begin))
		if err != nil {
			l.Error(err)
			return
		}
		l.Debug("finish")
	}()

	idx := db.GetIndex(schema.ClassName(class))
	if idx != nil {
		return idx.ReleaseBackup(ctx, bakID)
	}
	return nil
}

// ClassExists reports whether an index exists for the given class name.
func (db *DB) ClassExists(name string) bool {
	return db.IndexExists(schema.ClassName(name))
}

// Shards returns the list of nodes where shards of class are contained.
// If there are no shards for the class, returns an empty list
// If there are shards for the class but no nodes are found, return an error
func (db *DB) Shards(ctx context.Context, class string) ([]string, error) {
	var nodes []string
	var shardCount int

	err := db.schemaReader.Read(class, func(_ *models.Class, state *sharding.State) error {
		if state == nil {
			return fmt.Errorf("unable to retrieve sharding state for class %s", class)
		}
		shardCount = len(state.Physical)
		if shardCount == 0 {
			nodes = []string{}
			return nil
		}

		// deduplicate node names across all physical shards
		unique := make(map[string]struct{})
		for _, shard := range state.Physical {
			for _, node := range shard.BelongsToNodes {
				unique[node] = struct{}{}
			}
		}

		nodes = make([]string, 0, len(unique))
		for node := range unique {
			nodes = append(nodes, node)
		}

		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to read sharding state for class %s: %w", class, err)
	}

	if shardCount > 0 && len(nodes) == 0 {
		return nil, fmt.Errorf("found %d shards but no nodes for class %s", shardCount, class)
	}

	return nodes, nil
}
// ListClasses returns the names of all classes in the current schema.
func (db *DB) ListClasses(ctx context.Context) []string {
	classes := db.schemaGetter.GetSchemaSkipAuth().Objects.Classes
	classNames := make([]string, len(classes))

	for i, class := range classes {
		classNames[i] = class.Class
	}

	return classNames
}

// descriptor record everything needed to restore a class
func (i *Index) descriptor(ctx context.Context, backupID string, desc *backup.ClassDescriptor) (err error) {
	if err := i.initBackup(backupID); err != nil {
		return err
	}
	// on any failure, release the backup state asynchronously so the index
	// does not stay locked for future backups
	defer func() {
		if err != nil {
			enterrors.GoWrapper(func() { i.ReleaseBackup(ctx, backupID) }, i.logger)
		}
	}()
	// prevent writing into the index during collection of metadata
	i.shardTransferMutex.Lock()
	defer i.shardTransferMutex.Unlock()

	if err = i.ForEachShard(func(name string, s ShardLike) error {
		if err = s.HaltForTransfer(ctx, false, 0); err != nil {
			return fmt.Errorf("pause compaction and flush: %w", err)
		}
		var sd backup.ShardDescriptor
		if err := s.ListBackupFiles(ctx, &sd); err != nil {
			return fmt.Errorf("list shard %v files: %w", s.Name(), err)
		}

		desc.Shards = append(desc.Shards, &sd)
		return nil
	}); err != nil {
		return err
	}

	if desc.ShardingState, err = i.marshalShardingState(); err != nil {
		return fmt.Errorf("marshal sharding state %w", err)
	}
	if desc.Schema, err = i.marshalSchema(); err != nil {
		return fmt.Errorf("marshal schema %w", err)
	}
	if desc.Aliases, err = i.marshalAliases(); err != nil {
		return fmt.Errorf("marshal aliases %w", err)
	}
	// This has to be set to true even if the aliases list is empty, because
	// even then the JSON key `aliases` will be present in newer backups.
	// It avoids failing on older backups that don't contain the `aliases`
	// key in the ClassDescriptor.
	desc.AliasesIncluded = true
	return ctx.Err()
}

// ReleaseBackup marks the specified backup as inactive and restarts all
// async background and maintenance processes. It errors if the backup does not exist
// or is already inactive.
func (i *Index) ReleaseBackup(ctx context.Context, id string) error {
	i.logger.WithField("backup_id", id).WithField("class", i.Config.ClassName).Info("release backup")
	i.resetBackupState()
	if err := i.resumeMaintenanceCycles(ctx); err != nil {
		return err
	}
	return nil
}

// initBackup atomically claims the index for a new backup. It fails if a
// previous backup has not yet been released.
func (i *Index) initBackup(id string) error {
	new := &BackupState{
		BackupID:   id,
		InProgress: true,
	}
	if !i.lastBackup.CompareAndSwap(nil, new) {
		bid := ""
		if x := i.lastBackup.Load(); x != nil {
			bid = x.BackupID
		}
		return errors.Errorf(
			"cannot create new backup, backup ‘%s’ is not yet released, this "+
				"means its contents have not yet been fully copied to its destination, "+
				"try again later", bid)
	}

	return nil
}

// resetBackupState clears the active-backup marker.
func (i *Index) resetBackupState() {
	i.lastBackup.Store(nil)
}

// resumeMaintenanceCycles resumes maintenance on every shard, pacing shards
// 10ms apart. It continues past per-shard failures and returns the last
// error encountered (logging each one).
func (i *Index) resumeMaintenanceCycles(ctx context.Context) (lastErr error) {
	i.ForEachShard(func(name string, shard ShardLike) error {
		if err := shard.resumeMaintenanceCycles(ctx); err != nil {
			lastErr = err
			i.logger.WithField("shard", name).WithField("op", "resume_maintenance").Error(err)
		}
		time.Sleep(time.Millisecond * 10)
		return nil
	})
	return lastErr
}

// marshalShardingState serializes the class's sharding state to JSON.
func (i *Index) marshalShardingState() ([]byte, error) {
	var jsonBytes []byte
	err := i.schemaReader.Read(i.Config.ClassName.String(), func(_ *models.Class, state *sharding.State) error {
		if state == nil {
			return fmt.Errorf("unable to retrieve sharding state for class %s", i.Config.ClassName.String())
		}
		bytes, jsonErr := state.JSON()
		if jsonErr != nil {
			return jsonErr
		}

		jsonBytes = bytes
		return nil
	})
	if err != nil {
		return nil, errors.Wrap(err, "marshal sharding state")
	}

	return jsonBytes, nil
}

// marshalSchema serializes the class's schema definition.
func (i *Index) marshalSchema() ([]byte, error) {
	b, err := i.getSchema.ReadOnlyClass(i.Config.ClassName.String()).MarshalBinary()
	if err != nil {
		return nil, errors.Wrap(err, "marshal schema")
	}

	return b, err
}

// marshalAliases serializes the class's aliases to JSON.
func (i *Index) marshalAliases() ([]byte, error) {
	aliases := i.getSchema.GetAliasesForClass(i.Config.ClassName.String())
	b, err := json.Marshal(aliases)
	if err != nil {
		return nil, errors.Wrap(err, "marshal aliases failed to get aliases for collection")
	}
	return b, err
}

const (
	mutexRetryDuration  = time.Millisecond * 500
	mutexNotifyDuration = 20 * time.Second
)

// shardTransfer is an adapter built around rwmutex that facilitates cooperative blocking between write and read locks
type shardTransfer struct {
	sync.RWMutex
	log            logrus.FieldLogger
	retryDuration  time.Duration
	notifyDuration time.Duration
}

// LockWithContext attempts to acquire a write lock while respecting the provided context.
// It reports whether the lock acquisition was successful or if the context has been cancelled.
func (m *shardTransfer) LockWithContext(ctx context.Context) error {
	return m.lock(ctx, m.TryLock)
}

// lock retries tryLock every retryDuration until it succeeds or ctx is
// cancelled, logging a note every notifyDuration while waiting.
func (m *shardTransfer) lock(ctx context.Context, tryLock func() bool) error {
	if tryLock() {
		return nil
	}
	curTime := time.Now()
	t := time.NewTicker(m.retryDuration)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-t.C:
			if tryLock() {
				return nil
			}
			if time.Since(curTime) > m.notifyDuration {
				curTime = time.Now()
				m.log.Info("backup process waiting for ongoing writes to finish")
			}
		}
	}
}

// RLockGuard runs reader under the read lock and returns its error.
func (s *shardTransfer) RLockGuard(reader func() error) error {
	s.RLock()
	defer s.RUnlock()
	return reader()
}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/backup_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/backup_integration_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..1307da69632d8fb8e30cbe682126ec85a249792b
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/backup_integration_test.go
@@ -0,0 +1,550 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
(_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + "regexp" + "testing" + "time" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storobj" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +func TestBackup_DBLevel(t *testing.T) { + t.Run("successful backup creation", func(t *testing.T) { + ctx := testCtx() + dirName := t.TempDir() + className := "DBLevelBackupClass" + backupID := "backup1" + now := time.Now() + + db := setupTestDB(t, dirName, makeTestClass(className)) + defer func() { + require.Nil(t, db.Shutdown(context.Background())) + }() + + t.Run("insert data", func(t *testing.T) { + require.Nil(t, db.PutObject(ctx, &models.Object{ + Class: className, + CreationTimeUnix: now.UnixNano(), + ID: "ff9fcae5-57b8-431c-b8e2-986fd78f5809", + LastUpdateTimeUnix: now.UnixNano(), + Vector: []float32{1, 2, 3}, + VectorWeights: nil, + }, []float32{1, 2, 3}, nil, nil, nil, 0)) + }) + + expectedNodeName := "node1" + shards, err := db.schemaReader.Shards(className) + require.Nil(t, err) + expectedShardName := shards[0] + testShd := db.GetIndex(schema.ClassName(className)).shards.Load(expectedShardName) + expectedCounterPath, _ := filepath.Rel(testShd.Index().Config.RootPath, 
testShd.Counter().FileName()) + expectedCounter, err := os.ReadFile(testShd.Counter().FileName()) + require.Nil(t, err) + expectedPropLengthPath, _ := filepath.Rel(testShd.Index().Config.RootPath, testShd.GetPropertyLengthTracker().FileName()) + expectedShardVersionPath, _ := filepath.Rel(testShd.Index().Config.RootPath, testShd.Versioner().path) + expectedShardVersion, err := os.ReadFile(testShd.Versioner().path) + require.Nil(t, err) + expectedPropLength, err := os.ReadFile(testShd.GetPropertyLengthTracker().FileName()) + require.Nil(t, err) + var expectedShardState []byte + err = testShd.Index().schemaReader.Read(className, func(class *models.Class, state *sharding.State) error { + var jsonErr error + expectedShardState, jsonErr = state.JSON() + return jsonErr + }) + require.Nil(t, err) + expectedSchema, err := testShd.Index().getSchema.GetSchemaSkipAuth(). + Objects.Classes[0].MarshalBinary() + require.Nil(t, err) + + classes := db.ListBackupable() + + t.Run("doesn't fail on casing permutation of existing class", func(t *testing.T) { + err := db.Backupable(ctx, []string{"DBLeVELBackupClass"}) + require.NotNil(t, err) + require.Equal(t, "class DBLeVELBackupClass doesn't exist", err.Error()) + }) + + t.Run("create backup", func(t *testing.T) { + err := db.Backupable(ctx, classes) + assert.Nil(t, err) + + ch := db.BackupDescriptors(ctx, backupID, classes) + + for d := range ch { + assert.Equal(t, className, d.Name) + assert.Len(t, d.Shards, len(classes)) + for _, shd := range d.Shards { + assert.Equal(t, expectedShardName, shd.Name) + assert.Equal(t, expectedNodeName, shd.Node) + assert.NotEmpty(t, shd.Files) + for _, f := range shd.Files { + assert.NotEmpty(t, f) + } + assert.Equal(t, expectedCounterPath, shd.DocIDCounterPath) + assert.Equal(t, expectedCounter, shd.DocIDCounter) + assert.Equal(t, expectedPropLengthPath, shd.PropLengthTrackerPath) + assert.Equal(t, expectedPropLength, shd.PropLengthTracker) + assert.Equal(t, expectedShardVersionPath, 
shd.ShardVersionPath) + assert.Equal(t, expectedShardVersion, shd.Version) + } + assert.Equal(t, expectedShardState, d.ShardingState) + assert.Equal(t, expectedSchema, d.Schema) + } + }) + + t.Run("release backup", func(t *testing.T) { + for _, class := range classes { + err := db.ReleaseBackup(ctx, backupID, class) + assert.Nil(t, err) + } + }) + + t.Run("node names from shards", func(t *testing.T) { + res, err := db.Shards(ctx, className) + assert.NoError(t, err) + assert.Len(t, res, 1) + assert.Equal(t, "node1", res[0]) + }) + + t.Run("get all classes", func(t *testing.T) { + res := db.ListClasses(ctx) + assert.Len(t, res, 1) + assert.Equal(t, className, res[0]) + }) + }) + + t.Run("failed backup creation from expired context", func(t *testing.T) { + ctx := testCtx() + dirName := t.TempDir() + className := "DBLevelBackupClass" + backupID := "backup1" + now := time.Now() + + db := setupTestDB(t, dirName, makeTestClass(className)) + defer func() { + require.Nil(t, db.Shutdown(context.Background())) + }() + + t.Run("insert data", func(t *testing.T) { + require.Nil(t, db.PutObject(ctx, &models.Object{ + Class: className, + CreationTimeUnix: now.UnixNano(), + ID: "ff9fcae5-57b8-431c-b8e2-986fd78f5809", + LastUpdateTimeUnix: now.UnixNano(), + Vector: []float32{1, 2, 3}, + VectorWeights: nil, + }, []float32{1, 2, 3}, nil, nil, nil, 9)) + }) + + t.Run("fail with expired context", func(t *testing.T) { + classes := db.ListBackupable() + + err := db.Backupable(ctx, classes) + assert.Nil(t, err) + + timeoutCtx, cancel := context.WithTimeout(context.Background(), 0) + defer cancel() + + ch := db.BackupDescriptors(timeoutCtx, backupID, classes) + for d := range ch { + require.NotNil(t, d.Error) + assert.Contains(t, d.Error.Error(), "context deadline exceeded") + } + }) + }) +} + +func TestBackup_BucketLevel(t *testing.T) { + ctx := testCtx() + className := "BucketLevelBackup" + shard, _ := testShard(t, ctx, className) + + t.Run("insert data", func(t *testing.T) { + err := 
shard.PutObject(ctx, + &storobj.Object{ + MarshallerVersion: 1, + Object: models.Object{ + ID: "8c29da7a-600a-43dc-85fb-83ab2b08c294", + Class: className, + Properties: map[string]interface{}{ + "stringField": "somevalue", + }, + }, + }) + require.Nil(t, err) + }) + + t.Run("perform backup sequence", func(t *testing.T) { + objBucket := shard.Store().Bucket("objects") + require.NotNil(t, objBucket) + + err := shard.Store().PauseCompaction(ctx) + require.Nil(t, err) + + err = objBucket.FlushMemtable() + require.Nil(t, err) + + files, err := objBucket.ListFiles(ctx, shard.Index().Config.RootPath) + require.Nil(t, err) + + t.Run("check ListFiles, results", func(t *testing.T) { + assert.Len(t, files, 5) + + // build regex to get very close approximation to the expected + // contents of the ListFiles result. the only thing we can't + // know for sure is the actual name of the segment group, hence + // the `.*` + re := path.Clean(fmt.Sprintf("%s\\/.*\\.(wal|db|bloom|cna)", shard.Index().Config.RootPath)) + + // we expect to see only four files inside the bucket at this point: + // 1. a *.db file - the segment itself + // 2. a *.bloom file - the segments' bloom filter (only since v1.17) + // 3. a *.secondary.0.bloom file - the bloom filter for the secondary index at pos 0 (only since v1.17) + // 4. a *.secondary.1.bloom file - the bloom filter for the secondary index at pos 1 (only since v1.25) + // 5. a *.cna file - th segment's count net additions (only since v1.17) + // + // These files are created when the memtable is flushed, and the new + // segment is initialized. Both happens as a result of calling + // FlushMemtable(). 
+ for i := range files { + isMatch, err := regexp.MatchString(re, files[i]) + assert.Nil(t, err) + assert.True(t, isMatch, files[i]) + } + + // check that we have one of each: *.db + exts := make([]string, 5) + for i, file := range files { + exts[i] = filepath.Ext(file) + } + assert.Contains(t, exts, ".db") // the main segment + assert.Contains(t, exts, ".cna") // the segment's count net additions + assert.Contains(t, exts, ".bloom") // matches both bloom filters (primary+secondary ones) + }) + + err = shard.Store().ResumeCompaction(ctx) + require.Nil(t, err) + }) + + t.Run("cleanup", func(t *testing.T) { + require.Nil(t, shard.Shutdown(ctx)) + require.Nil(t, os.RemoveAll(shard.Index().Config.RootPath)) + }) +} + +func setupTestDB(t *testing.T, rootDir string, classes ...*models.Class) *DB { + logger, _ := test.NewNullLogger() + + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: classes}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + 
mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + db, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: rootDir, + QueryMaximumResults: 10, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + db.SetSchemaGetter(schemaGetter) + require.Nil(t, db.WaitForStartup(testCtx())) + migrator := NewMigrator(db, logger, "node1") + + for _, class := range classes { + require.Nil(t, + migrator.AddClass(context.Background(), class)) + } + + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: classes, + }, + } + + return db +} + +func TestDB_Shards(t *testing.T) { + ctx := testCtx() + logger, _ := test.NewNullLogger() + + t.Run("single shard with single node", func(t *testing.T) { + className := "SingleShardClass" + + shardState := &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + BelongsToNodes: []string{"node1"}, + }, + }, + } + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(className, mock.Anything).RunAndReturn( + func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }, + ) + + db := &DB{ + logger: logger, + schemaReader: mockSchemaReader, + } + + nodes, err := db.Shards(ctx, className) + assert.NoError(t, err) + assert.Len(t, nodes, 1) + assert.Equal(t, "node1", nodes[0]) + }) + + t.Run("single shard with multiple nodes", func(t *testing.T) { + className := "SingleShardMultiNodeClass" + + shardState := &sharding.State{ + Physical: 
map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + BelongsToNodes: []string{"node1", "node2", "node3"}, + }, + }, + } + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(className, mock.Anything).RunAndReturn( + func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }, + ) + + db := &DB{ + logger: logger, + schemaReader: mockSchemaReader, + } + + nodes, err := db.Shards(ctx, className) + assert.NoError(t, err) + assert.Len(t, nodes, 3) + assert.Contains(t, nodes, "node1") + assert.Contains(t, nodes, "node2") + assert.Contains(t, nodes, "node3") + }) + + t.Run("multiple shards with overlapping nodes", func(t *testing.T) { + className := "MultiShardClass" + + shardState := &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + BelongsToNodes: []string{"node1", "node2"}, + }, + "shard2": { + Name: "shard2", + BelongsToNodes: []string{"node2", "node3"}, + }, + "shard3": { + Name: "shard3", + BelongsToNodes: []string{"node1", "node3"}, + }, + }, + } + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(className, mock.Anything).RunAndReturn( + func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }, + ) + + db := &DB{ + logger: logger, + schemaReader: mockSchemaReader, + } + + nodes, err := db.Shards(ctx, className) + assert.NoError(t, err) + assert.Len(t, nodes, 3) + assert.Contains(t, nodes, "node1") + assert.Contains(t, nodes, "node2") + assert.Contains(t, nodes, "node3") + }) + + t.Run("multiple shards with distinct nodes", func(t *testing.T) { + className := "MultiShardDistinctClass" + + shardState := &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + BelongsToNodes: 
[]string{"node1"}, + }, + "shard2": { + Name: "shard2", + BelongsToNodes: []string{"node2"}, + }, + "shard3": { + Name: "shard3", + BelongsToNodes: []string{"node3"}, + }, + }, + } + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(className, mock.Anything).RunAndReturn( + func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }, + ) + + db := &DB{ + logger: logger, + schemaReader: mockSchemaReader, + } + + nodes, err := db.Shards(ctx, className) + assert.NoError(t, err) + assert.Len(t, nodes, 3) + assert.Contains(t, nodes, "node1") + assert.Contains(t, nodes, "node2") + assert.Contains(t, nodes, "node3") + }) + + t.Run("no shards for class", func(t *testing.T) { + className := "EmptyClass" + + // Empty physical shards + shardState := &sharding.State{ + Physical: map[string]sharding.Physical{}, + } + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(className, mock.Anything).RunAndReturn( + func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }, + ) + + db := &DB{ + logger: logger, + schemaReader: mockSchemaReader, + } + + nodes, err := db.Shards(ctx, className) + assert.NoError(t, err) + assert.Len(t, nodes, 0) + assert.Equal(t, []string{}, nodes) + }) + + t.Run("nil sharding state", func(t *testing.T) { + className := "NilStateClass" + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(className, mock.Anything).RunAndReturn( + func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, nil) + }, + ) + + db := &DB{ + logger: logger, + schemaReader: mockSchemaReader, + } + + nodes, err := db.Shards(ctx, className) + assert.Error(t, err) + 
assert.Nil(t, nodes) + assert.Contains(t, err.Error(), "unable to retrieve sharding state") + }) + + t.Run("schema reader error", func(t *testing.T) { + className := "ErrorClass" + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(className, mock.Anything).Return( + fmt.Errorf("schema read failed"), + ) + + db := &DB{ + logger: logger, + schemaReader: mockSchemaReader, + } + + nodes, err := db.Shards(ctx, className) + assert.Error(t, err) + assert.Nil(t, nodes) + assert.Contains(t, err.Error(), "failed to read sharding state") + assert.Contains(t, err.Error(), "schema read failed") + }) +} + +func makeTestClass(className string) *models.Class { + return &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Class: className, + Properties: []*models.Property{ + { + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/backup_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/backup_test.go new file mode 100644 index 0000000000000000000000000000000000000000..37c366df6dc7e09b253af0810065ca33982d09f6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/backup_test.go @@ -0,0 +1,57 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "errors" + "testing" + "time" + + tlog "github.com/sirupsen/logrus/hooks/test" +) + +func TestBackupMutex(t *testing.T) { + l, _ := tlog.NewNullLogger() + t.Run("success first time", func(t *testing.T) { + m := shardTransfer{log: l, retryDuration: time.Millisecond, notifyDuration: 5 * time.Millisecond} + ctx, cancel := context.WithTimeout(context.Background(), 12*time.Millisecond) + defer cancel() + if err := m.LockWithContext(ctx); err != nil { + t.Errorf("error want:nil got:%v ", err) + } + }) + t.Run("success after retry", func(t *testing.T) { + m := shardTransfer{log: l, retryDuration: 2 * time.Millisecond, notifyDuration: 5 * time.Millisecond} + m.RLock() + go func() { + defer m.RUnlock() + time.Sleep(time.Millisecond * 15) + }() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := m.LockWithContext(ctx); err != nil { + t.Errorf("error want:nil got:%v ", err) + } + }) + t.Run("cancelled context", func(t *testing.T) { + m := shardTransfer{log: l, retryDuration: time.Millisecond, notifyDuration: 5 * time.Millisecond} + m.RLock() + defer m.RUnlock() + ctx, cancel := context.WithTimeout(context.Background(), 12*time.Millisecond) + defer cancel() + err := m.LockWithContext(ctx) + if !errors.Is(err, context.DeadlineExceeded) { + t.Errorf("error want:%v got:%v", err, context.DeadlineExceeded) + } + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/batch.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/batch.go new file mode 100644 index 0000000000000000000000000000000000000000..f288e441780b9a705d81395f52d416f1f343c318 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/batch.go @@ -0,0 +1,260 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// 
Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "time" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/objects" +) + +type batchQueue struct { + objects []*storobj.Object + originalIndex []int +} + +func (db *DB) BatchPutObjects(ctx context.Context, objs objects.BatchObjects, + repl *additional.ReplicationProperties, schemaVersion uint64, +) (objects.BatchObjects, error) { + objectByClass := make(map[string]batchQueue) + indexByClass := make(map[string]*Index) + + if err := db.memMonitor.CheckAlloc(estimateBatchMemory(objs)); err != nil { + db.logger.WithError(err).Errorf("memory pressure: cannot process batch") + return nil, fmt.Errorf("cannot process batch: %w", err) + } + + for _, item := range objs { + if item.Err != nil { + // item has a validation error or another reason to ignore + continue + } + queue := objectByClass[item.Object.Class] + vectors, multiVectors, err := dto.GetVectors(item.Object.Vectors) + if err != nil { + return nil, fmt.Errorf("cannot process batch: cannot get vectors: %w", err) + } + queue.objects = append(queue.objects, storobj.FromObject(item.Object, item.Object.Vector, vectors, multiVectors)) + queue.originalIndex = append(queue.originalIndex, item.OriginalIndex) + objectByClass[item.Object.Class] = queue + } + + // wrapped by func to acquire and safely release indexLock only for duration of loop + func() { + db.indexLock.RLock() + defer db.indexLock.RUnlock() + + for class, queue := range objectByClass { + index, ok := db.indices[indexID(schema.ClassName(class))] + if !ok { + msg := fmt.Sprintf("could not find 
index for class %v. It might have been deleted in the meantime", class) + db.logger.Warn(msg) + for _, origIdx := range queue.originalIndex { + if origIdx >= len(objs) { + db.logger.Errorf( + "batch add queue index out of bounds. len(objs) == %d, queue.originalIndex == %d", + len(objs), origIdx) + break + } + objs[origIdx].Err = errors.New(msg) + } + continue + } + index.dropIndex.RLock() + indexByClass[class] = index + } + }() + + // safely release remaining locks (in case of panic) + defer func() { + for _, index := range indexByClass { + if index != nil { + index.dropIndex.RUnlock() + } + } + }() + + for class, index := range indexByClass { + queue := objectByClass[class] + errs := index.putObjectBatch(ctx, queue.objects, repl, schemaVersion) + index.metrics.BatchCount(len(queue.objects)) + index.metrics.BatchCountBytes(estimateStorBatchMemory(queue.objects)) + + // remove index from map to skip releasing its lock in defer + indexByClass[class] = nil + index.dropIndex.RUnlock() + for i, err := range errs { + if err != nil { + objs[queue.originalIndex[i]].Err = err + } + } + } + + return objs, nil +} + +func (db *DB) AddBatchReferences(ctx context.Context, references objects.BatchReferences, + repl *additional.ReplicationProperties, schemaVersion uint64, +) (objects.BatchReferences, error) { + refByClass := make(map[schema.ClassName]objects.BatchReferences) + indexByClass := make(map[schema.ClassName]*Index) + + for _, item := range references { + if item.Err != nil { + // item has a validation error or another reason to ignore + continue + } + refByClass[item.From.Class] = append(refByClass[item.From.Class], item) + } + + // wrapped by func to acquire and safely release indexLock only for duration of loop + func() { + db.indexLock.RLock() + defer db.indexLock.RUnlock() + + for class, queue := range refByClass { + index, ok := db.indices[indexID(class)] + if !ok { + for _, item := range queue { + references[item.OriginalIndex].Err = fmt.Errorf("could not find 
index for class %v. It might have been deleted in the meantime", class) + } + continue + } + index.dropIndex.RLock() + indexByClass[class] = index + } + }() + + // safely release remaining locks (in case of panic) + defer func() { + for _, index := range indexByClass { + if index != nil { + index.dropIndex.RUnlock() + } + } + }() + + for class, index := range indexByClass { + queue := refByClass[class] + errs := index.AddReferencesBatch(ctx, queue, repl, schemaVersion) + // remove index from map to skip releasing its lock in defer + indexByClass[class] = nil + index.dropIndex.RUnlock() + for i, err := range errs { + if err != nil { + references[queue[i].OriginalIndex].Err = err + } + } + } + + return references, nil +} + +func (db *DB) BatchDeleteObjects(ctx context.Context, params objects.BatchDeleteParams, + deletionTime time.Time, repl *additional.ReplicationProperties, tenant string, schemaVersion uint64, +) (objects.BatchDeleteResult, error) { + start := time.Now() + // get index for a given class + className := params.ClassName + idx := db.GetIndex(className) + if idx == nil { + return objects.BatchDeleteResult{}, errors.Errorf("cannot find index for class %v", className) + } + + // find all DocIDs in all shards that match the filter + shardDocIDs, err := idx.findUUIDs(ctx, params.Filters, tenant, repl) + if err != nil { + return objects.BatchDeleteResult{}, errors.Wrapf(err, "cannot find objects") + } + // prepare to be deleted list of DocIDs from all shards + toDelete := map[string][]strfmt.UUID{} + limit := db.config.QueryMaximumResults + + matches := int64(0) + for shardName, docIDs := range shardDocIDs { + docIDsLength := int64(len(docIDs)) + if matches <= limit { + if matches+docIDsLength <= limit { + toDelete[shardName] = docIDs + } else { + toDelete[shardName] = docIDs[:limit-matches] + } + } + matches += docIDsLength + } + + db.logger.WithFields(logrus.Fields{ + "action": "batch_delete_objects_post_find_ids", + "params": params, + "tenant": tenant, + 
"matches": matches, + "dry_run": params.DryRun, + "took": time.Since(start), + }).Debugf("batch delete: identified %v objects to delete", matches) + + if err := db.memMonitor.CheckAlloc(memwatch.EstimateObjectDeleteMemory() * matches); err != nil { + db.logger.WithError(err).Errorf("memory pressure: cannot process batch delete object") + return objects.BatchDeleteResult{}, fmt.Errorf("cannot process batch delete object: %w", err) + } + + // delete the DocIDs in given shards + deletedObjects, err := idx.batchDeleteObjects(ctx, toDelete, deletionTime, params.DryRun, repl, schemaVersion, tenant) + if err != nil { + return objects.BatchDeleteResult{}, errors.Wrapf(err, "cannot delete objects") + } + + result := objects.BatchDeleteResult{ + Matches: matches, + Limit: db.config.QueryMaximumResults, + DeletionTime: deletionTime, + DryRun: params.DryRun, + Objects: deletedObjects, + } + + db.logger.WithFields(logrus.Fields{ + "action": "batch_delete_objects_completed", + "params": params, + "tenant": tenant, + "matches": matches, + "took": time.Since(start), + "dry_run": params.DryRun, + }).Debugf("batch delete completed in %s", time.Since(start)) + return result, nil +} + +func estimateBatchMemory(objs objects.BatchObjects) int64 { + var sum int64 + for _, item := range objs { + sum += memwatch.EstimateObjectMemory(item.Object) + } + + return sum +} + +func estimateStorBatchMemory(objs []*storobj.Object) int64 { + var sum int64 + for _, item := range objs { + sum += memwatch.EstimateStorObjectMemory(item) + } + + return sum +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/batch_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/batch_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6f6b8f8afff565761518e08aa17b2c575628ebdd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/batch_integration_test.go @@ -0,0 +1,1613 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ 
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "math/rand" + "sort" + "testing" + "time" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/objects" +) + +func TestBatchPutObjectsWithDimensions(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + 
mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + + defer func() { + require.Nil(t, repo.Shutdown(context.Background())) + }() + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the thing class", testAddBatchObjectClass(repo, migrator, schemaGetter)) + + dimBefore := getDimensionsFromRepo(context.Background(), repo, "ThingForBatching") + require.Equal(t, 0, dimBefore, "Dimensions are empty before import") + + simpleInsertObjects(t, repo, "ThingForBatching", 123) + + dimAfter := getDimensionsFromRepo(context.Background(), repo, "ThingForBatching") + require.Equal(t, 369, dimAfter, "Dimensions are present after import") +} + +func TestBatchPutObjects(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + 
schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + + defer func() { + require.Nil(t, repo.Shutdown(context.Background())) + }() + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the thing class", testAddBatchObjectClass(repo, 
migrator, schemaGetter)) + + t.Run("batch import things", testBatchImportObjects(repo)) + t.Run("batch import things with geo props", testBatchImportGeoObjects(repo)) +} + +func TestBatchPutObjectsWithNamedVectors(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, 
mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + + defer func() { + require.Nil(t, repo.Shutdown(context.Background())) + }() + migrator := NewMigrator(repo, logger, "node1") + + className := "NamedVectors" + + t.Run("create class", func(t *testing.T) { + class := &models.Class{ + Class: className, + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "location", + DataType: []string{string(schema.DataTypeGeoCoordinates)}, + }, + }, + VectorConfig: map[string]models.VectorConfig{ + "bringYourOwnVector": { + Vectorizer: map[string]interface{}{"none": map[string]interface{}{}}, + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + VectorIndexType: "hnsw", + }, + "colbert": { + Vectorizer: map[string]interface{}{"none": map[string]interface{}{}}, + VectorIndexConfig: enthnsw.NewDefaultMultiVectorUserConfig(), + VectorIndexType: "hnsw", + }, + }, + } + + require.Nil(t, migrator.AddClass(context.Background(), class)) + + schemaGetter.schema.Objects = &models.Schema{ + Classes: []*models.Class{class}, + } + }) + + t.Run("batch import", func(t *testing.T) { + batch := objects.BatchObjects{ + objects.BatchObject{ + OriginalIndex: 0, + Err: nil, + Object: &models.Object{ + Class: className, + Properties: map[string]interface{}{ + "stringProp": "first element", + }, + ID: "8d5a3aa2-3c8d-4589-9ae1-3f638f506970", + Vectors: models.Vectors{ + "bringYourOwnVector": []float32{1, 2, 3}, + "colbert": [][]float32{{0.5, 0.52, 0.53}, {0.511, 0.522, 0.533}, {0.5111, 0.5222, 0.5333}}, + }, + }, + UUID: "8d5a3aa2-3c8d-4589-9ae1-3f638f506970", + }, + objects.BatchObject{ + OriginalIndex: 1, + Err: nil, + Object: &models.Object{ + Class: className, + Properties: map[string]interface{}{ + "stringProp": "second element", + }, + ID: 
"86a380e9-cb60-4b2a-bc48-51f52acd72d6", + Vectors: models.Vectors{ + "bringYourOwnVector": []float32{1, 2, 3}, + "colbert": [][]float32{{0.001, 0.002, 0.003}, {0.0011, 0.0022, 0.0033}, {0.00111, 0.00222, 0.00333}}, + }, + }, + UUID: "86a380e9-cb60-4b2a-bc48-51f52acd72d6", + }, + objects.BatchObject{ + OriginalIndex: 2, + Err: nil, + Object: &models.Object{ + Class: className, + Properties: map[string]interface{}{ + "stringProp": "third element", + }, + ID: "90ade18e-2b99-4903-aa34-1d5d648c932d", + Vectors: models.Vectors{ + "bringYourOwnVector": []float32{1, 2, 3}, + "colbert": [][]float32{{0.00000001, 0.00000002, 0.00000003}, {0.000000011, 0.000000022, 0.000000033}, {0.111, 0.222, 0.333}}, + }, + }, + UUID: "90ade18e-2b99-4903-aa34-1d5d648c932d", + }, + } + + t.Run("batch import", func(t *testing.T) { + batchRes, err := repo.BatchPutObjects(context.Background(), batch, nil, 0) + require.Nil(t, err) + + assert.Nil(t, batchRes[0].Err) + assert.Nil(t, batchRes[1].Err) + assert.Nil(t, batchRes[2].Err) + }) + + params := dto.GetParams{ + ClassName: className, + Pagination: &filters.Pagination{Limit: 10}, + Filters: nil, + } + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + + t.Run("contains first element", func(t *testing.T) { + item, ok := findID(res, batch[0].Object.ID) + require.Equal(t, true, ok, "results should contain our desired id") + assert.Equal(t, "first element", item.Schema.(map[string]interface{})["stringProp"]) + + t.Run("contains named vector", func(t *testing.T) { + require.Len(t, item.Vectors, 2) + assert.IsType(t, []float32{}, item.Vectors["bringYourOwnVector"]) + assert.Equal(t, batch[0].Object.Vectors["bringYourOwnVector"], item.Vectors["bringYourOwnVector"]) + }) + t.Run("contains named multi vector", func(t *testing.T) { + require.Len(t, item.Vectors, 2) + assert.IsType(t, [][]float32{}, item.Vectors["colbert"]) + assert.Equal(t, batch[0].Object.Vectors["colbert"], item.Vectors["colbert"]) + }) + }) + + t.Run("can be 
queried through the inverted index", func(t *testing.T) { + filter := buildFilter("stringProp", "third", eq, schema.DataTypeText) + params := dto.GetParams{ + ClassName: className, + Pagination: &filters.Pagination{Limit: 10}, + Filters: filter, + } + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + + require.Len(t, res, 1) + assert.Equal(t, strfmt.UUID("90ade18e-2b99-4903-aa34-1d5d648c932d"), + res[0].ID) + }) + + // Vector search + t.Run("can perform vector search using regular embeddings", func(t *testing.T) { + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: className, + Pagination: &filters.Pagination{ + Offset: 0, + Limit: 10, + }, + }, []string{"bringYourOwnVector"}, []models.Vector{[]float32{1, 2, 3}}) + require.Nil(t, err) + assert.Len(t, res, 3) + }) + + t.Run("can perform vector search using ColBERT embeddings", func(t *testing.T) { + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: className, + Pagination: &filters.Pagination{ + Offset: 0, + Limit: 10, + }, + }, []string{"colbert"}, []models.Vector{[][]float32{{0.5, 0.52, 0.53}, {0.511, 0.522, 0.533}, {0.5111, 0.5222, 0.5333}}}) + require.NoError(t, err) + assert.Len(t, res, 3) + assert.Equal(t, "8d5a3aa2-3c8d-4589-9ae1-3f638f506970", res[0].ID.String()) + }) + }) +} + +func TestBatchPutObjectsNoVectorsWithDimensions(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return 
readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + + defer func() { + require.Nil(t, repo.Shutdown(context.Background())) + }() + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the thing class", testAddBatchObjectClass(repo, migrator, + schemaGetter)) + + dimensions := getDimensionsFromRepo(context.Background(), repo, "ThingForBatching") + require.Equal(t, 0, dimensions, "Dimensions are empty before import") + + t.Run("batch import things", testBatchImportObjectsNoVector(repo)) + + dimAfter := getDimensionsFromRepo(context.Background(), repo, "ThingForBatching") + require.Equal(t, 0, dimAfter, "Dimensions are empty after import (no vectors in import)") +} + +func TestBatchPutObjectsNoVectors(t *testing.T) { + 
dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + + defer func() { + require.Nil(t, repo.Shutdown(context.Background())) + }() + migrator := NewMigrator(repo, logger, "node1") + + 
t.Run("creating the thing class", testAddBatchObjectClass(repo, migrator, schemaGetter)) + + t.Run("batch import things", testBatchImportObjectsNoVector(repo)) +} + +func TestBatchDeleteObjectsWithDimensions(t *testing.T) { + className := "ThingForBatching" + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 1, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, 
mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer func() { + require.Nil(t, repo.Shutdown(context.Background())) + }() + + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the test class", testAddBatchObjectClass(repo, migrator, schemaGetter)) + + dimBefore := getDimensionsFromRepo(context.Background(), repo, className) + require.Equal(t, 0, dimBefore, "Dimensions are empty before import") + + simpleInsertObjects(t, repo, className, 103) + + dimAfter := getDimensionsFromRepo(context.Background(), repo, className) + require.Equal(t, 309, dimAfter, "Dimensions are present before delete") + + delete2Objects(t, repo, className) + + dimFinal := getDimensionsFromRepo(context.Background(), repo, className) + require.Equal(t, 303, dimFinal, "2 objects have been deleted") +} + +func delete2Objects(t *testing.T, repo *DB, className string) { + batchDeleteRes, err := repo.BatchDeleteObjects(context.Background(), objects.BatchDeleteParams{ + ClassName: "ThingForBatching", + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "ThingForBatching", + Property: schema.PropertyName("id"), + }, + Value: &filters.Value{ + Value: "8d5a3aa2-3c8d-4589-9ae1-3f638f506003", + Type: schema.DataTypeText, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "ThingForBatching", + Property: schema.PropertyName("id"), + }, + Value: &filters.Value{ + Value: "8d5a3aa2-3c8d-4589-9ae1-3f638f506004", + Type: schema.DataTypeText, + }, + }, + }, + }, + }, + DryRun: false, + Output: "verbose", + }, time.Now(), nil, "", 0) + require.Nil(t, err) + require.Equal(t, 2, len(batchDeleteRes.Objects), "Objects deleted") +} + +func TestBatchDeleteObjects(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + 
shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer func() { + require.Nil(t, repo.Shutdown(context.Background())) + }() + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the thing 
class", testAddBatchObjectClass(repo, migrator, schemaGetter)) + + t.Run("batch import things", testBatchImportObjects(repo)) + + t.Run("batch delete things", testBatchDeleteObjects(repo)) +} + +func TestBatchDeleteObjects_JourneyWithDimensions(t *testing.T) { + dirName := t.TempDir() + + queryMaximumResults := int64(200) + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: queryMaximumResults, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, 
nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer func() { + require.Nil(t, repo.Shutdown(context.Background())) + }() + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the thing class", testAddBatchObjectClass(repo, migrator, schemaGetter)) + + dimBefore := getDimensionsFromRepo(context.Background(), repo, "ThingForBatching") + require.Equal(t, 0, dimBefore, "Dimensions are empty before import") + + simpleInsertObjects(t, repo, "ThingForBatching", 103) + + dimAfter := getDimensionsFromRepo(context.Background(), repo, "ThingForBatching") + require.Equal(t, 309, dimAfter, "Dimensions are present before delete") + + delete2Objects(t, repo, "ThingForBatching") + + dimFinal := getDimensionsFromRepo(context.Background(), repo, "ThingForBatching") + require.Equal(t, 303, dimFinal, "Dimensions have been deleted") +} + +func TestBatchDeleteObjects_Journey(t *testing.T) { + dirName := t.TempDir() + + queryMaximumResults := int64(20) + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + 
mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: queryMaximumResults, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer func() { + require.Nil(t, repo.Shutdown(context.Background())) + }() + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the thing class", testAddBatchObjectClass(repo, migrator, + schemaGetter)) + t.Run("batch import things", testBatchImportObjects(repo)) + t.Run("batch delete journey things", testBatchDeleteObjectsJourney(repo, queryMaximumResults)) +} + +func testAddBatchObjectClass(repo *DB, migrator *Migrator, + schemaGetter *fakeSchemaGetter, +) func(t *testing.T) { + return func(t *testing.T) { + class := &models.Class{ + Class: "ThingForBatching", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "location", + DataType: []string{string(schema.DataTypeGeoCoordinates)}, + }, + }, + } + + require.Nil(t, migrator.AddClass(context.Background(), class)) + + 
schemaGetter.schema.Objects = &models.Schema{ + Classes: []*models.Class{class}, + } + } +} + +func testBatchImportObjectsNoVector(repo *DB) func(t *testing.T) { + return func(t *testing.T) { + t.Run("with a prior validation error, but nothing to cause errors in the db", func(t *testing.T) { + batch := objects.BatchObjects{ + objects.BatchObject{ + OriginalIndex: 0, + Err: nil, + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "first element", + }, + ID: "8d5a3aa2-3c8d-4589-9ae1-3f638f506970", + }, + UUID: "8d5a3aa2-3c8d-4589-9ae1-3f638f506970", + }, + objects.BatchObject{ + OriginalIndex: 1, + Err: fmt.Errorf("already has a validation error"), + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "second element", + }, + ID: "86a380e9-cb60-4b2a-bc48-51f52acd72d6", + }, + UUID: "86a380e9-cb60-4b2a-bc48-51f52acd72d6", + }, + objects.BatchObject{ + OriginalIndex: 2, + Err: nil, + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "third element", + }, + ID: "90ade18e-2b99-4903-aa34-1d5d648c932d", + }, + UUID: "90ade18e-2b99-4903-aa34-1d5d648c932d", + }, + } + + t.Run("can import", func(t *testing.T) { + batchRes, err := repo.BatchPutObjects(context.Background(), batch, nil, 0) + require.Nil(t, err) + + assert.Nil(t, batchRes[0].Err) + assert.Nil(t, batchRes[2].Err) + }) + + params := dto.GetParams{ + ClassName: "ThingForBatching", + Pagination: &filters.Pagination{Limit: 10}, + Filters: nil, + } + _, err := repo.Search(context.Background(), params) + require.Nil(t, err) + }) + } +} + +func simpleInsertObjects(t *testing.T, repo *DB, class string, count int) { + batch := make(objects.BatchObjects, count) + for i := 0; i < count; i++ { + batch[i] = objects.BatchObject{ + OriginalIndex: i, + Err: nil, + Object: &models.Object{ + Class: class, + Properties: map[string]interface{}{ + "stringProp": 
fmt.Sprintf("element %d", i), + }, + ID: strfmt.UUID(fmt.Sprintf("8d5a3aa2-3c8d-4589-9ae1-3f638f506%03d", i)), + Vector: []float32{1, 2, 3}, + }, + UUID: strfmt.UUID(fmt.Sprintf("8d5a3aa2-3c8d-4589-9ae1-3f638f506%03d", i)), + } + } + + repo.BatchPutObjects(context.Background(), batch, nil, 0) +} + +func testBatchImportObjects(repo *DB) func(t *testing.T) { + return func(t *testing.T) { + t.Run("with a prior validation error, but nothing to cause errors in the db", func(t *testing.T) { + batch := objects.BatchObjects{ + objects.BatchObject{ + OriginalIndex: 0, + Err: nil, + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "first element", + }, + ID: "8d5a3aa2-3c8d-4589-9ae1-3f638f506970", + Vector: []float32{1, 2, 3}, + }, + UUID: "8d5a3aa2-3c8d-4589-9ae1-3f638f506970", + }, + objects.BatchObject{ + OriginalIndex: 1, + Err: fmt.Errorf("already has a validation error"), + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "second element", + }, + ID: "86a380e9-cb60-4b2a-bc48-51f52acd72d6", + Vector: []float32{1, 2, 3}, + }, + UUID: "86a380e9-cb60-4b2a-bc48-51f52acd72d6", + }, + objects.BatchObject{ + OriginalIndex: 2, + Err: nil, + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "third element", + }, + ID: "90ade18e-2b99-4903-aa34-1d5d648c932d", + Vector: []float32{1, 2, 3}, + }, + UUID: "90ade18e-2b99-4903-aa34-1d5d648c932d", + }, + } + + t.Run("can import", func(t *testing.T) { + batchRes, err := repo.BatchPutObjects(context.Background(), batch, nil, 0) + require.Nil(t, err) + + assert.Nil(t, batchRes[0].Err) + assert.Nil(t, batchRes[2].Err) + }) + + params := dto.GetParams{ + ClassName: "ThingForBatching", + Pagination: &filters.Pagination{Limit: 10}, + Filters: nil, + } + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + + t.Run("contains first element", func(t 
*testing.T) { + item, ok := findID(res, batch[0].Object.ID) + require.Equal(t, true, ok, "results should contain our desired id") + assert.Equal(t, "first element", item.Schema.(map[string]interface{})["stringProp"]) + }) + + t.Run("contains third element", func(t *testing.T) { + item, ok := findID(res, batch[2].Object.ID) + require.Equal(t, true, ok, "results should contain our desired id") + assert.Equal(t, "third element", item.Schema.(map[string]interface{})["stringProp"]) + }) + + t.Run("can be queried through the inverted index", func(t *testing.T) { + filter := buildFilter("stringProp", "third", eq, schema.DataTypeText) + params := dto.GetParams{ + ClassName: "ThingForBatching", + Pagination: &filters.Pagination{Limit: 10}, + Filters: filter, + } + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + + require.Len(t, res, 1) + assert.Equal(t, strfmt.UUID("90ade18e-2b99-4903-aa34-1d5d648c932d"), + res[0].ID) + }) + }) + + t.Run("with an import which will fail", func(t *testing.T) { + batch := objects.BatchObjects{ + objects.BatchObject{ + OriginalIndex: 0, + Err: nil, + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "first element", + }, + ID: "79aebd44-7486-4fed-9334-3a74cc09a1c3", + }, + UUID: "79aebd44-7486-4fed-9334-3a74cc09a1c3", + }, + objects.BatchObject{ + OriginalIndex: 1, + Err: fmt.Errorf("already had a prior error"), + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "second element", + }, + ID: "1c2d8ce6-32da-4081-9794-a81e23e673e4", + }, + UUID: "1c2d8ce6-32da-4081-9794-a81e23e673e4", + }, + objects.BatchObject{ + OriginalIndex: 2, + Err: nil, + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "third element", + }, + ID: "", // ID can't be empty in es, this should produce an error + }, + UUID: "", + }, + } + + t.Run("can import", func(t *testing.T) 
{ + batchRes, err := repo.BatchPutObjects(context.Background(), batch, nil, 0) + require.Nil(t, err, "there shouldn't be an overall error, only individual ones") + + t.Run("element errors are marked correctly", func(t *testing.T) { + require.Len(t, batchRes, 3) + assert.NotNil(t, batchRes[1].Err) // from validation + assert.NotNil(t, batchRes[2].Err) // from db + }) + }) + + params := dto.GetParams{ + ClassName: "ThingForBatching", + Pagination: &filters.Pagination{Limit: 10}, + Filters: nil, + } + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + + t.Run("does not contain second element (validation error)", func(t *testing.T) { + _, ok := findID(res, batch[1].Object.ID) + require.Equal(t, false, ok, "results should not contain our desired id") + }) + + t.Run("does not contain third element (es error)", func(t *testing.T) { + _, ok := findID(res, batch[2].Object.ID) + require.Equal(t, false, ok, "results should not contain our desired id") + }) + }) + + t.Run("upserting the same objects over and over again", func(t *testing.T) { + for i := 0; i < 20; i++ { + batch := objects.BatchObjects{ + objects.BatchObject{ + OriginalIndex: 0, + Err: nil, + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "first element", + }, + ID: "8d5a3aa2-3c8d-4589-9ae1-3f638f506970", + Vector: []float32{1, 2, 3}, + }, + UUID: "8d5a3aa2-3c8d-4589-9ae1-3f638f506970", + }, + objects.BatchObject{ + OriginalIndex: 1, + Err: nil, + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "third element", + }, + ID: "90ade18e-2b99-4903-aa34-1d5d648c932d", + Vector: []float32{1, 1, -3}, + }, + UUID: "90ade18e-2b99-4903-aa34-1d5d648c932d", + }, + } + + t.Run("can import", func(t *testing.T) { + batchRes, err := repo.BatchPutObjects(context.Background(), batch, nil, 0) + require.Nil(t, err) + + assert.Nil(t, batchRes[0].Err) + assert.Nil(t, batchRes[1].Err) + }) + + 
t.Run("a vector search returns the correct number of elements", func(t *testing.T) { + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: "ThingForBatching", + Pagination: &filters.Pagination{ + Offset: 0, + Limit: 10, + }, + }, []string{""}, []models.Vector{[]float32{1, 2, 3}}) + require.Nil(t, err) + assert.Len(t, res, 2) + }) + + } + }) + + t.Run("with a duplicate UUID", func(t *testing.T) { + // it should ignore the first one as the second one would overwrite the + // first one anyway + batch := make(objects.BatchObjects, 53) + + batch[0] = objects.BatchObject{ + OriginalIndex: 0, + Err: nil, + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "first element", + }, + ID: "79aebd44-7486-4fed-9334-3a74cc09a1c3", + Vector: []float32{7, 8, 9}, + }, + UUID: "79aebd44-7486-4fed-9334-3a74cc09a1c3", + } + + // add 50 more nonsensical items, so we cross the transaction threshold + + for i := 1; i < 51; i++ { + uid, err := uuid.NewRandom() + require.Nil(t, err) + id := strfmt.UUID(uid.String()) + batch[i] = objects.BatchObject{ + OriginalIndex: i, + Err: nil, + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "ignore me", + }, + ID: id, + Vector: []float32{0.05, 0.1, 0.2}, + }, + UUID: id, + } + } + + batch[51] = objects.BatchObject{ + OriginalIndex: 51, + Err: fmt.Errorf("already had a prior error"), + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "first element", + }, + ID: "1c2d8ce6-32da-4081-9794-a81e23e673e4", + Vector: []float32{3, 2, 1}, + }, + UUID: "1c2d8ce6-32da-4081-9794-a81e23e673e4", + } + batch[52] = objects.BatchObject{ + OriginalIndex: 52, + Err: nil, + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "first element, imported a second time", + }, + ID: "79aebd44-7486-4fed-9334-3a74cc09a1c3", // 
note the duplicate id with item 1 + Vector: []float32{1, 2, 3}, + }, + UUID: "79aebd44-7486-4fed-9334-3a74cc09a1c3", // note the duplicate id with item 1 + } + + t.Run("can import", func(t *testing.T) { + batchRes, err := repo.BatchPutObjects(context.Background(), batch, nil, 0) + require.Nil(t, err, "there shouldn't be an overall error, only individual ones") + + t.Run("element errors are marked correctly", func(t *testing.T) { + require.Len(t, batchRes, 53) + assert.NotNil(t, batchRes[51].Err) // from validation + }) + }) + + params := dto.GetParams{ + ClassName: "ThingForBatching", + Pagination: &filters.Pagination{Limit: 10}, + Filters: nil, + } + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + + t.Run("does not contain second element (validation error)", func(t *testing.T) { + _, ok := findID(res, batch[51].Object.ID) + require.Equal(t, false, ok, "results should not contain our desired id") + }) + + t.Run("does not contain third element (es error)", func(t *testing.T) { + _, ok := findID(res, batch[52].Object.ID) + require.Equal(t, false, ok, "results should not contain our desired id") + }) + }) + + t.Run("when a context expires", func(t *testing.T) { + // it should ignore the first one as the second one would overwrite the + // first one anyway + size := 50 + batch := make(objects.BatchObjects, size) + // add 50 more nonsensical items, so we cross the transaction threshold + + for i := 0; i < size; i++ { + uid, err := uuid.NewRandom() + require.Nil(t, err) + id := strfmt.UUID(uid.String()) + batch[i] = objects.BatchObject{ + Err: nil, + Object: &models.Object{ + Class: "ThingForBatching", + Properties: map[string]interface{}{ + "stringProp": "ignore me", + }, + ID: id, + Vector: []float32{0.05, 0.1, 0.2}, + }, + UUID: id, + } + } + + t.Run("can import", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) + defer cancel() + + batchRes, err := repo.BatchPutObjects(ctx, batch, nil, 
0) + require.Nil(t, err, "there shouldn't be an overall error, only individual ones") + + t.Run("some elements have error'd due to context", func(t *testing.T) { + require.Len(t, batchRes, 50) + + errCount := 0 + for _, elem := range batchRes { + if elem.Err != nil { + errCount++ + assert.Contains(t, elem.Err.Error(), "context deadline exceeded") + } + } + + assert.True(t, errCount > 0) + }) + }) + }) + } +} + +// geo props are the first props with property specific indices, so making sure +// that they work with batches at scale adds value beyond the regular batch +// import tests +func testBatchImportGeoObjects(repo *DB) func(t *testing.T) { + r := getRandomSeed() + return func(t *testing.T) { + size := 500 + batchSize := 50 + + objs := make([]*models.Object, size) + + t.Run("generate random vectors", func(t *testing.T) { + for i := 0; i < size; i++ { + id, _ := uuid.NewRandom() + objs[i] = &models.Object{ + Class: "ThingForBatching", + ID: strfmt.UUID(id.String()), + Properties: map[string]interface{}{ + "location": randGeoCoordinates(r), + }, + Vector: []float32{0.123, 0.234, rand.Float32()}, // does not matter for this test + } + } + }) + + t.Run("import vectors in batches", func(t *testing.T) { + for i := 0; i < size; i += batchSize { + batch := make(objects.BatchObjects, batchSize) + for j := 0; j < batchSize; j++ { + batch[j] = objects.BatchObject{ + OriginalIndex: j, + Object: objs[i+j], + } + } + + res, err := repo.BatchPutObjects(context.Background(), batch, nil, 0) + require.Nil(t, err) + assertAllItemsErrorFree(t, res) + } + }) + + const km = 1000 + distances := []float32{ + 0.1, + 1, + 10, + 100, + 1000, + 2000, + 5000, + 7500, + 10000, + 12500, + 15000, + 20000, + 35000, + 100000, // larger than the circumference of the earth, should contain all + } + + t.Run("query for expected results", func(t *testing.T) { + queryGeo := randGeoCoordinates(r) + + for _, maxDist := range distances { + t.Run(fmt.Sprintf("with maxDist=%f", maxDist), func(t *testing.T) 
{ + var relevant int + var retrieved int + + controlList := bruteForceMaxDist(objs, []float32{ + *queryGeo.Latitude, + *queryGeo.Longitude, + }, maxDist*km) + + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "ThingForBatching", + Pagination: &filters.Pagination{Limit: 500}, + Filters: buildFilter("location", filters.GeoRange{ + GeoCoordinates: queryGeo, + Distance: maxDist * km, + }, filters.OperatorWithinGeoRange, schema.DataTypeGeoCoordinates), + }) + require.Nil(t, err) + + retrieved += len(res) + relevant += matchesInUUIDLists(controlList, resToUUIDs(res)) + + if relevant == 0 { + // skip, as we risk dividing by zero, if both relevant and retrieved + // are zero, however, we want to fail with a divide-by-zero if only + // retrieved is 0 and relevant was more than 0 + return + } + recall := float32(relevant) / float32(retrieved) + assert.True(t, recall >= 0.99) + }) + } + }) + + t.Run("renew vector positions to test batch geo updates", func(t *testing.T) { + for i, obj := range objs { + obj.Properties = map[string]interface{}{ + "location": randGeoCoordinates(r), + } + objs[i] = obj + } + }) + + t.Run("import in batches again (as update - same IDs!)", func(t *testing.T) { + for i := 0; i < size; i += batchSize { + batch := make(objects.BatchObjects, batchSize) + for j := 0; j < batchSize; j++ { + batch[j] = objects.BatchObject{ + OriginalIndex: j, + Object: objs[i+j], + } + } + + res, err := repo.BatchPutObjects(context.Background(), batch, nil, 0) + require.Nil(t, err) + assertAllItemsErrorFree(t, res) + } + }) + + t.Run("query again to verify updates worked", func(t *testing.T) { + queryGeo := randGeoCoordinates(r) + + for _, maxDist := range distances { + t.Run(fmt.Sprintf("with maxDist=%f", maxDist), func(t *testing.T) { + var relevant int + var retrieved int + + controlList := bruteForceMaxDist(objs, []float32{ + *queryGeo.Latitude, + *queryGeo.Longitude, + }, maxDist*km) + + res, err := repo.Search(context.Background(), 
dto.GetParams{ + ClassName: "ThingForBatching", + Pagination: &filters.Pagination{Limit: 500}, + Filters: buildFilter("location", filters.GeoRange{ + GeoCoordinates: queryGeo, + Distance: maxDist * km, + }, filters.OperatorWithinGeoRange, schema.DataTypeGeoCoordinates), + }) + require.Nil(t, err) + + retrieved += len(res) + relevant += matchesInUUIDLists(controlList, resToUUIDs(res)) + + if relevant == 0 { + // skip, as we risk dividing by zero, if both relevant and retrieved + // are zero, however, we want to fail with a divide-by-zero if only + // retrieved is 0 and relevant was more than 0 + return + } + recall := float32(relevant) / float32(retrieved) + t.Logf("recall is %f\n", recall) + assert.True(t, recall >= 0.99) + }) + } + }) + } +} + +func testBatchDeleteObjects(repo *DB) func(t *testing.T) { + return func(t *testing.T) { + getParams := func(dryRun bool, output string) objects.BatchDeleteParams { + return objects.BatchDeleteParams{ + ClassName: "ThingForBatching", + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorLike, + Value: &filters.Value{ + Value: "*", + Type: schema.DataTypeText, + }, + On: &filters.Path{ + Property: schema.PropertyName("id"), + }, + }, + }, + DryRun: dryRun, + Output: output, + } + } + performClassSearch := func() ([]search.Result, error) { + return repo.Search(context.Background(), dto.GetParams{ + ClassName: "ThingForBatching", + Pagination: &filters.Pagination{Limit: 10000}, + }) + } + t.Run("batch delete with dryRun set to true", func(t *testing.T) { + // get the initial count of the objects + res, err := performClassSearch() + require.Nil(t, err) + beforeDelete := len(res) + require.True(t, beforeDelete > 0) + // dryRun == true, only test how many objects can be deleted + batchDeleteRes, err := repo.BatchDeleteObjects(context.Background(), getParams(true, "verbose"), time.Now(), nil, "", 0) + require.Nil(t, err) + require.Equal(t, int64(beforeDelete), batchDeleteRes.Matches) + 
require.Equal(t, beforeDelete, len(batchDeleteRes.Objects)) + for _, batchRes := range batchDeleteRes.Objects { + require.Nil(t, batchRes.Err) + } + res, err = performClassSearch() + require.Nil(t, err) + assert.Equal(t, beforeDelete, len(res)) + }) + + t.Run("batch delete with dryRun set to true and output to minimal", func(t *testing.T) { + // get the initial count of the objects + res, err := performClassSearch() + require.Nil(t, err) + beforeDelete := len(res) + require.True(t, beforeDelete > 0) + // dryRun == true, only test how many objects can be deleted + batchDeleteRes, err := repo.BatchDeleteObjects(context.Background(), getParams(true, "minimal"), time.Now(), nil, "", 0) + require.Nil(t, err) + require.Equal(t, int64(beforeDelete), batchDeleteRes.Matches) + require.Equal(t, beforeDelete, len(batchDeleteRes.Objects)) + for _, batchRes := range batchDeleteRes.Objects { + require.Nil(t, batchRes.Err) + } + res, err = performClassSearch() + require.Nil(t, err) + assert.Equal(t, beforeDelete, len(res)) + }) + + t.Run("batch delete only 2 given objects", func(t *testing.T) { + // get the initial count of the objects + res, err := performClassSearch() + require.Nil(t, err) + beforeDelete := len(res) + require.True(t, beforeDelete > 0) + // dryRun == true, only test how many objects can be deleted + batchDeleteRes, err := repo.BatchDeleteObjects(context.Background(), objects.BatchDeleteParams{ + ClassName: "ThingForBatching", + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "ThingForBatching", + Property: schema.PropertyName("id"), + }, + Value: &filters.Value{ + Value: "8d5a3aa2-3c8d-4589-9ae1-3f638f506970", + Type: schema.DataTypeText, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "ThingForBatching", + Property: schema.PropertyName("id"), + }, + Value: &filters.Value{ + Value: 
"90ade18e-2b99-4903-aa34-1d5d648c932d", + Type: schema.DataTypeText, + }, + }, + }, + }, + }, + DryRun: false, + Output: "verbose", + }, time.Now(), nil, "", 0) + require.Nil(t, err) + require.Equal(t, int64(2), batchDeleteRes.Matches) + require.Equal(t, 2, len(batchDeleteRes.Objects)) + for _, batchRes := range batchDeleteRes.Objects { + require.Nil(t, batchRes.Err) + } + res, err = performClassSearch() + require.Nil(t, err) + assert.Equal(t, beforeDelete-2, len(res)) + }) + + t.Run("batch delete with dryRun set to false", func(t *testing.T) { + // get the initial count of the objects + res, err := performClassSearch() + require.Nil(t, err) + beforeDelete := len(res) + require.True(t, beforeDelete > 0) + // dryRun == true, only test how many objects can be deleted + batchDeleteRes, err := repo.BatchDeleteObjects(context.Background(), getParams(false, "verbose"), time.Now(), nil, "", 0) + require.Nil(t, err) + require.Equal(t, int64(beforeDelete), batchDeleteRes.Matches) + require.Equal(t, beforeDelete, len(batchDeleteRes.Objects)) + for _, batchRes := range batchDeleteRes.Objects { + require.Nil(t, batchRes.Err) + } + res, err = performClassSearch() + require.Nil(t, err) + assert.Equal(t, 0, len(res)) + }) + } +} + +func testBatchDeleteObjectsJourney(repo *DB, queryMaximumResults int64) func(t *testing.T) { + return func(t *testing.T) { + getParams := func(dryRun bool, output string) objects.BatchDeleteParams { + return objects.BatchDeleteParams{ + ClassName: "ThingForBatching", + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorLike, + Value: &filters.Value{ + Value: "*", + Type: schema.DataTypeText, + }, + On: &filters.Path{ + Property: schema.PropertyName("id"), + }, + }, + }, + DryRun: dryRun, + Output: output, + } + } + performClassSearch := func() ([]search.Result, error) { + return repo.Search(context.Background(), dto.GetParams{ + ClassName: "ThingForBatching", + Pagination: &filters.Pagination{Limit: 20}, + }) + } + 
t.Run("batch delete journey", func(t *testing.T) { + // delete objects to limit + batchDeleteRes, err := repo.BatchDeleteObjects(context.Background(), getParams(true, "verbose"), time.Now(), nil, "", 0) + require.Nil(t, err) + objectsMatches := batchDeleteRes.Matches + + leftToDelete := objectsMatches + deleteIterationCount := 0 + deletedObjectsCount := 0 + for { + // delete objects to limit + batchDeleteRes, err := repo.BatchDeleteObjects(context.Background(), getParams(false, "verbose"), time.Now(), nil, "", 0) + require.Nil(t, err) + matches, deleted := batchDeleteRes.Matches, len(batchDeleteRes.Objects) + require.Equal(t, leftToDelete, matches) + require.True(t, deleted > 0) + deletedObjectsCount += deleted + + batchDeleteRes, err = repo.BatchDeleteObjects(context.Background(), getParams(true, "verbose"), time.Now(), nil, "", 0) + require.Nil(t, err) + leftToDelete = batchDeleteRes.Matches + + res, err := performClassSearch() + require.Nil(t, err) + afterDelete := len(res) + require.True(t, afterDelete >= 0) + if afterDelete == 0 { + // where have deleted all objects + break + } + deleteIterationCount += 1 + if deleteIterationCount > 100 { + // something went wrong + break + } + } + require.False(t, deleteIterationCount > 100, "Batch delete journey tests didn't stop properly") + require.True(t, objectsMatches/int64(queryMaximumResults) <= int64(deleteIterationCount)) + require.Equal(t, objectsMatches, int64(deletedObjectsCount)) + }) + } +} + +func assertAllItemsErrorFree(t *testing.T, res objects.BatchObjects) { + for _, elem := range res { + assert.Nil(t, elem.Err) + } +} + +func bruteForceMaxDist(inputs []*models.Object, query []float32, maxDist float32) []strfmt.UUID { + type distanceAndIndex struct { + distance float32 + index int + } + + distances := make([]distanceAndIndex, len(inputs)) + + distancer := distancer.NewGeoProvider().New(query) + for i, elem := range inputs { + coord := 
elem.Properties.(map[string]interface{})["location"].(*models.GeoCoordinates) + vec := []float32{*coord.Latitude, *coord.Longitude} + + dist, _ := distancer.Distance(vec) + distances[i] = distanceAndIndex{ + index: i, + distance: dist, + } + } + + sort.Slice(distances, func(a, b int) bool { + return distances[a].distance < distances[b].distance + }) + + out := make([]strfmt.UUID, len(distances)) + i := 0 + for _, elem := range distances { + if elem.distance > maxDist { + break + } + out[i] = inputs[distances[i].index].ID + i++ + } + + return out[:i] +} + +func randGeoCoordinates(r *rand.Rand) *models.GeoCoordinates { + maxLat := float32(90.0) + minLat := float32(-90.0) + maxLon := float32(180) + minLon := float32(-180) + + lat := minLat + (maxLat-minLat)*r.Float32() + lon := minLon + (maxLon-minLon)*r.Float32() + return &models.GeoCoordinates{ + Latitude: &lat, + Longitude: &lon, + } +} + +func resToUUIDs(in []search.Result) []strfmt.UUID { + out := make([]strfmt.UUID, len(in)) + for i, obj := range in { + out[i] = obj.ID + } + + return out +} + +func matchesInUUIDLists(control []strfmt.UUID, results []strfmt.UUID) int { + desired := map[strfmt.UUID]struct{}{} + for _, relevant := range control { + desired[relevant] = struct{}{} + } + + var matches int + for _, candidate := range results { + _, ok := desired[candidate] + if ok { + matches++ + } + } + + return matches +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/batch_reference_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/batch_reference_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..771037637173b5aa4b9639f00cc704492fd10095 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/batch_reference_integration_test.go @@ -0,0 +1,425 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/objects" +) + +func Test_AddingReferencesInBatches(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + 
mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + + defer repo.Shutdown(context.Background()) + + migrator := NewMigrator(repo, logger, "node1") + + s := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Class: "AddingBatchReferencesTestTarget", + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + { + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Class: "AddingBatchReferencesTestSource", + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "toTarget", + DataType: []string{"AddingBatchReferencesTestTarget"}, + }, + }, + }, + }, + }, + } + + t.Run("add required classes", func(t *testing.T) { + for _, 
class := range s.Objects.Classes { + t.Run(fmt.Sprintf("add %s", class.Class), func(t *testing.T) { + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + }) + } + }) + schemaGetter.schema = s + + target1 := strfmt.UUID("7b395e5c-cf4d-4297-b8cc-1d849a057de3") + target2 := strfmt.UUID("8f9f54f3-a7db-415e-881a-0e6fb79a7ec7") + target3 := strfmt.UUID("046251cf-cb02-4102-b854-c7c4691cf16f") + target4 := strfmt.UUID("bc7d8875-3a24-4137-8203-e152096dea4f") + sourceID := strfmt.UUID("a3c98a66-be4a-4eaf-8cf3-04648a11d0f7") + + t.Run("add objects", func(t *testing.T) { + err := repo.PutObject(context.Background(), &models.Object{ + ID: sourceID, + Class: "AddingBatchReferencesTestSource", + Properties: map[string]interface{}{ + "name": "source item", + }, + }, []float32{0.5}, nil, nil, nil, 0) + require.Nil(t, err) + + targets := []strfmt.UUID{target1, target2, target3, target4} + + for i, target := range targets { + err = repo.PutObject(context.Background(), &models.Object{ + ID: target, + Class: "AddingBatchReferencesTestTarget", + Properties: map[string]interface{}{ + "name": fmt.Sprintf("target item %d", i), + }, + }, []float32{0.7}, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + + t.Run("verify ref count through filters", func(t *testing.T) { + t.Run("count==0 should return the source", func(t *testing.T) { + filter := buildFilter("toTarget", 0, eq, schema.DataTypeInt) + res, err := repo.Search(context.Background(), dto.GetParams{ + Filters: filter, + ClassName: "AddingBatchReferencesTestSource", + Pagination: &filters.Pagination{ + Limit: 10, + }, + }) + + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, res[0].ID, sourceID) + }) + + t.Run("count>0 should not return anything", func(t *testing.T) { + filter := buildFilter("toTarget", 0, gt, schema.DataTypeInt) + res, err := repo.Search(context.Background(), dto.GetParams{ + Filters: filter, + ClassName: "AddingBatchReferencesTestSource", + Pagination: &filters.Pagination{ + 
Limit: 10, + }, + }) + + require.Nil(t, err) + require.Len(t, res, 0) + }) + }) + + t.Run("add reference between them - first batch", func(t *testing.T) { + source, err := crossref.ParseSource(fmt.Sprintf( + "weaviate://localhost/AddingBatchReferencesTestSource/%s/toTarget", + sourceID)) + require.Nil(t, err) + targets := []strfmt.UUID{target1, target2} + refs := make(objects.BatchReferences, len(targets)) + for i, target := range targets { + to, err := crossref.Parse(fmt.Sprintf("weaviate://localhost/%s", + target)) + require.Nil(t, err) + refs[i] = objects.BatchReference{ + Err: nil, + From: source, + To: to, + OriginalIndex: i, + } + } + _, err = repo.AddBatchReferences(context.Background(), refs, nil, 0) + assert.Nil(t, err) + }) + + t.Run("verify ref count through filters", func(t *testing.T) { + // so far we have imported two refs (!) + t.Run("count==2 should return the source", func(t *testing.T) { + filter := buildFilter("toTarget", 2, eq, schema.DataTypeInt) + res, err := repo.Search(context.Background(), dto.GetParams{ + Filters: filter, + ClassName: "AddingBatchReferencesTestSource", + Pagination: &filters.Pagination{ + Limit: 10, + }, + }) + + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, res[0].ID, sourceID) + }) + + t.Run("count==0 should not return anything", func(t *testing.T) { + filter := buildFilter("toTarget", 0, eq, schema.DataTypeInt) + res, err := repo.Search(context.Background(), dto.GetParams{ + Filters: filter, + ClassName: "AddingBatchReferencesTestSource", + Pagination: &filters.Pagination{ + Limit: 10, + }, + }) + + require.Nil(t, err) + require.Len(t, res, 0) + }) + }) + + t.Run("add reference between them - second batch including errors", func(t *testing.T) { + source, err := crossref.ParseSource(fmt.Sprintf( + "weaviate://localhost/AddingBatchReferencesTestSource/%s/toTarget", + sourceID)) + require.Nil(t, err) + sourceNonExistingClass, err := crossref.ParseSource(fmt.Sprintf( + 
"weaviate://localhost/NonExistingClass/%s/toTarget", + sourceID)) + require.Nil(t, err) + sourceNonExistingProp, err := crossref.ParseSource(fmt.Sprintf( + "weaviate://localhost/AddingBatchReferencesTestSource/%s/nonExistingProp", + sourceID)) + require.Nil(t, err) + + targets := []strfmt.UUID{target3, target4} + refs := make(objects.BatchReferences, 3*len(targets)) + for i, target := range targets { + to, err := crossref.Parse(fmt.Sprintf("weaviate://localhost/%s", target)) + require.Nil(t, err) + + refs[3*i] = objects.BatchReference{ + Err: nil, + From: source, + To: to, + OriginalIndex: 3 * i, + } + refs[3*i+1] = objects.BatchReference{ + Err: nil, + From: sourceNonExistingClass, + To: to, + OriginalIndex: 3*i + 1, + } + refs[3*i+2] = objects.BatchReference{ + Err: nil, + From: sourceNonExistingProp, + To: to, + OriginalIndex: 3*i + 2, + } + } + batchRefs, err := repo.AddBatchReferences(context.Background(), refs, nil, 0) + assert.Nil(t, err) + require.Len(t, batchRefs, 6) + assert.Nil(t, batchRefs[0].Err) + assert.Nil(t, batchRefs[3].Err) + assert.Contains(t, batchRefs[1].Err.Error(), "NonExistingClass") + assert.Contains(t, batchRefs[4].Err.Error(), "NonExistingClass") + assert.Contains(t, batchRefs[2].Err.Error(), "nonExistingProp") + assert.Contains(t, batchRefs[5].Err.Error(), "nonExistingProp") + }) + + t.Run("check all references are now present", func(t *testing.T) { + source, err := repo.ObjectByID(context.Background(), sourceID, nil, additional.Properties{}, "") + require.Nil(t, err) + + refs := source.Object().Properties.(map[string]interface{})["toTarget"] + refsSlice, ok := refs.(models.MultipleRef) + require.True(t, ok, fmt.Sprintf("toTarget must be models.MultipleRef, but got %#v", refs)) + + foundBeacons := []string{} + for _, ref := range refsSlice { + foundBeacons = append(foundBeacons, ref.Beacon.String()) + } + expectedBeacons := []string{ + fmt.Sprintf("weaviate://localhost/%s", target1), + fmt.Sprintf("weaviate://localhost/%s", target2), + 
fmt.Sprintf("weaviate://localhost/%s", target3), + fmt.Sprintf("weaviate://localhost/%s", target4), + } + + assert.ElementsMatch(t, foundBeacons, expectedBeacons) + }) + + t.Run("verify ref count through filters", func(t *testing.T) { + // so far we have imported two refs (!) + t.Run("count==4 should return the source", func(t *testing.T) { + filter := buildFilter("toTarget", 4, eq, schema.DataTypeInt) + res, err := repo.Search(context.Background(), dto.GetParams{ + Filters: filter, + ClassName: "AddingBatchReferencesTestSource", + Pagination: &filters.Pagination{ + Limit: 10, + }, + }) + + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, res[0].ID, sourceID) + }) + + t.Run("count==0 should not return anything", func(t *testing.T) { + filter := buildFilter("toTarget", 0, eq, schema.DataTypeInt) + res, err := repo.Search(context.Background(), dto.GetParams{ + Filters: filter, + ClassName: "AddingBatchReferencesTestSource", + Pagination: &filters.Pagination{ + Limit: 10, + }, + }) + + require.Nil(t, err) + require.Len(t, res, 0) + }) + + t.Run("count==2 should not return anything", func(t *testing.T) { + filter := buildFilter("toTarget", 2, eq, schema.DataTypeInt) + res, err := repo.Search(context.Background(), dto.GetParams{ + Filters: filter, + ClassName: "AddingBatchReferencesTestSource", + Pagination: &filters.Pagination{ + Limit: 10, + }, + }) + + require.Nil(t, err) + require.Len(t, res, 0) + }) + }) + + t.Run("verify search by cross-ref", func(t *testing.T) { + filter := &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: eq, + On: &filters.Path{ + Class: schema.ClassName("AddingBatchReferencesTestSource"), + Property: schema.PropertyName("toTarget"), + Child: &filters.Path{ + Class: schema.ClassName("AddingBatchReferencesTestTarget"), + Property: schema.PropertyName("name"), + }, + }, + Value: &filters.Value{ + Value: "item", + Type: schema.DataTypeText, + }, + }, + } + res, err := repo.Search(context.Background(), dto.GetParams{ + 
Filters: filter, + ClassName: "AddingBatchReferencesTestSource", + Pagination: &filters.Pagination{ + Limit: 10, + }, + }) + + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, res[0].ID, sourceID) + }) + + t.Run("verify objects are still searchable through the vector index", + func(t *testing.T) { + // prior to making the inverted index and its docIDs immutable, a ref + // update would not change the doc ID, therefore the batch reference + // never had to interact with the vector index. Now that they're + // immutable, the updated doc ID needs to be "re-inserted" even if the + // vector is still the same + // UPDATE gh-1334: Since batch refs are now a special case where we + // tolerate a re-use of the doc id, the above assumption is no longer + // correct. However, this test still adds value, since we were now able + // to remove the additional storage updates. By still including this + // test we verify that such an update is indeed no longer necessary + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: "AddingBatchReferencesTestSource", + Pagination: &filters.Pagination{ + Limit: 1, + }, + }, []string{""}, []models.Vector{[]float32{0.49}}) + + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, sourceID, res[0].ID) + }) + + t.Run("remove source and target classes", func(t *testing.T) { + err := repo.DeleteIndex("AddingBatchReferencesTestSource") + assert.Nil(t, err) + err = repo.DeleteIndex("AddingBatchReferencesTestTarget") + assert.Nil(t, err) + + t.Run("verify classes do not exist", func(t *testing.T) { + assert.False(t, repo.IndexExists("AddingBatchReferencesTestSource")) + assert.False(t, repo.IndexExists("AddingBatchReferencesTestTarget")) + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/bm25f_and_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/bm25f_and_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..47a2bbc5a4dbc15425a5892f81e1f0c967f56a1b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/bm25f_and_test.go @@ -0,0 +1,260 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "strings" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +func TestBM25FJourneyBlockAnd(t *testing.T) { + config.DefaultUsingBlockMaxWAND = true + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return 
readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.Background())) + defer repo.Shutdown(context.Background()) + + props := SetupClass(t, repo, schemaGetter, logger, 1.2, 0.75) + + idx := repo.GetIndex("MyClass") + + require.NotNil(t, idx) + + // Check basic search + addit := additional.Properties{} + for _, location := range []string{"memory", "disk"} { + t.Run("bm25f text with AND "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description"}, Query: "This is how we get to BM25F", SearchOperator: common_filters.SearchOperatorAnd} + res, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + // Print results + t.Log("--- Start results for search with AND ---") + for i, r := 
range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + require.Nil(t, err) + + // Check results in correct order + require.Equal(t, uint64(0), res[0].DocID) + }) + + t.Run("bm25f text with AND == minimum should match with len(queryTerms) "+location, func(t *testing.T) { + q := "This is how we get to BM25F" + kwr1 := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description"}, Query: q, SearchOperator: common_filters.SearchOperatorAnd} + res1, scores1, err := idx.objectSearch(context.TODO(), 1000, nil, kwr1, nil, nil, addit, nil, "", 0, props) + + require.Nil(t, err) + + kwr2 := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description"}, Query: q, SearchOperator: common_filters.SearchOperatorOr, MinimumOrTokensMatch: len(strings.Split(q, " "))} + res2, scores2, err := idx.objectSearch(context.TODO(), 1000, nil, kwr2, nil, nil, addit, nil, "", 0, props) + + require.Nil(t, err) + // Print results + t.Log("--- Start results for search with AND ---") + for i, r := range res1 { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores1[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + for i, r := range res2 { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores2[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + require.Equal(t, len(res1), len(res2)) + + for i := 0; i < len(res1); i++ { + require.Equal(t, res1[i].DocID, res2[i].DocID) + require.Equal(t, scores1[i], scores2[i]) + } + }) + + // depending on the minimum should match, we will 
have a different number of results showing up + expectedSizes := []int{3, 3, 2, 2, 2, 2, 1, 1} + for minimumOrTokensMatch, expectedSize := range expectedSizes { + t.Run("bm25f text with minimum should match with 0...len(queryTerms) "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description"}, Query: "This is how we get to BM25F", MinimumOrTokensMatch: minimumOrTokensMatch} + res, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + // Print results + t.Log("--- Start results for search with AND ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + require.Nil(t, err) + require.Equal(t, expectedSize, len(res)) + require.Equal(t, uint64(0), res[0].DocID) + + // if minimumOrTokensMatch < 3, title and description will both match, and thus the score will be higher + if minimumOrTokensMatch < 3 { + EqualFloats(t, scores[0], 5.470736, 3) + } else { + EqualFloats(t, scores[0], 4.0164075, 3) + } + }) + } + + } +} + +func TestBM25FJourneyAnd(t *testing.T) { + config.DefaultUsingBlockMaxWAND = false + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + 
mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.Background())) + defer repo.Shutdown(context.Background()) + + props := SetupClass(t, repo, schemaGetter, logger, 1.2, 0.75) + + idx := repo.GetIndex("MyClass") + + require.NotNil(t, idx) + + // Check basic search + addit := additional.Properties{} + for _, location := range []string{"memory", "disk"} { + t.Run("bm25f text with AND "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description"}, Query: "This is how we get to BM25F", SearchOperator: common_filters.SearchOperatorAnd} + res, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + // Print results + t.Log("--- Start results for search with AND ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, 
title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + require.Nil(t, err) + + // Check results in correct order + require.Equal(t, uint64(0), res[0].DocID) + }) + + t.Run("bm25f text with AND == minimum should match with len(queryTerms) "+location, func(t *testing.T) { + q := "This is how we get to BM25F" + kwr1 := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description"}, Query: q, SearchOperator: common_filters.SearchOperatorAnd} + res1, scores1, err := idx.objectSearch(context.TODO(), 1000, nil, kwr1, nil, nil, addit, nil, "", 0, props) + + require.Nil(t, err) + + kwr2 := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description"}, Query: q, SearchOperator: common_filters.SearchOperatorOr, MinimumOrTokensMatch: len(strings.Split(q, " "))} + res2, scores2, err := idx.objectSearch(context.TODO(), 1000, nil, kwr2, nil, nil, addit, nil, "", 0, props) + + require.Nil(t, err) + // Print results + t.Log("--- Start results for search with AND ---") + for i, r := range res1 { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores1[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + for i, r := range res2 { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores2[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + require.Equal(t, len(res1), len(res2)) + + for i := 0; i < len(res1); i++ { + require.Equal(t, res1[i].DocID, res2[i].DocID) + require.Equal(t, scores1[i], scores2[i]) + } + }) + + // depending on the minimum should match, we will have a different number of results showing up + 
expectedSizes := []int{3, 3, 2, 2, 2, 2, 1, 1} + for minimumOrTokensMatch, expectedSize := range expectedSizes { + t.Run("bm25f text with minimum should match with 0...len(queryTerms) "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description"}, Query: "This is how we get to BM25F", MinimumOrTokensMatch: minimumOrTokensMatch} + res, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + // Print results + t.Log("--- Start results for search with AND ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + require.Nil(t, err) + require.Equal(t, expectedSize, len(res)) + require.Equal(t, uint64(0), res[0].DocID) + + EqualFloats(t, scores[0], 3.4539468, 3) + }) + } + + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/bm25f_block_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/bm25f_block_test.go new file mode 100644 index 0000000000000000000000000000000000000000..dfa9985c0c6344c730bc51dd1459207e9dffa853 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/bm25f_block_test.go @@ -0,0 +1,1082 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "strings" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +func TestBM25FJourneyBlock(t *testing.T) { + config.DefaultUsingBlockMaxWAND = true + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, 
mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.Background())) + defer repo.Shutdown(context.Background()) + + props := SetupClass(t, repo, schemaGetter, logger, 1.2, 0.75) + + idx := repo.GetIndex("MyClass") + + require.NotNil(t, idx) + + // Check basic search + addit := additional.Properties{} + for _, location := range []string{"memory", "disk"} { + t.Run("bm25f journey "+location, func(t *testing.T) { + addit = additional.Properties{Vector: true} + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description", "textField"}, Query: "journey"} + res, scores, err := idx.objectSearch(context.Background(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // Print results + t.Log("--- Start results for basic search ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + // Check results in correct order + require.Equal(t, uint64(4), res[0].DocID) + require.Equal(t, uint64(5), res[1].DocID) + require.Equal(t, 
uint64(6), res[2].DocID) + require.Equal(t, uint64(2), res[3].DocID) + require.Equal(t, uint64(3), res[4].DocID) + require.Equal(t, uint64(0), res[5].DocID) + + // vectors should be returned + require.NotNil(t, res[0].Vector) + + // Without additionalExplanations no explainScore entry should be present + require.NotContains(t, res[0].Object.Additional, "explainScore") + }) + + // Check non-alpha search on string field + + // text/field are tokenized entirely, so we can search for non-alpha characters + t.Run("bm25f textField non-alpha "+location, func(t *testing.T) { + kwrTextField := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description", "textField"}, Query: "*&^$@#$%^&*()(Offtopic!!!!"} + addit = additional.Properties{} + resTextField, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwrTextField, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // Print results + t.Log("--- Start results for textField search ---") + for i, r := range resTextField { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + // Check results in correct order + require.Equal(t, uint64(7), resTextField[0].DocID) + }) + + // text/field are not lower-cased before indexing, so upper case searches must be passed through unchanged. 
+ t.Run("bm25f textField caps "+location, func(t *testing.T) { + kwrTextField := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"textField"}, Query: "YELLING IS FUN"} + addit := additional.Properties{} + resTextField, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwrTextField, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // Print results + t.Log("--- Start results for textField caps search ---") + for i, r := range resTextField { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + // Check results in correct order + require.Equal(t, uint64(8), resTextField[0].DocID) + }) + + // Check basic text search WITH CAPS + t.Run("bm25f text with caps "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description"}, Query: "JOURNEY"} + res, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + // Print results + t.Log("--- Start results for search with caps ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + require.Nil(t, err) + + // Check results in correct order + require.Equal(t, uint64(4), res[0].DocID) + require.Equal(t, uint64(5), res[1].DocID) + require.Equal(t, uint64(6), res[2].DocID) + require.Equal(t, uint64(2), res[3].DocID) + require.Equal(t, uint64(3), res[4].DocID) + require.Equal(t, uint64(0), res[5].DocID) + require.Equal(t, uint64(1), res[6].DocID) + }) + + t.Run("bm25f journey boosted "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: 
[]string{"title^3", "description"}, Query: "journey"} + res, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + + require.Nil(t, err) + // Print results + t.Log("--- Start results for boosted search ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + // Check results in correct order + require.Equal(t, uint64(4), res[0].DocID) + require.Equal(t, uint64(5), res[1].DocID) + require.Equal(t, uint64(6), res[2].DocID) + require.Equal(t, uint64(2), res[3].DocID) + require.Equal(t, uint64(0), res[4].DocID) + require.Equal(t, uint64(1), res[5].DocID) + require.Equal(t, uint64(3), res[6].DocID) + }) + + t.Run("Check search with two terms "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description"}, Query: "journey somewhere"} + res, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + require.Equal(t, len(scores), len(res)) + // Check results in correct order + require.Equal(t, uint64(4), res[0].DocID) + require.Equal(t, uint64(1), res[1].DocID) + require.Equal(t, uint64(5), res[2].DocID) + require.Equal(t, uint64(6), res[3].DocID) + require.Equal(t, uint64(2), res[4].DocID) + }) + + t.Run("bm25f journey somewhere no properties "+location, func(t *testing.T) { + // Check search with no properties (should include all properties) + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{}, Query: "journey somewhere"} + res, _, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // Check results in correct order + require.Equal(t, uint64(4), res[0].DocID) + require.Equal(t, uint64(1), 
res[1].DocID) + require.Equal(t, uint64(5), res[2].DocID) + require.Equal(t, uint64(6), res[3].DocID) + }) + + t.Run("bm25f non alphanums "+location, func(t *testing.T) { + // Check search with no properties (should include all properties) + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{}, Query: "*&^$@#$%^&*()(Offtopic!!!!"} + res, _, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + require.Equal(t, uint64(7), res[0].DocID) + }) + + t.Run("First result has high score "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"description"}, Query: "about BM25F"} + res, _, err := idx.objectSearch(context.TODO(), 5, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + require.Equal(t, uint64(0), res[0].DocID) + require.Len(t, res, 4) // four results have one of the terms + }) + + t.Run("More results than limit "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"description"}, Query: "journey"} + res, _, err := idx.objectSearch(context.TODO(), 5, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + require.Equal(t, uint64(4), res[0].DocID) + require.Equal(t, uint64(5), res[1].DocID) + require.Equal(t, uint64(6), res[2].DocID) + require.Equal(t, uint64(3), res[3].DocID) + require.Equal(t, uint64(2), res[4].DocID) + require.Len(t, res, 5) // four results have one of the terms + }) + + t.Run("Results from three properties "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Query: "none"} + res, _, err := idx.objectSearch(context.TODO(), 5, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + require.Equal(t, uint64(9), res[0].DocID) + require.Equal(t, uint64(0), res[1].DocID) + require.Equal(t, uint64(8), res[2].DocID) + require.Len(t, res, 3) + }) + + t.Run("Include additional explanations 
"+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"description"}, Query: "journey", AdditionalExplanations: true} + res, _, err := idx.objectSearch(context.TODO(), 5, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // With additionalExplanations explainScore entry should be present + require.Contains(t, res[0].Object.Additional, "explainScore") + require.Contains(t, res[0].Object.Additional["explainScore"], "BM25") + }) + + t.Run("Array fields text "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"multiTitles"}, Query: "dinner"} + res, _, err := idx.objectSearch(context.TODO(), 5, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + require.Len(t, res, 2) + require.Equal(t, uint64(0), res[0].DocID) + require.Equal(t, uint64(1), res[1].DocID) + }) + + t.Run("Array fields string "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"multiTextWhitespace"}, Query: "MuuultiYell!"} + res, _, err := idx.objectSearch(context.TODO(), 5, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + require.Len(t, res, 2) + require.Equal(t, uint64(6), res[0].DocID) + require.Equal(t, uint64(5), res[1].DocID) + }) + + t.Run("With autocut "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Query: "journey", Properties: []string{"description"}} + resNoAutoCut, noautocutscores, err := idx.objectSearch(context.TODO(), 10, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + resAutoCut, autocutscores, err := idx.objectSearch(context.TODO(), 10, nil, kwr, nil, nil, addit, nil, "", 1, props) + require.Nil(t, err) + + require.Less(t, len(resAutoCut), len(resNoAutoCut)) + + EqualFloats(t, float32(0.5253056), noautocutscores[0], 5) + EqualFloats(t, float32(0.50612706), noautocutscores[1], 5) // <= autocut last element + 
EqualFloats(t, float32(0.35391074), noautocutscores[2], 5) + EqualFloats(t, float32(0.31824225), noautocutscores[3], 5) + EqualFloats(t, float32(0.28910512), noautocutscores[4], 5) + + require.Len(t, resAutoCut, 2) + EqualFloats(t, float32(0.5253056), autocutscores[0], 5) + EqualFloats(t, float32(0.50612706), autocutscores[1], 5) + }) + + for _, index := range repo.indices { + index.ForEachShard(func(name string, shard ShardLike) error { + err := shard.Store().FlushMemtables(context.Background()) + require.Nil(t, err) + return nil + }) + } + + } +} + +func TestBM25FSinglePropBlock(t *testing.T) { + config.DefaultUsingBlockMaxWAND = true + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + 
mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + props := SetupClass(t, repo, schemaGetter, logger, 0.5, 1) + + idx := repo.GetIndex("MyClass") + require.NotNil(t, idx) + + for _, location := range []string{"memory", "disk"} { + t.Run("bm25f singleprop "+location, func(t *testing.T) { + // Check boosted + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"description"}, Query: "journey"} + addit := additional.Properties{} + res, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + t.Log("--- Start results for singleprop search ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + require.Nil(t, err) + // Check results in correct order + require.Equal(t, uint64(5), res[0].DocID) + require.Equal(t, uint64(3), res[3].DocID) + + // Check scores + EqualFloats(t, float32(0.6178051), scores[0], 5) + EqualFloats(t, float32(0.6178051), scores[1], 5) + }) + + for _, index := range repo.indices { + index.ForEachShard(func(name string, shard ShardLike) error { + err := shard.Store().FlushMemtables(context.Background()) + require.Nil(t, err) + return nil + }) + } + } +} + +func TestBM25FWithFiltersBlock(t *testing.T) { + config.DefaultUsingBlockMaxWAND = true + dirName 
:= t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + props := SetupClass(t, repo, schemaGetter, logger, 0.5, 1) + + idx := repo.GetIndex("MyClass") + require.NotNil(t, 
idx) + + filter := &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName("MyClass"), + Property: schema.PropertyName("title"), + }, + Value: &filters.Value{ + Value: "My", + Type: schema.DataType("text"), + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName("MyClass"), + Property: schema.PropertyName("title"), + }, + Value: &filters.Value{ + Value: "journeys", + Type: schema.DataType("text"), + }, + }, + }, + }, + } + + filterEmpty := &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName("MyClass"), + Property: schema.PropertyName("title"), + }, + Value: &filters.Value{ + Value: "asdasdas", + Type: schema.DataType("text"), + }, + }, + }, + }, + } + + for _, location := range []string{"memory", "disk"} { + t.Run("bm25f with filter "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"description"}, Query: "journey"} + addit := additional.Properties{} + res, _, err := idx.objectSearch(context.TODO(), 1000, filter, kwr, nil, nil, addit, nil, "", 0, props) + + require.Nil(t, err) + require.True(t, len(res) == 1) + require.Equal(t, uint64(2), res[0].DocID) + }) + + t.Run("bm25f with filter matching no docs "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"description"}, Query: "journey"} + addit := additional.Properties{} + res, _, err := idx.objectSearch(context.TODO(), 1000, filterEmpty, kwr, nil, nil, addit, nil, "", 0, props) + + require.Nil(t, err) + require.True(t, len(res) == 0) + }) + + for _, index := range repo.indices { + index.ForEachShard(func(name string, shard ShardLike) error { + err := 
shard.Store().FlushMemtables(context.Background()) + require.Nil(t, err) + return nil + }) + } + } +} + +func TestBM25FWithFilters_ScoreIsIdenticalWithOrWithoutFilterBlock(t *testing.T) { + config.DefaultUsingBlockMaxWAND = true + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + 
repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + props := SetupClassForFilterScoringTest(t, repo, schemaGetter, logger, 1.2, 0.75) + + idx := repo.GetIndex("FilterClass") + require.NotNil(t, idx) + + filter := &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName("FilterClass"), + Property: schema.PropertyName("relatedToGolf"), + }, + Operator: filters.OperatorEqual, + Value: &filters.Value{ + Value: true, + Type: dtBool, + }, + }, + } + + kwr := &searchparams.KeywordRanking{ + Type: "bm25", + Properties: []string{"description"}, + Query: "koepka golf", + } + + for _, location := range []string{"memory", "disk"} { + t.Run("bm25f with and without filter "+location, func(t *testing.T) { + addit := additional.Properties{} + filtered, filteredScores, err := idx.objectSearch(context.TODO(), 1000, filter, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + unfiltered, unfilteredScores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + require.Len(t, filtered, 1) // should match exactly one element + require.Len(t, unfiltered, 2) // contains irrelevant result + + assert.Equal(t, uint64(0), filtered[0].DocID) // brooks koepka result + assert.Equal(t, uint64(0), unfiltered[0].DocID) // brooks koepka result + + assert.Equal(t, filteredScores[0], unfilteredScores[0]) + }) + + for _, index := range repo.indices { + index.ForEachShard(func(name string, shard ShardLike) error { + err := shard.Store().FlushMemtables(context.Background()) + require.Nil(t, err) + return nil + }) + } + } +} + +func TestBM25FDifferentParamsJourneyBlock(t *testing.T) { + config.DefaultUsingBlockMaxWAND = true + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + 
shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + props := SetupClass(t, repo, schemaGetter, logger, 0.5, 1) + + idx := repo.GetIndex("MyClass") + require.NotNil(t, idx) + + for _, location := range []string{"memory", "disk"} { + t.Run("bm25f different params boosted "+location, func(t *testing.T) { + // Check boosted + kwr := 
&searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title^2", "description"}, Query: "journey"} + addit := additional.Properties{} + res, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + + // Print results + t.Log("--- Start results for boosted search ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + require.Nil(t, err) + + // Check results in correct order + require.Equal(t, uint64(5), res[0].DocID) + require.Equal(t, uint64(6), res[2].DocID) + + // Print results + t.Log("--- Start results for boosted search ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + // Check scores + EqualFloats(t, float32(1.7730504), scores[0], 2) + EqualFloats(t, float32(1.7730504), scores[1], 2) + }) + + for _, index := range repo.indices { + index.ForEachShard(func(name string, shard ShardLike) error { + err := shard.Store().FlushMemtables(context.Background()) + require.Nil(t, err) + return nil + }) + } + } +} + +// Compare with previous BM25 version to ensure the algorithm functions correctly +func TestBM25FCompareBlock(t *testing.T) { + config.DefaultUsingBlockMaxWAND = true + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + 
mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + props := SetupClass(t, repo, schemaGetter, logger, 0.5, 1) + + idx := repo.GetIndex("MyClass") + require.NotNil(t, idx) + + for _, location := range []string{"memory", "disk"} { + t.Run("Compare with previous BM25 version "+location, func(t *testing.T) { + shardNames, err := idx.schemaReader.Shards("MyClass") + require.Nil(t, err) + + for _, shardName := range shardNames { + shard := idx.shards.Load(shardName) + t.Logf("------ BM25F --------\n") + kwr := 
&searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title"}, Query: "journey"} + addit := additional.Properties{} + + withBM25Fobjs, withBM25Fscores, err := shard.ObjectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, props) + require.Nil(t, err) + + for i, r := range withBM25Fobjs { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, withBM25Fscores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + t.Logf("------ BM25 --------\n") + kwr.Type = "" + + objs, scores, err := shard.ObjectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, props) + require.Nil(t, err) + + for i, r := range objs { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + require.Equal(t, len(withBM25Fobjs), len(objs)) + for i := range objs { + t.Logf("%v: BM25F score: %v, BM25 score: %v", i, withBM25Fscores[i], scores[i]) + EqualFloats(t, withBM25Fscores[i], scores[i], 9) + } + + // Not all the scores are unique and the search is not stable, so pick ones that don't move + require.Equal(t, uint64(1), objs[0].DocID) + require.Equal(t, uint64(2), objs[1].DocID) + require.Equal(t, uint64(0), objs[2].DocID) + require.Equal(t, uint64(6), objs[3].DocID) + require.Equal(t, uint64(5), objs[4].DocID) + require.Equal(t, uint64(4), objs[5].DocID) + + require.Equal(t, uint64(1), withBM25Fobjs[0].DocID) + require.Equal(t, uint64(2), withBM25Fobjs[1].DocID) + require.Equal(t, uint64(0), withBM25Fobjs[2].DocID) + require.Equal(t, uint64(6), withBM25Fobjs[3].DocID) + require.Equal(t, uint64(5), withBM25Fobjs[4].DocID) + require.Equal(t, uint64(4), withBM25Fobjs[5].DocID) + + } + }) + + for _, index := range repo.indices { + index.ForEachShard(func(name 
string, shard ShardLike) error { + err := shard.Store().FlushMemtables(context.Background()) + require.Nil(t, err) + return nil + }) + } + } +} + +func TestBM25F_ComplexDocumentsBlock(t *testing.T) { + config.DefaultUsingBlockMaxWAND = true + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{}, + }, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, 
memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + addit := additional.Properties{} + + for _, location := range []string{"memory", "disk"} { + + classNone, props := SetupClassDocuments(t, repo, schemaGetter, logger, 0.5, 0.75, "none") + idxNone := repo.GetIndex(schema.ClassName(classNone)) + require.NotNil(t, idxNone) + + t.Run("single term "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Query: "considered a"} + res, scores, err := idxNone.objectSearch(context.TODO(), 10, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // Print results + t.Log("--- Start results for boosted search ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, \n", r.DocID, scores[i]) + } + + // Check results in correct order + require.Equal(t, uint64(3), res[0].DocID) + require.Equal(t, uint64(0), res[1].DocID) + require.Equal(t, uint64(2), res[2].DocID) + require.Len(t, res, 3) + + // Check scores + EqualFloats(t, float32(0.93171), scores[0], 5) + EqualFloats(t, float32(0.54312956), scores[1], 5) + EqualFloats(t, float32(0.3794713), scores[2], 5) + }) + + t.Run("Results without stopwords "+location, func(t *testing.T) { + kwrNoStopwords := &searchparams.KeywordRanking{Type: "bm25", Query: "example losing business"} + resNoStopwords, resNoScores, err := idxNone.objectSearch(context.TODO(), 10, nil, kwrNoStopwords, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + classEn, properties := SetupClassDocuments(t, repo, schemaGetter, logger, 0.5, 0.75, "en") + idxEn := repo.GetIndex(schema.ClassName(classEn)) + require.NotNil(t, idxEn) + + kwrStopwords := &searchparams.KeywordRanking{Type: "bm25", Query: "an example on losing the business"} + resStopwords, resScores, err := idxEn.objectSearch(context.TODO(), 10, 
nil, kwrStopwords, nil, nil, addit, nil, "", 0, properties) + require.Nil(t, err) + + require.Equal(t, len(resNoStopwords), len(resStopwords)) + for i, resNo := range resNoStopwords { + resYes := resStopwords[i] + require.Equal(t, resNo.DocID, resYes.DocID) + require.Equal(t, resNoScores[i], resScores[i]) + } + + kwrStopwordsDuplicate := &searchparams.KeywordRanking{Type: "bm25", Query: "on an example on losing the business on"} + resStopwordsDuplicate, duplicateScores, err := idxEn.objectSearch(context.TODO(), 10, nil, kwrStopwordsDuplicate, nil, nil, addit, nil, "", 0, properties) + require.Nil(t, err) + require.Equal(t, len(resNoStopwords), len(resStopwordsDuplicate)) + for i, resNo := range resNoStopwords { + resYes := resStopwordsDuplicate[i] + require.Equal(t, resNo.DocID, resYes.DocID) + require.Equal(t, resNoScores[i], duplicateScores[i]) + } + }) + + for _, index := range repo.indices { + index.ForEachShard(func(name string, shard ShardLike) error { + err := shard.Store().FlushMemtables(context.Background()) + require.Nil(t, err) + return nil + }) + } + } +} + +func TestBM25F_SortMultiPropBlock(t *testing.T) { + config.DefaultUsingBlockMaxWAND = true + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{}, + }, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() 
+ mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + className, props := MultiPropClass(t, repo, schemaGetter, logger, 0.5, 0.75) + idx := repo.GetIndex(schema.ClassName(className)) + require.NotNil(t, idx) + + addit := additional.Properties{} + + for _, location := range []string{"memory", "disk"} { + + t.Run("single term "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Query: "pepper banana"} + res, scores, err := idx.objectSearch(context.TODO(), 2, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // Print results + t.Log("--- Start results for boosted search ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, \n", r.DocID, scores[i]) + } + + // Document 1 is a result for both terms + require.Len(t, res, 2) + require.Equal(t, uint64(1), res[0].DocID) + }) + + t.Run("two docs to test 
additional explanations "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Query: "pepper banana", AdditionalExplanations: true} + res, _, err := idx.objectSearch(context.TODO(), 2, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // Print results + t.Log("--- Start results for boosted search ---") + for _, r := range res { + t.Logf("Result id: %v, score: %v, additional: %v\n", r.DocID, r.ExplainScore(), r.Object.Additional) + } + + // We have two results, one that matches both terms and one that matches only one + require.Len(t, res, 2) + require.Equal(t, uint64(1), res[0].DocID) + // these assertions failed if we didn't swap the positions of the additional explanations, as we would be getting the explanations from the second doc + explanationString := fmt.Sprintf("%f", res[0].Object.Additional["explainScore"]) + require.True(t, strings.Contains(explanationString, "BM25F_pepper_frequency:1")) + require.True(t, strings.Contains(explanationString, "BM25F_pepper_propLength:1")) + require.True(t, strings.Contains(explanationString, "BM25F_banana_frequency:1")) + require.True(t, strings.Contains(explanationString, "BM25F_banana_propLength:1")) + }) + + for _, index := range repo.indices { + index.ForEachShard(func(name string, shard ShardLike) error { + err := shard.Store().FlushMemtables(context.Background()) + require.Nil(t, err) + return nil + }) + } + + } +} + +func TestBM25FWithFiltersMemtable(t *testing.T) { + config.DefaultUsingBlockMaxWAND = true + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className 
string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + props := SetupClass(t, repo, schemaGetter, logger, 0.5, 1) + + idx := repo.GetIndex("MyClass") + require.NotNil(t, idx) + + filter := &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorNotEqual, + On: &filters.Path{ + Class: schema.ClassName("MyClass"), + Property: schema.PropertyName("title"), + }, + Value: &filters.Value{ + Value: "unrelated", + Type: schema.DataType("text"), + }, + }, + }, + }, + } + + resultIds := make([][]uint64, 2) + resultScores := make([][]float32, 2) + 
for i, location := range []string{"memory", "disk"} { + t.Run("bm25f with filter "+location, func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title"}, Query: "my unrelated journey", AdditionalExplanations: true} + addit := additional.Properties{} + res, scores, err := idx.objectSearch(context.TODO(), 1000, filter, kwr, nil, nil, addit, nil, "", 0, props) + + require.Nil(t, err) + + for j, r := range res { + resultIds[i] = append(resultIds[i], r.DocID) + resultScores[i] = append(resultScores[i], scores[j]) + } + }) + + for _, index := range repo.indices { + index.ForEachShard(func(name string, shard ShardLike) error { + err := shard.Store().FlushMemtables(context.Background()) + require.Nil(t, err) + return nil + }) + } + } + assert.Equal(t, resultIds[0], resultIds[1], "Result IDs should be the same for memory and disk") + assert.Equal(t, resultScores[0], resultScores[1], "Result scores should be the same for memory and disk") +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/bm25f_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/bm25f_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e177be94bc6594361de8d9799387ae2f746d5041 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/bm25f_test.go @@ -0,0 +1,1239 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "strings" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/searchparams" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +func BM25FinvertedConfig(k1, b float32, stopWordPreset string) *models.InvertedIndexConfig { + return &models.InvertedIndexConfig{ + Bm25: &models.BM25Config{ + K1: k1, + B: b, + }, + CleanupIntervalSeconds: 60, + Stopwords: &models.StopwordConfig{ + Preset: stopWordPreset, + }, + IndexNullState: true, + IndexPropertyLength: true, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + } +} + +func SetupClass(t require.TestingT, repo *DB, schemaGetter *fakeSchemaGetter, logger logrus.FieldLogger, k1, b float32, +) []string { + vFalse := false + vTrue := true + + class := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: BM25FinvertedConfig(k1, b, "none"), + Class: "MyClass", + + Properties: []*models.Property{ + { + Name: "title", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + IndexFilterable: &vFalse, + IndexSearchable: 
&vTrue, + }, + { + Name: "description", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + IndexFilterable: &vFalse, + IndexSearchable: &vTrue, + }, + { + Name: "review", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + IndexFilterable: &vFalse, + IndexSearchable: &vTrue, + }, + { + Name: "textField", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationField, + IndexFilterable: &vFalse, + IndexSearchable: &vTrue, + }, + { + Name: "textWhitespace", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + IndexFilterable: &vFalse, + IndexSearchable: &vTrue, + }, + { + Name: "relatedToGolf", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vTrue, + }, + { + Name: "multiTitles", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWord, + IndexFilterable: &vFalse, + IndexSearchable: &vTrue, + }, + { + Name: "multiTextWhitespace", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + IndexFilterable: &vFalse, + IndexSearchable: &vTrue, + }, + // Test that bm25f handles this property being unsearchable + { + Name: "notSearchable", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + }, + }, + } + props := make([]string, len(class.Properties)) + for i, prop := range class.Properties { + props[i] = prop.Name + } + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + schemaGetter.schema = schema + + migrator := NewMigrator(repo, logger, "node1") + migrator.AddClass(context.Background(), class) + + testData := []map[string]interface{}{} + testData = append(testData, map[string]interface{}{"title": "Our 
journey to BM25F", "description": "This is how we get to BM25F", "review": "none none none", "multiTitles": []string{"breakfast", "dinner"}}) + testData = append(testData, map[string]interface{}{"title": "Why I dont like journey", "description": "This is about how we get somewhere", "multiTitles": []string{"going to a restaurant for dinner", "sandwiches and desert are a great lunch"}}) + testData = append(testData, map[string]interface{}{"title": "My journeys in Journey", "description": "A journey story about journeying"}) + testData = append(testData, map[string]interface{}{"title": "An unrelated title", "description": "Actually all about journey"}) + testData = append(testData, map[string]interface{}{"title": "journey journey", "description": "journey journey journey"}) + testData = append(testData, map[string]interface{}{"title": "journey", "description": "journey journey", "multiTextWhitespace": []string{"totally irrelevant:)", "we all MuuultiYell! together"}}) + testData = append(testData, map[string]interface{}{"title": "JOURNEY", "description": "A LOUD JOURNEY", "multiTextWhitespace": []string{"MuuultiYell!", "is fun"}}) + testData = append(testData, map[string]interface{}{"title": "An unrelated title", "description": "Absolutely nothing to do with the topic", "textField": "*&^$@#$%^&*()(Offtopic!!!!"}) + testData = append(testData, map[string]interface{}{"title": "none", "description": "other", "textField": "YELLING IS FUN"}) + testData = append(testData, map[string]interface{}{"title": "something", "description": "none none", "review": "none none none none none none"}) + + for i, data := range testData { + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + + obj := &models.Object{Class: "MyClass", ID: id, Properties: data, CreationTimeUnix: 1565612833955, LastUpdateTimeUnix: 10000020} + vector := []float32{1, 3, 5, 0.4} + //{title: "Our journey to BM25F", description: " This is how we get to BM25F"}} + err := 
repo.PutObject(context.Background(), obj, vector, nil, nil, nil, 0) + require.Nil(t, err) + } + return props +} + +// DuplicatedFrom SetupClass to make sure this new test does not alter the results of the existing one +func SetupClassForFilterScoringTest(t require.TestingT, repo *DB, schemaGetter *fakeSchemaGetter, logger logrus.FieldLogger, k1, b float32, +) []string { + vFalse := false + vTrue := true + + class := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: BM25FinvertedConfig(k1, b, "none"), + Class: "FilterClass", + + Properties: []*models.Property{ + { + Name: "description", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + IndexFilterable: &vFalse, + IndexSearchable: &vTrue, + }, + { + Name: "relatedToGolf", + DataType: schema.DataTypeBoolean.PropString(), + IndexFilterable: &vTrue, + }, + }, + } + props := make([]string, len(class.Properties)) + for i, prop := range class.Properties { + props[i] = prop.Name + } + + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + schemaGetter.schema = schema + + migrator := NewMigrator(repo, logger, "node1") + migrator.AddClass(context.Background(), class) + + testData := []map[string]interface{}{} + testData = append(testData, map[string]interface{}{"description": "Brooks Koepka appeared a lot in the ms marco dataset. I was surprised to see golf content in there. 
I assume if the dataset was newer, we'd see a lot more Rory though.", "relatedToGolf": true}) + testData = append(testData, map[string]interface{}{"description": "While one would expect Koepka to be a somewhat rare name, it did appear in msmarco also outside the context of Brooks.", "relatedToGolf": false}) + + for i, data := range testData { + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + + obj := &models.Object{Class: "FilterClass", ID: id, Properties: data, CreationTimeUnix: 1565612833955, LastUpdateTimeUnix: 10000020} + vector := []float32{1, 3, 5, 0.4} + err := repo.PutObject(context.Background(), obj, vector, nil, nil, nil, 0) + require.Nil(t, err) + } + return props +} + +func TestBM25FJourney(t *testing.T) { + config.DefaultUsingBlockMaxWAND = false + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + 
mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + props := SetupClass(t, repo, schemaGetter, logger, 1.2, 0.75) + + idx := repo.GetIndex("MyClass") + require.NotNil(t, idx) + + // Check basic search + addit := additional.Properties{Vector: true} + + t.Run("bm25f journey", func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description", "textField"}, Query: "journey"} + res, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // Print results + t.Log("--- Start results for basic search ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + // Check results in correct order + require.Equal(t, uint64(4), res[0].DocID) + require.Equal(t, uint64(5), res[1].DocID) + require.Equal(t, uint64(6), res[2].DocID) + require.Equal(t, uint64(3), res[3].DocID) + require.Equal(t, uint64(0), res[4].DocID) + require.Equal(t, uint64(2), res[5].DocID) + + // vectors should be returned + require.NotNil(t, res[0].Vector) + + // Without additionalExplanations no explainScore entry should be present + 
require.NotContains(t, res[0].Object.Additional, "explainScore") + }) + + // Check non-alpha search on string field + + // text/field are tokenized entirely, so we can search for non-alpha characters + t.Run("bm25f textField non-alpha", func(t *testing.T) { + kwrTextField := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description", "textField"}, Query: "*&^$@#$%^&*()(Offtopic!!!!"} + addit = additional.Properties{} + resTextField, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwrTextField, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // Print results + t.Log("--- Start results for textField search ---") + for i, r := range resTextField { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + // Check results in correct order + require.Equal(t, uint64(7), resTextField[0].DocID) + }) + + // text/field are not lower-cased before indexing, so upper case searches must be passed through unchanged. 
+ t.Run("bm25f textField caps", func(t *testing.T) { + kwrTextField := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"textField"}, Query: "YELLING IS FUN"} + addit := additional.Properties{} + resTextField, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwrTextField, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // Print results + t.Log("--- Start results for textField caps search ---") + for i, r := range resTextField { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + // Check results in correct order + require.Equal(t, uint64(8), resTextField[0].DocID) + }) + + // Check basic text search WITH CAPS + t.Run("bm25f text with caps", func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description"}, Query: "JOURNEY"} + res, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + // Print results + t.Log("--- Start results for search with caps ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + require.Nil(t, err) + + // Check results in correct order + require.Equal(t, uint64(4), res[0].DocID) + require.Equal(t, uint64(5), res[1].DocID) + require.Equal(t, uint64(6), res[2].DocID) + require.Equal(t, uint64(2), res[3].DocID) + require.Equal(t, uint64(3), res[4].DocID) + require.Equal(t, uint64(0), res[5].DocID) + require.Equal(t, uint64(1), res[6].DocID) + }) + + t.Run("bm25f journey boosted", func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title^3", "description"}, 
Query: "journey"} + res, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + + require.Nil(t, err) + // Print results + t.Log("--- Start results for boosted search ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + // Check results in correct order + require.Equal(t, uint64(4), res[0].DocID) + require.Equal(t, uint64(5), res[1].DocID) + require.Equal(t, uint64(6), res[2].DocID) + require.Equal(t, uint64(0), res[3].DocID) + require.Equal(t, uint64(1), res[4].DocID) + require.Equal(t, uint64(2), res[5].DocID) + require.Equal(t, uint64(3), res[6].DocID) + }) + + t.Run("Check search with two terms", func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title", "description"}, Query: "journey somewhere"} + res, _, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + // Check results in correct order + require.Equal(t, uint64(1), res[0].DocID) + require.Equal(t, uint64(4), res[1].DocID) + require.Equal(t, uint64(5), res[2].DocID) + require.Equal(t, uint64(6), res[3].DocID) + require.Equal(t, uint64(2), res[4].DocID) + }) + + t.Run("bm25f journey somewhere no properties", func(t *testing.T) { + // Check search with no properties (should include all properties) + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{}, Query: "journey somewhere"} + res, _, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // Check results in correct order + require.Equal(t, uint64(1), res[0].DocID) + require.Equal(t, uint64(4), res[1].DocID) + require.Equal(t, uint64(5), res[2].DocID) + require.Equal(t, uint64(6), res[3].DocID) + 
}) + + t.Run("bm25f non alphanums", func(t *testing.T) { + // Check search with no properties (should include all properties) + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{}, Query: "*&^$@#$%^&*()(Offtopic!!!!"} + res, _, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + require.Equal(t, uint64(7), res[0].DocID) + }) + + t.Run("First result has high score", func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"description"}, Query: "about BM25F"} + res, _, err := idx.objectSearch(context.TODO(), 5, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + require.Equal(t, uint64(0), res[0].DocID) + require.Len(t, res, 4) // four results have one of the terms + }) + + t.Run("More results than limit", func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"description"}, Query: "journey"} + res, _, err := idx.objectSearch(context.TODO(), 5, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + require.Equal(t, uint64(4), res[0].DocID) + require.Equal(t, uint64(5), res[1].DocID) + require.Equal(t, uint64(6), res[2].DocID) + require.Equal(t, uint64(3), res[3].DocID) + require.Equal(t, uint64(2), res[4].DocID) + require.Len(t, res, 5) // four results have one of the terms + }) + + t.Run("Results from three properties", func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Query: "none"} + res, _, err := idx.objectSearch(context.TODO(), 5, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + require.Equal(t, uint64(9), res[0].DocID) + require.Equal(t, uint64(0), res[1].DocID) + require.Equal(t, uint64(8), res[2].DocID) + require.Len(t, res, 3) + }) + + t.Run("Include additional explanations", func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"description"}, Query: "journey", 
AdditionalExplanations: true} + res, _, err := idx.objectSearch(context.TODO(), 5, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // With additionalExplanations explainScore entry should be present + require.Contains(t, res[0].Object.Additional, "explainScore") + require.Contains(t, res[0].Object.Additional["explainScore"], "BM25") + }) + + t.Run("Array fields text", func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"multiTitles"}, Query: "dinner"} + res, _, err := idx.objectSearch(context.TODO(), 5, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + require.Len(t, res, 2) + require.Equal(t, uint64(0), res[0].DocID) + require.Equal(t, uint64(1), res[1].DocID) + }) + + t.Run("Array fields string", func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"multiTextWhitespace"}, Query: "MuuultiYell!"} + res, _, err := idx.objectSearch(context.TODO(), 5, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + require.Len(t, res, 2) + require.Equal(t, uint64(6), res[0].DocID) + require.Equal(t, uint64(5), res[1].DocID) + }) + + t.Run("With autocut", func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Query: "journey", Properties: []string{"description"}} + resNoAutoCut, noautocutscores, err := idx.objectSearch(context.TODO(), 10, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + resAutoCut, autocutscores, err := idx.objectSearch(context.TODO(), 10, nil, kwr, nil, nil, addit, nil, "", 1, props) + require.Nil(t, err) + + require.Less(t, len(resAutoCut), len(resNoAutoCut)) + + EqualFloats(t, float32(0.5868752), noautocutscores[0], 5) + EqualFloats(t, float32(0.5450892), noautocutscores[1], 5) // <= autocut last element + EqualFloats(t, float32(0.34149727), noautocutscores[2], 5) + EqualFloats(t, float32(0.3049518), noautocutscores[3], 5) + EqualFloats(t, float32(0.27547202), 
noautocutscores[4], 5) + + require.Len(t, resAutoCut, 2) + EqualFloats(t, float32(0.5868752), autocutscores[0], 5) + EqualFloats(t, float32(0.5450892), autocutscores[1], 5) + }) +} + +func TestBM25FSingleProp(t *testing.T) { + config.DefaultUsingBlockMaxWAND = false + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + 
require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + props := SetupClass(t, repo, schemaGetter, logger, 0.5, 100) + + idx := repo.GetIndex("MyClass") + require.NotNil(t, idx) + + // Check boosted + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"description"}, Query: "journey"} + addit := additional.Properties{} + res, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + t.Log("--- Start results for singleprop search ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + require.Nil(t, err) + // Check results in correct order + require.Equal(t, uint64(3), res[0].DocID) + require.Equal(t, uint64(4), res[3].DocID) + + // Check scores + EqualFloats(t, float32(0.1248), scores[0], 5) + EqualFloats(t, float32(0.0363), scores[1], 5) +} + +func TestBM25FWithFilters(t *testing.T) { + config.DefaultUsingBlockMaxWAND = false + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + 
mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + props := SetupClass(t, repo, schemaGetter, logger, 0.5, 100) + + idx := repo.GetIndex("MyClass") + require.NotNil(t, idx) + + filter := &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName("MyClass"), + Property: schema.PropertyName("title"), + }, + Value: &filters.Value{ + Value: "My", + Type: schema.DataType("text"), + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName("MyClass"), + Property: schema.PropertyName("title"), + }, + Value: &filters.Value{ + Value: "journeys", + Type: schema.DataType("text"), + }, + }, + }, + }, + } + + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"description"}, Query: 
"journey"} + addit := additional.Properties{} + res, _, err := idx.objectSearch(context.TODO(), 1000, filter, kwr, nil, nil, addit, nil, "", 0, props) + + require.Nil(t, err) + require.True(t, len(res) == 1) + require.Equal(t, uint64(2), res[0].DocID) +} + +func TestBM25FWithFilters_ScoreIsIdenticalWithOrWithoutFilter(t *testing.T) { + config.DefaultUsingBlockMaxWAND = false + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, 
&fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + props := SetupClassForFilterScoringTest(t, repo, schemaGetter, logger, 1.2, 0.75) + + idx := repo.GetIndex("FilterClass") + require.NotNil(t, idx) + + filter := &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Class: schema.ClassName("FilterClass"), + Property: schema.PropertyName("relatedToGolf"), + }, + Operator: filters.OperatorEqual, + Value: &filters.Value{ + Value: true, + Type: dtBool, + }, + }, + } + + kwr := &searchparams.KeywordRanking{ + Type: "bm25", + Properties: []string{"description"}, + Query: "koepka golf", + } + + addit := additional.Properties{} + filtered, filteredScores, err := idx.objectSearch(context.TODO(), 1000, filter, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + unfiltered, unfilteredScores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + require.Len(t, filtered, 1) // should match exactly one element + require.Len(t, unfiltered, 2) // contains irrelevant result + + assert.Equal(t, uint64(0), filtered[0].DocID) // brooks koepka result + assert.Equal(t, uint64(0), unfiltered[0].DocID) // brooks koepka result + + assert.Equal(t, filteredScores[0], unfilteredScores[0]) +} + +func TestBM25FDifferentParamsJourney(t *testing.T) { + config.DefaultUsingBlockMaxWAND = false + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + 
mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + props := SetupClass(t, repo, schemaGetter, logger, 0.5, 100) + + idx := repo.GetIndex("MyClass") + require.NotNil(t, idx) + + // Check boosted + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title^2", "description"}, Query: "journey"} + addit := additional.Properties{} + res, scores, err := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + + // Print results + t.Log("--- Start results for boosted search ---") + for 
i, r := range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + require.Nil(t, err) + + // Check results in correct order + require.Equal(t, uint64(6), res[0].DocID) + require.Equal(t, uint64(1), res[3].DocID) + + // Print results + t.Log("--- Start results for boosted search ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + // Check scores + EqualFloats(t, float32(0.06023), scores[0], 6) + EqualFloats(t, float32(0.04238), scores[1], 6) +} + +func EqualFloats(t *testing.T, expected, actual float32, significantFigures int) { + s1 := fmt.Sprintf("%v", expected) + s2 := fmt.Sprintf("%v", actual) + if len(s1) < 2 || len(s2) < 2 { + t.Fail() + } + if len(s1) <= significantFigures { + significantFigures = len(s1) - 1 + } + if len(s2) <= significantFigures { + significantFigures = len(s2) - 1 + } + require.Equal(t, s1[:significantFigures+1], s2[:significantFigures+1]) +} + +// Compare with previous BM25 version to ensure the algorithm functions correctly +func TestBM25FCompare(t *testing.T) { + config.DefaultUsingBlockMaxWAND = false + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) 
error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + props := SetupClass(t, repo, schemaGetter, logger, 0.5, 100) + + idx := repo.GetIndex("MyClass") + require.NotNil(t, idx) + + shardNames, err := idx.schemaReader.Shards("MyClass") + require.Nil(t, err) + + for _, shardName := range shardNames { + shard := idx.shards.Load(shardName) + t.Logf("------ BM25F --------\n") + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{"title"}, Query: "journey"} + addit := additional.Properties{} + + withBM25Fobjs, withBM25Fscores, err := shard.ObjectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, props) + require.Nil(t, err) + + for i, r := range 
withBM25Fobjs { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, withBM25Fscores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + t.Logf("------ BM25 --------\n") + kwr.Type = "" + + objs, scores, err := shard.ObjectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, props) + require.Nil(t, err) + + for i, r := range objs { + t.Logf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.DocID, scores[i], r.Object.Properties.(map[string]interface{})["title"], r.Object.Properties.(map[string]interface{})["description"], r.Object.Additional) + } + + require.Equal(t, len(withBM25Fobjs), len(objs)) + for i := range objs { + t.Logf("%v: BM25F score: %v, BM25 score: %v", i, withBM25Fscores[i], scores[i]) + EqualFloats(t, withBM25Fscores[i], scores[i], 9) + } + + // Not all the scores are unique and the search is not stable, so pick ones that don't move + require.Equal(t, uint64(4), objs[0].DocID) + require.Equal(t, uint64(6), objs[1].DocID) + require.Equal(t, uint64(5), objs[2].DocID) + require.Equal(t, uint64(1), objs[3].DocID) + require.Equal(t, uint64(2), objs[4].DocID) + require.Equal(t, uint64(0), objs[5].DocID) + + require.Equal(t, uint64(4), withBM25Fobjs[0].DocID) + require.Equal(t, uint64(6), withBM25Fobjs[1].DocID) + require.Equal(t, uint64(5), withBM25Fobjs[2].DocID) + require.Equal(t, uint64(1), withBM25Fobjs[3].DocID) + require.Equal(t, uint64(2), withBM25Fobjs[4].DocID) + require.Equal(t, uint64(0), withBM25Fobjs[5].DocID) + + } +} + +func Test_propertyHasSearchableIndex(t *testing.T) { + config.DefaultUsingBlockMaxWAND = false + vFalse := false + vTrue := true + + class := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: BM25FinvertedConfig(1, 1, "none"), + Class: "MyClass", + + Properties: []*models.Property{ + { + Name: "title", + DataType: 
schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + IndexFilterable: &vFalse, + IndexSearchable: nil, + }, + { + Name: "description", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + IndexFilterable: &vFalse, + IndexSearchable: &vTrue, + }, + { + Name: "textField", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationField, + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + }, + }, + } + + t.Run("Property index", func(t *testing.T) { + if got := inverted.PropertyHasSearchableIndex(class, "description"); got != true { + t.Errorf("PropertyHasSearchableIndex() = %v, want %v", got, true) + } + + if got := inverted.PropertyHasSearchableIndex(class, "description^2"); got != true { + t.Errorf("PropertyHasSearchableIndex() = %v, want %v", got, true) + } + + if got := inverted.PropertyHasSearchableIndex(class, "textField"); got != false { + t.Errorf("PropertyHasSearchableIndex() = %v, want %v", got, false) + } + + if got := inverted.PropertyHasSearchableIndex(class, "title"); got != true { + t.Errorf("PropertyHasSearchableIndex() = %v, want %v", got, true) + } + }) +} + +func SetupClassDocuments(t require.TestingT, repo *DB, schemaGetter *fakeSchemaGetter, logger logrus.FieldLogger, k1, b float32, preset string, +) (string, []string) { + vFalse := false + vTrue := true + + className := "DocumentsPreset_" + preset + class := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: BM25FinvertedConfig(k1, b, preset), + Class: className, + + Properties: []*models.Property{ + { + Name: "document", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + IndexFilterable: &vFalse, + IndexSearchable: &vTrue, + }, + }, + } + props := make([]string, len(class.Properties)) + for i, prop := range class.Properties { + props[i] = prop.Name + } + + schemaGetter.schema = schema.Schema{ + 
Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + migrator := NewMigrator(repo, logger, "node1") + migrator.AddClass(context.Background(), class) + + testData := []map[string]interface{}{} + testData = append(testData, map[string]interface{}{"document": "No matter what you do, the question of \"\"what is income\"\" is *always* going to be an extremely complex question. To use this particular example, is paying a royalty fee to an external party a legitimate business expense that is part of the cost of doing business and which subtracts from your \"\"income\"\"?"}) + testData = append(testData, map[string]interface{}{"document": "test"}) + testData = append(testData, map[string]interface{}{"document": "As long as the losing business is not considered \"\"passive activity\"\" or \"\"hobby\"\", then yes. Passive Activity is an activity where you do not have to actively do anything to generate income. For example - royalties or rentals. Hobby is an activity that doesn't generate profit. Generally, if your business doesn't consistently generate profit (the IRS looks at 3 out of the last 5 years), it may be characterized as hobby. For hobby, loss deduction is limited by the hobby income and the 2% AGI threshold."}) + testData = append(testData, map[string]interface{}{"document": "So you're basically saying that average market fluctuations have an affect on individual stocks, because individual stocks are often priced in relation to the growth of the market as a whole? 
Also, what kinds of investments would be considered \"\"risk free\"\" in this nomenclature?"}) + + for i, data := range testData { + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + + obj := &models.Object{Class: className, ID: id, Properties: data, CreationTimeUnix: 1565612833955, LastUpdateTimeUnix: 10000020} + vector := []float32{1, 3, 5, 0.4} + //{title: "Our journey to BM25F", description: " This is how we get to BM25F"}} + err := repo.PutObject(context.Background(), obj, vector, nil, nil, nil, 0) + require.Nil(t, err) + } + return className, props +} + +func TestBM25F_ComplexDocuments(t *testing.T) { + config.DefaultUsingBlockMaxWAND = false + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{}, + }, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := 
cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + classNone, props := SetupClassDocuments(t, repo, schemaGetter, logger, 0.5, 0.75, "none") + idxNone := repo.GetIndex(schema.ClassName(classNone)) + require.NotNil(t, idxNone) + + addit := additional.Properties{} + + t.Run("single term", func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Query: "considered a"} + res, scores, err := idxNone.objectSearch(context.TODO(), 10, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // Print results + t.Log("--- Start results for boosted search ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, \n", r.DocID, scores[i]) + } + + // Check results in correct order + require.Equal(t, uint64(3), res[0].DocID) + require.Equal(t, uint64(0), res[1].DocID) + require.Equal(t, uint64(2), res[2].DocID) + require.Len(t, res, 3) + + // Check scores + EqualFloats(t, float32(0.8914), scores[0], 5) + EqualFloats(t, float32(0.5425), scores[1], 5) + EqualFloats(t, float32(0.3952), scores[2], 5) + }) + + t.Run("Results without stopwords", func(t *testing.T) { + kwrNoStopwords := &searchparams.KeywordRanking{Type: "bm25", Query: "example losing business"} + resNoStopwords, resNoScores, err := idxNone.objectSearch(context.TODO(), 10, nil, kwrNoStopwords, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + classEn, 
properties := SetupClassDocuments(t, repo, schemaGetter, logger, 0.5, 0.75, "en") + idxEn := repo.GetIndex(schema.ClassName(classEn)) + require.NotNil(t, idxEn) + kwrStopwords := &searchparams.KeywordRanking{Type: "bm25", Query: "an example on losing the business"} + resStopwords, resScores, err := idxEn.objectSearch(context.TODO(), 10, nil, kwrStopwords, nil, nil, addit, nil, "", 0, properties) + require.Nil(t, err) + + require.Equal(t, len(resNoStopwords), len(resStopwords)) + for i, resNo := range resNoStopwords { + resYes := resStopwords[i] + require.Equal(t, resNo.DocID, resYes.DocID) + require.Equal(t, resNoScores[i], resScores[i]) + } + + kwrStopwordsDuplicate := &searchparams.KeywordRanking{Type: "bm25", Query: "on an example on losing the business on"} + resStopwordsDuplicate, duplicateScores, err := idxEn.objectSearch(context.TODO(), 10, nil, kwrStopwordsDuplicate, nil, nil, addit, nil, "", 0, properties) + require.Nil(t, err) + require.Equal(t, len(resNoStopwords), len(resStopwordsDuplicate)) + for i, resNo := range resNoStopwords { + resYes := resStopwordsDuplicate[i] + require.Equal(t, resNo.DocID, resYes.DocID) + require.Equal(t, resNoScores[i], duplicateScores[i]) + } + }) +} + +func MultiPropClass(t require.TestingT, repo *DB, schemaGetter *fakeSchemaGetter, logger logrus.FieldLogger, k1, b float32) (string, []string) { + vFalse := false + vTrue := true + + className := "MultiProps" + class := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: BM25FinvertedConfig(k1, b, "none"), + Class: className, + + Properties: []*models.Property{ + { + Name: "document", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + IndexFilterable: &vFalse, + IndexSearchable: &vTrue, + }, + { + Name: "title", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + IndexFilterable: &vFalse, + IndexSearchable: &vTrue, + }, + }, + } + 
schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + props := make([]string, len(class.Properties)) + for i, prop := range class.Properties { + props[i] = prop.Name + } + migrator := NewMigrator(repo, logger, "node1") + migrator.AddClass(context.Background(), class) + + testData := []map[string]interface{}{} + testData = append(testData, map[string]interface{}{"document": "test", "title": "pepper"}) + testData = append(testData, map[string]interface{}{"document": "banana", "title": "pepper"}) + testData = append(testData, map[string]interface{}{"document": "apple", "title": "banana taste great"}) + testData = append(testData, map[string]interface{}{"document": "banana burger", "title": "test"}) + testData = append(testData, map[string]interface{}{"document": "carotte", "title": "great"}) + + for i, data := range testData { + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + + obj := &models.Object{Class: className, ID: id, Properties: data, CreationTimeUnix: 1565612833955, LastUpdateTimeUnix: 10000020} + vector := []float32{1, 3, 5, 0.4} + err := repo.PutObject(context.Background(), obj, vector, nil, nil, nil, 0) + require.Nil(t, err) + } + return className, props +} + +func TestBM25F_SortMultiProp(t *testing.T) { + config.DefaultUsingBlockMaxWAND = false + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{}, + }, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class 
:= &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + className, props := MultiPropClass(t, repo, schemaGetter, logger, 0.5, 0.75) + idx := repo.GetIndex(schema.ClassName(className)) + require.NotNil(t, idx) + + addit := additional.Properties{} + + t.Run("single term", func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Query: "pepper banana"} + res, scores, err := idx.objectSearch(context.TODO(), 1, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // Print results + t.Log("--- Start results for boosted search ---") + for i, r := range res { + t.Logf("Result id: %v, score: %v, \n", r.DocID, scores[i]) + } + + // Document 1 is a result for both terms + 
require.Len(t, res, 1) + require.Equal(t, uint64(1), res[0].DocID) + }) + + t.Run("two docs to test additional explanations", func(t *testing.T) { + kwr := &searchparams.KeywordRanking{Type: "bm25", Query: "pepper banana", AdditionalExplanations: true} + res, _, err := idx.objectSearch(context.TODO(), 2, nil, kwr, nil, nil, addit, nil, "", 0, props) + require.Nil(t, err) + + // Print results + t.Log("--- Start results for boosted search ---") + for _, r := range res { + t.Logf("Result id: %v, score: %v, additional: %v\n", r.DocID, r.ExplainScore(), r.Object.Additional) + } + + // We have two results, one that matches both terms and one that matches only one + require.Len(t, res, 2) + require.Equal(t, uint64(1), res[0].DocID) + // these assertions failed if we didn't swap the positions of the additional explanations, as we would be getting the explanations from the second doc + explanationString := fmt.Sprintf("%f", res[0].Object.Additional["explainScore"]) + require.True(t, strings.Contains(explanationString, "BM25F_pepper_frequency:1")) + require.True(t, strings.Contains(explanationString, "BM25F_pepper_propLength:1")) + require.True(t, strings.Contains(explanationString, "BM25F_banana_frequency:1")) + require.True(t, strings.Contains(explanationString, "BM25F_banana_propLength:1")) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/classification.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/classification.go new file mode 100644 index 0000000000000000000000000000000000000000..0a0540e92cd43ff636b579d07eac660be1e24b6c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/classification.go @@ -0,0 +1,307 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "math" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/classification" + "github.com/weaviate/weaviate/usecases/vectorizer" +) + +// TODO: why is this logic in the persistence package? This is business-logic, +// move out of here! +func (db *DB) GetUnclassified(ctx context.Context, className string, + properties []string, propsToReturn []string, filter *filters.LocalFilter, +) ([]search.Result, error) { + propsToReturnTmp := append(properties, propsToReturn...) + props := make(search.SelectProperties, len(propsToReturnTmp)) + for i, prop := range propsToReturnTmp { + props[i] = search.SelectProperty{Name: prop} + } + mergedFilter := mergeUserFilterWithRefCountFilter(filter, className, properties, + filters.OperatorEqual, 0) + res, err := db.Search(ctx, dto.GetParams{ + ClassName: className, + Filters: mergedFilter, + Pagination: &filters.Pagination{ + Limit: 10000, // TODO: gh-1219 increase + }, + AdditionalProperties: additional.Properties{ + Classification: true, + Vector: true, + ModuleParams: map[string]interface{}{ + "interpretation": true, + }, + }, + Properties: props, + }) + + return res, err +} + +// TODO: why is this logic in the persistence package? This is business-logic, +// move out of here! 
+func (db *DB) ZeroShotSearch(ctx context.Context, vector []float32, + class string, properties []string, + filter *filters.LocalFilter, +) ([]search.Result, error) { + props := make(search.SelectProperties, len(properties)) + for i, prop := range properties { + props[i] = search.SelectProperty{Name: prop} + } + + res, err := db.VectorSearch(ctx, dto.GetParams{ + ClassName: class, + Pagination: &filters.Pagination{ + Limit: 1, + }, + Filters: filter, + AdditionalProperties: additional.Properties{ + Vector: true, + }, + Properties: props, + }, []string{""}, []models.Vector{vector}) + + return res, err +} + +// TODO: why is this logic in the persistence package? This is business-logic, +// move out of here! +func (db *DB) AggregateNeighbors(ctx context.Context, vector []float32, + class string, properties []string, k int, + filter *filters.LocalFilter, +) ([]classification.NeighborRef, error) { + props := make(search.SelectProperties, len(properties)) + for i, prop := range properties { + props[i] = search.SelectProperty{Name: prop} + } + mergedFilter := mergeUserFilterWithRefCountFilter(filter, class, properties, + filters.OperatorGreaterThan, 0) + res, err := db.VectorSearch(ctx, dto.GetParams{ + ClassName: class, + Pagination: &filters.Pagination{ + Limit: k, + }, + Filters: mergedFilter, + AdditionalProperties: additional.Properties{ + Vector: true, + }, + Properties: props, + }, []string{""}, []models.Vector{vector}) + if err != nil { + return nil, errors.Wrap(err, "aggregate neighbors: search neighbors") + } + + return NewKnnAggregator(res, vector).Aggregate(k, properties) +} + +// TODO: this is business logic, move out of here +type KnnAggregator struct { + input search.Results + sourceVector []float32 +} + +func NewKnnAggregator(input search.Results, sourceVector []float32) *KnnAggregator { + return &KnnAggregator{input: input, sourceVector: sourceVector} +} + +func (a *KnnAggregator) Aggregate(k int, properties []string) ([]classification.NeighborRef, error) 
{ + neighbors, err := a.extractBeacons(properties) + if err != nil { + return nil, errors.Wrap(err, "aggregate: extract beacons from neighbors") + } + + return a.aggregateBeacons(neighbors) +} + +func (a *KnnAggregator) extractBeacons(properties []string) (neighborProps, error) { + neighbors := neighborProps{} + for i, elem := range a.input { + schemaMap, ok := elem.Schema.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("expecteded element[%d].Schema to be map, got: %T", i, elem.Schema) + } + + for _, prop := range properties { + refProp, ok := schemaMap[prop] + if !ok { + return nil, fmt.Errorf("expecteded element[%d].Schema to have property %q, but didn't", i, prop) + } + + refTyped, ok := refProp.(models.MultipleRef) + if !ok { + return nil, fmt.Errorf("expecteded element[%d].Schema.%s to be models.MultipleRef, got: %T", i, prop, refProp) + } + + if len(refTyped) != 1 { + return nil, fmt.Errorf("a knn training data object needs to have exactly one label: "+ + "expecteded element[%d].Schema.%s to have exactly one reference, got: %d", + i, prop, len(refTyped)) + } + + distance, err := vectorizer.NormalizedDistance(a.sourceVector, elem.Vector) + if err != nil { + return nil, errors.Wrap(err, "calculate distance between source and candidate") + } + + beacon := refTyped[0].Beacon.String() + neighborProp := neighbors[prop] + if neighborProp.beacons == nil { + neighborProp.beacons = neighborBeacons{} + } + neighborProp.beacons[beacon] = append(neighborProp.beacons[beacon], distance) + neighbors[prop] = neighborProp + } + } + + return neighbors, nil +} + +func (a *KnnAggregator) aggregateBeacons(props neighborProps) ([]classification.NeighborRef, error) { + var out []classification.NeighborRef + for propName, prop := range props { + var winningBeacon string + var winningCount int + var totalCount int + + for beacon, distances := range prop.beacons { + totalCount += len(distances) + if len(distances) > winningCount { + winningBeacon = beacon + winningCount = 
len(distances) + } + } + + distances := a.distances(prop.beacons, winningBeacon) + out = append(out, classification.NeighborRef{ + Beacon: strfmt.URI(winningBeacon), + WinningCount: winningCount, + OverallCount: totalCount, + LosingCount: totalCount - winningCount, + Property: propName, + Distances: distances, + }) + } + + return out, nil +} + +func (a *KnnAggregator) distances(beacons neighborBeacons, + winner string, +) classification.NeighborRefDistances { + out := classification.NeighborRefDistances{} + + var winningDistances []float32 + var losingDistances []float32 + + for beacon, distances := range beacons { + if beacon == winner { + winningDistances = distances + } else { + losingDistances = append(losingDistances, distances...) + } + } + + if len(losingDistances) > 0 { + mean := mean(losingDistances) + out.MeanLosingDistance = &mean + + closest := min_custom(losingDistances) + out.ClosestLosingDistance = &closest + } + + out.ClosestOverallDistance = min_custom(append(winningDistances, losingDistances...)) + out.ClosestWinningDistance = min_custom(winningDistances) + out.MeanWinningDistance = mean(winningDistances) + + return out +} + +type neighborProps map[string]neighborProp + +type neighborProp struct { + beacons neighborBeacons +} + +type neighborBeacons map[string][]float32 + +func mergeUserFilterWithRefCountFilter(userFilter *filters.LocalFilter, className string, + properties []string, op filters.Operator, refCount int, +) *filters.LocalFilter { + countFilters := make([]filters.Clause, len(properties)) + for i, prop := range properties { + countFilters[i] = filters.Clause{ + Operator: op, + Value: &filters.Value{ + Type: schema.DataTypeInt, + Value: refCount, + }, + On: &filters.Path{ + Class: schema.ClassName(className), + Property: schema.PropertyName(prop), + }, + } + } + + var countRootClause filters.Clause + if len(countFilters) == 1 { + countRootClause = countFilters[0] + } else { + countRootClause = filters.Clause{ + Operands: countFilters, + 
Operator: filters.OperatorAnd, + } + } + + rootFilter := &filters.LocalFilter{} + if userFilter == nil { + rootFilter.Root = &countRootClause + } else { + rootFilter.Root = &filters.Clause{ + Operator: filters.OperatorAnd, // so we can AND the refcount requirements and whatever custom filters, the user has + Operands: []filters.Clause{*userFilter.Root, countRootClause}, + } + } + + return rootFilter +} + +func mean(in []float32) float32 { + sum := float32(0) + for _, v := range in { + sum += v + } + + return sum / float32(len(in)) +} + +func min_custom(in []float32) float32 { + min := float32(math.MaxFloat32) + for _, dist := range in { + if dist < min { + min = dist + } + } + + return min +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/classification_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/classification_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4997b0ee89bb845ef7d7d51b28df00772be838c2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/classification_integration_test.go @@ -0,0 +1,458 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/classification" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +func TestClassifications(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, 
mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + + t.Run("importing classification schema", func(t *testing.T) { + for _, class := range classificationTestSchema() { + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + } + }) + + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{Objects: &models.Schema{Classes: classificationTestSchema()}} + + t.Run("importing categories", func(t *testing.T) { + for _, res := range classificationTestCategories() { + thing := res.Object() + err := repo.PutObject(context.Background(), thing, res.Vector, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + + t.Run("importing articles", func(t *testing.T) { + for _, res := range classificationTestArticles() { + thing := res.Object() + err := repo.PutObject(context.Background(), thing, res.Vector, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + + t.Run("finding all unclassified (no filters)", func(t *testing.T) { + res, err := repo.GetUnclassified(context.Background(), + "Article", []string{"exactCategory", "mainCategory"}, nil, 
nil) + require.Nil(t, err) + require.Len(t, res, 6) + }) + + t.Run("finding all unclassified (with filters)", func(t *testing.T) { + filter := &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Property: "description", + }, + Value: &filters.Value{ + Value: "johnny", + Type: schema.DataTypeText, + }, + }, + } + + res, err := repo.GetUnclassified(context.Background(), + "Article", []string{"exactCategory", "mainCategory"}, nil, filter) + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, strfmt.UUID("a2bbcbdc-76e1-477d-9e72-a6d2cfb50109"), res[0].ID) + }) + + t.Run("aggregating over item neighbors", func(t *testing.T) { + t.Run("close to politics (no filters)", func(t *testing.T) { + res, err := repo.AggregateNeighbors(context.Background(), + []float32{0.7, 0.01, 0.01}, "Article", + []string{"exactCategory", "mainCategory"}, 1, nil) + + expectedRes := []classification.NeighborRef{ + { + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", idCategoryPolitics)), + Property: "exactCategory", + OverallCount: 1, + WinningCount: 1, + LosingCount: 0, + Distances: classification.NeighborRefDistances{ + MeanWinningDistance: 0.00010201335, + ClosestWinningDistance: 0.00010201335, + ClosestOverallDistance: 0.00010201335, + }, + }, + { + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", idMainCategoryPoliticsAndSociety)), + Property: "mainCategory", + OverallCount: 1, + WinningCount: 1, + LosingCount: 0, + Distances: classification.NeighborRefDistances{ + MeanWinningDistance: 0.00010201335, + ClosestWinningDistance: 0.00010201335, + ClosestOverallDistance: 0.00010201335, + }, + }, + } + + require.Nil(t, err) + assert.ElementsMatch(t, expectedRes, res) + }) + + t.Run("close to food and drink (no filters)", func(t *testing.T) { + res, err := repo.AggregateNeighbors(context.Background(), + []float32{0.01, 0.01, 0.66}, "Article", + []string{"exactCategory", "mainCategory"}, 1, nil) + + expectedRes := 
[]classification.NeighborRef{ + { + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", idCategoryFoodAndDrink)), + Property: "exactCategory", + OverallCount: 1, + WinningCount: 1, + LosingCount: 0, + Distances: classification.NeighborRefDistances{ + MeanWinningDistance: 0.00011473894, + ClosestWinningDistance: 0.00011473894, + ClosestOverallDistance: 0.00011473894, + }, + }, + { + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", idMainCategoryFoodAndDrink)), + Property: "mainCategory", + OverallCount: 1, + WinningCount: 1, + LosingCount: 0, + Distances: classification.NeighborRefDistances{ + MeanWinningDistance: 0.00011473894, + ClosestWinningDistance: 0.00011473894, + ClosestOverallDistance: 0.00011473894, + }, + }, + } + + require.Nil(t, err) + assert.ElementsMatch(t, expectedRes, res) + }) + + t.Run("close to food and drink (but limiting to politics through filter)", func(t *testing.T) { + filter := &filters.LocalFilter{ + Root: &filters.Clause{ + On: &filters.Path{ + Property: "description", + }, + Value: &filters.Value{ + Value: "politics", + Type: schema.DataTypeText, + }, + Operator: filters.OperatorEqual, + }, + } + res, err := repo.AggregateNeighbors(context.Background(), + []float32{0.01, 0.01, 0.66}, "Article", + []string{"exactCategory", "mainCategory"}, 1, filter) + + expectedRes := []classification.NeighborRef{ + { + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", idCategoryPolitics)), + Property: "exactCategory", + OverallCount: 1, + WinningCount: 1, + LosingCount: 0, + Distances: classification.NeighborRefDistances{ + MeanWinningDistance: 0.49242598, + ClosestWinningDistance: 0.49242598, + ClosestOverallDistance: 0.49242598, + }, + }, + { + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/%s", idMainCategoryPoliticsAndSociety)), + Property: "mainCategory", + OverallCount: 1, + WinningCount: 1, + LosingCount: 0, + Distances: classification.NeighborRefDistances{ + MeanWinningDistance: 0.49242598, + ClosestWinningDistance: 
0.49242598, + ClosestOverallDistance: 0.49242598, + }, + }, + } + + require.Nil(t, err) + assert.ElementsMatch(t, expectedRes, res) + }) + }) +} + +// test fixtures +func classificationTestSchema() []*models.Class { + return []*models.Class{ + { + Class: "ExactCategory", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + { + Class: "MainCategory", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + { + Class: "Article", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "description", + DataType: []string{string(schema.DataTypeText)}, + Tokenization: "word", + }, + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "exactCategory", + DataType: []string{"ExactCategory"}, + }, + { + Name: "mainCategory", + DataType: []string{"MainCategory"}, + }, + }, + }, + } +} + +const ( + idMainCategoryPoliticsAndSociety = "39c6abe3-4bbe-4c4e-9e60-ca5e99ec6b4e" + idMainCategoryFoodAndDrink = "5a3d909a-4f0d-4168-8f5c-cd3074d1e79a" + idCategoryPolitics = "1b204f16-7da6-44fd-bbd2-8cc4a7414bc3" + idCategorySociety = "ec500f39-1dc9-4580-9bd1-55a8ea8e37a2" + idCategoryFoodAndDrink = "027b708a-31ca-43ea-9001-88bec864c79c" +) + +func beaconRef(target string) *models.SingleRef { + beacon := fmt.Sprintf("weaviate://localhost/%s", target) + return &models.SingleRef{Beacon: strfmt.URI(beacon)} +} + +func classificationTestCategories() search.Results { + // using search.Results, because it's the 
perfect grouping of object and + // vector + return search.Results{ + // exact categories + search.Result{ + ID: idCategoryPolitics, + ClassName: "ExactCategory", + Vector: []float32{1, 0, 0}, + Schema: map[string]interface{}{ + "name": "Politics", + }, + }, + search.Result{ + ID: idCategorySociety, + ClassName: "ExactCategory", + Vector: []float32{0, 1, 0}, + Schema: map[string]interface{}{ + "name": "Society", + }, + }, + search.Result{ + ID: idCategoryFoodAndDrink, + ClassName: "ExactCategory", + Vector: []float32{0, 0, 1}, + Schema: map[string]interface{}{ + "name": "Food and Drink", + }, + }, + + // main categories + search.Result{ + ID: idMainCategoryPoliticsAndSociety, + ClassName: "MainCategory", + Vector: []float32{0, 1, 0}, + Schema: map[string]interface{}{ + "name": "Politics and Society", + }, + }, + search.Result{ + ID: idMainCategoryFoodAndDrink, + ClassName: "MainCategory", + Vector: []float32{0, 0, 1}, + Schema: map[string]interface{}{ + "name": "Food and Drink", + }, + }, + } +} + +func classificationTestArticles() search.Results { + // using search.Results, because it's the perfect grouping of object and + // vector + return search.Results{ + // classified + search.Result{ + ID: "8aeecd06-55a0-462c-9853-81b31a284d80", + ClassName: "Article", + Vector: []float32{1, 0, 0}, + Schema: map[string]interface{}{ + "description": "This article talks about politics", + "exactCategory": models.MultipleRef{beaconRef(idCategoryPolitics)}, + "mainCategory": models.MultipleRef{beaconRef(idMainCategoryPoliticsAndSociety)}, + }, + }, + search.Result{ + ID: "9f4c1847-2567-4de7-8861-34cf47a071ae", + ClassName: "Article", + Vector: []float32{0, 1, 0}, + Schema: map[string]interface{}{ + "description": "This articles talks about society", + "exactCategory": models.MultipleRef{beaconRef(idCategorySociety)}, + "mainCategory": models.MultipleRef{beaconRef(idMainCategoryPoliticsAndSociety)}, + }, + }, + search.Result{ + ID: "926416ec-8fb1-4e40-ab8c-37b226b3d68e", + 
ClassName: "Article", + Vector: []float32{0, 0, 1}, + Schema: map[string]interface{}{ + "description": "This article talks about food", + "exactCategory": models.MultipleRef{beaconRef(idCategoryFoodAndDrink)}, + "mainCategory": models.MultipleRef{beaconRef(idMainCategoryFoodAndDrink)}, + }, + }, + + // unclassified + search.Result{ + ID: "75ba35af-6a08-40ae-b442-3bec69b355f9", + ClassName: "Article", + Vector: []float32{0.78, 0, 0}, + Schema: map[string]interface{}{ + "description": "Barack Obama is a former US president", + }, + }, + search.Result{ + ID: "f850439a-d3cd-4f17-8fbf-5a64405645cd", + ClassName: "Article", + Vector: []float32{0.90, 0, 0}, + Schema: map[string]interface{}{ + "description": "Michelle Obama is Barack Obamas wife", + }, + }, + search.Result{ + ID: "a2bbcbdc-76e1-477d-9e72-a6d2cfb50109", + ClassName: "Article", + Vector: []float32{0, 0.78, 0}, + Schema: map[string]interface{}{ + "description": "Johnny Depp is an actor", + }, + }, + search.Result{ + ID: "069410c3-4b9e-4f68-8034-32a066cb7997", + ClassName: "Article", + Vector: []float32{0, 0.90, 0}, + Schema: map[string]interface{}{ + "description": "Brad Pitt starred in a Quentin Tarantino movie", + }, + }, + search.Result{ + ID: "06a1e824-889c-4649-97f9-1ed3fa401d8e", + ClassName: "Article", + Vector: []float32{0, 0, 0.78}, + Schema: map[string]interface{}{ + "description": "Ice Cream often contains a lot of sugar", + }, + }, + search.Result{ + ID: "6402e649-b1e0-40ea-b192-a64eab0d5e56", + ClassName: "Article", + Vector: []float32{0, 0, 0.90}, + Schema: map[string]interface{}{ + "description": "French Fries are more common in Belgium and the US than in France", + }, + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/backup_coordinator_integration_override_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/backup_coordinator_integration_override_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..d832e04671a46bd9085a6a8ab29d85aece38c139 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/backup_coordinator_integration_override_test.go @@ -0,0 +1,202 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package clusterintegrationtest + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + modstgfs "github.com/weaviate/weaviate/modules/backup-filesystem" + "github.com/weaviate/weaviate/usecases/backup" +) + +func TestDistributedBackupsOverride(t *testing.T) { + var ( + dirName = setupDirectory(t) + rnd = getRandomSeed() + numObjs = 100 + numNodes = 3 + backupID = "new-backup" + nodes []*node + overrideBucket = "testbucketoverride" + overridePath = "testBucketPathOverride" + ) + + t.Run("setup", func(t *testing.T) { + overallShardState := multiShardState(numNodes) + backend = &fakeBackupBackend{ + backupsPath: dirName, + backupID: backupID, + startedAt: time.Now(), + } + + for i := 0; i < numNodes; i++ { + node := &node{ + name: fmt.Sprintf("node-%d", i), + } + nodes = append(nodes, node) + } + for _, node := range nodes { + node.init(t, dirName, &nodes, overallShardState) + } + }) + + t.Run("apply schema", func(t *testing.T) { + for i := range nodes { + err := nodes[i].migrator.AddClass(context.Background(), class()) + require.Nil(t, err) + err = nodes[i].migrator.AddClass(context.Background(), secondClassWithRef()) + require.Nil(t, err) + nodes[i].schemaManager.schema.Objects.Classes = append(nodes[i].schemaManager.schema.Objects.Classes, + class(), secondClassWithRef()) + } + }) + 
+ data := exampleData(numObjs) + refData := exampleDataWithRefs(numObjs, 5, data) + + t.Run("import data", func(t *testing.T) { + t.Run("import first class into random node", func(t *testing.T) { + for _, obj := range data { + node := nodes[rnd.Intn(len(nodes))] + + err := node.repo.PutObject(context.Background(), obj, obj.Vector, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + + t.Run("import second class into random node", func(t *testing.T) { + for _, obj := range refData { + node := nodes[rnd.Intn(len(nodes))] + + err := node.repo.PutObject(context.Background(), obj, obj.Vector, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + }) + + t.Run("should fail backup with local filesystem backend", func(t *testing.T) { + req := &backup.BackupRequest{ + ID: backupID, Backend: modstgfs.Name, + Include: []string{distributedClass}, + } + + resp, err := nodes[0].scheduler.Backup(context.Background(), &models.Principal{}, req) + assert.Nil(t, resp) + assert.Contains(t, err.Error(), "local filesystem backend is not viable for backing up a node cluster") + }) + + t.Run("should fail restore with local filesystem backend", func(t *testing.T) { + req := &backup.BackupRequest{ + ID: backupID, Backend: modstgfs.Name, + Include: []string{distributedClass}, + } + + resp, err := nodes[0].scheduler.Restore(context.Background(), &models.Principal{}, req, false) + assert.Nil(t, resp) + assert.Contains(t, err.Error(), "local filesystem backend is not viable for backing up a node cluster") + }) + + t.Run("let each node be the coordinator", func(t *testing.T) { + for _, node := range nodes { + t.Run(fmt.Sprintf("%s: coordinate backup", node.name), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + req := &backup.BackupRequest{ + ID: backupID, Backend: "fake-backend", + Include: []string{distributedClass}, + } + + resp, err := node.scheduler.Backup(ctx, &models.Principal{}, req) + assert.Nil(t, err, "expected nil err, got: 
%s", err) + assert.Empty(t, resp.Error, "expected empty, got: %s", resp.Error) + assert.NotEmpty(t, resp.Path) + assert.Contains(t, resp.Classes, distributedClass) + }) + + t.Run(fmt.Sprintf("%s: get backup status", node.name), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + start := time.Now() + + for { + if time.Now().After(start.Add(30 * time.Second)) { + t.Fatal("backup deadline exceeded") + } + resp, err := node.scheduler.BackupStatus(ctx, &models.Principal{}, "fake-backend", backupID, overrideBucket, overridePath) + assert.Nil(t, err, "expected nil err, got: %s", err) + if resp != nil && string(resp.Status) == "SUCCESS" { + break + } + if resp != nil && string(resp.Status) == "FAILED" { + t.Fatalf("backup failed: %q", resp.Err) + } + } + }) + + t.Run(fmt.Sprintf("%s: restore to cluster", node.name), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + req := &backup.BackupRequest{ + ID: backupID, Backend: "fake-backend", + Include: []string{distributedClass}, + } + + resp, err := node.scheduler.Restore(ctx, &models.Principal{}, req, false) + assert.Nil(t, err, "expected nil err, got: %s", err) + assert.Empty(t, resp.Error, "expected empty, got: %s", resp.Error) + assert.NotEmpty(t, resp.Path) + assert.Contains(t, resp.Classes, distributedClass) + }) + + t.Run(fmt.Sprintf("%s: get restore status", node.name), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + start := time.Now() + + for { + if time.Now().After(start.Add(30 * time.Second)) { + t.Fatal("restore deadline exceeded") + } + resp, err := node.scheduler.RestorationStatus(ctx, &models.Principal{}, "fake-backend", backupID, overrideBucket, overridePath) + assert.Nil(t, err, "expected nil err, got: %s", err) + if resp != nil && string(resp.Status) == "SUCCESS" { + break + } + if resp != nil && string(resp.Status) == 
"FAILED" { + t.Fatalf("restore failed: %q", resp.Err) + } + } + }) + + backend.reset() + } + }) + + t.Run("shutdown", func(t *testing.T) { + for _, node := range nodes { + err := node.repo.Shutdown(context.Background()) + require.Nil(t, err, "expected nil, got: %v", err) + } + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/backup_coordinator_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/backup_coordinator_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..dac4d4c2876cc8ed45b02d0b6b7d138506a6c13a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/backup_coordinator_integration_test.go @@ -0,0 +1,203 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package clusterintegrationtest + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + modstgfs "github.com/weaviate/weaviate/modules/backup-filesystem" + "github.com/weaviate/weaviate/usecases/backup" +) + +var backend *fakeBackupBackend + +func TestDistributedBackups(t *testing.T) { + var ( + dirName = setupDirectory(t) + rnd = getRandomSeed() + numObjs = 100 + numNodes = 3 + backupID = "new-backup" + nodes []*node + ) + + t.Run("setup", func(t *testing.T) { + overallShardState := multiShardState(numNodes) + backend = &fakeBackupBackend{ + backupsPath: dirName, + backupID: backupID, + startedAt: time.Now(), + } + + for i := 0; i < numNodes; i++ { + node := &node{ + name: fmt.Sprintf("node-%d", i), + } + + nodes = append(nodes, node) + } + for _, node := range nodes { + node.init(t, dirName, &nodes, overallShardState) + } + }) + + t.Run("apply schema", func(t *testing.T) { + for i := range nodes { + err := nodes[i].migrator.AddClass(context.Background(), class()) + require.Nil(t, err) + err = nodes[i].migrator.AddClass(context.Background(), secondClassWithRef()) + require.Nil(t, err) + nodes[i].schemaManager.schema.Objects.Classes = append(nodes[i].schemaManager.schema.Objects.Classes, + class(), secondClassWithRef()) + } + }) + + data := exampleData(numObjs) + refData := exampleDataWithRefs(numObjs, 5, data) + + t.Run("import data", func(t *testing.T) { + t.Run("import first class into random node", func(t *testing.T) { + for _, obj := range data { + node := nodes[rnd.Intn(len(nodes))] + + err := node.repo.PutObject(context.Background(), obj, obj.Vector, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + + t.Run("import second class into random node", func(t *testing.T) { + for _, obj := range refData { + node := nodes[rnd.Intn(len(nodes))] + + err := 
node.repo.PutObject(context.Background(), obj, obj.Vector, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + }) + + t.Run("should fail backup with local filesystem backend", func(t *testing.T) { + req := &backup.BackupRequest{ + ID: backupID, Backend: modstgfs.Name, + Include: []string{distributedClass}, + } + + resp, err := nodes[0].scheduler.Backup(context.Background(), &models.Principal{}, req) + assert.Nil(t, resp) + assert.Contains(t, err.Error(), "local filesystem backend is not viable for backing up a node cluster") + }) + + t.Run("should fail restore with local filesystem backend", func(t *testing.T) { + req := &backup.BackupRequest{ + ID: backupID, Backend: modstgfs.Name, + Include: []string{distributedClass}, + } + + resp, err := nodes[0].scheduler.Restore(context.Background(), &models.Principal{}, req, false) + assert.Nil(t, resp) + assert.Contains(t, err.Error(), "local filesystem backend is not viable for backing up a node cluster") + }) + + t.Run("let each node be the coordinator", func(t *testing.T) { + for _, node := range nodes { + t.Run(fmt.Sprintf("%s: coordinate backup", node.name), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + req := &backup.BackupRequest{ + ID: backupID, Backend: "fake-backend", + Include: []string{distributedClass}, + } + + resp, err := node.scheduler.Backup(ctx, &models.Principal{}, req) + assert.Nil(t, err, "expected nil err, got: %s", err) + assert.Empty(t, resp.Error, "expected empty, got: %s", resp.Error) + assert.NotEmpty(t, resp.Path) + assert.Contains(t, resp.Classes, distributedClass) + }) + + t.Run(fmt.Sprintf("%s: get backup status", node.name), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + start := time.Now() + + for { + if time.Now().After(start.Add(30 * time.Second)) { + t.Fatal("backup deadline exceeded") + } + resp, err := node.scheduler.BackupStatus(ctx, &models.Principal{}, 
"fake-backend", backupID, "", "") + assert.Nil(t, err, "expected nil err, got: %s", err) + if resp != nil && string(resp.Status) == "SUCCESS" { + break + } + if resp != nil && string(resp.Status) == "FAILED" { + t.Fatalf("backup failed: %q", resp.Err) + } + } + }) + + t.Run(fmt.Sprintf("%s: restore to cluster", node.name), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + req := &backup.BackupRequest{ + ID: backupID, Backend: "fake-backend", + Include: []string{distributedClass}, + } + + resp, err := node.scheduler.Restore(ctx, &models.Principal{}, req, false) + assert.Nil(t, err, "expected nil err, got: %s", err) + assert.Empty(t, resp.Error, "expected empty, got: %s", resp.Error) + assert.NotEmpty(t, resp.Path) + assert.Contains(t, resp.Classes, distributedClass) + }) + + t.Run(fmt.Sprintf("%s: get restore status", node.name), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + start := time.Now() + + for { + if time.Now().After(start.Add(30 * time.Second)) { + t.Fatal("restore deadline exceeded") + } + resp, err := node.scheduler.RestorationStatus(ctx, &models.Principal{}, "fake-backend", backupID, "", "") + assert.Nil(t, err, "expected nil err, got: %s", err) + if resp != nil && string(resp.Status) == "SUCCESS" { + break + } + if resp != nil && string(resp.Status) == "FAILED" { + t.Fatalf("restore failed: %q", resp.Err) + } + } + }) + + backend.reset() + } + }) + + t.Run("shutdown", func(t *testing.T) { + for _, node := range nodes { + err := node.repo.Shutdown(context.Background()) + require.Nil(t, err, "expected nil, got: %v", err) + } + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/cluster_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/cluster_integration_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..e7d7e7c468bac30909dd96b819c0c06234523e90 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/cluster_integration_test.go @@ -0,0 +1,835 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package clusterintegrationtest + +import ( + "context" + "fmt" + "math/rand" + "os" + "strconv" + "strings" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus/hooks/test" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/modules" + "github.com/weaviate/weaviate/usecases/objects" +) + +const ( + vectorDims = 20 + numberOfNodes = 10 + distributedClass = "Distributed" +) + +// TestDistributedSetup uses as many real components and only mocks out +// non-essential parts. Essentially we fix the shard/cluster state and schema +// as they aren't critical to this test, but use real repos and real HTTP APIs +// between the repos. 
+func TestDistributedSetup(t *testing.T) { + t.Run("individual imports", func(t *testing.T) { + dirName := setupDirectory(t) + r := getRandomSeed() + testDistributed(t, dirName, r, false) + }) + t.Run("batched imports", func(t *testing.T) { + dirName := setupDirectory(t) + r := getRandomSeed() + testDistributed(t, dirName, r, true) + }) +} + +func testDistributed(t *testing.T, dirName string, rnd *rand.Rand, batch bool) { + var nodes []*node + numberOfObjects := 200 + + t.Run("setup", func(t *testing.T) { + overallShardState := multiShardState(numberOfNodes) + for i := 0; i < numberOfNodes; i++ { + node := &node{ + name: fmt.Sprintf("node-%d", i), + } + + nodes = append(nodes, node) + } + + for _, node := range nodes { + node.init(t, dirName, &nodes, overallShardState) + } + }) + + t.Run("apply schema", func(t *testing.T) { + for i := range nodes { + err := nodes[i].migrator.AddClass(context.Background(), class()) + require.Nil(t, err) + err = nodes[i].migrator.AddClass(context.Background(), secondClassWithRef()) + require.Nil(t, err) + nodes[i].schemaManager.schema.Objects.Classes = append(nodes[i].schemaManager.schema.Objects.Classes, + class(), secondClassWithRef()) + } + }) + + data := exampleData(numberOfObjects) + refData := exampleDataWithRefs(numberOfObjects, 5, data) + + if batch { + t.Run("import large batch from random node", func(t *testing.T) { + // pick a random node, but send the entire batch to this node + node := nodes[rnd.Intn(len(nodes))] + + batchObjs := dataAsBatch(data) + res, err := node.repo.BatchPutObjects(context.Background(), batchObjs, nil, 0) + require.Nil(t, err) + for _, ind := range res { + require.Nil(t, ind.Err) + } + }) + + t.Run("import second class without refs", func(t *testing.T) { + // pick a random node, but send the entire batch to this node + node := nodes[rnd.Intn(len(nodes))] + + batchObjs := dataAsBatchWithProps(refData, []string{"description"}) + res, err := node.repo.BatchPutObjects(context.Background(), batchObjs, 
nil, 0) + require.Nil(t, err) + for _, ind := range res { + require.Nil(t, ind.Err) + } + }) + + t.Run("import refs as batch", func(t *testing.T) { + // pick a random node, but send the entire batch to this node + node := nodes[rnd.Intn(len(nodes))] + + batch := refsAsBatch(refData, "toFirst") + res, err := node.repo.AddBatchReferences(context.Background(), batch, nil, 0) + require.Nil(t, err) + for _, ind := range res { + require.Nil(t, ind.Err) + } + }) + } else { + t.Run("import first class by picking a random node", func(t *testing.T) { + for _, obj := range data { + node := nodes[rnd.Intn(len(nodes))] + + err := node.repo.PutObject(context.Background(), obj, obj.Vector, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + t.Run("import second class with refs by picking a random node", func(t *testing.T) { + for _, obj := range refData { + node := nodes[rnd.Intn(len(nodes))] + + err := node.repo.PutObject(context.Background(), obj, obj.Vector, nil, nil, nil, 0) + require.Nil(t, err) + + } + }) + } + + t.Run("wait for indexing to finish", func(t *testing.T) { + for _, node := range nodes { + time.Sleep(100 * time.Millisecond) + node.repo.GetScheduler().Schedule(context.Background()) + node.repo.GetScheduler().WaitAll() + } + }) + + t.Run("query individually to check if all exist using random nodes", func(t *testing.T) { + for _, obj := range data { + node := nodes[rnd.Intn(len(nodes))] + + ok, err := node.repo.Exists(context.Background(), distributedClass, obj.ID, nil, "") + require.Nil(t, err) + assert.True(t, ok) + } + }) + + t.Run("query individually using random node", func(t *testing.T) { + for _, obj := range data { + node := nodes[rnd.Intn(len(nodes))] + + res, err := node.repo.ObjectByID(context.Background(), obj.ID, search.SelectProperties{}, additional.Properties{}, "") + require.Nil(t, err) + require.NotNil(t, res) + + // only compare string prop to avoid having to deal with parsing time + // props + assert.Equal(t, 
obj.Properties.(map[string]interface{})["description"], + res.Object().Properties.(map[string]interface{})["description"]) + } + }) + + t.Run("perform vector searches", func(t *testing.T) { + // note this test assumes a recall of 100% which only works with HNSW on + // small sizes, so if we use this test suite with massive sizes, we should + // not expect this test to succeed 100% of times anymore. + runs := 10 + + for i := 0; i < runs; i++ { + query := make([]float32, vectorDims) + for i := range query { + query[i] = rnd.Float32() + } + + groundTruth := bruteForceObjectsByQuery(data, query) + + node := nodes[rnd.Intn(len(nodes))] + res, err := node.repo.VectorSearch(context.Background(), dto.GetParams{ + Pagination: &filters.Pagination{ + Limit: 25, + }, + ClassName: distributedClass, + }, []string{""}, []models.Vector{query}) + assert.Nil(t, err) + for i, obj := range res { + assert.Equal(t, groundTruth[i].ID, obj.ID, fmt.Sprintf("at pos %d", i)) + } + } + + for _, obj := range data { + node := nodes[rnd.Intn(len(nodes))] + + res, err := node.repo.ObjectByID(context.Background(), obj.ID, search.SelectProperties{}, additional.Properties{}, "") + require.Nil(t, err) + require.NotNil(t, res) + + // only compare string prop to avoid having to deal with parsing time + // props + assert.Equal(t, obj.Properties.(map[string]interface{})["description"], + res.Object().Properties.(map[string]interface{})["description"]) + } + }) + + t.Run("query individually and resolve references", func(t *testing.T) { + for _, obj := range refData { + // if i == 5 { + // break + // } + node := nodes[rnd.Intn(len(nodes))] + + res, err := node.repo.ObjectByID(context.Background(), obj.ID, search.SelectProperties{ + search.SelectProperty{ + Name: "toFirst", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: distributedClass, + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "description", + IsPrimitive: true, + }, + }, + }, + }, + }, + }, 
additional.Properties{}, "") + require.Nil(t, err) + require.NotNil(t, res) + props := res.Object().Properties.(map[string]interface{}) + refProp, ok := props["toFirst"].([]interface{}) + require.True(t, ok) + + var refPayload []map[string]interface{} + for _, res := range refProp { + parsed, ok := res.(search.LocalRef) + require.True(t, ok) + refPayload = append(refPayload, map[string]interface{}{ + "description": parsed.Fields["description"], + }) + } + + actual := manuallyResolveRef(t, obj, data, "toFirst", "description", nil) + assert.Equal(t, actual, refPayload) + } + }) + + t.Run("query individually with cross-ref vectors and resolve references", func(t *testing.T) { + for _, obj := range refData { + // if i == 1 { + // break + // } + node := nodes[rnd.Intn(len(nodes))] + + res, err := node.repo.Object(context.Background(), obj.Class, obj.ID, search.SelectProperties{ + search.SelectProperty{ + Name: "toFirst", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: distributedClass, + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "description", + IsPrimitive: true, + }, + }, + AdditionalProperties: additional.Properties{ + Vector: true, + }, + }, + }, + }, + }, additional.Properties{}, nil, "") + require.Nil(t, err) + require.NotNil(t, res) + props := res.Object().Properties.(map[string]interface{}) + refProp, ok := props["toFirst"].([]interface{}) + require.True(t, ok) + + var refPayload []map[string]interface{} + var refVector []map[string]interface{} + for _, ref := range refProp { + parsed, ok := ref.(search.LocalRef) + require.True(t, ok) + refPayload = append(refPayload, map[string]interface{}{ + "description": parsed.Fields["description"], + }) + vector, ok := parsed.Fields["vector"].([]float32) + require.True(t, ok) + require.NotEmpty(t, vector) + refVector = append(refVector, map[string]interface{}{ + "vector": vector, + }) + } + + actual := manuallyResolveRef(t, obj, data, "toFirst", "description", nil) + 
assert.Equal(t, actual, refPayload) + actual = manuallyResolveRef(t, obj, data, "toFirst", "vector", node.repo) + assert.Equal(t, actual, refVector) + } + }) + + t.Run("ranked keyword search", func(t *testing.T) { + for i := 0; i < numberOfObjects; i++ { + description := fmt.Sprintf("object %d", i) + keywordRanking := &searchparams.KeywordRanking{ + Query: description, + Properties: []string{"description"}, + } + + params := dto.GetParams{ + ClassName: distributedClass, + KeywordRanking: keywordRanking, + Pagination: &filters.Pagination{Limit: 100}, + } + + node := nodes[rnd.Intn(len(nodes))] + res, err := node.repo.Search(context.Background(), params) + require.Nil(t, err) + require.NotEmpty(t, res) + + expected := strings.Join(strings.Split(description, " "), "-") + received := res[0].Object().Properties.(map[string]interface{})["description"] + assert.Equal(t, expected, received) + } + }) + + t.Run("aggregate count", func(t *testing.T) { + params := aggregation.Params{ + ClassName: schema.ClassName(distributedClass), + IncludeMetaCount: true, + } + + logger, _ := test.NewNullLogger() + node := nodes[rnd.Intn(len(nodes))] + res, err := node.repo.Aggregate(context.Background(), params, modules.NewProvider(logger, config.Config{})) + require.Nil(t, err) + + expectedResult := &aggregation.Result{ + Groups: []aggregation.Group{ + { + Count: numberOfObjects, + }, + }, + } + + assert.Equal(t, expectedResult, res) + }) + + t.Run("modify an object using patch", func(t *testing.T) { + obj := data[0] + + node := nodes[rnd.Intn(len(nodes))] + err := node.repo.Merge(context.Background(), objects.MergeDocument{ + Class: distributedClass, + ID: obj.ID, + PrimitiveSchema: map[string]interface{}{ + "other_property": "a-value-inserted-through-merge", + }, + }, nil, "", 0) + + require.Nil(t, err) + }) + + t.Run("verify the patched object contains the additions and orig", func(t *testing.T) { + obj := data[0] + + node := nodes[rnd.Intn(len(nodes))] + res, err := 
node.repo.ObjectByID(context.Background(), obj.ID, search.SelectProperties{}, additional.Properties{}, "") + + require.Nil(t, err) + previousMap := obj.Properties.(map[string]interface{}) + assert.Equal(t, "a-value-inserted-through-merge", res.Object().Properties.(map[string]interface{})["other_property"]) + assert.Equal(t, previousMap["description"], res.Object().Properties.(map[string]interface{})["description"]) + }) + + // This test prevents a regression on + // https://github.com/weaviate/weaviate/issues/1775 + t.Run("query items by date filter with regular field", func(t *testing.T) { + count := len(data) / 2 // try to match half the data objects present + cutoff := time.Unix(0, 0).Add(time.Duration(count) * time.Hour) + node := nodes[rnd.Intn(len(nodes))] + res, err := node.repo.Search(context.Background(), dto.GetParams{ + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorLessThan, + On: &filters.Path{ + Class: distributedClass, + Property: schema.PropertyName("date_property"), + }, + Value: &filters.Value{ + Value: cutoff, + Type: schema.DataTypeDate, + }, + }, + }, + ClassName: distributedClass, + Pagination: &filters.Pagination{ + Limit: len(data), + }, + }) + + require.Nil(t, err) + assert.Equal(t, count, len(res)) + }) + + // This test prevents a regression on + // https://github.com/weaviate/weaviate/issues/1775 + t.Run("query items by date filter with array field", func(t *testing.T) { + count := len(data) / 2 // try to match half the data objects present + cutoff := time.Unix(0, 0).Add(time.Duration(count) * time.Hour) + node := nodes[rnd.Intn(len(nodes))] + res, err := node.repo.Search(context.Background(), dto.GetParams{ + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorLessThan, + On: &filters.Path{ + Class: distributedClass, + Property: schema.PropertyName("date_array_property"), + }, + Value: &filters.Value{ + Value: cutoff, + Type: schema.DataTypeDate, + }, + }, + }, + 
ClassName: distributedClass, + Pagination: &filters.Pagination{ + Limit: len(data), + }, + }) + + require.Nil(t, err) + assert.Equal(t, count, len(res)) + }) + + t.Run("sort by", func(t *testing.T) { + getPhoneNumber := func(a search.Result) *float64 { + prop := a.Object().Properties.(map[string]interface{})["phone_property"] + if phoneNumber, ok := prop.(*models.PhoneNumber); ok { + phoneStr := fmt.Sprintf("%v%v", phoneNumber.CountryCode, phoneNumber.National) + if phone, err := strconv.ParseFloat(phoneStr, 64); err == nil { + return &phone + } + } + return nil + } + getDate := func(a search.Result) *time.Time { + asString := a.Object().Properties.(map[string]interface{})["date_property"].(string) + if date, err := time.Parse(time.RFC3339, asString); err == nil { + return &date + } + return nil + } + testData := []struct { + name string + sort []filters.Sort + compareFn func(a, b search.Result) bool + }{ + { + name: "description asc", + sort: []filters.Sort{{Path: []string{"description"}, Order: "asc"}}, + compareFn: func(a, b search.Result) bool { + descriptionA := a.Object().Properties.(map[string]interface{})["description"].(string) + descriptionB := b.Object().Properties.(map[string]interface{})["description"].(string) + return strings.ToLower(descriptionA) <= strings.ToLower(descriptionB) + }, + }, + { + name: "description desc", + sort: []filters.Sort{{Path: []string{"description"}, Order: "desc"}}, + compareFn: func(a, b search.Result) bool { + descriptionA := a.Object().Properties.(map[string]interface{})["description"].(string) + descriptionB := b.Object().Properties.(map[string]interface{})["description"].(string) + return strings.ToLower(descriptionA) >= strings.ToLower(descriptionB) + }, + }, + { + name: "date_property asc", + sort: []filters.Sort{{Path: []string{"date_property"}, Order: "asc"}}, + compareFn: func(a, b search.Result) bool { + datePropA, datePropB := getDate(a), getDate(b) + if datePropA != nil && datePropB != nil { + return 
datePropA.Before(*datePropB) + } + return false + }, + }, + { + name: "date_property desc", + sort: []filters.Sort{{Path: []string{"date_property"}, Order: "desc"}}, + compareFn: func(a, b search.Result) bool { + datePropA, datePropB := getDate(a), getDate(b) + if datePropA != nil && datePropB != nil { + return datePropA.After(*datePropB) + } + return false + }, + }, + { + name: "int_property asc", + sort: []filters.Sort{{Path: []string{"int_property"}, Order: "asc"}}, + compareFn: func(a, b search.Result) bool { + intPropertyA := a.Object().Properties.(map[string]interface{})["int_property"].(float64) + intPropertyB := b.Object().Properties.(map[string]interface{})["int_property"].(float64) + return intPropertyA <= intPropertyB + }, + }, + { + name: "int_property desc", + sort: []filters.Sort{{Path: []string{"int_property"}, Order: "desc"}}, + compareFn: func(a, b search.Result) bool { + intPropertyA := a.Object().Properties.(map[string]interface{})["int_property"].(float64) + intPropertyB := b.Object().Properties.(map[string]interface{})["int_property"].(float64) + return intPropertyA >= intPropertyB + }, + }, + { + name: "phone_property asc", + sort: []filters.Sort{{Path: []string{"phone_property"}, Order: "asc"}}, + compareFn: func(a, b search.Result) bool { + phoneA, phoneB := getPhoneNumber(a), getPhoneNumber(b) + if phoneA != nil && phoneB != nil { + return *phoneA <= *phoneB + } + return false + }, + }, + { + name: "phone_property desc", + sort: []filters.Sort{{Path: []string{"phone_property"}, Order: "desc"}}, + compareFn: func(a, b search.Result) bool { + phoneA, phoneB := getPhoneNumber(a), getPhoneNumber(b) + if phoneA != nil && phoneB != nil { + return *phoneA >= *phoneB + } + return false + }, + }, + } + for _, td := range testData { + t.Run(td.name, func(t *testing.T) { + params := dto.GetParams{ + ClassName: distributedClass, + Sort: td.sort, + Pagination: &filters.Pagination{Limit: 100}, + Properties: search.SelectProperties{{Name: "description"}}, 
+ } + + node := nodes[rnd.Intn(len(nodes))] + res, err := node.repo.Search(context.Background(), params) + require.Nil(t, err) + require.NotEmpty(t, res) + + if len(res) > 1 { + for i := 1; i < len(res); i++ { + assert.True(t, td.compareFn(res[i-1], res[i])) + } + } + }) + } + }) + + t.Run("node names by shard", func(t *testing.T) { + for _, n := range nodes { + nodeSet := make(map[string]bool) + foundNodes, err := n.repo.Shards(context.Background(), distributedClass) + assert.NoError(t, err) + for _, found := range foundNodes { + nodeSet[found] = true + } + assert.Len(t, nodeSet, numberOfNodes, "expected %d nodes, got %d", + numberOfNodes, len(foundNodes)) + } + }) + + t.Run("delete a third of the data from random nodes", func(t *testing.T) { + for i, obj := range data { + if i%3 != 0 { + // keep this item + continue + } + + node := nodes[rnd.Intn(len(nodes))] + err := node.repo.DeleteObject(context.Background(), distributedClass, obj.ID, time.Now(), nil, "", 0) + require.Nil(t, err) + } + }) + + t.Run("make sure 2/3 exist, 1/3 no longer exists", func(t *testing.T) { + for i, obj := range data { + expected := true + if i%3 == 0 { + expected = false + } + + node := nodes[rnd.Intn(len(nodes))] + actual, err := node.repo.Exists(context.Background(), distributedClass, obj.ID, nil, "") + require.Nil(t, err) + assert.Equal(t, expected, actual) + } + }) + + t.Run("batch delete the remaining 2/3 of data", func(t *testing.T) { + getParams := func(className string, dryRun bool) objects.BatchDeleteParams { + return objects.BatchDeleteParams{ + ClassName: schema.ClassName(className), + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorLike, + Value: &filters.Value{ + Value: "*", + Type: schema.DataTypeText, + }, + On: &filters.Path{ + Property: "id", + }, + }, + }, + DryRun: dryRun, + Output: "verbose", + } + } + performClassSearch := func(repo *db.DB, className string) ([]search.Result, error) { + return repo.Search(context.Background(), 
dto.GetParams{ + ClassName: className, + Pagination: &filters.Pagination{Limit: 10000}, + }) + } + node := nodes[rnd.Intn(len(nodes))] + // get the initial count of the objects + res, err := performClassSearch(node.repo, distributedClass) + require.Nil(t, err) + beforeDelete := len(res) + require.True(t, beforeDelete > 0) + // dryRun == false, perform actual delete + batchDeleteRes, err := node.repo.BatchDeleteObjects(context.Background(), getParams(distributedClass, false), time.Now(), nil, "", 0) + require.Nil(t, err) + require.Equal(t, int64(beforeDelete), batchDeleteRes.Matches) + require.Equal(t, beforeDelete, len(batchDeleteRes.Objects)) + for _, batchRes := range batchDeleteRes.Objects { + require.Nil(t, batchRes.Err) + } + // check that every object is deleted + res, err = performClassSearch(node.repo, distributedClass) + require.Nil(t, err) + require.Equal(t, 0, len(res)) + }) + + t.Run("shutdown", func(t *testing.T) { + for _, node := range nodes { + node.repo.Shutdown(context.Background()) + } + }) +} + +func TestDistributedVectorDistance(t *testing.T) { + dirName := t.TempDir() + rnd := getRandomSeed() + ctx := context.Background() + cases := []struct { + asyncIndexing bool + }{ + {asyncIndexing: true}, + {asyncIndexing: false}, + } + for _, tt := range cases { + t.Run("async indexing:"+strconv.FormatBool(tt.asyncIndexing), func(t *testing.T) { + os.Setenv("ASYNC_INDEXING", strconv.FormatBool(tt.asyncIndexing)) + + collection := multiVectorClass(tt.asyncIndexing) + overallShardState := multiShardState(numberOfNodes) + var nodes []*node + for i := 0; i < numberOfNodes; i++ { + node := &node{ + name: fmt.Sprintf("node-%d", i), + } + nodes = append(nodes, node) + } + + for _, node := range nodes { + node.init(t, dirName, &nodes, overallShardState) + } + + for i := range nodes { + require.Nil(t, nodes[i].migrator.AddClass(context.Background(), collection)) + nodes[i].schemaManager.schema.Objects.Classes = 
append(nodes[i].schemaManager.schema.Objects.Classes, + collection) + } + + uid := strfmt.UUID(uuid.New().String()) + + vectors := [][]float32{ + {1, 0, 0, 0}, + {0, 1, 0, 0}, + {0, 0, 1, 0}, + {0, 0, 0, 1}, + } + + t.Run("get all targets", func(t *testing.T) { + obj := &models.Object{ + ID: uid, + Class: collection.Class, + Vectors: map[string]models.Vector{"custom1": vectors[0], "custom2": vectors[1], "custom3": vectors[2]}, + } + objVectors, _, err := dto.GetVectors(obj.Vectors) + require.NoError(t, err) + require.Nil(t, nodes[rnd.Intn(len(nodes))].repo.PutObject(context.Background(), obj, nil, objVectors, nil, nil, 0)) + + assert.EventuallyWithT(t, func(collect *assert.CollectT) { + res, err := nodes[rnd.Intn(len(nodes))].repo.VectorSearch(ctx, createParams(collection.Class, nil), []string{"custom1", "custom2", "custom3"}, []models.Vector{vectors[1], vectors[2], vectors[3]}) + if !assert.Nil(collect, err) { + return + } + if !assert.Len(collect, res, 1) { + return + } + if !assert.Equal(collect, res[0].ID, obj.ID) { + return + } + if !assert.Equal(collect, res[0].Dist, float32(1)) { + return + } + + assert.Nil(collect, nodes[rnd.Intn(len(nodes))].repo.DeleteObject(context.Background(), collection.Class, obj.ID, time.Now(), nil, "", 0)) + }, 20*time.Second, 1*time.Second) + }) + + t.Run("get some targets", func(t *testing.T) { + obj := &models.Object{ + ID: uid, + Class: collection.Class, + Vectors: map[string]models.Vector{"custom1": vectors[0], "custom2": vectors[1], "custom3": vectors[2]}, + } + objVectors, _, err := dto.GetVectors(obj.Vectors) + require.NoError(t, err) + require.Nil(t, nodes[rnd.Intn(len(nodes))].repo.PutObject(context.Background(), obj, nil, objVectors, nil, nil, 0)) + + assert.EventuallyWithT(t, func(collect *assert.CollectT) { + res, err := nodes[rnd.Intn(len(nodes))].repo.VectorSearch(ctx, createParams(collection.Class, nil), []string{"custom1", "custom2"}, []models.Vector{vectors[1], vectors[2]}) + if !assert.Nil(collect, err) { + 
return + } + if !assert.Equal(collect, res[0].ID, obj.ID) { + return + } + if !assert.Equal(collect, res[0].Dist, float32(1)) { + return + } + + assert.Nil(collect, nodes[rnd.Intn(len(nodes))].repo.DeleteObject(context.Background(), collection.Class, obj.ID, time.Now(), nil, "", 0)) + }, 20*time.Second, 1*time.Second) + }) + + t.Run("get non-existing target", func(t *testing.T) { + start := time.Now() + obj := &models.Object{ + ID: uid, + Class: collection.Class, + Vectors: map[string]models.Vector{"custom1": vectors[0], "custom2": vectors[1]}, + } + objVectors, _, err := dto.GetVectors(obj.Vectors) + require.NoError(t, err) + require.Nil(t, nodes[rnd.Intn(len(nodes))].repo.PutObject(context.Background(), obj, nil, objVectors, nil, nil, 0)) + + assert.EventuallyWithT(t, func(collect *assert.CollectT) { + res, err := nodes[rnd.Intn(len(nodes))].repo.VectorSearch(ctx, createParams(collection.Class, []float32{1, 1}), []string{"custom1", "custom3"}, []models.Vector{vectors[1], vectors[2]}) + if !assert.Nil(collect, err) { + return + } + if !assert.Len(collect, res, 0) { // no results because we are searching for target custom3 which the only object does not have + return + } + + assert.True(collect, time.Since(start) < 19*time.Second) // this will fail if the remote call is retried and should be long enough to never be triggerd in normal conditions + }, 20*time.Second, 1*time.Second) + }) + + t.Run("Multiple objects", func(t *testing.T) { + ids := make([]strfmt.UUID, 50) + for i := range ids { + obj := &models.Object{ + ID: strfmt.UUID(uuid.New().String()), + Class: collection.Class, + Vectors: map[string]models.Vector{"custom1": vectors[i%len(vectors)], "custom2": vectors[(i+1)%len(vectors)], "custom3": vectors[(i+2)%len(vectors)]}, + } + ids[i] = obj.ID + vectors, _, err := dto.GetVectors(obj.Vectors) + require.NoError(t, err) + require.Nil(t, nodes[rnd.Intn(len(nodes))].repo.PutObject(context.Background(), obj, nil, vectors, nil, nil, 0)) + } + + 
assert.EventuallyWithT(t, func(collect *assert.CollectT) { + res, err := nodes[rnd.Intn(len(nodes))].repo.VectorSearch(ctx, createParams(collection.Class, []float32{1, 1}), []string{"custom1", "custom3"}, []models.Vector{vectors[1], vectors[2]}) + assert.Nil(collect, err) + assert.Greater(collect, len(res), 0) + }, 20*time.Second, 1*time.Second) + }) + + for _, node := range nodes { + node.repo.Shutdown(context.Background()) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/doc.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..5ca17b443eea65fc422819898658493d7158c88f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/doc.go @@ -0,0 +1,17 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// clusterintegrationtest acts as a test package to provide a component test +// spanning multiple parts of the application, including everything that's +// required for a distributed setup. It thus acts like a mini "main" page and +// must be separated from the rest of the package to avoid circular import +// issues, etc. 
+package clusterintegrationtest diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/fakes_for_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/fakes_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2153a8e440163c0bf94845770dbe1866ff1a50f5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/fakes_for_test.go @@ -0,0 +1,543 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package clusterintegrationtest + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/mock" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + schemaUC "github.com/weaviate/weaviate/usecases/schema" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/weaviate/weaviate/adapters/clients" + "github.com/weaviate/weaviate/adapters/handlers/rest/clusterapi" + "github.com/weaviate/weaviate/adapters/repos/db" + "github.com/weaviate/weaviate/entities/backup" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/schema" + modstgfs "github.com/weaviate/weaviate/modules/backup-filesystem" + ubak "github.com/weaviate/weaviate/usecases/backup" + "github.com/weaviate/weaviate/usecases/cluster" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/modules" + "github.com/weaviate/weaviate/usecases/sharding" +) + +type node struct { + name string + repo 
*db.DB + schemaManager *fakeSchemaManager + backupManager *ubak.Handler + scheduler *ubak.Scheduler + migrator *db.Migrator + hostname string +} + +func (n *node) init(t *testing.T, dirName string, allNodes *[]*node, shardingState *sharding.State, +) { + var err error + localDir := path.Join(dirName, n.name) + logger, _ := test.NewNullLogger() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return(n.name).Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).RunAndReturn(func(nodeName string) (string, bool) { + for _, node := range *allNodes { + if node.name == nodeName { + if node.hostname == "" { + return "", false + } + return node.hostname, true + } + } + return "", false + }).Maybe() + nodeResolver := &nodeResolver{ + NodeSelector: mockNodeSelector, + nodes: allNodes, + local: n.name, + } + + os.Setenv("ASYNC_INDEXING_STALE_TIMEOUT", "1s") + shardStateRaw, err := json.Marshal(shardingState) + if err != nil { + panic(fmt.Sprintf("Error marshalling sharding state: %v", err)) + } + shardState, err := sharding.StateFromJSON(shardStateRaw, nodeResolver) + if err != nil { + panic(fmt.Sprintf("Error unmarshalling sharding state: %v", err)) + } + + client := clients.NewRemoteIndex(&http.Client{}) + nodesClient := clients.NewRemoteNode(&http.Client{}) + replicaClient := clients.NewReplicationClient(&http.Client{}) + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT(). + ShardReplicas(mock.Anything, mock.Anything). 
+ RunAndReturn(func(class string, shard string) ([]string, error) { + phys, ok := shardState.Physical[shard] + if !ok { + return nil, fmt.Errorf("shard %q not found for class %q", shard, class) + } + return phys.BelongsToNodes, nil + }).Maybe() + mockSchemaReader.EXPECT().ShardOwner(mock.Anything, mock.Anything).RunAndReturn(func(class string, shard string) (string, error) { + x, ok := shardState.Physical[shard] + if !ok { + return "", fmt.Errorf("shard %q not found for class %q", shard, class) + } + if len(x.BelongsToNodes) < 1 || x.BelongsToNodes[0] == "" { + return "", fmt.Errorf("owner node not found for shard %q and class %q", shard, class) + } + return shardState.Physical[shard].BelongsToNodes[0], nil + }).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).RunAndReturn( + func(class string, shard string, replicas []string) []string { + return replicas + }).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).RunAndReturn( + func(class string, shard string, replicas []string) ([]string, []string) { + return replicas, []string{} + }).Maybe() + + n.repo, err = db.New(logger, n.name, db.Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: localDir, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, client, nodeResolver, nodesClient, replicaClient, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + if err != nil { + panic(err) + } + n.schemaManager = &fakeSchemaManager{ + shardState: shardState, + schema: schema.Schema{Objects: &models.Schema{}}, + nodeResolver: nodeResolver, + } + + n.repo.SetSchemaGetter(n.schemaManager) + err = n.repo.WaitForStartup(context.Background()) + if err != nil { + panic(err) + } + + backendProvider := newFakeBackupBackendProvider(localDir) + n.backupManager = 
ubak.NewHandler( + logger, &fakeAuthorizer{}, n.schemaManager, n.repo, backendProvider, fakeRbacBackupWrapper{}, fakeRbacBackupWrapper{}, + ) + + backupClient := clients.NewClusterBackups(&http.Client{}) + n.scheduler = ubak.NewScheduler( + &fakeAuthorizer{}, backupClient, n.repo, backendProvider, nodeResolver, n.schemaManager, logger) + + n.migrator = db.NewMigrator(n.repo, logger, n.name) + + indices := clusterapi.NewIndices(sharding.NewRemoteIndexIncoming(n.repo, n.schemaManager, modules.NewProvider(logger, config.Config{})), + n.repo, clusterapi.NewNoopAuthHandler(), func() bool { return false }, logger) + mux := http.NewServeMux() + mux.Handle("/indices/", indices.Indices()) + + backups := clusterapi.NewBackups(n.backupManager, clusterapi.NewNoopAuthHandler()) + mux.Handle("/backups/can-commit", backups.CanCommit()) + mux.Handle("/backups/commit", backups.Commit()) + mux.Handle("/backups/abort", backups.Abort()) + mux.Handle("/backups/status", backups.Status()) + + srv := httptest.NewServer(mux) + u, err := url.Parse(srv.URL) + if err != nil { + panic(err) + } + n.hostname = u.Host +} + +type fakeRbacBackupWrapper struct{} + +func (r fakeRbacBackupWrapper) GetBackupItems(context.Context) (map[string][]byte, error) { + return nil, nil +} + +func (r fakeRbacBackupWrapper) WriteBackupItems(context.Context, map[string][]byte) error { + return nil +} + +func (r fakeRbacBackupWrapper) Snapshot() ([]byte, error) { + return nil, nil +} + +func (r fakeRbacBackupWrapper) Restore([]byte) error { + return nil +} + +type fakeSchemaManager struct { + schema schema.Schema + shardState *sharding.State + nodeResolver *nodeResolver +} + +func (f *fakeSchemaManager) GetSchemaSkipAuth() schema.Schema { + return f.schema +} + +func (f *fakeSchemaManager) ReadOnlyClass(class string) *models.Class { + return f.schema.GetClass(class) +} + +func (f *fakeSchemaManager) ReadOnlyClassWithVersion(ctx context.Context, class string, version uint64, +) (*models.Class, error) { + return 
f.schema.GetClass(class), nil +} + +func (f *fakeSchemaManager) ResolveAlias(string) string { + return "" +} + +func (f *fakeSchemaManager) GetAliasesForClass(string) []*models.Alias { + return nil +} + +func (f *fakeSchemaManager) CopyShardingState(class string) *sharding.State { + return f.shardState +} + +func (f *fakeSchemaManager) Statistics() map[string]any { + return nil +} + +func (f *fakeSchemaManager) ShardOwner(class, shard string) (string, error) { + ss := f.shardState + x, ok := ss.Physical[shard] + if !ok { + return "", fmt.Errorf("shard not found") + } + if len(x.BelongsToNodes) < 1 || x.BelongsToNodes[0] == "" { + return "", fmt.Errorf("owner node not found") + } + return ss.Physical[shard].BelongsToNodes[0], nil +} + +func (f *fakeSchemaManager) ShardReplicas(class, shard string) ([]string, error) { + ss := f.shardState + x, ok := ss.Physical[shard] + if !ok { + return nil, fmt.Errorf("shard not found") + } + return x.BelongsToNodes, nil +} + +func (f *fakeSchemaManager) TenantsShards(_ context.Context, class string, tenants ...string) (map[string]string, error) { + res := map[string]string{} + for _, t := range tenants { + res[t] = models.TenantActivityStatusHOT + } + return res, nil +} + +func (f *fakeSchemaManager) OptimisticTenantStatus(_ context.Context, class string, tenant string) (map[string]string, error) { + res := map[string]string{} + res[tenant] = models.TenantActivityStatusHOT + return res, nil +} + +func (f *fakeSchemaManager) ShardFromUUID(class string, uuid []byte) string { + ss := f.shardState + return ss.Shard("", string(uuid)) +} + +func (f *fakeSchemaManager) RestoreClass(ctx context.Context, d *backup.ClassDescriptor, nodeMapping map[string]string, overwrite bool) error { + return nil +} + +func (f *fakeSchemaManager) Nodes() []string { + return []string{"NOT SET"} +} + +func (f *fakeSchemaManager) NodeName() string { + return f.nodeResolver.local +} + +func (f *fakeSchemaManager) ClusterHealthScore() int { + return 0 +} + 
+func (f *fakeSchemaManager) ResolveParentNodes(_ string, shard string, +) (map[string]string, error) { + return nil, nil +} + +func (f *fakeSchemaManager) StorageCandidates() []string { + return []string{} +} + +type nodeResolver struct { + cluster.NodeSelector + nodes *[]*node + local string +} + +func (r nodeResolver) AllHostnames() []string { + panic("node resolving not implemented yet") +} + +func (r nodeResolver) AllNames() []string { + xs := []string{} + for _, n := range *r.nodes { + xs = append(xs, n.name) + } + return xs +} + +func (r nodeResolver) NodeCount() int { + return len(*r.nodes) +} + +// LocalName keep it to override the common mock of cluster.NodeSelector +func (r nodeResolver) LocalName() string { + return r.local +} + +// NodeHostname keep it to override the common mock of cluster.NodeSelector +func (r nodeResolver) NodeHostname(nodeName string) (string, bool) { + for _, node := range *r.nodes { + if node.name == nodeName { + return node.hostname, true + } + } + + return "", false +} + +func (r nodeResolver) LeaderID() string { + if r.nodes != nil && len(*r.nodes) > 0 { + return (*r.nodes)[0].name + } + return "" +} + +func newFakeBackupBackendProvider(backupsPath string) *fakeBackupBackendProvider { + return &fakeBackupBackendProvider{ + backupsPath: backupsPath, + } +} + +type fakeBackupBackendProvider struct { + backupsPath string +} + +func (f *fakeBackupBackendProvider) BackupBackend(name string) (modulecapabilities.BackupBackend, error) { + backend.setLocal(name == modstgfs.Name) + return backend, nil +} + +func (f *fakeBackupBackendProvider) EnabledBackupBackends() []modulecapabilities.BackupBackend { + return nil +} + +type fakeBackupBackend struct { + sync.Mutex + backupsPath string + backupID string + counter int + isLocal bool + startedAt time.Time +} + +func (f *fakeBackupBackend) HomeDir(backupID, overrideBucket, overridePath string) string { + f.Lock() + defer f.Unlock() + if overridePath != "" { + if overrideBucket != "" { + 
return path.Join(overrideBucket, overridePath, backupID) + } else { + return path.Join(overridePath, backupID) + } + } else { + if overrideBucket != "" { + return path.Join(overrideBucket, f.backupsPath, backupID) + } else { + return path.Join(f.backupsPath, backupID) + } + } +} + +func (f *fakeBackupBackend) AllBackups(context.Context) ([]*backup.DistributedBackupDescriptor, error) { + return nil, fmt.Errorf("not implemented") +} + +func (f *fakeBackupBackend) GetObject(ctx context.Context, backupID, key, overrideBucket, overridePath string) ([]byte, error) { + f.Lock() + defer f.Unlock() + + f.counter++ + + if f.counter <= 2 { + return nil, backup.ErrNotFound{} + } + + var resp interface{} + + if key == ubak.GlobalBackupFile { + resp = f.successGlobalMeta() + } else { + resp = f.successLocalMeta() + } + + b, _ := json.Marshal(resp) + return b, nil +} + +func (f *fakeBackupBackend) WriteToFile(ctx context.Context, backupID, key, destPath, overrideBucket, overridePath string) error { + f.Lock() + defer f.Unlock() + return nil +} + +func (f *fakeBackupBackend) Write(ctx context.Context, backupID, key, overrideBucket, overridePath string, r io.ReadCloser) (int64, error) { + f.Lock() + defer f.Unlock() + defer r.Close() + return 0, nil +} + +func (f *fakeBackupBackend) Read(ctx context.Context, backupID, key, overrideBucket, overridePath string, w io.WriteCloser) (int64, error) { + f.Lock() + defer f.Unlock() + defer w.Close() + return 0, nil +} + +func (f *fakeBackupBackend) SourceDataPath() string { + f.Lock() + defer f.Unlock() + return f.backupsPath +} + +func (f *fakeBackupBackend) setLocal(v bool) { + f.Lock() + defer f.Unlock() + f.isLocal = v +} + +func (f *fakeBackupBackend) IsExternal() bool { + f.Lock() + defer f.Unlock() + return !f.isLocal +} + +func (f *fakeBackupBackend) Name() string { + return "fakeBackupBackend" +} + +func (f *fakeBackupBackend) PutFile(ctx context.Context, backupID, key, srcPath, bucket, bucketPath string) error { + f.Lock() + defer 
f.Unlock() + return nil +} + +func (f *fakeBackupBackend) PutObject(ctx context.Context, backupID, key, bucket, bucketPath string, byes []byte) error { + f.Lock() + defer f.Unlock() + return nil +} + +func (f *fakeBackupBackend) Initialize(ctx context.Context, backupID, overrideBucket, overridePath string) error { + f.Lock() + defer f.Unlock() + return nil +} + +func (f *fakeBackupBackend) successGlobalMeta() backup.DistributedBackupDescriptor { + return backup.DistributedBackupDescriptor{ + StartedAt: f.startedAt, + ID: f.backupID, + Nodes: map[string]*backup.NodeDescriptor{ + "node-0": { + Classes: []string{distributedClass}, + Status: "SUCCESS", + }, + }, + Status: "SUCCESS", + Version: ubak.Version, + ServerVersion: "x.x.x", + } +} + +func (f *fakeBackupBackend) successLocalMeta() backup.BackupDescriptor { + return backup.BackupDescriptor{ + ID: f.backupID, + Status: "SUCCESS", + ServerVersion: "x.x.x", + Version: ubak.Version, + StartedAt: f.startedAt, + Classes: []backup.ClassDescriptor{ + { + Name: distributedClass, + Shards: []*backup.ShardDescriptor{ + { + Name: "123", + Node: "node-0", + Files: []string{"some-file.db"}, + DocIDCounter: []byte("1"), + DocIDCounterPath: ".", + Version: []byte("1"), + ShardVersionPath: ".", + PropLengthTracker: []byte("1"), + PropLengthTrackerPath: ".", + }, + }, + ShardingState: []byte("sharding state!"), + Schema: []byte("schema!"), + }, + }, + } +} + +func (f *fakeBackupBackend) reset() { + f.counter = 0 +} + +type fakeAuthorizer struct{} + +func (f *fakeAuthorizer) Authorize(ctx context.Context, _ *models.Principal, _ string, _ ...string) error { + return nil +} + +func (f *fakeAuthorizer) AuthorizeSilent(ctx context.Context, _ *models.Principal, _ string, _ ...string) error { + return nil +} + +func (f *fakeAuthorizer) FilterAuthorizedResources(ctx context.Context, _ *models.Principal, _ string, resources ...string) ([]string, error) { + return resources, nil +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/helpers_for_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/helpers_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ccff3b9c7e8d576f15a724aeb9c2904ded2dbe3d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/clusterintegrationtest/helpers_for_test.go @@ -0,0 +1,404 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package clusterintegrationtest + +import ( + "context" + "encoding/json" + "fmt" + "math" + "math/rand" + "sort" + "testing" + "time" + + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + dynamicent "github.com/weaviate/weaviate/entities/vectorindex/dynamic" + flatent "github.com/weaviate/weaviate/entities/vectorindex/flat" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/cluster/mocks" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/sharding" + shardingConfig "github.com/weaviate/weaviate/usecases/sharding/config" +) + +func getRandomSeed() *rand.Rand { + return 
rand.New(rand.NewSource(time.Now().UnixNano())) +} + +func setupDirectory(t *testing.T) string { + dirName := t.TempDir() + return dirName +} + +func dataAsBatch(data []*models.Object) objects.BatchObjects { + batchObjs := make(objects.BatchObjects, len(data)) + for i := range data { + batchObjs[i] = objects.BatchObject{ + OriginalIndex: i, + Err: nil, + Object: data[i], + UUID: data[i].ID, + } + } + + return batchObjs +} + +func dataAsBatchWithProps(data []*models.Object, props []string) objects.BatchObjects { + batchObjs := make(objects.BatchObjects, len(data)) + for i := range data { + batchObjs[i] = objects.BatchObject{ + OriginalIndex: i, + Err: nil, + Object: copyObjectWithProp(data[i], props), + UUID: data[i].ID, + } + } + + return batchObjs +} + +// copyObjectWithProp is not a 100% copy. It may still contain the same +// pointers in some properties, it does however guarantee that it does not +// alter the existing input - this guarantee is lost, if you modify the output +func copyObjectWithProp(in *models.Object, propsToCopy []string) *models.Object { + out := &models.Object{} + + out.Additional = in.Additional + out.Class = in.Class + out.Vector = in.Vector + out.CreationTimeUnix = in.CreationTimeUnix + out.LastUpdateTimeUnix = in.LastUpdateTimeUnix + out.ID = in.ID + props := map[string]interface{}{} + + for _, propName := range propsToCopy { + props[propName] = in.Properties.(map[string]interface{})[propName] + } + + out.Properties = props + return out +} + +func multiShardState(nodeCount int) *sharding.State { + config, err := shardingConfig.ParseConfig(map[string]interface{}{ + "desiredCount": json.Number(fmt.Sprintf("%d", nodeCount)), + }, 1) + if err != nil { + panic(err) + } + + nodeList := make([]string, nodeCount) + for i := range nodeList { + nodeList[i] = fmt.Sprintf("node-%d", i) + } + + selector := mocks.NewMockNodeSelector(nodeList...) 
+ s, err := sharding.InitState("multi-shard-test-index", config, selector.LocalName(), + selector.StorageCandidates(), 1, false) + if err != nil { + panic(err) + } + + return s +} + +func class() *models.Class { + cfg := enthnsw.NewDefaultUserConfig() + cfg.EF = 500 + return &models.Class{ + Class: distributedClass, + VectorIndexConfig: cfg, + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "description", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "other_property", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "date_property", + DataType: schema.DataTypeDate.PropString(), + }, + { + Name: "date_array_property", + DataType: schema.DataTypeDateArray.PropString(), + }, + { + Name: "int_property", + DataType: schema.DataTypeInt.PropString(), + }, + { + Name: "phone_property", + DataType: schema.DataTypePhoneNumber.PropString(), + }, + }, + } +} + +func multiVectorClass(asyncIndexing bool) *models.Class { + namedVectors := map[string]models.VectorConfig{ + "custom1": {VectorIndexConfig: enthnsw.UserConfig{}}, + "custom2": {VectorIndexType: "hnsw", VectorIndexConfig: enthnsw.UserConfig{}}, + "custom3": {VectorIndexType: "flat", VectorIndexConfig: flatent.UserConfig{}}, + } + + if asyncIndexing { + namedVectors["custom4"] = models.VectorConfig{VectorIndexType: "dynamic", VectorIndexConfig: dynamicent.UserConfig{}} + } + + return &models.Class{ + Class: "Test", + InvertedIndexConfig: invertedConfig(), + VectorConfig: namedVectors, + Properties: []*models.Property{}, + } +} + +func secondClassWithRef() *models.Class { + cfg := enthnsw.NewDefaultUserConfig() + cfg.EF = 500 + return &models.Class{ + Class: "SecondDistributed", + VectorIndexConfig: cfg, + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "description", + DataType: []string{string(schema.DataTypeText)}, + }, 
+ { + Name: "toFirst", + DataType: []string{distributedClass}, + }, + }, + } +} + +func invertedConfig() *models.InvertedIndexConfig { + return &models.InvertedIndexConfig{ + CleanupIntervalSeconds: 60, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + } +} + +func exampleData(size int) []*models.Object { + out := make([]*models.Object, size) + + for i := range out { + vec := make([]float32, vectorDims) + for i := range vec { + vec[i] = rand.Float32() + } + + timestamp := time.Unix(0, 0).Add(time.Duration(i) * time.Hour) + phoneNumber := uint64(1000000 + rand.Intn(10000)) + + out[i] = &models.Object{ + Class: distributedClass, + ID: strfmt.UUID(uuid.New().String()), + Properties: map[string]interface{}{ + "description": fmt.Sprintf("object-%d", i), + "date_property": timestamp, + "date_array_property": []interface{}{timestamp}, + "int_property": rand.Intn(1000), + "phone_property": &models.PhoneNumber{ + CountryCode: 49, + DefaultCountry: "DE", + Input: fmt.Sprintf("0171 %d", phoneNumber), + Valid: true, + InternationalFormatted: fmt.Sprintf("+49 171 %d", phoneNumber), + National: phoneNumber, + NationalFormatted: fmt.Sprintf("0171 %d", phoneNumber), + }, + }, + Vector: vec, + } + } + + return out +} + +func exampleDataWithRefs(size int, refCount int, targetObjs []*models.Object) []*models.Object { + out := make([]*models.Object, size) + + for i := range out { + vec := make([]float32, vectorDims) + for i := range vec { + vec[i] = rand.Float32() + } + + refs := make(models.MultipleRef, refCount) + for i := range refs { + randomTarget := targetObjs[rand.Intn(len(targetObjs))] + refs[i] = crossref.New("localhost", distributedClass, randomTarget.ID).SingleRef() + } + + out[i] = &models.Object{ + Class: "SecondDistributed", + ID: strfmt.UUID(uuid.New().String()), + Properties: map[string]interface{}{ + "description": fmt.Sprintf("second-object-%d", i), + "toFirst": refs, + }, + Vector: vec, + } + } + + return out +} + +func bruteForceObjectsByQuery(objs 
[]*models.Object, + query []float32, +) []*models.Object { + type distanceAndObj struct { + distance float32 + obj *models.Object + } + + distProv := distancer.NewCosineDistanceProvider() + distances := make([]distanceAndObj, len(objs)) + + for i := range objs { + dist, _ := distProv.SingleDist(normalize(query), normalize(objs[i].Vector)) + distances[i] = distanceAndObj{ + distance: dist, + obj: objs[i], + } + } + + sort.Slice(distances, func(a, b int) bool { + return distances[a].distance < distances[b].distance + }) + + out := make([]*models.Object, len(objs)) + for i := range out { + out[i] = distances[i].obj + } + + return out +} + +func normalize(v []float32) []float32 { + var norm float32 + for i := range v { + norm += v[i] * v[i] + } + + norm = float32(math.Sqrt(float64(norm))) + for i := range v { + v[i] = v[i] / norm + } + + return v +} + +func manuallyResolveRef(t *testing.T, obj *models.Object, + possibleTargets []*models.Object, localPropName, + referencedPropName string, + repo *db.DB, +) []map[string]interface{} { + beacons := obj.Properties.(map[string]interface{})[localPropName].(models.MultipleRef) + out := make([]map[string]interface{}, len(beacons)) + + for i, ref := range beacons { + parsed, err := crossref.Parse(ref.Beacon.String()) + require.Nil(t, err) + target := findId(possibleTargets, parsed.TargetID) + require.NotNil(t, target, "target not found") + if referencedPropName == "vector" { + // find referenced object to get his actual vector from DB + require.NotNil(t, repo) + res, err := repo.Object(context.Background(), parsed.Class, parsed.TargetID, + nil, additional.Properties{Vector: true}, nil, "") + require.Nil(t, err) + require.NotNil(t, res) + out[i] = map[string]interface{}{ + referencedPropName: res.Vector, + } + } else { + out[i] = map[string]interface{}{ + referencedPropName: target.Properties.(map[string]interface{})[referencedPropName], + } + } + } + + return out +} + +func findId(list []*models.Object, id strfmt.UUID) 
*models.Object { + for _, obj := range list { + if obj.ID == id { + return obj + } + } + + return nil +} + +func refsAsBatch(in []*models.Object, propName string) objects.BatchReferences { + out := objects.BatchReferences{} + + originalIndex := 0 + for _, obj := range in { + beacons := obj.Properties.(map[string]interface{})[propName].(models.MultipleRef) + current := make(objects.BatchReferences, len(beacons)) + for i, beacon := range beacons { + to, err := crossref.Parse(beacon.Beacon.String()) + if err != nil { + panic(err) + } + current[i] = objects.BatchReference{ + OriginalIndex: originalIndex, + To: to, + From: crossref.NewSource(schema.ClassName(obj.Class), + schema.PropertyName(propName), obj.ID), + } + originalIndex++ + } + out = append(out, current...) + } + + return out +} + +func createParams(className string, weights []float32) dto.GetParams { + targetCombination := &dto.TargetCombination{Type: dto.Minimum} + if weights != nil { + targetCombination = &dto.TargetCombination{Type: dto.Sum, Weights: weights} + } + return dto.GetParams{ + Pagination: &filters.Pagination{Limit: 100}, + ClassName: className, + TargetVectorCombination: targetCombination, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/compactor/compactor.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/compactor/compactor.go new file mode 100644 index 0000000000000000000000000000000000000000..136d1f88213ed3e0c4b507f8f7506418b01c788b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/compactor/compactor.go @@ -0,0 +1,215 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package compactor + +import ( + "bufio" + "errors" + "fmt" + "io" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" +) + +// SegmentWriterBufferSize controls the buffer size of the segment writer. But +// in addition it also acts as the threshold to switch between the "regular" +// write path and the "fully in memory" path which was added in v1.27.23. See +// [NewCompactor] for more details about the decision logic and motivation +// behind it.. +const SegmentWriterBufferSize = 256 * 1024 + +type Writer interface { + segmentindex.SegmentWriter + Reset(io.Writer) +} + +type MemoryWriter struct { + buffer []byte + pos int + maxPos int + writer io.WriteSeeker +} + +// newMemoryWriterWrapper creates a new MemoryWriter with initialized pointers +func newMemoryWriterWrapper(initialCapacity int64, writer io.WriteSeeker) *MemoryWriter { + return &MemoryWriter{ + buffer: make([]byte, initialCapacity), + pos: 0, + maxPos: 0, + writer: writer, + } +} + +func (mw *MemoryWriter) Write(p []byte) (n int, err error) { + lenCopyBytes := len(p) + + requiredSize := mw.pos + lenCopyBytes + if requiredSize > len(mw.buffer) { + mw.buffer = append(mw.buffer, make([]byte, requiredSize)...) 
+ } + + numCopiedBytes := copy(mw.buffer[mw.pos:], p) + + mw.pos += numCopiedBytes + if mw.pos >= mw.maxPos { + mw.maxPos = mw.pos + } + + if numCopiedBytes != lenCopyBytes { + return numCopiedBytes, errors.New("could not copy all data into buffer") + } + + return numCopiedBytes, nil +} + +func (mw *MemoryWriter) Flush() error { + buf := mw.buffer + _, err := mw.writer.Write(buf[:mw.maxPos]) + if err != nil { + return err + } + + return nil +} + +// Reset needs to be present to fulfill interface +func (mw *MemoryWriter) Reset(writer io.Writer) {} + +func (mw *MemoryWriter) ResetWritePositionToZero() { + mw.pos = 0 +} + +func (mw *MemoryWriter) ResetWritePositionToMax() { + mw.pos = mw.maxPos +} + +func NewWriter(w io.WriteSeeker, maxNewFileSize int64) (Writer, *MemoryWriter) { + var writer Writer + var mw *MemoryWriter + if maxNewFileSize < SegmentWriterBufferSize { + mw = newMemoryWriterWrapper(maxNewFileSize, w) + writer = mw + } else { + writer = bufio.NewWriterSize(w, SegmentWriterBufferSize) + } + + return writer, mw +} + +func WriteHeader(mw *MemoryWriter, w io.WriteSeeker, bufw Writer, f *segmentindex.SegmentFile, + level, version, secondaryIndices uint16, startOfIndex uint64, + strategy segmentindex.Strategy, +) error { + h := &segmentindex.Header{ + Level: level, + Version: version, + SecondaryIndices: secondaryIndices, + Strategy: strategy, + IndexStart: startOfIndex, + } + + if mw == nil { + if _, err := w.Seek(0, io.SeekStart); err != nil { + return fmt.Errorf("seek to beginning to write header: %w", err) + } + + // We have to write directly to compactor writer, + // since it has seeked back to start. The following + // call to f.WriteHeader will not write again. 
+ if _, err := h.WriteTo(w); err != nil { + return err + } + if _, err := f.WriteHeader(h); err != nil { + return err + } + } else { + mw.ResetWritePositionToZero() + if _, err := h.WriteTo(bufw); err != nil { + return err + } + f.SetHeader(h) + } + + // We need to seek back to the end so we can write a checksum + if mw == nil { + if _, err := w.Seek(0, io.SeekEnd); err != nil { + return fmt.Errorf("seek to end after writing header: %w", err) + } + } else { + mw.ResetWritePositionToMax() + } + + bufw.Reset(w) + + return nil +} + +func WriteHeaders(mw *MemoryWriter, w io.WriteSeeker, bufw Writer, f *segmentindex.SegmentFile, + level, version, secondaryIndices uint16, startOfIndex uint64, strategy segmentindex.Strategy, + hi *segmentindex.HeaderInverted, +) error { + h := &segmentindex.Header{ + Level: level, + Version: version, + SecondaryIndices: secondaryIndices, + Strategy: strategy, + IndexStart: startOfIndex, + } + + if mw == nil { + if _, err := w.Seek(0, io.SeekStart); err != nil { + return fmt.Errorf("seek to beginning to write header: %w", err) + } + + // We have to write directly to compactor writer, + // since it has seeked back to start. The following + // call to f.WriteHeader will not write again. 
+ if _, err := h.WriteTo(w); err != nil { + return err + } + + if _, err := hi.WriteTo(w); err != nil { + return err + } + + if _, err := f.WriteHeader(h); err != nil { + return err + } + if _, err := f.WriteHeaderInverted(hi); err != nil { + return err + } + } else { + mw.ResetWritePositionToZero() + if _, err := h.WriteTo(bufw); err != nil { + return err + } + if _, err := hi.WriteTo(bufw); err != nil { + return err + } + + f.SetHeader(h) + f.SetHeaderInverted(hi) + } + + // We need to seek back to the end so we can write a checksum + if mw == nil { + if _, err := w.Seek(0, io.SeekEnd); err != nil { + return fmt.Errorf("seek to end after writing header: %w", err) + } + } else { + mw.ResetWritePositionToMax() + } + + bufw.Reset(w) + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud.go new file mode 100644 index 0000000000000000000000000000000000000000..4e1a416da7eced66469f4d6c3c6f9ed061c20173 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud.go @@ -0,0 +1,275 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/adapters/repos/db/refcache" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/multi" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/objects" +) + +func (db *DB) PutObject(ctx context.Context, obj *models.Object, + vector []float32, vectors map[string][]float32, multivectors map[string][][]float32, + repl *additional.ReplicationProperties, + schemaVersion uint64, +) error { + object := storobj.FromObject(obj, vector, vectors, multivectors) + idx := db.GetIndex(object.Class()) + if idx == nil { + return fmt.Errorf("import into non-existing index for %s", object.Class()) + } + + if err := idx.putObject(ctx, object, repl, obj.Tenant, schemaVersion); err != nil { + return fmt.Errorf("import into index %s: %w", idx.ID(), err) + } + + return nil +} + +// DeleteObject from of a specific class giving its ID +func (db *DB) DeleteObject(ctx context.Context, class string, id strfmt.UUID, + deletionTime time.Time, repl *additional.ReplicationProperties, tenant string, schemaVersion uint64, +) error { + idx := db.GetIndex(schema.ClassName(class)) + if idx == nil { + return fmt.Errorf("delete from non-existing index for %s", class) + } + + err := idx.deleteObject(ctx, id, deletionTime, repl, tenant, schemaVersion) + if err != nil { + return fmt.Errorf("delete from index %q: %w", idx.ID(), err) + } + + return nil +} + +func (db *DB) MultiGet(ctx context.Context, query []multi.Identifier, + additional additional.Properties, tenant string, +) ([]search.Result, error) { + byIndex := map[string][]multi.Identifier{} + 
db.indexLock.RLock() + defer db.indexLock.RUnlock() + + for i, q := range query { + // store original position to make assembly easier later + q.OriginalPosition = i + + for _, index := range db.indices { + if index.Config.ClassName != schema.ClassName(q.ClassName) { + continue + } + + queue := byIndex[index.ID()] + queue = append(queue, q) + byIndex[index.ID()] = queue + } + } + + out := make(search.Results, len(query)) + for indexID, queries := range byIndex { + indexRes, err := db.indices[indexID].multiObjectByID(ctx, queries, tenant) + if err != nil { + return nil, fmt.Errorf("index %q: %w", indexID, err) + } + + for i, obj := range indexRes { + if obj == nil { + continue + } + res := obj.SearchResult(additional, tenant) + out[queries[i].OriginalPosition] = *res + } + } + + return out, nil +} + +// ObjectByID checks every index of the particular kind for the ID +// +// @warning: this function is deprecated by Object() +func (db *DB) ObjectByID(ctx context.Context, id strfmt.UUID, + props search.SelectProperties, additional additional.Properties, + tenant string, +) (*search.Result, error) { + results, err := db.ObjectsByID(ctx, id, props, additional, tenant) + if err != nil { + return nil, err + } + if len(results) == 0 { + return nil, nil + } + return &results[0], nil +} + +// ObjectsByID checks every index of the particular kind for the ID +// this method is only used for Explore queries where we don't have +// a class context +func (db *DB) ObjectsByID(ctx context.Context, id strfmt.UUID, + props search.SelectProperties, additional additional.Properties, + tenant string, +) (search.Results, error) { + var result []*storobj.Object + // TODO: Search in parallel, rather than sequentially or this will be + // painfully slow on large schemas + db.indexLock.RLock() + + for _, index := range db.indices { + res, err := index.objectByID(ctx, id, props, additional, nil, tenant) + if err != nil { + db.indexLock.RUnlock() + switch { + case errors.As(err, 
&objects.ErrMultiTenancy{}): + return nil, objects.NewErrMultiTenancy(fmt.Errorf("search index %s: %w", index.ID(), err)) + default: + return nil, fmt.Errorf("search index %s: %w", index.ID(), err) + } + } + + if res != nil { + result = append(result, res) + } + } + db.indexLock.RUnlock() + + if result == nil { + return nil, nil + } + + return db.ResolveReferences(ctx, + storobj.SearchResults(result, additional, tenant), props, nil, additional, tenant) +} + +// Object gets object with id from index of specified class. +func (db *DB) Object(ctx context.Context, class string, id strfmt.UUID, + props search.SelectProperties, addl additional.Properties, + repl *additional.ReplicationProperties, tenant string, +) (*search.Result, error) { + idx := db.GetIndex(schema.ClassName(class)) + if idx == nil { + return nil, nil + } + + obj, err := idx.objectByID(ctx, id, props, addl, repl, tenant) + if err != nil { + var errMultiTenancy objects.ErrMultiTenancy + switch { + case errors.As(err, &errMultiTenancy): + return nil, objects.NewErrMultiTenancy(fmt.Errorf("search index %s: %w", idx.ID(), err)) + default: + return nil, fmt.Errorf("search index %s: %w", idx.ID(), err) + } + } + var r *search.Result + if obj != nil { + r = obj.SearchResult(addl, tenant) + } + if r == nil { + return nil, nil + } + return db.enrichRefsForSingle(ctx, r, props, addl, tenant) +} + +func (db *DB) enrichRefsForSingle(ctx context.Context, obj *search.Result, + props search.SelectProperties, additional additional.Properties, tenant string, +) (*search.Result, error) { + res, err := refcache.NewResolver(refcache.NewCacher(db, db.logger, tenant)). 
+ Do(ctx, []search.Result{*obj}, props, additional) + if err != nil { + return nil, fmt.Errorf("resolve cross-refs: %w", err) + } + + return &res[0], nil +} + +func (db *DB) Exists(ctx context.Context, class string, id strfmt.UUID, + repl *additional.ReplicationProperties, tenant string, +) (bool, error) { + if class == "" { + return db.anyExists(ctx, id, repl) + } + index := db.GetIndex(schema.ClassName(class)) + if index == nil { + return false, nil + } + return index.exists(ctx, id, repl, tenant) +} + +func (db *DB) anyExists(ctx context.Context, id strfmt.UUID, + repl *additional.ReplicationProperties, +) (bool, error) { + // TODO: Search in parallel, rather than sequentially or this will be + // painfully slow on large schemas + db.indexLock.RLock() + defer db.indexLock.RUnlock() + + for _, index := range db.indices { + ok, err := index.exists(ctx, id, repl, "") + if err != nil { + switch { + case errors.As(err, &objects.ErrMultiTenancy{}): + return false, objects.NewErrMultiTenancy(fmt.Errorf("search index %s: %w", index.ID(), err)) + default: + return false, fmt.Errorf("search index %s: %w", index.ID(), err) + } + } + if ok { + return true, nil + } + } + + return false, nil +} + +func (db *DB) AddReference(ctx context.Context, source *crossref.RefSource, target *crossref.Ref, + repl *additional.ReplicationProperties, tenant string, schemaVersion uint64, +) error { + return db.Merge(ctx, objects.MergeDocument{ + Class: source.Class.String(), + ID: source.TargetID, + UpdateTime: time.Now().UnixMilli(), + References: objects.BatchReferences{ + objects.BatchReference{ + From: source, + To: target, + }, + }, + }, repl, tenant, schemaVersion) +} + +func (db *DB) Merge(ctx context.Context, merge objects.MergeDocument, + repl *additional.ReplicationProperties, tenant string, schemaVersion uint64, +) error { + idx := db.GetIndex(schema.ClassName(merge.Class)) + if idx == nil { + return fmt.Errorf("merge from non-existing index for %s", merge.Class) + } + + err := 
idx.mergeObject(ctx, merge, repl, tenant, schemaVersion) + if err != nil { + return fmt.Errorf("merge into index %s: %w", idx.ID(), err) + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_deletion_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_deletion_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..89a98d60ef46207745982bbf18c2458c36297a88 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_deletion_integration_test.go @@ -0,0 +1,264 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "testing" + "time" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/search" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + libschema "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +func TestDeleteJourney(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: libschema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := 
schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + + schema := libschema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{updateTestClass()}, + }, + } + t.Run("add schema", func(t *testing.T) { + err := migrator.AddClass(context.Background(), updateTestClass()) + require.Nil(t, err) + }) + schemaGetter.schema = schema + 
+ t.Run("import some objects", func(t *testing.T) { + for _, res := range updateTestData() { + err := repo.PutObject(context.Background(), res.Object(), res.Vector, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + + searchVector := []float32{0.1, 0.1, 0.1} + + t.Run("verify vector search results are initially as expected", + func(t *testing.T) { + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: "UpdateTestClass", + Pagination: &filters.Pagination{ + Limit: 100, + }, + Properties: search.SelectProperties{{Name: "name"}}, + }, []string{""}, []models.Vector{searchVector}) + + expectedOrder := []interface{}{ + "element-0", "element-2", "element-3", "element-1", + } + + require.Nil(t, err) + require.Len(t, res, 4) + assert.Equal(t, expectedOrder, extractPropValues(res, "name")) + }) + + searchInv := func(t *testing.T, op filters.Operator, value int) []interface{} { + res, err := repo.ObjectSearch(context.Background(), 0, 100, + &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: op, + On: &filters.Path{ + Class: "UpdateTestClass", + Property: libschema.PropertyName("intProp"), + }, + Value: &filters.Value{ + Type: libschema.DataTypeInt, + Value: value, + }, + }, + }, nil, additional.Properties{}, "") + require.Nil(t, err) + return extractPropValues(res, "name") + } + + t.Run("verify invert index results are initially as expected", + func(t *testing.T) { + expectedOrder := []interface{}{ + "element-0", "element-1", "element-2", "element-3", + } + assert.Equal(t, expectedOrder, searchInv(t, filters.OperatorGreaterThanEqual, 0)) + + expectedOrder = []interface{}{"element-0"} + assert.Equal(t, expectedOrder, searchInv(t, filters.OperatorEqual, 0)) + + expectedOrder = []interface{}{"element-1"} + assert.Equal(t, expectedOrder, searchInv(t, filters.OperatorEqual, 10)) + + expectedOrder = []interface{}{"element-2"} + assert.Equal(t, expectedOrder, searchInv(t, filters.OperatorEqual, 20)) + + expectedOrder = []interface{}{"element-3"} + 
assert.Equal(t, expectedOrder, searchInv(t, filters.OperatorEqual, 30)) + }) + + t.Run("delete element-0", + func(t *testing.T) { + id := updateTestData()[0].ID + + err := repo.DeleteObject(context.Background(), "UpdateTestClass", id, time.Now(), nil, "", 0) + require.Nil(t, err) + }) + + t.Run("verify new vector search results are as expected", func(t *testing.T) { + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: "UpdateTestClass", + Pagination: &filters.Pagination{ + Limit: 100, + }, + Properties: search.SelectProperties{{Name: "name"}}, + }, []string{""}, []models.Vector{searchVector}) + + expectedOrder := []interface{}{ + "element-2", "element-3", "element-1", + } + + require.Nil(t, err) + require.Len(t, res, 3) + assert.Equal(t, expectedOrder, extractPropValues(res, "name")) + }) + + t.Run("verify invert results still work properly", func(t *testing.T) { + expectedOrder := []interface{}{ + "element-1", "element-2", "element-3", + } + assert.Equal(t, expectedOrder, searchInv(t, filters.OperatorGreaterThanEqual, 0)) + + expectedOrder = []interface{}{"element-1"} + assert.Equal(t, expectedOrder, searchInv(t, filters.OperatorEqual, 10)) + + expectedOrder = []interface{}{"element-2"} + assert.Equal(t, expectedOrder, searchInv(t, filters.OperatorEqual, 20)) + + expectedOrder = []interface{}{"element-3"} + assert.Equal(t, expectedOrder, searchInv(t, filters.OperatorEqual, 30)) + }) + + t.Run("delete element-1", + func(t *testing.T) { + id := updateTestData()[1].ID + + err := repo.DeleteObject(context.Background(), "UpdateTestClass", id, time.Now(), nil, "", 0) + require.Nil(t, err) + }) + + t.Run("verify new vector search results are as expected", func(t *testing.T) { + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: "UpdateTestClass", + Pagination: &filters.Pagination{ + Limit: 100, + }, + Properties: search.SelectProperties{{Name: "name"}}, + }, []string{""}, []models.Vector{searchVector}) + + 
expectedOrder := []interface{}{ + "element-2", "element-3", + } + + require.Nil(t, err) + require.Len(t, res, 2) + assert.Equal(t, expectedOrder, extractPropValues(res, "name")) + }) + + t.Run("verify invert results have been updated correctly", func(t *testing.T) { + expectedOrder := []interface{}{ + "element-2", "element-3", + } + assert.Equal(t, expectedOrder, searchInv(t, filters.OperatorGreaterThanEqual, 0)) + + expectedOrder = []interface{}{"element-2"} + assert.Equal(t, expectedOrder, searchInv(t, filters.OperatorEqual, 20)) + + expectedOrder = []interface{}{"element-3"} + assert.Equal(t, expectedOrder, searchInv(t, filters.OperatorEqual, 30)) + }) + + t.Run("delete the index", func(t *testing.T) { + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: "UpdateTestClass", + Pagination: &filters.Pagination{ + Limit: 100, + }, + Properties: search.SelectProperties{{Name: "name"}}, + }, []string{""}, []models.Vector{searchVector}) + + expectedOrder := []interface{}{ + "element-2", "element-3", + } + + require.Nil(t, err) + require.Len(t, res, 2) + assert.Equal(t, expectedOrder, extractPropValues(res, "name")) + + id := updateTestData()[2].ID + + err = repo.DeleteObject(context.Background(), "UpdateTestClass", id, time.Now(), nil, "", 0) + require.Nil(t, err) + + index := repo.GetIndex("UpdateTestClass") + require.NotNil(t, index) + + err = repo.DeleteIndex("UpdateTestClass") + assert.Nil(t, err) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a023fd9581d81b0fb563f19cc65f2f795d04006c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_integration_test.go @@ -0,0 +1,2838 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// 
\_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/sharding" + + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/cluster/router/types" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/multi" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/cluster" + "github.com/weaviate/weaviate/usecases/memwatch" + schemaUC "github.com/weaviate/weaviate/usecases/schema" +) + +func TestCRUD(t *testing.T) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + thingclass := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Class: "TheBestThingClass", + Properties: []*models.Property{ + { + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "location", + DataType: []string{string(schema.DataTypeGeoCoordinates)}, + }, + { + Name: "phone", + DataType: []string{string(schema.DataTypePhoneNumber)}, + }, + }, + } 
+ actionclass := &models.Class{ + Class: "TheBestActionClass", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "refProp", + DataType: []string{"TheBestThingClass"}, + }, + { + Name: "phone", + DataType: []string{string(schema.DataTypePhoneNumber)}, + }, + }, + } + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(actionclass, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, 
&fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the thing class", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), thingclass)) + }) + + t.Run("creating the action class", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), actionclass)) + }) + + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{actionclass, thingclass}, + }, + } + + thingID := strfmt.UUID("a0b55b05-bc5b-4cc9-b646-1452d1390a62") + + t.Run("validating that the thing doesn't exist prior", func(t *testing.T) { + ok, err := repo.Exists(context.Background(), "TheBestThingClass", thingID, nil, "") + require.Nil(t, err) + assert.False(t, ok) + }) + + t.Run("adding a thing", func(t *testing.T) { + thing := &models.Object{ + CreationTimeUnix: 1565612833955, + LastUpdateTimeUnix: 1000001, + ID: thingID, + Class: "TheBestThingClass", + Properties: map[string]interface{}{ + "stringProp": "some value", + "phone": &models.PhoneNumber{ + CountryCode: 49, + DefaultCountry: "DE", + Input: "0171 1234567", + Valid: true, + InternationalFormatted: "+49 171 1234567", + National: 1234567, + NationalFormatted: "0171 1234567", + }, + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(1), + Longitude: ptFloat32(2), + }, + }, + Additional: models.AdditionalProperties{ + "interpretation": map[string]interface{}{ + "source": []interface{}{ + map[string]interface{}{ + "concept": "some", + "occurrence": float64(1), + "weight": float64(1), + }, + map[string]interface{}{ + "concept": "value", + "occurrence": float64(1), + "weight": float64(1), + }, + }, + }, 
+ }, + } + vector := []float32{1, 3, 5, 0.4} + + err := repo.PutObject(context.Background(), thing, vector, nil, nil, nil, 0) + + assert.Nil(t, err) + }) + + t.Run("validating that the thing exists now", func(t *testing.T) { + ok, err := repo.Exists(context.Background(), "TheBestThingClass", thingID, nil, "") + require.Nil(t, err) + assert.True(t, ok) + }) + + t.Run("trying to add a thing to a non-existing class", func(t *testing.T) { + thing := &models.Object{ + CreationTimeUnix: 1565612833955, + LastUpdateTimeUnix: 1000001, + ID: thingID, + Class: "WrongClass", + Properties: map[string]interface{}{ + "stringProp": "some value", + }, + } + vector := []float32{1, 3, 5, 0.4} + + err := repo.PutObject(context.Background(), thing, vector, nil, nil, nil, 0) + assert.Equal(t, + fmt.Errorf("import into non-existing index for WrongClass"), err) + }) + + timeMust := func(t strfmt.DateTime, err error) strfmt.DateTime { + if err != nil { + panic(err) + } + + return t + } + + t.Run("updating the thing", func(t *testing.T) { + thing := &models.Object{ + CreationTimeUnix: 1565612833955, + LastUpdateTimeUnix: 10000020, + ID: thingID, + Class: "TheBestThingClass", + Properties: map[string]interface{}{ + "stringProp": "updated value", + "phone": &models.PhoneNumber{ + CountryCode: 49, + DefaultCountry: "DE", + Input: "0171 1234567", + Valid: true, + InternationalFormatted: "+49 171 1234567", + National: 1234567, + NationalFormatted: "0171 1234567", + }, + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(1), + Longitude: ptFloat32(2), + }, + }, + } + vector := []float32{1, 3, 5, 0.4} + + err := repo.PutObject(context.Background(), thing, vector, nil, nil, nil, 0) + assert.Nil(t, err) + }) + + t.Run("validating the updates are reflected", func(t *testing.T) { + expected := &models.Object{ + CreationTimeUnix: 1565612833955, + LastUpdateTimeUnix: 10000020, + ID: thingID, + Class: "TheBestThingClass", + VectorWeights: map[string]string(nil), + Properties: 
map[string]interface{}{ + "stringProp": "updated value", + "phone": &models.PhoneNumber{ + CountryCode: 49, + DefaultCountry: "DE", + Input: "0171 1234567", + Valid: true, + InternationalFormatted: "+49 171 1234567", + National: 1234567, + NationalFormatted: "0171 1234567", + }, + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(1), + Longitude: ptFloat32(2), + }, + }, + Additional: models.AdditionalProperties{}, + } + res, err := repo.ObjectByID(context.Background(), thingID, nil, additional.Properties{}, "") + require.Nil(t, err) + assert.Equal(t, expected, res.ObjectWithVector(false)) + + res, err = repo.Object(context.Background(), expected.Class, thingID, nil, + additional.Properties{}, nil, "") + require.Nil(t, err) + assert.Equal(t, expected, res.ObjectWithVector(false)) + }) + + t.Run("finding the updated object by querying for an updated value", + func(t *testing.T) { + // This is to verify the inverted index was updated correctly + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "TheBestThingClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "TheBestThingClass", + Property: "stringProp", + }, + Value: &filters.Value{ + // we would not have found this object before using "updated", as + // this string was only introduced as part of the update + Value: "updated", + Type: schema.DataTypeText, + }, + }, + }, + }) + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, thingID, res[0].ID) + }) + + t.Run("NOT finding the previous version by querying for an outdated value", + func(t *testing.T) { + // This is to verify the inverted index was cleaned up correctly + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "TheBestThingClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + 
On: &filters.Path{ + Class: "TheBestThingClass", + Property: "stringProp", + }, + Value: &filters.Value{ + Value: "some", + Type: schema.DataTypeText, + }, + }, + }, + }) + require.Nil(t, err) + require.Len(t, res, 0) + }) + + t.Run("still finding it for an unchanged term", + func(t *testing.T) { + // This is to verify that while we're adding new links and cleaning up + // old ones, we don't actually touch those that were present and still + // should be + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "TheBestThingClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "TheBestThingClass", + Property: "stringProp", + }, + Value: &filters.Value{ + // we would not have found this object before using "updated", as + // this string was only introduced as part of the update + Value: "value", + Type: schema.DataTypeText, + }, + }, + }, + }) + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, thingID, res[0].ID) + }) + + t.Run("updating the thing back to its original value", func(t *testing.T) { + thing := &models.Object{ + CreationTimeUnix: 1565612833955, + LastUpdateTimeUnix: 1000001, + ID: thingID, + Class: "TheBestThingClass", + Properties: map[string]interface{}{ + "stringProp": "some value", + "phone": &models.PhoneNumber{ + CountryCode: 49, + DefaultCountry: "DE", + Input: "0171 1234567", + Valid: true, + InternationalFormatted: "+49 171 1234567", + National: 1234567, + NationalFormatted: "0171 1234567", + }, + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(1), + Longitude: ptFloat32(2), + }, + }, + } + vector := []float32{1, 3, 5, 0.4} + + err := repo.PutObject(context.Background(), thing, vector, nil, nil, nil, 0) + assert.Nil(t, err) + }) + + actionID := strfmt.UUID("022ca5ba-7c0b-4a78-85bf-26346bbcfae7") + t.Run("adding an action", func(t *testing.T) { + action := &models.Object{ + 
CreationTimeUnix: 1000002, + LastUpdateTimeUnix: 1000003, + ID: actionID, + Class: "TheBestActionClass", + Properties: map[string]interface{}{ + "stringProp": "some act-citing value", + "refProp": models.MultipleRef{ + &models.SingleRef{ + Classification: &models.ReferenceMetaClassification{ + LosingDistance: ptFloat64(0.7), + MeanLosingDistance: ptFloat64(0.7), + ClosestLosingDistance: ptFloat64(0.65), + WinningDistance: 0.3, + MeanWinningDistance: 0.3, + ClosestWinningDistance: 0.25, + ClosestOverallDistance: 0.25, + OverallCount: 3, + WinningCount: 2, + LosingCount: 1, + }, + Beacon: strfmt.URI( + crossref.NewLocalhost("", thingID).String()), + }, + }, + }, + Additional: models.AdditionalProperties{ + "classification": &additional.Classification{ + ID: "foo", + Scope: []string{"scope1", "scope2"}, + ClassifiedFields: []string{"field1", "field2"}, + Completed: timeMust(strfmt.ParseDateTime("2006-01-02T15:04:05.000Z")), + }, + }, + } + vector := []float32{3, 1, 0.3, 12} + + err := repo.PutObject(context.Background(), action, vector, nil, nil, nil, 0) + + assert.Nil(t, err) + }) + + t.Run("searching by vector", func(t *testing.T) { + // the search vector is designed to be very close to the action, but + // somewhat far from the thing. 
So it should match the action closer + searchVector := []float32{2.9, 1.1, 0.5, 8.01} + + res, err := repo.CrossClassVectorSearch(context.Background(), searchVector, "", 0, 10, nil) + + require.Nil(t, err) + require.Equal(t, true, len(res) >= 2) + assert.Equal(t, actionID, res[0].ID) + assert.Equal(t, "TheBestActionClass", res[0].ClassName) + assert.Equal(t, "TheBestActionClass", res[0].ClassName) + assert.Equal(t, int64(1000002), res[0].Created) + assert.Equal(t, int64(1000003), res[0].Updated) + assert.Equal(t, thingID, res[1].ID) + + assert.Equal(t, "TheBestThingClass", res[1].ClassName) + assert.Equal(t, int64(1565612833955), res[1].Created) + assert.Equal(t, int64(1000001), res[1].Updated) + }) + + t.Run("searching by vector for a single class", func(t *testing.T) { + // the search vector is designed to be very close to the action, but + // somewhat far from the thing. So it should match the action closer + searchVector := []float32{2.9, 1.1, 0.5, 8.01} + + params := dto.GetParams{ + ClassName: "TheBestThingClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: nil, + Properties: search.SelectProperties{{Name: "location"}, {Name: "stringProp"}, {Name: "phone"}}, + } + res, err := repo.VectorSearch(context.Background(), params, []string{""}, []models.Vector{searchVector}) + + require.Nil(t, err) + require.Len(t, res, 1, "got exactly one result") + assert.Equal(t, thingID, res[0].ID, "extracted the ID") + assert.Equal(t, "TheBestThingClass", res[0].ClassName, "matches the class name") + schema := res[0].Schema.(map[string]interface{}) + assert.Equal(t, "some value", schema["stringProp"], "has correct string prop") + assert.Equal(t, &models.GeoCoordinates{ptFloat32(1), ptFloat32(2)}, schema["location"], "has correct geo prop") + assert.Equal(t, &models.PhoneNumber{ + CountryCode: 49, + DefaultCountry: "DE", + Input: "0171 1234567", + Valid: true, + InternationalFormatted: "+49 171 1234567", + National: 1234567, + NationalFormatted: "0171 1234567", + }, 
schema["phone"], "has correct phone prop") + assert.Equal(t, models.AdditionalProperties{}, res[0].AdditionalProperties, "no meta information should be included unless explicitly asked for") + assert.Equal(t, thingID, schema["id"], "has id in schema as uuid field") + }) + + t.Run("searching by class type", func(t *testing.T) { + params := dto.GetParams{ + ClassName: "TheBestThingClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: nil, + } + res, err := repo.Search(context.Background(), params) + + require.Nil(t, err) + require.Len(t, res, 1, "got exactly one result") + assert.Equal(t, thingID, res[0].ID, "extracted the ID") + assert.Equal(t, "TheBestThingClass", res[0].ClassName, "matches the class name") + schema := res[0].Schema.(map[string]interface{}) + assert.Equal(t, "some value", schema["stringProp"], "has correct string prop") + assert.Equal(t, &models.GeoCoordinates{ptFloat32(1), ptFloat32(2)}, schema["location"], "has correct geo prop") + assert.Equal(t, thingID, schema["id"], "has id in schema as uuid field") + }) + + t.Run("adding a thing with interpretation additional property", func(t *testing.T) { + thing := &models.Object{ + CreationTimeUnix: 1565612833955, + LastUpdateTimeUnix: 1000001, + ID: thingID, + Class: "TheBestThingClass", + Properties: map[string]interface{}{ + "stringProp": "some value", + "phone": &models.PhoneNumber{ + CountryCode: 49, + DefaultCountry: "DE", + Input: "0171 1234567", + Valid: true, + InternationalFormatted: "+49 171 1234567", + National: 1234567, + NationalFormatted: "0171 1234567", + }, + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(1), + Longitude: ptFloat32(2), + }, + }, + Additional: models.AdditionalProperties{ + "interpretation": map[string]interface{}{ + "source": []interface{}{ + map[string]interface{}{ + "concept": "some", + "occurrence": float64(1), + "weight": float64(1), + }, + map[string]interface{}{ + "concept": "value", + "occurrence": float64(1), + "weight": float64(1), + }, + 
}, + }, + }, + } + vector := []float32{1, 3, 5, 0.4} + + err := repo.PutObject(context.Background(), thing, vector, nil, nil, nil, 0) + + assert.Nil(t, err) + }) + + t.Run("searching all things", func(t *testing.T) { + // as the test suits grow we might have to extend the limit + res, err := repo.ObjectSearch(context.Background(), 0, 100, nil, nil, additional.Properties{}, "") + require.Nil(t, err) + + item, ok := findID(res, thingID) + require.Equal(t, true, ok, "results should contain our desired thing id") + + assert.Equal(t, thingID, item.ID, "extracted the ID") + assert.Equal(t, "TheBestThingClass", item.ClassName, "matches the class name") + schema := item.Schema.(map[string]interface{}) + assert.Equal(t, "some value", schema["stringProp"], "has correct string prop") + assert.Equal(t, &models.GeoCoordinates{ptFloat32(1), ptFloat32(2)}, schema["location"], "has correct geo prop") + assert.Equal(t, thingID, schema["id"], "has id in schema as uuid field") + assert.Equal(t, models.AdditionalProperties{}, item.AdditionalProperties, "has no additional properties unless explicitly asked for") + }) + + t.Run("searching all things with Vector additional props", func(t *testing.T) { + // as the test suits grow we might have to extend the limit + res, err := repo.ObjectSearch(context.Background(), 0, 100, nil, nil, additional.Properties{Vector: true}, "") + require.Nil(t, err) + + item, ok := findID(res, thingID) + require.Equal(t, true, ok, "results should contain our desired thing id") + + assert.Equal(t, thingID, item.ID, "extracted the ID") + assert.Equal(t, "TheBestThingClass", item.ClassName, "matches the class name") + schema := item.Schema.(map[string]interface{}) + assert.Equal(t, "some value", schema["stringProp"], "has correct string prop") + assert.Equal(t, &models.GeoCoordinates{ptFloat32(1), ptFloat32(2)}, schema["location"], "has correct geo prop") + assert.Equal(t, thingID, schema["id"], "has id in schema as uuid field") + assert.Equal(t, []float32{1, 3, 
5, 0.4}, item.Vector, "has Vector property") + }) + + t.Run("searching all things with Vector and Interpretation additional props", func(t *testing.T) { + // as the test suits grow we might have to extend the limit + params := additional.Properties{ + Vector: true, + ModuleParams: map[string]interface{}{ + "interpretation": true, + }, + } + res, err := repo.ObjectSearch(context.Background(), 0, 100, nil, nil, params, "") + require.Nil(t, err) + + item, ok := findID(res, thingID) + require.Equal(t, true, ok, "results should contain our desired thing id") + + assert.Equal(t, thingID, item.ID, "extracted the ID") + assert.Equal(t, "TheBestThingClass", item.ClassName, "matches the class name") + schema := item.Schema.(map[string]interface{}) + assert.Equal(t, "some value", schema["stringProp"], "has correct string prop") + assert.Equal(t, &models.GeoCoordinates{ptFloat32(1), ptFloat32(2)}, schema["location"], "has correct geo prop") + assert.Equal(t, thingID, schema["id"], "has id in schema as uuid field") + assert.Equal(t, []float32{1, 3, 5, 0.4}, item.Vector, "has Vector property") + assert.Equal(t, models.AdditionalProperties{ + "interpretation": map[string]interface{}{ + "source": []interface{}{ + map[string]interface{}{ + "concept": "some", + "occurrence": float64(1), + "weight": float64(1), + }, + map[string]interface{}{ + "concept": "value", + "occurrence": float64(1), + "weight": float64(1), + }, + }, + }, + }, item.AdditionalProperties, "has Vector and Interpretation additional property") + }) + + t.Run("searching a thing by ID", func(t *testing.T) { + item, err := repo.ObjectByID(context.Background(), thingID, search.SelectProperties{}, additional.Properties{}, "") + require.Nil(t, err) + require.NotNil(t, item, "must have a result") + + assert.Equal(t, thingID, item.ID, "extracted the ID") + assert.Equal(t, "TheBestThingClass", item.ClassName, "matches the class name") + schema := item.Schema.(map[string]interface{}) + assert.Equal(t, "some value", 
schema["stringProp"], "has correct string prop") + assert.Equal(t, &models.GeoCoordinates{ptFloat32(1), ptFloat32(2)}, schema["location"], "has correct geo prop") + assert.Equal(t, thingID, schema["id"], "has id in schema as uuid field") + }) + + // Check the same, but with Object() + t.Run("searching a thing by ID", func(t *testing.T) { + item, err := repo.Object(context.Background(), "TheBestThingClass", + thingID, search.SelectProperties{}, additional.Properties{}, nil, "") + require.Nil(t, err) + require.NotNil(t, item, "must have a result") + + assert.Equal(t, thingID, item.ID, "extracted the ID") + assert.Equal(t, "TheBestThingClass", item.ClassName, "matches the class name") + schema := item.Schema.(map[string]interface{}) + assert.Equal(t, "some value", schema["stringProp"], "has correct string prop") + assert.Equal(t, &models.GeoCoordinates{ptFloat32(1), ptFloat32(2)}, schema["location"], "has correct geo prop") + assert.Equal(t, thingID, schema["id"], "has id in schema as uuid field") + }) + + t.Run("listing multiple things by IDs (MultiGet)", func(t *testing.T) { + query := []multi.Identifier{ + { + ID: "be685717-e61e-450d-8d5c-f44f32d0336c", // this id does not exist + ClassName: "TheBestThingClass", + }, + { + ID: thingID.String(), + ClassName: "TheBestThingClass", + }, + } + res, err := repo.MultiGet(context.Background(), query, additional.Properties{}, "") + require.Nil(t, err) + require.Len(t, res, 2, "length must match even with nil-items") + + assert.Equal(t, strfmt.UUID(""), res[0].ID, "empty object for the not-found item") + + item := res[1] + assert.Equal(t, thingID, item.ID, "extracted the ID") + assert.Equal(t, "TheBestThingClass", item.ClassName, "matches the class name") + schema := item.Schema.(map[string]interface{}) + assert.Equal(t, "some value", schema["stringProp"], "has correct string prop") + assert.Equal(t, &models.GeoCoordinates{ptFloat32(1), ptFloat32(2)}, schema["location"], "has correct geo prop") + assert.Equal(t, thingID, 
schema["id"], "has id in schema as uuid field") + }) + + t.Run("searching an action by ID without meta", func(t *testing.T) { + item, err := repo.ObjectByID(context.Background(), actionID, search.SelectProperties{}, additional.Properties{}, "") + require.Nil(t, err) + require.NotNil(t, item, "must have a result") + + assert.Equal(t, actionID, item.ID, "extracted the ID") + assert.Equal(t, "TheBestActionClass", item.ClassName, "matches the class name") + schema := item.Schema.(map[string]interface{}) + assert.Equal(t, "some act-citing value", schema["stringProp"], "has correct string prop") + assert.Equal(t, models.AdditionalProperties{}, item.AdditionalProperties, "not meta information should be included unless explicitly asked for") + expectedRefProp := models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI( + crossref.NewLocalhost("", thingID).String()), + }, + } + assert.Equal(t, expectedRefProp, schema["refProp"]) + }) + + t.Run("searching an action by ID with Classification and Vector additional properties", func(t *testing.T) { + item, err := repo.ObjectByID(context.Background(), actionID, search.SelectProperties{}, additional.Properties{Classification: true, Vector: true, RefMeta: true}, "") + require.Nil(t, err) + require.NotNil(t, item, "must have a result") + + assert.Equal(t, actionID, item.ID, "extracted the ID") + assert.Equal(t, "TheBestActionClass", item.ClassName, "matches the class name") + schema := item.Schema.(map[string]interface{}) + assert.Equal(t, "some act-citing value", schema["stringProp"], "has correct string prop") + assert.Equal(t, models.AdditionalProperties{ + "classification": &additional.Classification{ + ID: "foo", + Scope: []string{"scope1", "scope2"}, + ClassifiedFields: []string{"field1", "field2"}, + Completed: timeMust(strfmt.ParseDateTime("2006-01-02T15:04:05.000Z")), + }, + }, item.AdditionalProperties, "it should include the object meta as it was explicitly specified") + assert.Equal(t, []float32{3, 1, 0.3, 12}, 
item.Vector, "has Vector property") + + expectedRefProp := models.MultipleRef{ + &models.SingleRef{ + Classification: &models.ReferenceMetaClassification{ + LosingDistance: ptFloat64(0.7), + MeanLosingDistance: ptFloat64(0.7), + ClosestLosingDistance: ptFloat64(0.65), + WinningDistance: 0.3, + MeanWinningDistance: 0.3, + ClosestWinningDistance: 0.25, + ClosestOverallDistance: 0.25, + OverallCount: 3, + WinningCount: 2, + LosingCount: 1, + }, + Beacon: strfmt.URI( + crossref.NewLocalhost("", thingID).String()), + }, + } + assert.Equal(t, expectedRefProp, schema["refProp"]) + }) + + t.Run("searching an action by ID with only Vector additional property", func(t *testing.T) { + item, err := repo.ObjectByID(context.Background(), actionID, search.SelectProperties{}, additional.Properties{Vector: true}, "") + require.Nil(t, err) + require.NotNil(t, item, "must have a result") + + assert.Equal(t, actionID, item.ID, "extracted the ID") + assert.Equal(t, "TheBestActionClass", item.ClassName, "matches the class name") + schema := item.Schema.(map[string]interface{}) + assert.Equal(t, "some act-citing value", schema["stringProp"], "has correct string prop") + assert.Equal(t, []float32{3, 1, 0.3, 12}, item.Vector, "it should include the object meta as it was explicitly specified") + }) + + t.Run("searching all actions", func(t *testing.T) { + res, err := repo.ObjectSearch(context.Background(), 0, 10, nil, nil, additional.Properties{}, "") + require.Nil(t, err) + + item, ok := findID(res, actionID) + require.Equal(t, true, ok, "results should contain our desired action id") + + assert.Equal(t, actionID, item.ID, "extracted the ID") + assert.Equal(t, "TheBestActionClass", item.ClassName, "matches the class name") + schema := item.Schema.(map[string]interface{}) + assert.Equal(t, "some act-citing value", schema["stringProp"], "has correct string prop") + }) + + t.Run("sorting all objects", func(t *testing.T) { + // prepare + thingID1 := 
strfmt.UUID("7c8183ae-150d-433f-92b6-ed095b000001") + thingID2 := strfmt.UUID("7c8183ae-150d-433f-92b6-ed095b000002") + thingID3 := strfmt.UUID("7c8183ae-150d-433f-92b6-ed095b000003") + thingID4 := strfmt.UUID("7c8183ae-150d-433f-92b6-ed095b000004") + actionID1 := strfmt.UUID("7c8183ae-150d-433f-92b6-ed095b100001") + actionID2 := strfmt.UUID("7c8183ae-150d-433f-92b6-ed095b100002") + actionID3 := strfmt.UUID("7c8183ae-150d-433f-92b6-ed095b100003") + testData := []struct { + id strfmt.UUID + className string + stringProp string + phone uint64 + longitude float32 + }{ + { + id: thingID1, + className: "TheBestThingClass", + stringProp: "a very short text", + phone: 1234900, + longitude: 10, + }, + { + id: thingID2, + className: "TheBestThingClass", + stringProp: "zebra lives in Zoo", + phone: 1234800, + longitude: 111, + }, + { + id: thingID3, + className: "TheBestThingClass", + stringProp: "the best thing class", + phone: 1234910, + longitude: 2, + }, + { + id: thingID4, + className: "TheBestThingClass", + stringProp: "car", + phone: 1234901, + longitude: 11, + }, + { + id: actionID1, + className: "TheBestActionClass", + stringProp: "a very short text", + phone: 1234000, + longitude: 10, + }, + { + id: actionID2, + className: "TheBestActionClass", + stringProp: "zebra lives in Zoo", + phone: 1234002, + longitude: 5, + }, + { + id: actionID3, + className: "TheBestActionClass", + stringProp: "fossil fuels", + phone: 1234010, + longitude: 6, + }, + } + for _, td := range testData { + object := &models.Object{ + CreationTimeUnix: 1565612833990, + LastUpdateTimeUnix: 1000001, + ID: td.id, + Class: td.className, + Properties: map[string]interface{}{ + "stringProp": td.stringProp, + "phone": &models.PhoneNumber{ + CountryCode: 49, + DefaultCountry: "DE", + Input: fmt.Sprintf("0171 %d", td.phone), + Valid: true, + InternationalFormatted: fmt.Sprintf("+49 171 %d", td.phone), + National: td.phone, + NationalFormatted: fmt.Sprintf("0171 %d", td.phone), + }, + "location": 
&models.GeoCoordinates{ + Latitude: ptFloat32(1), + Longitude: ptFloat32(td.longitude), + }, + }, + } + vector := []float32{1.1, 1.3, 1.5, 1.4} + err := repo.PutObject(context.Background(), object, vector, nil, nil, nil, 0) + assert.Nil(t, err) + } + // run sorting tests + tests := []struct { + name string + sort []filters.Sort + expectedThingIDs []strfmt.UUID + expectedActionIDs []strfmt.UUID + constainsErrorMsgs []string + }{ + { + name: "by stringProp asc", + sort: []filters.Sort{{Path: []string{"stringProp"}, Order: "asc"}}, + expectedThingIDs: []strfmt.UUID{thingID1, thingID4, thingID, thingID3, thingID2}, + expectedActionIDs: []strfmt.UUID{actionID1, actionID3, actionID, actionID2}, + }, + { + name: "by stringProp desc", + sort: []filters.Sort{{Path: []string{"stringProp"}, Order: "desc"}}, + expectedThingIDs: []strfmt.UUID{thingID2, thingID3, thingID, thingID4, thingID1}, + expectedActionIDs: []strfmt.UUID{actionID2, actionID, actionID3, actionID1}, + }, + { + name: "by phone asc", + sort: []filters.Sort{{Path: []string{"phone"}, Order: "asc"}}, + expectedThingIDs: []strfmt.UUID{thingID, thingID2, thingID1, thingID4, thingID3}, + expectedActionIDs: []strfmt.UUID{actionID, actionID1, actionID2, actionID3}, + }, + { + name: "by phone desc", + sort: []filters.Sort{{Path: []string{"phone"}, Order: "desc"}}, + expectedThingIDs: []strfmt.UUID{thingID3, thingID4, thingID1, thingID2, thingID}, + expectedActionIDs: []strfmt.UUID{actionID3, actionID2, actionID1, actionID}, + }, + { + name: "by phone and stringProp asc", + sort: []filters.Sort{ + {Path: []string{"phone"}, Order: "asc"}, + {Path: []string{"stringProp"}, Order: "asc"}, + }, + expectedThingIDs: []strfmt.UUID{thingID, thingID2, thingID1, thingID4, thingID3}, + expectedActionIDs: []strfmt.UUID{actionID, actionID1, actionID2, actionID3}, + }, + { + name: "by location asc", + sort: []filters.Sort{{Path: []string{"location"}, Order: "asc"}}, + constainsErrorMsgs: []string{"search: search index 
thebestactionclass: sort parameter at position 0: " + + "no such prop with name 'location' found in class 'TheBestActionClass' in the schema. " + + "Check your schema files for which properties in this class are available"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res, err := repo.ObjectSearch(context.Background(), 0, 100, nil, tt.sort, additional.Properties{Vector: true}, "") + if len(tt.constainsErrorMsgs) > 0 { + require.NotNil(t, err) + for _, errorMsg := range tt.constainsErrorMsgs { + assert.Contains(t, err.Error(), errorMsg) + } + } else { + require.Nil(t, err) + require.Len(t, res, 9) + + var thingIds, actionIds []strfmt.UUID + for i := range res { + if res[i].ClassName == "TheBestThingClass" { + thingIds = append(thingIds, res[i].ID) + } else { + actionIds = append(actionIds, res[i].ID) + } + } + assert.EqualValues(t, thingIds, tt.expectedThingIDs, "thing ids don't match") + assert.EqualValues(t, actionIds, tt.expectedActionIDs, "action ids don't match") + } + }) + } + // clean up + for _, td := range testData { + err := repo.DeleteObject(context.Background(), td.className, td.id, time.Now(), nil, "", 0) + assert.Nil(t, err) + } + }) + + t.Run("verifying the thing is indexed in the inverted index", func(t *testing.T) { + // This is a control for the upcoming deletion, after the deletion it should not + // be indexed anymore. 
+ res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "TheBestThingClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "TheBestThingClass", + Property: "stringProp", + }, + Value: &filters.Value{ + Value: "some", + Type: schema.DataTypeText, + }, + }, + }, + }) + require.Nil(t, err) + require.Len(t, res, 1) + }) + + t.Run("verifying the action is indexed in the inverted index", func(t *testing.T) { + // This is a control for the upcoming deletion, after the deletion it should not + // be indexed anymore. + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "TheBestActionClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "TheBestActionClass", + Property: "stringProp", + }, + Value: &filters.Value{ + Value: "some", + Type: schema.DataTypeText, + }, + }, + }, + }) + require.Nil(t, err) + require.Len(t, res, 1) + }) + + t.Run("deleting a thing again", func(t *testing.T) { + err := repo.DeleteObject(context.Background(), "TheBestThingClass", thingID, time.Now(), nil, "", 0) + + assert.Nil(t, err) + }) + + t.Run("deleting a action again", func(t *testing.T) { + err := repo.DeleteObject(context.Background(), "TheBestActionClass", actionID, time.Now(), nil, "", 0) + + assert.Nil(t, err) + }) + + t.Run("trying to delete from a non-existing class", func(t *testing.T) { + err := repo.DeleteObject(context.Background(), "WrongClass", thingID, time.Now(), nil, "", 0) + + assert.Equal(t, fmt.Errorf( + "delete from non-existing index for WrongClass"), err) + }) + + t.Run("verifying the thing is NOT indexed in the inverted index", + func(t *testing.T) { + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "TheBestThingClass", + Pagination: &filters.Pagination{Limit: 
10}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "TheBestThingClass", + Property: "stringProp", + }, + Value: &filters.Value{ + Value: "some", + Type: schema.DataTypeText, + }, + }, + }, + }) + require.Nil(t, err) + require.Len(t, res, 0) + }) + + t.Run("verifying the action is NOT indexed in the inverted index", + func(t *testing.T) { + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "TheBestActionClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "TheBestActionClass", + Property: "stringProp", + }, + Value: &filters.Value{ + Value: "some", + Type: schema.DataTypeText, + }, + }, + }, + }) + require.Nil(t, err) + require.Len(t, res, 0) + }) + + t.Run("trying to get the deleted thing by ID", func(t *testing.T) { + item, err := repo.ObjectByID(context.Background(), thingID, search.SelectProperties{}, additional.Properties{}, "") + require.Nil(t, err) + require.Nil(t, item, "must not have a result") + }) + + t.Run("trying to get the deleted action by ID", func(t *testing.T) { + item, err := repo.ObjectByID(context.Background(), actionID, search.SelectProperties{}, additional.Properties{}, "") + require.Nil(t, err) + require.Nil(t, item, "must not have a result") + }) + + t.Run("searching by vector for a single thing class again after deletion", + func(t *testing.T) { + searchVector := []float32{2.9, 1.1, 0.5, 8.01} + params := dto.GetParams{ + ClassName: "TheBestThingClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: nil, + } + + res, err := repo.VectorSearch(context.Background(), params, []string{""}, []models.Vector{searchVector}) + + require.Nil(t, err) + assert.Len(t, res, 0) + }) + + t.Run("searching by vector for a single action class again after deletion", func(t *testing.T) { + searchVector := []float32{2.9, 1.1, 0.5, 
8.01} + params := dto.GetParams{ + ClassName: "TheBestActionClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: nil, + } + + res, err := repo.VectorSearch(context.Background(), params, []string{""}, []models.Vector{searchVector}) + + require.Nil(t, err) + assert.Len(t, res, 0) + }) + + t.Run("ensure referenced class searches are not limited", func(t *testing.T) { + numThings := int(repo.config.QueryMaximumResults * 10) + createdActionIDs := make([]strfmt.UUID, numThings) + createdThingIDs := make([]strfmt.UUID, numThings) + + t.Run("add new action objects", func(t *testing.T) { + actionBatch := make([]objects.BatchObject, numThings) + for i := 0; i < len(createdActionIDs); i++ { + newID := strfmt.UUID(uuid.NewString()) + actionBatch[i] = objects.BatchObject{ + UUID: newID, + Object: &models.Object{ + ID: newID, + Class: "TheBestActionClass", + Properties: map[string]interface{}{ + "stringProp": fmt.Sprintf("action#%d", i), + }, + }, + } + createdActionIDs[i] = newID + } + batchObjResp, err := repo.BatchPutObjects(context.Background(), actionBatch, nil, 0) + require.Len(t, batchObjResp, numThings) + require.Nil(t, err) + for _, r := range batchObjResp { + require.Nil(t, r.Err) + } + }) + + t.Run("add more thing objects to reference", func(t *testing.T) { + thingBatch := make([]objects.BatchObject, numThings) + for i := 0; i < len(createdThingIDs); i++ { + newID := strfmt.UUID(uuid.NewString()) + thingBatch[i] = objects.BatchObject{ + UUID: newID, + Object: &models.Object{ + ID: newID, + Class: "TheBestThingClass", + Properties: map[string]interface{}{ + "stringProp": fmt.Sprintf("thing#%d", i), + }, + }, + } + createdThingIDs[i] = newID + } + batchObjResp, err := repo.BatchPutObjects(context.Background(), thingBatch, nil, 0) + require.Len(t, batchObjResp, numThings) + require.Nil(t, err) + for _, r := range batchObjResp { + require.Nil(t, r.Err) + } + }) + + t.Run("reference each thing from an action", func(t *testing.T) { + refBatch := 
make([]objects.BatchReference, numThings) + for i := range refBatch { + ref := objects.BatchReference{ + From: &crossref.RefSource{ + Local: true, + PeerName: "localhost", + Class: "TheBestActionClass", + Property: schema.PropertyName("refProp"), + TargetID: createdActionIDs[i], + }, + To: &crossref.Ref{ + Local: true, + PeerName: "localhost", + TargetID: createdThingIDs[i], + }, + } + refBatch[i] = ref + } + batchRefResp, err := repo.AddBatchReferences(context.Background(), refBatch, nil, 0) + require.Nil(t, err) + require.Len(t, batchRefResp, numThings) + for _, r := range batchRefResp { + require.Nil(t, r.Err) + } + }) + + t.Run("query every action for its referenced thing", func(t *testing.T) { + for i := range createdActionIDs { + resp, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "TheBestActionClass", + Pagination: &filters.Pagination{Limit: 5}, + AdditionalProperties: additional.Properties{ID: true}, + Properties: search.SelectProperties{ + { + Name: "refProp", + Refs: []search.SelectClass{ + { + ClassName: "TheBestThingClass", + RefProperties: search.SelectProperties{ + { + Name: "stringProp", + IsPrimitive: true, + }, + }, + }, + }, + }, + }, + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "TheBestActionClass", + Property: "stringProp", + }, + Value: &filters.Value{ + Value: fmt.Sprintf("action#%d", i), + Type: schema.DataTypeText, + }, + }, + { + Operator: filters.OperatorLike, + On: &filters.Path{ + Class: "TheBestActionClass", + Property: "refProp", + Child: &filters.Path{ + Class: "TheBestThingClass", + Property: "stringProp", + }, + }, + Value: &filters.Value{ + Value: "thing#*", + Type: schema.DataTypeText, + }, + }, + }, + }, + }, + }) + + require.Nil(t, err) + require.Len(t, resp, 1) + assert.Len(t, resp[0].Schema.(map[string]interface{})["refProp"], 1) + } + }) + }) + + t.Run("query 
obj by id which has no props", func(t *testing.T) { + id := strfmt.UUID("2cd8a381-6568-4724-9d5c-1ef28d439e94") + + t.Run("insert test obj", func(t *testing.T) { + vec := []float32{0.1, 0.2, 0.3, 0.4} + + obj := &models.Object{ + ID: id, + Class: "TheBestActionClass", + Vector: vec, + } + require.Nil(t, repo.PutObject(context.Background(), obj, vec, nil, nil, nil, 0)) + }) + + t.Run("perform search with id filter", func(t *testing.T) { + res, err := repo.Search(context.Background(), dto.GetParams{ + Pagination: &filters.Pagination{Limit: 10}, + ClassName: "TheBestActionClass", + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "TheBestActionClass", + Property: filters.InternalPropID, + }, + Value: &filters.Value{ + Value: id.String(), + Type: schema.DataTypeText, + }, + }, + }, + }) + + require.Nil(t, err) + + expected := []search.Result{ + { + ID: id, + ClassName: "TheBestActionClass", + Schema: map[string]interface{}{ + "id": id, + }, + Score: 0, + AdditionalProperties: models.AdditionalProperties{}, + Dims: 4, + }, + } + + for i := range expected { + expected[i].DocID = res[i].DocID + } + + assert.Equal(t, expected, res) + }) + }) +} + +func Test_PutObject_MultiTenant(t *testing.T) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + testClass := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + MultiTenancyConfig: &models.MultiTenancyConfig{ + AutoTenantActivation: true, + AutoTenantCreation: true, + Enabled: true, + }, + Class: "TestClass", + Properties: []*models.Property{}, + } + + shardState := NewMultiTenantShardingStateBuilder(). + WithNodePrefix("node"). + WithIndexName("multi-tenant-sharding state-index"). + WithReplicationFactor(1). + WithTenant("foo-tenant", "HOT"). 
+ Build() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1", "node2"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + + err = migrator.AddClass(context.Background(), testClass) + require.Nil(t, err) + + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{testClass}, + }, + } + + ok, err := 
repo.Exists(context.Background(), "TestClass", "5b9e2d04-0b2d-4e4b-bd5e-84e2f70a8b49", &additional.ReplicationProperties{ + ConsistencyLevel: "ONE", + }, "foo-tenant") + require.Nil(t, err) + assert.False(t, ok) + + err = repo.PutObject(context.Background(), &models.Object{ + Additional: nil, + Class: "TestClass", + ID: "5b9e2d04-0b2d-4e4b-bd5e-84e2f70a8b49", + Tenant: "foo-tenant", + }, []float32{1.1, 2.2, 3.3, 4.4}, nil, nil, &additional.ReplicationProperties{ + ConsistencyLevel: "ONE", + }, 0) + require.Nil(t, err) + + ok, err = repo.Exists(context.Background(), "TestClass", "5b9e2d04-0b2d-4e4b-bd5e-84e2f70a8b49", &additional.ReplicationProperties{ + ConsistencyLevel: "ONE", + }, "foo-tenant") + require.Nil(t, err) + assert.True(t, ok) +} + +func TestCRUD_Query(t *testing.T) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + thingclass := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Class: "TheBestThingClass", + Properties: []*models.Property{ + { + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + } + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := 
replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the thing class", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), thingclass)) + }) + + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{thingclass}, + }, + } + + t.Run("scroll through all objects", func(t *testing.T) { + // prepare + className := "TheBestThingClass" + thingID1 := strfmt.UUID("7c8183ae-150d-433f-92b6-ed095b000001") + thingID2 := strfmt.UUID("7c8183ae-150d-433f-92b6-ed095b000002") + thingID3 := strfmt.UUID("7c8183ae-150d-433f-92b6-ed095b000003") + thingID4 := strfmt.UUID("7c8183ae-150d-433f-92b6-ed095b000004") + thingID5 := strfmt.UUID("7c8183ae-150d-433f-92b6-ed095b000005") + thingID6 := strfmt.UUID("7c8183ae-150d-433f-92b6-ed095b000006") + thingID7 := strfmt.UUID("7c8183ae-150d-433f-92b6-ed095b000007") + testData := []struct { + id 
strfmt.UUID + className string + stringProp string + phone uint64 + longitude float32 + }{ + { + id: thingID1, + className: className, + stringProp: "a very short text", + }, + { + id: thingID2, + className: className, + stringProp: "zebra lives in Zoo", + }, + { + id: thingID3, + className: className, + stringProp: "the best thing class", + }, + { + id: thingID4, + className: className, + stringProp: "car", + }, + { + id: thingID5, + className: className, + stringProp: "a very short text", + }, + { + id: thingID6, + className: className, + stringProp: "zebra lives in Zoo", + }, + { + id: thingID7, + className: className, + stringProp: "fossil fuels", + }, + } + for _, td := range testData { + object := &models.Object{ + CreationTimeUnix: 1565612833990, + LastUpdateTimeUnix: 1000001, + ID: td.id, + Class: td.className, + Properties: map[string]interface{}{ + "stringProp": td.stringProp, + }, + } + vector := []float32{1.1, 1.3, 1.5, 1.4} + err := repo.PutObject(context.Background(), object, vector, nil, nil, nil, 0) + assert.Nil(t, err) + } + // toParams helper method + toParams := func(className string, offset, limit int, + cursor *filters.Cursor, filters *filters.LocalFilter, sort []filters.Sort, + ) *objects.QueryInput { + return &objects.QueryInput{ + Class: className, + Offset: offset, + Limit: limit, + Cursor: cursor, + Filters: filters, + Sort: sort, + Additional: additional.Properties{}, + } + } + // run scrolling through all results + tests := []struct { + name string + className string + cursor *filters.Cursor + query *objects.QueryInput + expectedThingIDs []strfmt.UUID + constainsErrorMsgs []string + }{ + { + name: "all results with step limit: 100", + query: toParams(className, 0, 100, &filters.Cursor{After: "", Limit: 100}, nil, nil), + expectedThingIDs: []strfmt.UUID{thingID1, thingID2, thingID3, thingID4, thingID5, thingID6, thingID7}, + }, + { + name: "all results with step limit: 1", + query: toParams(className, 0, 1, &filters.Cursor{After: "", 
Limit: 1}, nil, nil), + expectedThingIDs: []strfmt.UUID{thingID1, thingID2, thingID3, thingID4, thingID5, thingID6, thingID7}, + }, + { + name: "all results with step limit: 1 after: thingID4", + query: toParams(className, 0, 1, &filters.Cursor{After: thingID4.String(), Limit: 1}, nil, nil), + expectedThingIDs: []strfmt.UUID{thingID5, thingID6, thingID7}, + }, + { + name: "all results with step limit: 1 after: thingID7", + query: toParams(className, 0, 1, &filters.Cursor{After: thingID7.String(), Limit: 1}, nil, nil), + expectedThingIDs: []strfmt.UUID{}, + }, + { + name: "all results with step limit: 3", + query: toParams(className, 0, 3, &filters.Cursor{After: "", Limit: 3}, nil, nil), + expectedThingIDs: []strfmt.UUID{thingID1, thingID2, thingID3, thingID4, thingID5, thingID6, thingID7}, + }, + { + name: "all results with step limit: 7", + query: toParams(className, 0, 7, &filters.Cursor{After: "", Limit: 7}, nil, nil), + expectedThingIDs: []strfmt.UUID{thingID1, thingID2, thingID3, thingID4, thingID5, thingID6, thingID7}, + }, + { + name: "error on empty class", + query: toParams("", 0, 7, &filters.Cursor{After: "", Limit: 7}, nil, nil), + constainsErrorMsgs: []string{"class not found"}, + }, + { + name: "error on sort parameter", + query: toParams(className, 0, 7, + &filters.Cursor{After: "", Limit: 7}, nil, + []filters.Sort{{Path: []string{"stringProp"}, Order: "asc"}}, + ), + cursor: &filters.Cursor{After: "", Limit: 7}, + constainsErrorMsgs: []string{"sort cannot be set with after and limit parameters"}, + }, + { + name: "error on offset parameter", + query: toParams(className, 10, 7, + &filters.Cursor{After: "", Limit: 7}, nil, + nil, + ), + cursor: &filters.Cursor{After: "", Limit: 7}, + constainsErrorMsgs: []string{"offset cannot be set with after and limit parameters"}, + }, + { + name: "error on offset and sort parameter", + query: toParams(className, 10, 7, + &filters.Cursor{After: "", Limit: 7}, nil, + []filters.Sort{{Path: []string{"stringProp"}, 
Order: "asc"}}, + ), + cursor: &filters.Cursor{After: "", Limit: 7}, + constainsErrorMsgs: []string{"offset,sort cannot be set with after and limit parameters"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if len(tt.constainsErrorMsgs) > 0 { + res, err := repo.Query(context.Background(), tt.query) + require.NotNil(t, err) + assert.Nil(t, res) + for _, errorMsg := range tt.constainsErrorMsgs { + assert.Contains(t, err.Error(), errorMsg) + } + } else { + cursorSearch := func(t *testing.T, className string, cursor *filters.Cursor) []strfmt.UUID { + res, err := repo.Query(context.Background(), toParams(className, 0, cursor.Limit, cursor, nil, nil)) + require.Nil(t, err) + var ids []strfmt.UUID + for i := range res { + ids = append(ids, res[i].ID) + } + return ids + } + + var thingIds []strfmt.UUID + cursor := tt.query.Cursor + for { + result := cursorSearch(t, tt.query.Class, cursor) + thingIds = append(thingIds, result...) + if len(result) == 0 { + break + } + after := result[len(result)-1] + cursor = &filters.Cursor{After: after.String(), Limit: cursor.Limit} + } + + require.Equal(t, len(tt.expectedThingIDs), len(thingIds)) + for i := range tt.expectedThingIDs { + assert.Equal(t, tt.expectedThingIDs[i], thingIds[i]) + } + } + }) + } + // clean up + for _, td := range testData { + err := repo.DeleteObject(context.Background(), td.className, td.id, time.Now(), nil, "", 0) + assert.Nil(t, err) + } + }) +} + +func Test_ImportWithoutVector_UpdateWithVectorLater(t *testing.T) { + r := getRandomSeed() + total := 100 + individual := total / 4 + className := "DeferredVector" + var data []*models.Object + + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + 
mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + + t.Run("prepare data for test", func(t *testing.T) { + data = make([]*models.Object, total) + for i := range data { + data[i] = &models.Object{ + ID: strfmt.UUID(uuid.Must(uuid.NewRandom()).String()), + Class: className, + Properties: map[string]interface{}{ + "int_prop": int64(i), + }, + Vector: nil, + } + } + }) + 
+ t.Run("create required schema", func(t *testing.T) { + class := &models.Class{ + Class: className, + Properties: []*models.Property{ + { + DataType: []string{string(schema.DataTypeInt)}, + Name: "int_prop", + }, + }, + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + } + require.Nil(t, + migrator.AddClass(context.Background(), class)) + }) + + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{{ + Class: className, + Properties: []*models.Property{ + { + DataType: []string{string(schema.DataTypeInt)}, + Name: "int_prop", + }, + }, + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + }}, + }, + } + + t.Run("import individual objects without vector", func(t *testing.T) { + for i := 0; i < individual; i++ { + err := repo.PutObject(context.Background(), data[i], nil, nil, nil, nil, 0) // nil vector ! + require.Nil(t, err) + } + }) + + t.Run("import batch objects without vector", func(t *testing.T) { + batch := make(objects.BatchObjects, total-individual) + + for i := range batch { + batch[i] = objects.BatchObject{ + OriginalIndex: i, + Err: nil, + Object: data[i+individual], + UUID: data[i+individual].ID, + } + } + + res, err := repo.BatchPutObjects(context.Background(), batch, nil, 0) + require.Nil(t, err) + + for _, obj := range res { + require.Nil(t, obj.Err) + } + }) + + t.Run("verify inverted index works correctly", func(t *testing.T) { + res, err := repo.Search(context.Background(), dto.GetParams{ + Filters: buildFilter("int_prop", total+1, lte, dtInt), + ClassName: className, + Pagination: &filters.Pagination{ + Offset: 0, + Limit: total, + }, + }) + require.Nil(t, err) + assert.Len(t, res, total) + }) + + t.Run("perform unfiltered vector search and verify there are no matches", func(t *testing.T) { + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + Filters: nil, 
+ ClassName: className, + Pagination: &filters.Pagination{ + Offset: 0, + Limit: total, + }, + }, []string{""}, []models.Vector{randomVector(r, 7)}) + require.Nil(t, err) + assert.Len(t, res, 0) // we skipped the vector on half the elements, so we should now match half + }) + + t.Run("update some of the objects to add vectors", func(t *testing.T) { + for i := range data { + if i%2 == 1 { + continue + } + + data[i].Vector = randomVector(r, 7) + err := repo.PutObject(context.Background(), data[i], data[i].Vector, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + + t.Run("perform unfiltered vector search and verify correct matches", func(t *testing.T) { + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + Filters: nil, + ClassName: className, + Pagination: &filters.Pagination{ + Offset: 0, + Limit: total, + }, + }, []string{""}, []models.Vector{randomVector(r, 7)}) + require.Nil(t, err) + assert.Len(t, res, total/2) // we skipped the vector on half the elements, so we should now match half + }) + + t.Run("perform filtered vector search and verify correct matches", func(t *testing.T) { + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + Filters: buildFilter("int_prop", 50, lt, dtInt), + ClassName: className, + Pagination: &filters.Pagination{ + Offset: 0, + Limit: total, + }, + }, []string{""}, []models.Vector{randomVector(r, 7)}) + require.Nil(t, err) + // we skipped the vector on half the elements, and cut the list in half with + // the filter, so we're only expected a quarter of the total size now + assert.Len(t, res, total/4) + }) +} + +func TestVectorSearch_ByDistance(t *testing.T) { + className := "SomeClass" + class := &models.Class{ + Class: className, + Properties: []*models.Property{ + { + DataType: []string{string(schema.DataTypeInt)}, + Name: "int_prop", + }, + }, + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + } + + dirName := t.TempDir() + logger, _ := 
test.NewNullLogger() + + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + // this is set really low to ensure that search + // by distance is conducted, which executes + // without regard to this value + QueryMaximumResults: 1, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, 
"node1") + + t.Run("create required schema", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), class)) + }) + + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + searchVector := []float32{-0.10190568, -0.06259751, 0.05616188, -0.19249836, 0.09714927, -0.1902525, -0.064424865, -0.0387358, 0.17581701, 0.4476738, 0.29261824, 0.12026761, -0.19975126, 0.023600178, 0.17348698, 0.12701738, -0.36018127, -0.12051587, -0.17620522, 0.060741074, -0.064512916, 0.18640806, -0.1529852, 0.08211839, -0.02558465, -0.11369845, 0.0924098, -0.10544433, -0.14728987, -0.041860342, -0.08533595, 0.25886244, 0.2963937, 0.26010615, 0.2111097, 0.029396622, 0.01429563, 0.06410264, -0.119665794, 0.33583277, -0.05802661, 0.023306102, 0.14435922, -0.003951336, -0.13870825, 0.07140894, 0.10469943, -0.059021875, -0.065911904, 0.024216041, -0.26282874, 0.04896568, -0.08291928, -0.12793182, -0.077824734, 0.08843151, 0.31247458, -0.066301286, 0.006904921, -0.08277095, 0.13936226, -0.64392364, -0.19566211, 0.047227614, 0.086121306, -0.20725192, -0.096485816, -0.16436341, -0.06559169, -0.019639932, -0.012729637, 0.08901619, 0.0015896161, -0.24789932, 0.35496348, -0.16272856, -0.01648429, 0.11247674, 0.08099968, 0.13339259, 0.055829972, -0.34662855, 0.068509, 0.13880715, 0.3201848, -0.055557363, 0.22142135, -0.12867308, 0.0037871755, 0.24888979, -0.007443307, 0.08906625, -0.02022331, 0.11510742, -0.2385861, 0.16177008, -0.16214795, -0.28715602, 0.016784908, 0.19386634, -0.07731616, -0.100485384, 0.4100712, 0.061834496, -0.2325293, -0.026056025, -0.11632323, -0.17040555, -0.081960455, -0.0061040106, -0.05949373, 0.044952348, -0.079565264, 0.024430245, -0.09375341, -0.30249637, 0.115205586, -0.13083287, -0.04264671, -0.089810364, 0.16227561, 0.07318055, -0.10496504, 0.00063501706, -0.04936106, -0.0022282854, 1.0893154, 0.1698662, -0.019563455, -0.011128426, 
0.04477475, -0.15656771, -0.056911886, -0.5759019, -0.1881429, 0.17088258, 0.24124439, 0.111288875, -0.0015475494, -0.021278847, -0.08362156, 0.09997524, -0.094385885, -0.1674031, 0.061180864, 0.28517494, -0.016217072, 0.025866214, -0.22854298, -0.17924422, -0.037767246, 0.12252907, -0.31698978, -0.038031228, 0.055408552, 0.1743545, -0.040576655, 0.1293942, -0.56650764, -0.10306195, -0.19548112, -0.245544, -0.018241389, -0.039024632, -0.31659162, 0.1565075, 0.08412337, 0.13177724, -0.13766576, -0.15355161, -0.16960397, -0.012436442, 0.04828157, 0.12566057, -0.35308784, -0.37520224, -0.1265899, -0.13991497, 0.14402144, 0.117542416, -0.20750546, -0.5849919, -0.010469457, -0.19677396, 0.011365964, 0.00666846, -0.083470255, 0.24928358, 0.07026387, 0.19082819, 0.24557637, 0.014292963, 0.14846677, 0.031625308, -0.20398879, 0.19507346, -0.18119761, -0.045725327, -0.042455163, -0.099733196, -0.33636123, -0.28447086, 0.30274838, -0.01603988, -0.0529655, 0.15784146, 0.08746072, -0.1703993, 0.2414512, 0.060322937, -0.00812057, 0.031162385, -0.1764905, 0.22107981, -0.016657066, 0.31948856, 0.07282925, -0.036991462, 0.01266936, -0.009106514, -0.038732465, 0.20973183, 0.033236098, -0.10673938, -0.06880061, 0.115524575, -0.39688373, 0.08749971, -0.21816005, -0.22100002, -0.3716853, -0.14720486, 0.24316181, 0.29673144, 0.020808747, 0.07658521, 0.16310681, 0.38785335, 0.0992224, 0.14177811, 0.025954131, -0.08690783, 0.19653428, 0.09584941, 0.040072605, -0.00038361162, -0.094546966, 0.1910902, 0.13217318, 0.060072783, -0.0655816, 0.2777626, 0.1799169, 0.20187178, -0.0996889, -0.01932122, -0.13133621, 0.057482753, -0.36892185, -0.032093313, 0.14607865, 0.12033318, -0.041683596, -0.2048406, -0.041777443, -0.14975598, -0.2526341, 0.12659752, 0.010567178, -0.297333, -0.27522174, 0.06923473, 0.043150593, -0.017045585, -0.2400216, 0.11413547, -0.40081662, -0.0018820907, 0.13800722, 0.085972115, -0.01519989, -0.10491216, 0.09170084, 0.063085504, 0.046743374, -0.014466267, 0.09880224, 
0.027706565, 0.09951337, 0.17317492, -0.025654864, 0.14658073, 0.042377427, -0.08402882, -0.12423425, 0.32714987, -0.1527207, 0.106094465, 0.017378228, -0.06302387} + searchObject := strfmt.UUID("fe687bf4-f10f-4c23-948d-0746ea2927b3") + + tests := map[strfmt.UUID]struct { + inputVec []float32 + expected bool + }{ + strfmt.UUID("88460290-03b2-44a3-9adb-9fa3ae11d9e6"): { + inputVec: []float32{-0.11015724, -0.05380307, 0.027512914, -0.16925375, 0.08306809, -0.19312492, -0.08910436, -0.011051652, 0.17981204, 0.40469593, 0.28226805, 0.09381516, -0.18380599, 0.03102771, 0.1645333, 0.1530153, -0.3187937, -0.10800173, -0.18466279, 0.0004336393, -0.0495677, 0.19905856, -0.11614494, 0.08834681, -0.011200292, -0.11969374, 0.12497086, -0.12427251, -0.13395442, -0.0060353535, -0.07504816, 0.23205791, 0.2982508, 0.2517544, 0.176147, -0.036871903, 0.017852835, 0.040007118, -0.118621, 0.3648693, -0.058933854, 0.04004229, 0.11871147, -0.019860389, -0.12701912, 0.106662825, 0.086498804, -0.04303973, -0.0742352, 0.018250324, -0.26544014, 0.029228423, -0.087171465, -0.1282789, -0.06403083, 0.09680911, 0.31433868, -0.081510685, -0.011283603, -0.041624587, 0.16530018, -0.6714878, -0.2436993, 0.03173918, 0.106117725, -0.20803581, -0.10429562, -0.16975354, -0.078582145, -0.0065962705, -0.06840946, 0.094937086, -0.020617036, -0.23795949, 0.34785536, -0.19834635, -0.015064479, 0.11930141, 0.090962164, 0.120560184, 0.054095767, -0.38602966, 0.057141174, 0.12039684, 0.32000408, -0.05146908, 0.20762976, -0.09342379, 0.037577383, 0.23894139, -0.0075003104, 0.104791366, -0.015841056, 0.102840215, -0.20813248, 0.1855997, -0.12594056, -0.27132365, -0.0055563124, 0.21954241, -0.10798524, -0.111896284, 0.44049335, 0.049884494, -0.22339955, -0.005374135, -0.120713554, -0.22275059, -0.09146004, 0.017188415, -0.106493734, 0.045247544, -0.07725446, 0.056848228, -0.10294392, -0.2896642, 0.112891, -0.13773362, -0.089911595, -0.13500965, 0.14051703, 0.040092673, -0.13896292, 0.04580957, -0.014300959, 
0.03737215, 1.0661443, 0.19767477, -0.07703914, -0.012910904, -0.0037716173, -0.14437087, -0.06938004, -0.5348036, -0.16047458, 0.19416414, 0.21938956, 0.092242256, -0.012630808, -0.021863988, -0.051702406, 0.08780951, -0.0815602, -0.15332024, 0.077632725, 0.25709584, -0.025725808, 0.042116437, -0.22687604, -0.11791685, -0.028626656, 0.16734225, -0.3017483, -0.03236202, 0.02888077, 0.18193199, -0.009032297, 0.14454253, -0.511494, -0.12119192, -0.20757924, -0.2561716, -0.03904554, -0.07348411, -0.28547177, 0.15967208, 0.079396725, 0.14358875, -0.12829632, -0.18175666, -0.15540425, -0.020419862, 0.019070208, 0.12168836, -0.3428434, -0.357543, -0.11218741, -0.12834033, 0.13564876, 0.12768728, -0.1817461, -0.61235875, -0.029409664, -0.19765733, 0.03872163, 0.0074950717, -0.10025679, 0.2872255, 0.033995092, 0.12945211, 0.21831632, 0.04666009, 0.14233032, 0.016767867, -0.2039244, 0.2000191, -0.13099428, -0.020210614, -0.06286195, -0.0948797, -0.34830436, -0.21595824, 0.32722405, -0.024735296, -0.07859145, 0.16975155, 0.08186461, -0.19249061, 0.23405583, 0.04837592, 0.021467948, -0.022215014, -0.14892808, 0.23908867, -0.050126728, 0.2867907, 0.07740656, -0.01714987, -0.0046314416, -0.0048108613, -0.007407311, 0.1807499, 0.049772616, -0.14680666, -0.07335314, 0.09023705, -0.40600133, 0.05522128, -0.20085222, -0.20410904, -0.34319055, -0.10792269, 0.2297779, 0.30397663, 0.05230268, 0.06408224, 0.13797496, 0.3691112, 0.083033495, 0.13695791, -0.015612457, -0.06413475, 0.18117142, 0.12928344, 0.049171276, 0.016104931, -0.102417335, 0.19589683, 0.14380622, 0.0748065, -0.005402455, 0.27243868, 0.14925551, 0.19564849, -0.10738364, -0.054175537, -0.10068278, 0.06004795, -0.38755924, -0.01654251, 0.1394104, 0.0968949, 0.004271706, -0.17105898, -0.050423585, -0.15311627, -0.24458972, 0.12665795, -0.022814916, -0.23887472, -0.289588, 0.05521137, 0.041259795, -0.021133862, -0.23674431, 0.08424598, -0.37863016, 0.017239956, 0.13776784, 0.060790475, 0.057887543, -0.08784489, 
0.08803934, 0.027996546, 0.085972995, -0.014455558, 0.11668073, 0.03812387, 0.088413864, 0.22228678, -0.015599858, 0.11000236, 0.035271563, -0.08088438, -0.13092226, 0.29378533, -0.12311522, 0.09377676, 0.02948718, -0.09136077}, + expected: true, + }, + strfmt.UUID("c99bc97d-7035-4311-94f3-947dc6471f51"): { + inputVec: []float32{-0.07545987, -0.046643265, 0.044445477, -0.18531442, 0.07922216, -0.19388637, -0.069393866, -0.036144026, 0.1713317, 0.41803706, 0.23576374, 0.073170714, -0.14085358, 0.012535708, 0.17439325, 0.10057567, -0.33506152, -0.06800867, -0.18882714, 0.002687021, -0.03276807, 0.17267752, -0.13951558, 0.071382746, 0.020254405, -0.10178502, 0.13977699, -0.107296936, -0.113307, -0.002506761, -0.092065684, 0.21008658, 0.31157792, 0.19640765, 0.15769793, -0.0050196033, 0.0022481605, 0.015436289, -0.11822955, 0.31494477, -0.07425527, 0.051401984, 0.11648046, -0.00016831602, -0.12758006, 0.06814693, 0.06108981, -0.025454175, -0.018695071, 0.041827776, -0.23480764, 0.06652185, -0.078328855, -0.121668324, -0.04341973, 0.114403985, 0.32614416, -0.07992741, -0.019665314, -0.017408244, 0.12615794, -0.6350545, -0.17056493, 0.07171332, 0.047071394, -0.18428493, -0.09011123, -0.15995751, -0.03345579, -0.014678, -0.038699757, 0.044125225, 0.0042562615, -0.29445595, 0.30460796, -0.13630153, 0.00014055961, 0.08996278, 0.08948901, 0.12164838, 0.079090506, -0.36153567, 0.02817218, 0.11914518, 0.29805067, -0.07431765, 0.16793592, -0.099549234, 0.045226075, 0.22235383, -0.045654725, 0.09233901, -0.004902314, 0.08621588, -0.19723448, 0.19557382, -0.13199815, -0.22924824, -0.015981175, 0.19762704, -0.08940076, -0.084909916, 0.43372774, 0.026998578, -0.20827708, 0.037450224, -0.078997016, -0.18065391, -0.071308024, 0.00870316, -0.114981964, 0.017085023, -0.07696264, 0.009330409, -0.097458, -0.25530958, 0.118254915, -0.12516825, -0.008301536, -0.20432962, 0.15235707, 0.012840041, -0.18034773, 0.039270073, -0.03131139, 0.013706253, 0.98688674, 0.18840753, -0.055119563, 
0.00836046, 0.019445436, -0.10701598, -0.024610046, -0.50088006, -0.15488546, 0.14209819, 0.1798376, 0.08615982, -0.0119235935, -0.0060070297, -0.08406098, 0.10096481, -0.09077014, -0.15957798, 0.10556352, 0.2100476, -0.036947068, 0.05316554, -0.20480183, -0.14873864, -0.0069070593, 0.16027303, -0.288908, -0.04487129, 0.0705415, 0.11973847, -0.0017247469, 0.14092937, -0.5262047, -0.094283305, -0.19120996, -0.2816572, -0.010916339, -0.07984056, -0.28659204, 0.13706332, 0.07364347, 0.12300072, -0.17554194, -0.16378267, -0.15244205, 0.00075927645, 0.017289847, 0.12072629, -0.33452734, -0.33727616, -0.12780978, -0.09350711, 0.105674624, 0.10770573, -0.17278843, -0.5760599, -0.013741414, -0.15395893, 0.009837732, 0.015417911, -0.11384676, 0.24567491, 0.04905973, 0.10762609, 0.2131752, 0.019281652, 0.11665857, 0.022718405, -0.2234067, 0.23241606, -0.12194457, -0.049972955, -0.012225418, -0.14856412, -0.386102, -0.23018965, 0.28920102, -0.023396742, -0.114672944, 0.12130062, 0.05654803, -0.16194181, 0.24095012, 0.03644393, 0.028024165, -0.008832254, -0.16496961, 0.19496499, -0.035887964, 0.25981775, 0.0970074, 0.0013458093, -0.009548204, 0.040741496, -0.019192837, 0.20718361, -0.004034228, -0.1343262, -0.06990001, 0.09888768, -0.35942966, 0.043895893, -0.19182123, -0.17963983, -0.3222771, -0.10223457, 0.23866613, 0.25855777, 0.04051543, 0.08756274, 0.15683484, 0.37856522, 0.04853359, 0.10198129, -0.0061066896, -0.049892712, 0.17087941, 0.14563805, 0.06984385, 0.0071270005, -0.11838641, 0.18716812, 0.14013803, 0.05242403, 0.034357738, 0.3083466, 0.14742611, 0.17841975, -0.124118194, -0.014102871, -0.052544866, 0.037493005, -0.33485797, -0.013164912, 0.1066288, 0.11141791, -0.04029921, -0.16429856, -0.032241724, -0.15965424, -0.2430594, 0.13654563, 0.009401224, -0.2045843, -0.28467956, 0.07325551, 0.027996557, -0.033877768, -0.24350801, 0.08329816, -0.35555813, 0.006908567, 0.07227365, 0.03188268, 0.032559503, -0.09180395, 0.05601515, 0.0047281734, 0.06878795, -0.018943194, 
0.08251342, 0.042039152, 0.12902294, 0.20526606, -0.014881293, 0.11723917, 0.0115632, -0.09016013, -0.12117223, 0.31020245, -0.111444525, 0.077845715, 0.00046715315, -0.104099475}, + expected: true, + }, + strfmt.UUID("fe687bf4-f10f-4c23-948d-0746ea2927b3"): { + inputVec: []float32{-0.20739016, -0.19551805, 0.06645163, 0.008650202, 0.03700748, -0.04132599, -0.029881354, 0.04684896, 0.096614264, 0.42888844, 0.10003969, 0.026234219, -0.051639702, -0.118660435, 0.14473079, 0.2911885, -0.1180539, -0.16804434, -0.48081538, 0.021702053, 0.12612472, 0.15442817, -0.05836532, 0.074295096, -0.28077397, -0.24297802, 0.047836643, -0.36753318, -0.30482984, 0.09265357, 0.25571078, 0.41130066, 0.46177864, 0.34033778, 0.20721313, -0.37726295, 0.07721501, 0.08009689, 0.00027321206, 0.5168123, -0.15305339, 0.0937765, 0.096195236, -0.21120761, 0.014014921, 0.3133104, 0.20773117, 0.08483507, -0.27784437, -0.17281856, -0.6050923, -0.22439326, -0.16914369, -0.3149047, -0.13828672, 0.16334395, -0.0018224253, -0.024342008, 0.3511251, 0.04979151, 0.34223744, -0.6965703, -0.36211932, -0.27092442, 0.34418032, -0.09667905, 0.13344757, -0.15622364, -0.24129291, 0.06958589, -0.2681816, -0.09497071, -0.08923615, -0.06642436, 0.48688608, -0.33535984, 0.014242731, 0.079838976, 0.32949054, 0.09051045, -0.2653392, -0.47393548, 0.07508276, 0.0062832804, 0.724184, -0.18929236, 0.11718613, 0.049603477, 0.08766128, 0.31040704, 0.04038693, -0.0017023507, -0.18986607, 0.056264438, -0.20978904, -0.107441366, -0.30505633, -0.45781082, -0.11571784, 0.32160303, -0.1347523, -0.08090298, 0.51651996, -0.023250414, -0.18725531, -0.14222279, 0.009277832, -0.49789724, -0.25156206, 0.0042495225, 0.0038805408, -0.031416763, 0.10277136, 0.14383446, -0.23241928, -0.42357358, 0.027033398, -0.2262604, -0.2685295, -0.14510548, 0.18256307, 0.063297585, 0.027636252, 0.081166506, 0.06726344, 0.1677495, 1.5217289, 0.33152232, -0.2209926, 0.051426213, 0.15640806, -0.30210486, -0.32857975, -0.4170022, -0.028293105, 0.28772062, 
0.50510746, 0.09162247, -0.12383193, -0.25066972, -0.1441897, 0.107192926, -0.07404076, 0.0042472635, 0.11014519, 0.22332853, 0.09434378, -0.3278343, 0.041899726, 0.06838457, 0.10983681, 0.11864574, -0.25336757, -0.047530346, -0.027303243, 0.37403497, 0.13420461, 0.14946426, -0.41996637, -0.037703935, -0.47961184, -0.29839846, -0.103934005, -0.12058302, -0.12806267, 0.22814582, 0.3904893, -0.16044962, -0.17479864, -0.33139735, -0.29185295, 0.0653074, 0.042426735, 0.06092335, -0.18776153, -0.52555144, -0.15889317, -0.20644087, 0.2293067, 0.26668283, -0.15607063, -0.696593, -0.08224992, -0.4283747, 0.26883888, -0.031052848, -0.1311875, 0.26636878, 0.16457985, 0.15660451, 0.10629464, 0.17345549, 0.23963387, 0.22997221, -0.111713186, -0.08499592, -0.2274625, 0.19285984, -0.08285016, -0.02692149, -0.3426618, -0.13361897, 0.2870389, -0.12032792, -0.22944619, 0.25588584, 0.24607788, -0.2762531, 0.30983892, 0.011088746, -0.15739818, 0.053215, -0.21660997, 0.033805694, -0.17886437, 0.2979239, 0.2163545, -0.08381542, 0.19666128, -0.28977823, -0.20994817, -0.012160099, 0.057499636, -0.12549455, 0.19303595, -0.14420606, -0.51937664, 0.23400985, -0.27893808, -0.2660984, -0.27870297, -0.32149136, 0.19958079, 0.34468395, 0.18947665, -0.16529581, 0.101419374, 0.30195153, 0.09030288, 0.12496541, 0.02999903, -0.016697621, 0.15314853, 0.27848768, 0.24102053, 0.06933273, 0.08923653, 0.10477832, 0.4389032, 0.15679164, -0.11119637, 0.134823, 0.30230528, 0.20818473, -0.005579584, -0.3474488, -0.44394243, 0.22270252, -0.3668763, 0.07474772, 0.011691334, 0.088187896, 0.23832949, -0.07960201, 0.066471875, 0.034641538, -0.39984587, 0.0032980456, -0.28492525, -0.46358657, -0.2148288, -0.107226945, 0.02734428, -0.24686679, -0.123900555, 0.18174778, -0.31248868, 0.13808723, 0.31549984, 0.21521719, 0.13966985, -0.27272752, 0.12091104, 0.14257833, 0.23175247, 0.15639938, 0.40828535, 0.31916845, 0.023645567, 0.20658277, -0.20365283, 0.113746524, 0.13173752, -0.050343305, -0.31581175, 0.09704622, 
-0.014172505, 0.16924341, 0.30327854, -0.17770194}, + expected: false, + }, + strfmt.UUID("e7bf6c45-de72-493a-b273-5ef198974d61"): { + inputVec: []float32{0.089313604, -0.050221898, 0.18352903, 0.16257699, 0.14520381, 0.17993976, 0.14594483, 0.019256027, -0.15505213, 0.23606326, -0.14456263, 0.2679586, -0.112208664, 0.12997514, 0.0051072896, 0.28151348, -0.10495799, 0.026782967, -0.38603118, 0.16190273, -0.0428943, -0.16265322, -0.17910561, 0.0746288, -0.3117934, -0.15871756, -0.11377734, -0.06822346, -0.13829489, 0.13019162, 0.30741218, 0.16194165, 0.013218932, 0.054517113, 0.12490437, -0.07709048, 0.02556826, -0.21159878, -0.09082174, 0.24629511, 0.05013666, 0.25168124, -0.14423938, -0.0937688, -0.07811525, -0.049346007, 0.3592527, 0.30411252, -0.1168557, 0.18870471, 0.06614835, -0.20099068, -0.084436245, 0.073036775, -0.03448665, -0.11147946, -0.10862863, -0.012393957, 0.18990599, 0.060957544, 0.19518377, -0.027541652, -0.26750082, -0.12780671, 0.09570065, -0.03541132, 0.094820626, -0.13539355, -0.09468136, 0.18476579, -0.20970085, -0.20989786, -0.12084438, -0.04517079, -0.008074663, 0.02824076, 0.114496395, -0.20462593, 0.103516705, -0.101554185, -0.1374868, -0.24884155, -0.08101618, -0.016105993, 0.22608215, -0.007247754, -0.17246912, 0.058247145, -0.041018173, 0.19471274, -0.022576109, 0.032828204, -0.079321206, -0.09259324, 0.041115705, -0.25280195, -0.28517374, -0.19496292, 0.18070905, 0.06384923, -0.004056949, 0.1536253, 0.17861623, -0.033833142, 0.12039968, 0.04458716, 0.08793809, -0.15683243, -0.1087904, 0.1741014, 0.007256374, -0.20265253, 0.034111258, 0.03311363, -0.09449356, -0.13161612, -0.026084669, 0.07609202, 0.03452338, 0.08840356, -0.044566724, 0.1507175, 0.089273594, 0.18872644, 0.18333815, -0.023196407, 0.63831943, 0.20309874, 0.10217627, 0.11445079, 0.18965706, -0.16809432, -0.343172, -0.06439529, 0.08362327, 0.32746288, 0.38483366, 0.020372175, -0.25239283, 0.019468365, -0.016367752, 0.016749177, 0.024621855, 0.030529505, 0.20601188, 
-0.100692995, -0.16414656, -0.23193358, 0.26616478, 0.06166736, 0.14341855, 0.1294041, 0.045133967, 0.0014262896, -0.0194398, 0.040737696, 0.10099013, -0.10838136, -0.28768313, -0.073719576, -0.15836753, -0.10482511, -0.1349642, -0.107005455, 0.01957546, 0.13799994, 0.056444198, -0.38841644, -0.07585945, -0.018703599, -0.19934878, 0.15176265, 0.04133126, 0.063531734, 0.09720055, -0.29999572, 0.04765686, -0.23604262, 0.081500284, 0.056092553, -0.13664724, -0.37729686, 0.031137427, -0.052083906, 0.117984496, -0.14562207, -0.029609507, 0.13725121, 0.090367764, 0.12787215, 0.11026589, 0.25123242, 0.12911159, 0.055398554, 0.0032232201, 0.026706887, 0.14584258, 0.019900957, -0.12197998, -0.087177716, -0.24649806, -0.17869286, 0.07139921, -0.09633085, -0.16027117, 0.23617831, 0.05429949, -0.061085824, 0.040451035, 0.052443117, -0.14255014, 0.15598148, -0.2336374, 0.08394173, -0.34318882, 0.3419207, 0.18282516, -0.03709172, 0.10525048, -0.1871602, -0.22663523, 0.01635051, 0.16996534, -0.18056048, -0.169894, -0.18467705, -0.3641231, 0.060861763, -0.080082566, -0.08888943, 0.11629789, -0.00973362, 0.07452957, 0.25680214, 0.042024083, -0.024963235, 0.1743134, 0.10921186, 0.25191578, 0.028438354, 0.004781374, -0.08364819, 0.051807538, 0.1165724, 0.29184434, -0.21512283, 0.12515399, -0.08803969, 0.41930157, -0.10181762, 0.038189832, 0.085555896, -0.026453126, 0.04717047, 0.12667313, 0.023158737, -0.45877644, 0.18732828, 0.062374037, -0.21956007, -0.04449947, 0.19028638, 0.1359094, 0.26384917, 0.077602044, 0.35136092, 0.069637895, 0.048263475, -0.02498448, -0.09221205, -0.012142404, -0.124592446, 0.14599627, -0.050875153, -0.25454503, -0.069588415, -0.29793787, -0.13407284, 0.25388947, 0.35565627, -0.034204755, 0.0024766966, 0.086427726, -0.054318108, 0.063218184, -0.037823644, 0.108287826, 0.14440496, 0.025134278, 0.14978257, -0.03355889, 0.02980915, -0.13764386, 0.4167542, -0.03938922, 0.026970355, 0.24595529, 0.111741625, -0.074567944, -0.057232533}, + expected: false, + }, + 
strfmt.UUID("0999d109-1d5f-465a-bd8b-e3fbd46f10aa"): { + inputVec: []float32{-0.10486144, -0.07437922, 0.069469325, -0.1438278, 0.07740161, -0.18606456, -0.09991434, -0.020051572, 0.19863395, 0.4347328, 0.297606, 0.07853262, -0.16025662, 0.023596637, 0.16935731, 0.17052403, -0.29870638, -0.10309007, -0.20055692, 0.0027809117, -0.03928043, 0.21178603, -0.13793766, 0.08118157, 0.006693433, -0.13829204, 0.14778963, -0.13180175, -0.21128704, -0.0026104634, -0.076393716, 0.22200249, 0.32417125, 0.26045212, 0.1783609, -0.114116184, 0.0100981165, 0.07233143, -0.15913877, 0.4238603, -0.036907215, 0.0595873, 0.0807002, -0.07637312, -0.12889846, 0.111177936, 0.091114685, -0.018454906, -0.12132672, 0.056664582, -0.30461523, 0.020763714, -0.10992191, -0.14430659, -0.092879646, 0.13615008, 0.33039626, -0.115675874, 0.03607886, -0.027918883, 0.19531779, -0.7211654, -0.23073879, 0.011791817, 0.1315166, -0.22779183, -0.13773227, -0.1814997, -0.09008116, 0.021698939, -0.102921166, 0.090760864, 0.011856942, -0.25561005, 0.40769714, -0.21286584, -0.018059848, 0.13812906, 0.079457305, 0.12631191, 0.0024881593, -0.4282836, 0.0619608, 0.12207897, 0.39083096, -0.009502015, 0.19990632, -0.06503092, 0.0635979, 0.27579078, -0.020699967, 0.068474516, 0.0043831975, 0.10303624, -0.1885405, 0.22989234, -0.15952443, -0.29842895, 0.006752088, 0.22831629, -0.13150804, -0.13695218, 0.5357904, 0.050116863, -0.24064547, -0.01375713, -0.096647836, -0.24984525, -0.10429946, 0.002098812, -0.08113263, 0.05237009, -0.10246039, 0.05234802, -0.13899775, -0.3439524, 0.12522809, -0.18406768, -0.09022853, -0.19954625, 0.15810682, 0.039185096, -0.13576287, 0.045047805, 0.0035671506, 0.055920787, 1.1730403, 0.24019612, -0.13423051, -0.008052084, -0.00431602, -0.17079304, -0.09064658, -0.58728856, -0.1365065, 0.22919424, 0.22795208, 0.13396585, 0.018962797, -0.0075796233, -0.072394304, 0.10908417, -0.10881145, -0.16565171, 0.10378018, 0.27296618, -0.059810717, 0.03355443, -0.22429268, -0.12499127, -0.0441017, 
0.20800696, -0.29992488, -0.003536096, 0.0026575085, 0.2427503, -0.007395092, 0.13233404, -0.5494433, -0.13144702, -0.2899963, -0.27367246, -0.05257514, -0.0939783, -0.267614, 0.16651331, 0.13891254, 0.08047202, -0.14046521, -0.19062972, -0.1433134, 0.0067776316, 0.00207368, 0.12986982, -0.35847133, -0.41852546, -0.15541135, -0.09865207, 0.14805861, 0.17072491, -0.22655731, -0.6473966, -0.007884447, -0.2060257, 0.035390265, 0.02781265, -0.09760371, 0.30535778, 0.047540557, 0.14565119, 0.21733035, 0.06558403, 0.13184759, 0.044231005, -0.22218557, 0.1897204, -0.1596938, 0.017510587, -0.030249557, -0.082377456, -0.39669412, -0.18365891, 0.34806964, -0.024830062, -0.06955674, 0.21521395, 0.1201222, -0.21855503, 0.23522708, 0.038058903, -0.019610198, -0.025448406, -0.18122384, 0.26068974, -0.055872105, 0.29595166, 0.11005987, -0.00841942, 0.006325112, -0.0013332894, -0.025598384, 0.17320716, 0.03480282, -0.1504056, -0.07133905, 0.08367911, -0.41866872, 0.062191408, -0.14972427, -0.18488628, -0.37027854, -0.14803104, 0.23587811, 0.33285886, 0.059688937, 0.030515533, 0.16795416, 0.3813925, 0.0755207, 0.15504116, -0.003507182, -0.08249321, 0.24292688, 0.13771294, 0.08057683, 0.016365156, -0.12878628, 0.1833687, 0.17496476, 0.050333332, 0.008188007, 0.32129762, 0.15476923, 0.2052587, -0.060781036, -0.1502798, -0.10187848, 0.11062117, -0.41137248, 0.016532877, 0.107270226, 0.08759128, 0.011842419, -0.17039144, -0.0139911, -0.13244899, -0.23845059, 0.075682834, -0.052250806, -0.30011725, -0.28581655, -0.00055503653, 0.022204043, -0.08598292, -0.24763824, 0.08245162, -0.39607832, 0.008443992, 0.16124122, 0.08812278, 0.0335653, -0.09692297, 0.07613783, 0.033542078, 0.11447116, -0.0069911424, 0.09004892, 0.09898015, 0.14595516, 0.24977732, -0.0018444546, 0.06290809, 0.013354713, -0.10336537, -0.1028908, 0.31109008, -0.110210516, 0.07165067, 0.050161615, -0.11413514}, + expected: true, + }, + } + + t.Run("insert test objects", func(t *testing.T) { + for id, props := range tests { 
+ err := repo.PutObject(context.Background(), &models.Object{Class: className, ID: id}, props.inputVec, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + + t.Run("perform nearVector search by distance", func(t *testing.T) { + results, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: className, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + NearVector: &searchparams.NearVector{ + Distance: 0.1, + }, + AdditionalProperties: additional.Properties{Distance: true}, + }, []string{""}, []models.Vector{searchVector}) + require.Nil(t, err) + require.NotEmpty(t, results) + // ensure that we receive more results than + // the `QueryMaximumResults`, as this should + // only apply to limited vector searches + require.Greater(t, len(results), 1) + + for _, res := range results { + if props, ok := tests[res.ID]; !ok { + t.Fatalf("received unexpected result: %+v", res) + } else { + assert.True(t, props.expected, "result id was not intended to meet threshold %s", res.ID) + } + } + }) + + t.Run("perform nearObject search by distance", func(t *testing.T) { + results, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: className, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + NearObject: &searchparams.NearObject{ + Distance: 0.1, + ID: searchObject.String(), + }, + AdditionalProperties: additional.Properties{Distance: true}, + }, []string{""}, []models.Vector{searchVector}) + require.Nil(t, err) + require.NotEmpty(t, results) + // ensure that we receive more results than + // the `QueryMaximumResults`, as this should + // only apply to limited vector searches + require.Greater(t, len(results), 1) + + for _, res := range results { + if props, ok := tests[res.ID]; !ok { + t.Fatalf("received unexpected result: %+v", res) + } else { + assert.True(t, props.expected, "result id was not intended to meet threshold %s", res.ID) + } + } + }) +} + +func TestVectorSearch_ByCertainty(t 
*testing.T) { + className := "SomeClass" + class := &models.Class{ + Class: className, + Properties: []*models.Property{ + { + DataType: []string{string(schema.DataTypeInt)}, + Name: "int_prop", + }, + }, + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + } + + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + // this is set really low to ensure that search + // by distance is conducted, which executes + // without regard to this value + QueryMaximumResults: 1, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, 
&fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + + t.Run("create required schema", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), class)) + }) + + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + searchVector := []float32{-0.10190568, -0.06259751, 0.05616188, -0.19249836, 0.09714927, -0.1902525, -0.064424865, -0.0387358, 0.17581701, 0.4476738, 0.29261824, 0.12026761, -0.19975126, 0.023600178, 0.17348698, 0.12701738, -0.36018127, -0.12051587, -0.17620522, 0.060741074, -0.064512916, 0.18640806, -0.1529852, 0.08211839, -0.02558465, -0.11369845, 0.0924098, -0.10544433, -0.14728987, -0.041860342, -0.08533595, 0.25886244, 0.2963937, 0.26010615, 0.2111097, 0.029396622, 0.01429563, 0.06410264, -0.119665794, 0.33583277, -0.05802661, 0.023306102, 0.14435922, -0.003951336, -0.13870825, 0.07140894, 0.10469943, -0.059021875, -0.065911904, 0.024216041, -0.26282874, 0.04896568, -0.08291928, -0.12793182, -0.077824734, 0.08843151, 0.31247458, -0.066301286, 0.006904921, -0.08277095, 0.13936226, -0.64392364, -0.19566211, 0.047227614, 0.086121306, -0.20725192, -0.096485816, -0.16436341, -0.06559169, -0.019639932, -0.012729637, 0.08901619, 0.0015896161, -0.24789932, 0.35496348, -0.16272856, -0.01648429, 0.11247674, 0.08099968, 0.13339259, 0.055829972, -0.34662855, 0.068509, 0.13880715, 0.3201848, -0.055557363, 0.22142135, -0.12867308, 0.0037871755, 0.24888979, -0.007443307, 0.08906625, -0.02022331, 0.11510742, -0.2385861, 0.16177008, -0.16214795, -0.28715602, 0.016784908, 0.19386634, -0.07731616, -0.100485384, 0.4100712, 
0.061834496, -0.2325293, -0.026056025, -0.11632323, -0.17040555, -0.081960455, -0.0061040106, -0.05949373, 0.044952348, -0.079565264, 0.024430245, -0.09375341, -0.30249637, 0.115205586, -0.13083287, -0.04264671, -0.089810364, 0.16227561, 0.07318055, -0.10496504, 0.00063501706, -0.04936106, -0.0022282854, 1.0893154, 0.1698662, -0.019563455, -0.011128426, 0.04477475, -0.15656771, -0.056911886, -0.5759019, -0.1881429, 0.17088258, 0.24124439, 0.111288875, -0.0015475494, -0.021278847, -0.08362156, 0.09997524, -0.094385885, -0.1674031, 0.061180864, 0.28517494, -0.016217072, 0.025866214, -0.22854298, -0.17924422, -0.037767246, 0.12252907, -0.31698978, -0.038031228, 0.055408552, 0.1743545, -0.040576655, 0.1293942, -0.56650764, -0.10306195, -0.19548112, -0.245544, -0.018241389, -0.039024632, -0.31659162, 0.1565075, 0.08412337, 0.13177724, -0.13766576, -0.15355161, -0.16960397, -0.012436442, 0.04828157, 0.12566057, -0.35308784, -0.37520224, -0.1265899, -0.13991497, 0.14402144, 0.117542416, -0.20750546, -0.5849919, -0.010469457, -0.19677396, 0.011365964, 0.00666846, -0.083470255, 0.24928358, 0.07026387, 0.19082819, 0.24557637, 0.014292963, 0.14846677, 0.031625308, -0.20398879, 0.19507346, -0.18119761, -0.045725327, -0.042455163, -0.099733196, -0.33636123, -0.28447086, 0.30274838, -0.01603988, -0.0529655, 0.15784146, 0.08746072, -0.1703993, 0.2414512, 0.060322937, -0.00812057, 0.031162385, -0.1764905, 0.22107981, -0.016657066, 0.31948856, 0.07282925, -0.036991462, 0.01266936, -0.009106514, -0.038732465, 0.20973183, 0.033236098, -0.10673938, -0.06880061, 0.115524575, -0.39688373, 0.08749971, -0.21816005, -0.22100002, -0.3716853, -0.14720486, 0.24316181, 0.29673144, 0.020808747, 0.07658521, 0.16310681, 0.38785335, 0.0992224, 0.14177811, 0.025954131, -0.08690783, 0.19653428, 0.09584941, 0.040072605, -0.00038361162, -0.094546966, 0.1910902, 0.13217318, 0.060072783, -0.0655816, 0.2777626, 0.1799169, 0.20187178, -0.0996889, -0.01932122, -0.13133621, 0.057482753, -0.36892185, 
-0.032093313, 0.14607865, 0.12033318, -0.041683596, -0.2048406, -0.041777443, -0.14975598, -0.2526341, 0.12659752, 0.010567178, -0.297333, -0.27522174, 0.06923473, 0.043150593, -0.017045585, -0.2400216, 0.11413547, -0.40081662, -0.0018820907, 0.13800722, 0.085972115, -0.01519989, -0.10491216, 0.09170084, 0.063085504, 0.046743374, -0.014466267, 0.09880224, 0.027706565, 0.09951337, 0.17317492, -0.025654864, 0.14658073, 0.042377427, -0.08402882, -0.12423425, 0.32714987, -0.1527207, 0.106094465, 0.017378228, -0.06302387} + searchObject := strfmt.UUID("fe687bf4-f10f-4c23-948d-0746ea2927b3") + + tests := map[strfmt.UUID]struct { + inputVec []float32 + expected bool + }{ + strfmt.UUID("88460290-03b2-44a3-9adb-9fa3ae11d9e6"): { + inputVec: []float32{-0.11015724, -0.05380307, 0.027512914, -0.16925375, 0.08306809, -0.19312492, -0.08910436, -0.011051652, 0.17981204, 0.40469593, 0.28226805, 0.09381516, -0.18380599, 0.03102771, 0.1645333, 0.1530153, -0.3187937, -0.10800173, -0.18466279, 0.0004336393, -0.0495677, 0.19905856, -0.11614494, 0.08834681, -0.011200292, -0.11969374, 0.12497086, -0.12427251, -0.13395442, -0.0060353535, -0.07504816, 0.23205791, 0.2982508, 0.2517544, 0.176147, -0.036871903, 0.017852835, 0.040007118, -0.118621, 0.3648693, -0.058933854, 0.04004229, 0.11871147, -0.019860389, -0.12701912, 0.106662825, 0.086498804, -0.04303973, -0.0742352, 0.018250324, -0.26544014, 0.029228423, -0.087171465, -0.1282789, -0.06403083, 0.09680911, 0.31433868, -0.081510685, -0.011283603, -0.041624587, 0.16530018, -0.6714878, -0.2436993, 0.03173918, 0.106117725, -0.20803581, -0.10429562, -0.16975354, -0.078582145, -0.0065962705, -0.06840946, 0.094937086, -0.020617036, -0.23795949, 0.34785536, -0.19834635, -0.015064479, 0.11930141, 0.090962164, 0.120560184, 0.054095767, -0.38602966, 0.057141174, 0.12039684, 0.32000408, -0.05146908, 0.20762976, -0.09342379, 0.037577383, 0.23894139, -0.0075003104, 0.104791366, -0.015841056, 0.102840215, -0.20813248, 0.1855997, -0.12594056, 
-0.27132365, -0.0055563124, 0.21954241, -0.10798524, -0.111896284, 0.44049335, 0.049884494, -0.22339955, -0.005374135, -0.120713554, -0.22275059, -0.09146004, 0.017188415, -0.106493734, 0.045247544, -0.07725446, 0.056848228, -0.10294392, -0.2896642, 0.112891, -0.13773362, -0.089911595, -0.13500965, 0.14051703, 0.040092673, -0.13896292, 0.04580957, -0.014300959, 0.03737215, 1.0661443, 0.19767477, -0.07703914, -0.012910904, -0.0037716173, -0.14437087, -0.06938004, -0.5348036, -0.16047458, 0.19416414, 0.21938956, 0.092242256, -0.012630808, -0.021863988, -0.051702406, 0.08780951, -0.0815602, -0.15332024, 0.077632725, 0.25709584, -0.025725808, 0.042116437, -0.22687604, -0.11791685, -0.028626656, 0.16734225, -0.3017483, -0.03236202, 0.02888077, 0.18193199, -0.009032297, 0.14454253, -0.511494, -0.12119192, -0.20757924, -0.2561716, -0.03904554, -0.07348411, -0.28547177, 0.15967208, 0.079396725, 0.14358875, -0.12829632, -0.18175666, -0.15540425, -0.020419862, 0.019070208, 0.12168836, -0.3428434, -0.357543, -0.11218741, -0.12834033, 0.13564876, 0.12768728, -0.1817461, -0.61235875, -0.029409664, -0.19765733, 0.03872163, 0.0074950717, -0.10025679, 0.2872255, 0.033995092, 0.12945211, 0.21831632, 0.04666009, 0.14233032, 0.016767867, -0.2039244, 0.2000191, -0.13099428, -0.020210614, -0.06286195, -0.0948797, -0.34830436, -0.21595824, 0.32722405, -0.024735296, -0.07859145, 0.16975155, 0.08186461, -0.19249061, 0.23405583, 0.04837592, 0.021467948, -0.022215014, -0.14892808, 0.23908867, -0.050126728, 0.2867907, 0.07740656, -0.01714987, -0.0046314416, -0.0048108613, -0.007407311, 0.1807499, 0.049772616, -0.14680666, -0.07335314, 0.09023705, -0.40600133, 0.05522128, -0.20085222, -0.20410904, -0.34319055, -0.10792269, 0.2297779, 0.30397663, 0.05230268, 0.06408224, 0.13797496, 0.3691112, 0.083033495, 0.13695791, -0.015612457, -0.06413475, 0.18117142, 0.12928344, 0.049171276, 0.016104931, -0.102417335, 0.19589683, 0.14380622, 0.0748065, -0.005402455, 0.27243868, 0.14925551, 0.19564849, 
-0.10738364, -0.054175537, -0.10068278, 0.06004795, -0.38755924, -0.01654251, 0.1394104, 0.0968949, 0.004271706, -0.17105898, -0.050423585, -0.15311627, -0.24458972, 0.12665795, -0.022814916, -0.23887472, -0.289588, 0.05521137, 0.041259795, -0.021133862, -0.23674431, 0.08424598, -0.37863016, 0.017239956, 0.13776784, 0.060790475, 0.057887543, -0.08784489, 0.08803934, 0.027996546, 0.085972995, -0.014455558, 0.11668073, 0.03812387, 0.088413864, 0.22228678, -0.015599858, 0.11000236, 0.035271563, -0.08088438, -0.13092226, 0.29378533, -0.12311522, 0.09377676, 0.02948718, -0.09136077}, + expected: true, + }, + strfmt.UUID("c99bc97d-7035-4311-94f3-947dc6471f51"): { + inputVec: []float32{-0.07545987, -0.046643265, 0.044445477, -0.18531442, 0.07922216, -0.19388637, -0.069393866, -0.036144026, 0.1713317, 0.41803706, 0.23576374, 0.073170714, -0.14085358, 0.012535708, 0.17439325, 0.10057567, -0.33506152, -0.06800867, -0.18882714, 0.002687021, -0.03276807, 0.17267752, -0.13951558, 0.071382746, 0.020254405, -0.10178502, 0.13977699, -0.107296936, -0.113307, -0.002506761, -0.092065684, 0.21008658, 0.31157792, 0.19640765, 0.15769793, -0.0050196033, 0.0022481605, 0.015436289, -0.11822955, 0.31494477, -0.07425527, 0.051401984, 0.11648046, -0.00016831602, -0.12758006, 0.06814693, 0.06108981, -0.025454175, -0.018695071, 0.041827776, -0.23480764, 0.06652185, -0.078328855, -0.121668324, -0.04341973, 0.114403985, 0.32614416, -0.07992741, -0.019665314, -0.017408244, 0.12615794, -0.6350545, -0.17056493, 0.07171332, 0.047071394, -0.18428493, -0.09011123, -0.15995751, -0.03345579, -0.014678, -0.038699757, 0.044125225, 0.0042562615, -0.29445595, 0.30460796, -0.13630153, 0.00014055961, 0.08996278, 0.08948901, 0.12164838, 0.079090506, -0.36153567, 0.02817218, 0.11914518, 0.29805067, -0.07431765, 0.16793592, -0.099549234, 0.045226075, 0.22235383, -0.045654725, 0.09233901, -0.004902314, 0.08621588, -0.19723448, 0.19557382, -0.13199815, -0.22924824, -0.015981175, 0.19762704, -0.08940076, 
-0.084909916, 0.43372774, 0.026998578, -0.20827708, 0.037450224, -0.078997016, -0.18065391, -0.071308024, 0.00870316, -0.114981964, 0.017085023, -0.07696264, 0.009330409, -0.097458, -0.25530958, 0.118254915, -0.12516825, -0.008301536, -0.20432962, 0.15235707, 0.012840041, -0.18034773, 0.039270073, -0.03131139, 0.013706253, 0.98688674, 0.18840753, -0.055119563, 0.00836046, 0.019445436, -0.10701598, -0.024610046, -0.50088006, -0.15488546, 0.14209819, 0.1798376, 0.08615982, -0.0119235935, -0.0060070297, -0.08406098, 0.10096481, -0.09077014, -0.15957798, 0.10556352, 0.2100476, -0.036947068, 0.05316554, -0.20480183, -0.14873864, -0.0069070593, 0.16027303, -0.288908, -0.04487129, 0.0705415, 0.11973847, -0.0017247469, 0.14092937, -0.5262047, -0.094283305, -0.19120996, -0.2816572, -0.010916339, -0.07984056, -0.28659204, 0.13706332, 0.07364347, 0.12300072, -0.17554194, -0.16378267, -0.15244205, 0.00075927645, 0.017289847, 0.12072629, -0.33452734, -0.33727616, -0.12780978, -0.09350711, 0.105674624, 0.10770573, -0.17278843, -0.5760599, -0.013741414, -0.15395893, 0.009837732, 0.015417911, -0.11384676, 0.24567491, 0.04905973, 0.10762609, 0.2131752, 0.019281652, 0.11665857, 0.022718405, -0.2234067, 0.23241606, -0.12194457, -0.049972955, -0.012225418, -0.14856412, -0.386102, -0.23018965, 0.28920102, -0.023396742, -0.114672944, 0.12130062, 0.05654803, -0.16194181, 0.24095012, 0.03644393, 0.028024165, -0.008832254, -0.16496961, 0.19496499, -0.035887964, 0.25981775, 0.0970074, 0.0013458093, -0.009548204, 0.040741496, -0.019192837, 0.20718361, -0.004034228, -0.1343262, -0.06990001, 0.09888768, -0.35942966, 0.043895893, -0.19182123, -0.17963983, -0.3222771, -0.10223457, 0.23866613, 0.25855777, 0.04051543, 0.08756274, 0.15683484, 0.37856522, 0.04853359, 0.10198129, -0.0061066896, -0.049892712, 0.17087941, 0.14563805, 0.06984385, 0.0071270005, -0.11838641, 0.18716812, 0.14013803, 0.05242403, 0.034357738, 0.3083466, 0.14742611, 0.17841975, -0.124118194, -0.014102871, -0.052544866, 
0.037493005, -0.33485797, -0.013164912, 0.1066288, 0.11141791, -0.04029921, -0.16429856, -0.032241724, -0.15965424, -0.2430594, 0.13654563, 0.009401224, -0.2045843, -0.28467956, 0.07325551, 0.027996557, -0.033877768, -0.24350801, 0.08329816, -0.35555813, 0.006908567, 0.07227365, 0.03188268, 0.032559503, -0.09180395, 0.05601515, 0.0047281734, 0.06878795, -0.018943194, 0.08251342, 0.042039152, 0.12902294, 0.20526606, -0.014881293, 0.11723917, 0.0115632, -0.09016013, -0.12117223, 0.31020245, -0.111444525, 0.077845715, 0.00046715315, -0.104099475}, + expected: true, + }, + strfmt.UUID("fe687bf4-f10f-4c23-948d-0746ea2927b3"): { + inputVec: []float32{-0.20739016, -0.19551805, 0.06645163, 0.008650202, 0.03700748, -0.04132599, -0.029881354, 0.04684896, 0.096614264, 0.42888844, 0.10003969, 0.026234219, -0.051639702, -0.118660435, 0.14473079, 0.2911885, -0.1180539, -0.16804434, -0.48081538, 0.021702053, 0.12612472, 0.15442817, -0.05836532, 0.074295096, -0.28077397, -0.24297802, 0.047836643, -0.36753318, -0.30482984, 0.09265357, 0.25571078, 0.41130066, 0.46177864, 0.34033778, 0.20721313, -0.37726295, 0.07721501, 0.08009689, 0.00027321206, 0.5168123, -0.15305339, 0.0937765, 0.096195236, -0.21120761, 0.014014921, 0.3133104, 0.20773117, 0.08483507, -0.27784437, -0.17281856, -0.6050923, -0.22439326, -0.16914369, -0.3149047, -0.13828672, 0.16334395, -0.0018224253, -0.024342008, 0.3511251, 0.04979151, 0.34223744, -0.6965703, -0.36211932, -0.27092442, 0.34418032, -0.09667905, 0.13344757, -0.15622364, -0.24129291, 0.06958589, -0.2681816, -0.09497071, -0.08923615, -0.06642436, 0.48688608, -0.33535984, 0.014242731, 0.079838976, 0.32949054, 0.09051045, -0.2653392, -0.47393548, 0.07508276, 0.0062832804, 0.724184, -0.18929236, 0.11718613, 0.049603477, 0.08766128, 0.31040704, 0.04038693, -0.0017023507, -0.18986607, 0.056264438, -0.20978904, -0.107441366, -0.30505633, -0.45781082, -0.11571784, 0.32160303, -0.1347523, -0.08090298, 0.51651996, -0.023250414, -0.18725531, -0.14222279, 
0.009277832, -0.49789724, -0.25156206, 0.0042495225, 0.0038805408, -0.031416763, 0.10277136, 0.14383446, -0.23241928, -0.42357358, 0.027033398, -0.2262604, -0.2685295, -0.14510548, 0.18256307, 0.063297585, 0.027636252, 0.081166506, 0.06726344, 0.1677495, 1.5217289, 0.33152232, -0.2209926, 0.051426213, 0.15640806, -0.30210486, -0.32857975, -0.4170022, -0.028293105, 0.28772062, 0.50510746, 0.09162247, -0.12383193, -0.25066972, -0.1441897, 0.107192926, -0.07404076, 0.0042472635, 0.11014519, 0.22332853, 0.09434378, -0.3278343, 0.041899726, 0.06838457, 0.10983681, 0.11864574, -0.25336757, -0.047530346, -0.027303243, 0.37403497, 0.13420461, 0.14946426, -0.41996637, -0.037703935, -0.47961184, -0.29839846, -0.103934005, -0.12058302, -0.12806267, 0.22814582, 0.3904893, -0.16044962, -0.17479864, -0.33139735, -0.29185295, 0.0653074, 0.042426735, 0.06092335, -0.18776153, -0.52555144, -0.15889317, -0.20644087, 0.2293067, 0.26668283, -0.15607063, -0.696593, -0.08224992, -0.4283747, 0.26883888, -0.031052848, -0.1311875, 0.26636878, 0.16457985, 0.15660451, 0.10629464, 0.17345549, 0.23963387, 0.22997221, -0.111713186, -0.08499592, -0.2274625, 0.19285984, -0.08285016, -0.02692149, -0.3426618, -0.13361897, 0.2870389, -0.12032792, -0.22944619, 0.25588584, 0.24607788, -0.2762531, 0.30983892, 0.011088746, -0.15739818, 0.053215, -0.21660997, 0.033805694, -0.17886437, 0.2979239, 0.2163545, -0.08381542, 0.19666128, -0.28977823, -0.20994817, -0.012160099, 0.057499636, -0.12549455, 0.19303595, -0.14420606, -0.51937664, 0.23400985, -0.27893808, -0.2660984, -0.27870297, -0.32149136, 0.19958079, 0.34468395, 0.18947665, -0.16529581, 0.101419374, 0.30195153, 0.09030288, 0.12496541, 0.02999903, -0.016697621, 0.15314853, 0.27848768, 0.24102053, 0.06933273, 0.08923653, 0.10477832, 0.4389032, 0.15679164, -0.11119637, 0.134823, 0.30230528, 0.20818473, -0.005579584, -0.3474488, -0.44394243, 0.22270252, -0.3668763, 0.07474772, 0.011691334, 0.088187896, 0.23832949, -0.07960201, 0.066471875, 0.034641538, 
-0.39984587, 0.0032980456, -0.28492525, -0.46358657, -0.2148288, -0.107226945, 0.02734428, -0.24686679, -0.123900555, 0.18174778, -0.31248868, 0.13808723, 0.31549984, 0.21521719, 0.13966985, -0.27272752, 0.12091104, 0.14257833, 0.23175247, 0.15639938, 0.40828535, 0.31916845, 0.023645567, 0.20658277, -0.20365283, 0.113746524, 0.13173752, -0.050343305, -0.31581175, 0.09704622, -0.014172505, 0.16924341, 0.30327854, -0.17770194}, + expected: false, + }, + strfmt.UUID("e7bf6c45-de72-493a-b273-5ef198974d61"): { + inputVec: []float32{0.089313604, -0.050221898, 0.18352903, 0.16257699, 0.14520381, 0.17993976, 0.14594483, 0.019256027, -0.15505213, 0.23606326, -0.14456263, 0.2679586, -0.112208664, 0.12997514, 0.0051072896, 0.28151348, -0.10495799, 0.026782967, -0.38603118, 0.16190273, -0.0428943, -0.16265322, -0.17910561, 0.0746288, -0.3117934, -0.15871756, -0.11377734, -0.06822346, -0.13829489, 0.13019162, 0.30741218, 0.16194165, 0.013218932, 0.054517113, 0.12490437, -0.07709048, 0.02556826, -0.21159878, -0.09082174, 0.24629511, 0.05013666, 0.25168124, -0.14423938, -0.0937688, -0.07811525, -0.049346007, 0.3592527, 0.30411252, -0.1168557, 0.18870471, 0.06614835, -0.20099068, -0.084436245, 0.073036775, -0.03448665, -0.11147946, -0.10862863, -0.012393957, 0.18990599, 0.060957544, 0.19518377, -0.027541652, -0.26750082, -0.12780671, 0.09570065, -0.03541132, 0.094820626, -0.13539355, -0.09468136, 0.18476579, -0.20970085, -0.20989786, -0.12084438, -0.04517079, -0.008074663, 0.02824076, 0.114496395, -0.20462593, 0.103516705, -0.101554185, -0.1374868, -0.24884155, -0.08101618, -0.016105993, 0.22608215, -0.007247754, -0.17246912, 0.058247145, -0.041018173, 0.19471274, -0.022576109, 0.032828204, -0.079321206, -0.09259324, 0.041115705, -0.25280195, -0.28517374, -0.19496292, 0.18070905, 0.06384923, -0.004056949, 0.1536253, 0.17861623, -0.033833142, 0.12039968, 0.04458716, 0.08793809, -0.15683243, -0.1087904, 0.1741014, 0.007256374, -0.20265253, 0.034111258, 0.03311363, -0.09449356, 
-0.13161612, -0.026084669, 0.07609202, 0.03452338, 0.08840356, -0.044566724, 0.1507175, 0.089273594, 0.18872644, 0.18333815, -0.023196407, 0.63831943, 0.20309874, 0.10217627, 0.11445079, 0.18965706, -0.16809432, -0.343172, -0.06439529, 0.08362327, 0.32746288, 0.38483366, 0.020372175, -0.25239283, 0.019468365, -0.016367752, 0.016749177, 0.024621855, 0.030529505, 0.20601188, -0.100692995, -0.16414656, -0.23193358, 0.26616478, 0.06166736, 0.14341855, 0.1294041, 0.045133967, 0.0014262896, -0.0194398, 0.040737696, 0.10099013, -0.10838136, -0.28768313, -0.073719576, -0.15836753, -0.10482511, -0.1349642, -0.107005455, 0.01957546, 0.13799994, 0.056444198, -0.38841644, -0.07585945, -0.018703599, -0.19934878, 0.15176265, 0.04133126, 0.063531734, 0.09720055, -0.29999572, 0.04765686, -0.23604262, 0.081500284, 0.056092553, -0.13664724, -0.37729686, 0.031137427, -0.052083906, 0.117984496, -0.14562207, -0.029609507, 0.13725121, 0.090367764, 0.12787215, 0.11026589, 0.25123242, 0.12911159, 0.055398554, 0.0032232201, 0.026706887, 0.14584258, 0.019900957, -0.12197998, -0.087177716, -0.24649806, -0.17869286, 0.07139921, -0.09633085, -0.16027117, 0.23617831, 0.05429949, -0.061085824, 0.040451035, 0.052443117, -0.14255014, 0.15598148, -0.2336374, 0.08394173, -0.34318882, 0.3419207, 0.18282516, -0.03709172, 0.10525048, -0.1871602, -0.22663523, 0.01635051, 0.16996534, -0.18056048, -0.169894, -0.18467705, -0.3641231, 0.060861763, -0.080082566, -0.08888943, 0.11629789, -0.00973362, 0.07452957, 0.25680214, 0.042024083, -0.024963235, 0.1743134, 0.10921186, 0.25191578, 0.028438354, 0.004781374, -0.08364819, 0.051807538, 0.1165724, 0.29184434, -0.21512283, 0.12515399, -0.08803969, 0.41930157, -0.10181762, 0.038189832, 0.085555896, -0.026453126, 0.04717047, 0.12667313, 0.023158737, -0.45877644, 0.18732828, 0.062374037, -0.21956007, -0.04449947, 0.19028638, 0.1359094, 0.26384917, 0.077602044, 0.35136092, 0.069637895, 0.048263475, -0.02498448, -0.09221205, -0.012142404, -0.124592446, 0.14599627, 
-0.050875153, -0.25454503, -0.069588415, -0.29793787, -0.13407284, 0.25388947, 0.35565627, -0.034204755, 0.0024766966, 0.086427726, -0.054318108, 0.063218184, -0.037823644, 0.108287826, 0.14440496, 0.025134278, 0.14978257, -0.03355889, 0.02980915, -0.13764386, 0.4167542, -0.03938922, 0.026970355, 0.24595529, 0.111741625, -0.074567944, -0.057232533}, + expected: false, + }, + strfmt.UUID("0999d109-1d5f-465a-bd8b-e3fbd46f10aa"): { + inputVec: []float32{-0.10486144, -0.07437922, 0.069469325, -0.1438278, 0.07740161, -0.18606456, -0.09991434, -0.020051572, 0.19863395, 0.4347328, 0.297606, 0.07853262, -0.16025662, 0.023596637, 0.16935731, 0.17052403, -0.29870638, -0.10309007, -0.20055692, 0.0027809117, -0.03928043, 0.21178603, -0.13793766, 0.08118157, 0.006693433, -0.13829204, 0.14778963, -0.13180175, -0.21128704, -0.0026104634, -0.076393716, 0.22200249, 0.32417125, 0.26045212, 0.1783609, -0.114116184, 0.0100981165, 0.07233143, -0.15913877, 0.4238603, -0.036907215, 0.0595873, 0.0807002, -0.07637312, -0.12889846, 0.111177936, 0.091114685, -0.018454906, -0.12132672, 0.056664582, -0.30461523, 0.020763714, -0.10992191, -0.14430659, -0.092879646, 0.13615008, 0.33039626, -0.115675874, 0.03607886, -0.027918883, 0.19531779, -0.7211654, -0.23073879, 0.011791817, 0.1315166, -0.22779183, -0.13773227, -0.1814997, -0.09008116, 0.021698939, -0.102921166, 0.090760864, 0.011856942, -0.25561005, 0.40769714, -0.21286584, -0.018059848, 0.13812906, 0.079457305, 0.12631191, 0.0024881593, -0.4282836, 0.0619608, 0.12207897, 0.39083096, -0.009502015, 0.19990632, -0.06503092, 0.0635979, 0.27579078, -0.020699967, 0.068474516, 0.0043831975, 0.10303624, -0.1885405, 0.22989234, -0.15952443, -0.29842895, 0.006752088, 0.22831629, -0.13150804, -0.13695218, 0.5357904, 0.050116863, -0.24064547, -0.01375713, -0.096647836, -0.24984525, -0.10429946, 0.002098812, -0.08113263, 0.05237009, -0.10246039, 0.05234802, -0.13899775, -0.3439524, 0.12522809, -0.18406768, -0.09022853, -0.19954625, 0.15810682, 
0.039185096, -0.13576287, 0.045047805, 0.0035671506, 0.055920787, 1.1730403, 0.24019612, -0.13423051, -0.008052084, -0.00431602, -0.17079304, -0.09064658, -0.58728856, -0.1365065, 0.22919424, 0.22795208, 0.13396585, 0.018962797, -0.0075796233, -0.072394304, 0.10908417, -0.10881145, -0.16565171, 0.10378018, 0.27296618, -0.059810717, 0.03355443, -0.22429268, -0.12499127, -0.0441017, 0.20800696, -0.29992488, -0.003536096, 0.0026575085, 0.2427503, -0.007395092, 0.13233404, -0.5494433, -0.13144702, -0.2899963, -0.27367246, -0.05257514, -0.0939783, -0.267614, 0.16651331, 0.13891254, 0.08047202, -0.14046521, -0.19062972, -0.1433134, 0.0067776316, 0.00207368, 0.12986982, -0.35847133, -0.41852546, -0.15541135, -0.09865207, 0.14805861, 0.17072491, -0.22655731, -0.6473966, -0.007884447, -0.2060257, 0.035390265, 0.02781265, -0.09760371, 0.30535778, 0.047540557, 0.14565119, 0.21733035, 0.06558403, 0.13184759, 0.044231005, -0.22218557, 0.1897204, -0.1596938, 0.017510587, -0.030249557, -0.082377456, -0.39669412, -0.18365891, 0.34806964, -0.024830062, -0.06955674, 0.21521395, 0.1201222, -0.21855503, 0.23522708, 0.038058903, -0.019610198, -0.025448406, -0.18122384, 0.26068974, -0.055872105, 0.29595166, 0.11005987, -0.00841942, 0.006325112, -0.0013332894, -0.025598384, 0.17320716, 0.03480282, -0.1504056, -0.07133905, 0.08367911, -0.41866872, 0.062191408, -0.14972427, -0.18488628, -0.37027854, -0.14803104, 0.23587811, 0.33285886, 0.059688937, 0.030515533, 0.16795416, 0.3813925, 0.0755207, 0.15504116, -0.003507182, -0.08249321, 0.24292688, 0.13771294, 0.08057683, 0.016365156, -0.12878628, 0.1833687, 0.17496476, 0.050333332, 0.008188007, 0.32129762, 0.15476923, 0.2052587, -0.060781036, -0.1502798, -0.10187848, 0.11062117, -0.41137248, 0.016532877, 0.107270226, 0.08759128, 0.011842419, -0.17039144, -0.0139911, -0.13244899, -0.23845059, 0.075682834, -0.052250806, -0.30011725, -0.28581655, -0.00055503653, 0.022204043, -0.08598292, -0.24763824, 0.08245162, -0.39607832, 0.008443992, 
0.16124122, 0.08812278, 0.0335653, -0.09692297, 0.07613783, 0.033542078, 0.11447116, -0.0069911424, 0.09004892, 0.09898015, 0.14595516, 0.24977732, -0.0018444546, 0.06290809, 0.013354713, -0.10336537, -0.1028908, 0.31109008, -0.110210516, 0.07165067, 0.050161615, -0.11413514}, + expected: true, + }, + } + + t.Run("insert test objects", func(t *testing.T) { + for id, props := range tests { + err := repo.PutObject(context.Background(), &models.Object{Class: className, ID: id}, props.inputVec, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + + t.Run("perform nearVector search by distance", func(t *testing.T) { + results, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: className, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + NearVector: &searchparams.NearVector{ + Certainty: 0.9, + }, + AdditionalProperties: additional.Properties{Certainty: true}, + }, []string{""}, []models.Vector{searchVector}) + require.Nil(t, err) + require.NotEmpty(t, results) + // ensure that we receive more results than + // the `QueryMaximumResults`, as this should + // only apply to limited vector searches + require.Greater(t, len(results), 1) + + for _, res := range results { + if props, ok := tests[res.ID]; !ok { + t.Fatalf("received unexpected result: %+v", res) + } else { + assert.True(t, props.expected, "result id was not intended to meet threshold %s", res.ID) + } + } + }) + + t.Run("perform nearObject search by distance", func(t *testing.T) { + results, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: className, + Pagination: &filters.Pagination{Limit: filters.LimitFlagSearchByDist}, + NearObject: &searchparams.NearObject{ + Certainty: 0.9, + ID: searchObject.String(), + }, + AdditionalProperties: additional.Properties{Certainty: true}, + }, []string{""}, []models.Vector{searchVector}) + require.Nil(t, err) + require.NotEmpty(t, results) + // ensure that we receive more results than + // the 
`QueryMaximumResults`, as this should + // only apply to limited vector searches + require.Greater(t, len(results), 1) + + for _, res := range results { + if props, ok := tests[res.ID]; !ok { + t.Fatalf("received unexpected result: %+v", res) + } else { + assert.True(t, props.expected, "result id was not intended to meet threshold %s", res.ID) + } + } + }) +} + +func Test_PutPatchRestart(t *testing.T) { + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + testClass := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Class: "PutPatchRestart", + Properties: []*models.Property{ + { + Name: "description", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + } + + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, 
mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 100, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + defer repo.Shutdown(context.Background()) + require.Nil(t, repo.WaitForStartup(ctx)) + migrator := NewMigrator(repo, logger, "node1") + + require.Nil(t, + migrator.AddClass(ctx, testClass)) + + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{testClass}, + }, + } + + testID := strfmt.UUID("93c31577-922e-4184-87a5-5ac6db12f73c") + testVec := []float32{0.1, 0.2, 0.1, 0.3} + + t.Run("create initial object", func(t *testing.T) { + err = repo.PutObject(ctx, &models.Object{ + ID: testID, + Class: testClass.Class, + Properties: map[string]interface{}{"description": "test object init"}, + }, testVec, nil, nil, nil, 0) + require.Nil(t, err) + }) + + t.Run("repeatedly put with nil vec, patch with vec, and restart", func(t *testing.T) { + for i := 0; i < 10; i++ { + err = repo.PutObject(ctx, &models.Object{ + ID: testID, + Class: testClass.Class, + Properties: map[string]interface{}{ + "description": fmt.Sprintf("test object, put #%d", i+1), + }, + }, nil, nil, nil, nil, 0) + require.Nil(t, err) + + err = repo.Merge(ctx, objects.MergeDocument{ + ID: testID, + Class: testClass.Class, + PrimitiveSchema: map[string]interface{}{ + "description": fmt.Sprintf("test object, patch #%d", i+1), + }, + Vector: testVec, + UpdateTime: 
time.Now().UnixNano() / int64(time.Millisecond), + }, nil, "", 0) + require.Nil(t, err) + + require.Nil(t, repo.Shutdown(ctx)) + require.Nil(t, repo.WaitForStartup(ctx)) + } + }) + + t.Run("assert the final result is correct", func(t *testing.T) { + findByIDFilter := &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName(testClass.Class), + Property: filters.InternalPropID, + }, + Value: &filters.Value{ + Value: testID.String(), + Type: schema.DataTypeText, + }, + }, + } + res, err := repo.ObjectSearch(ctx, 0, 10, findByIDFilter, + nil, additional.Properties{}, "") + require.Nil(t, err) + assert.Len(t, res, 1) + + expectedDescription := "test object, patch #10" + resultDescription := res[0].Schema.(map[string]interface{})["description"] + assert.Equal(t, expectedDescription, resultDescription) + }) +} + +func TestCRUDWithEmptyArrays(t *testing.T) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + class := &models.Class{ + Class: "TestClass", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "textArray", + DataType: schema.DataTypeTextArray.PropString(), + }, + { + Name: "numberArray", + DataType: []string{string(schema.DataTypeNumberArray)}, + }, + { + Name: "boolArray", + DataType: []string{string(schema.DataTypeBooleanArray)}, + }, + }, + } + classRefName := "TestRefClass" + classRef := &models.Class{ + Class: classRefName, + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + } + classNameWithRefs := "TestClassWithRefs" + classWithRefs := &models.Class{ + Class: classNameWithRefs, + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + 
Properties: []*models.Property{ + { + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "refProp", + DataType: []string{classRefName}, + }, + }, + } + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 100, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) 
+ defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + require.Nil(t, + migrator.AddClass(context.Background(), class)) + require.Nil(t, + migrator.AddClass(context.Background(), classRef)) + require.Nil(t, + migrator.AddClass(context.Background(), classWithRefs)) + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class, classRef, classWithRefs}, + }, + } + + t.Run("empty arrays", func(t *testing.T) { + objID := strfmt.UUID("a0b55b05-bc5b-4cc9-b646-1452d1390a62") + obj1 := &models.Object{ + ID: objID, + Class: "TestClass", + Properties: map[string]interface{}{ + "textArray": []string{}, + "numberArray": []float64{}, + "boolArray": []bool{}, + }, + } + obj2 := &models.Object{ + ID: objID, + Class: "TestClass", + Properties: map[string]interface{}{ + "textArray": []string{"value"}, + "numberArray": []float64{0.5}, + "boolArray": []bool{true}, + }, + } + + assert.Nil(t, repo.PutObject(context.Background(), obj1, []float32{1, 3, 5, 0.4}, nil, nil, nil, 0)) + assert.Nil(t, repo.PutObject(context.Background(), obj2, []float32{1, 3, 5, 0.4}, nil, nil, nil, 0)) + + res, err := repo.ObjectByID(context.Background(), objID, nil, additional.Properties{}, "") + require.Nil(t, err) + assert.Equal(t, obj2.Properties, res.ObjectWithVector(false).Properties) + }) + + t.Run("empty references", func(t *testing.T) { + objRefID := strfmt.UUID("a0b55b05-bc5b-4cc9-b646-1452d1390000") + objRef := &models.Object{ + ID: objRefID, + Class: classRefName, + Properties: map[string]interface{}{ + "stringProp": "string prop value", + }, + } + assert.Nil(t, repo.PutObject(context.Background(), objRef, []float32{1, 3, 5, 0.4}, nil, nil, nil, 0)) + + obj1ID := strfmt.UUID("a0b55b05-bc5b-4cc9-b646-1452d1390a62") + obj1 := &models.Object{ + ID: obj1ID, + Class: classNameWithRefs, + Properties: map[string]interface{}{ + "stringProp": "some prop", + // due to the fix 
introduced in https://github.com/weaviate/weaviate/pull/2320, + // MultipleRef's can appear as empty []interface{} when no actual refs are provided for + // an object's reference property. + // + // when obj1 is unmarshalled from storage, refProp will be represented as []interface{}, + // because it is an empty reference property. so when comparing obj1 with the result of + // repo.Object, we need this refProp here to be a []interface{}. Note that this is due + // to our usage of storobj.Object.MarshallerVersion 1, and future MarshallerVersions may + // not have this ambiguous property type limitation. + "refProp": []interface{}{}, + }, + } + obj2ID := strfmt.UUID("a0b55b05-bc5b-4cc9-b646-1452d1390a63") + obj2 := &models.Object{ + ID: obj2ID, + Class: classNameWithRefs, + Properties: map[string]interface{}{ + "stringProp": "some second prop", + "refProp": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI( + crossref.NewLocalhost(classRefName, objRefID).String()), + }, + }, + }, + } + + assert.Nil(t, repo.PutObject(context.Background(), obj1, []float32{1, 3, 5, 0.4}, nil, nil, nil, 0)) + assert.Nil(t, repo.PutObject(context.Background(), obj2, []float32{1, 3, 5, 0.4}, nil, nil, nil, 0)) + + res, err := repo.Object(context.Background(), classNameWithRefs, obj1ID, nil, + additional.Properties{}, nil, "") + require.Nil(t, err) + assert.NotNil(t, res) + assert.Equal(t, obj1.Properties, res.ObjectWithVector(false).Properties) + + res, err = repo.Object(context.Background(), classNameWithRefs, obj2ID, nil, + additional.Properties{}, nil, "") + require.Nil(t, err) + assert.NotNil(t, res) + assert.Equal(t, obj2.Properties, res.ObjectWithVector(false).Properties) + }) +} + +func TestOverwriteObjects(t *testing.T) { + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + class := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Class: "SomeClass", + Properties: []*models.Property{ + { + Name: 
"stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + } + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, + &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + 
t.Run("create the class", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), class)) + }) + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + now := time.Now() + later := now.Add(time.Hour) // time-traveling ;) + stale := &models.Object{ + ID: "981c09f9-67f3-4e6e-a988-c53eaefbd58e", + Class: class.Class, + CreationTimeUnix: now.UnixMilli(), + LastUpdateTimeUnix: now.UnixMilli(), + Properties: map[string]interface{}{ + "oldValue": "how things used to be", + }, + Vector: []float32{1, 2, 3}, + VectorWeights: (map[string]string)(nil), + Additional: models.AdditionalProperties{}, + } + + fresh := &models.Object{ + ID: "981c09f9-67f3-4e6e-a988-c53eaefbd58e", + Class: class.Class, + CreationTimeUnix: now.UnixMilli(), + LastUpdateTimeUnix: later.UnixMilli(), + Properties: map[string]interface{}{ + "oldValue": "how things used to be", + "newValue": "how they are now", + }, + Vector: []float32{4, 5, 6}, + VectorWeights: (map[string]string)(nil), + Additional: models.AdditionalProperties{}, + } + + t.Run("insert stale object", func(t *testing.T) { + err := repo.PutObject(context.Background(), stale, stale.Vector, nil, nil, nil, 0) + require.Nil(t, err) + }) + + t.Run("overwrite with fresh object", func(t *testing.T) { + input := []*objects.VObject{ + { + LatestObject: fresh, + Vector: []float32{4, 5, 6}, + StaleUpdateTime: stale.LastUpdateTimeUnix, + }, + } + + idx := repo.GetIndex(schema.ClassName(class.Class)) + shd, err := idx.determineObjectShard(context.Background(), fresh.ID, "") + require.Nil(t, err) + + received, err := idx.OverwriteObjects(context.Background(), shd, input) + assert.Nil(t, err) + assert.ElementsMatch(t, nil, received) + }) + + t.Run("assert data was overwritten", func(t *testing.T) { + found, err := repo.Object(context.Background(), stale.Class, + stale.ID, nil, additional.Properties{}, nil, "") + 
assert.Nil(t, err) + assert.EqualValues(t, fresh, found.Object()) + }) +} + +func TestIndexDigestObjects(t *testing.T) { + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + class := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Class: "SomeClass", + Properties: []*models.Property{ + { + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + } + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, 
&fakeNodeResolver{}, + &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + t.Run("create the class", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), class)) + }) + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + now := time.Now() + later := now.Add(time.Hour) // time-traveling ;) + obj1 := &models.Object{ + ID: "ae48fda2-866a-4c90-94fc-fce40d5f3767", + Class: class.Class, + CreationTimeUnix: now.UnixMilli(), + LastUpdateTimeUnix: now.UnixMilli(), + Properties: map[string]interface{}{ + "oldValue": "how things used to be", + }, + Vector: []float32{1, 2, 3}, + VectorWeights: (map[string]string)(nil), + Additional: models.AdditionalProperties{}, + } + + obj2 := &models.Object{ + ID: "b71ffac8-6534-4368-9718-5410ca89ce16", + Class: class.Class, + CreationTimeUnix: later.UnixMilli(), + LastUpdateTimeUnix: later.UnixMilli(), + Properties: map[string]interface{}{ + "oldValue": "how things used to be", + }, + Vector: []float32{1, 2, 3}, + VectorWeights: (map[string]string)(nil), + Additional: models.AdditionalProperties{}, + } + + t.Run("insert test objects", func(t *testing.T) { + err := repo.PutObject(context.Background(), obj1, obj1.Vector, nil, nil, nil, 0) + require.Nil(t, err) + err = repo.PutObject(context.Background(), obj2, obj2.Vector, nil, nil, nil, 0) + require.Nil(t, err) + }) + + t.Run("get digest object", func(t *testing.T) { + idx := repo.GetIndex(schema.ClassName(class.Class)) + shd, err := idx.determineObjectShard(context.Background(), obj1.ID, "") + require.Nil(t, err) + + input := []strfmt.UUID{obj1.ID, obj2.ID} + 
+ expected := []types.RepairResponse{ + { + ID: obj1.ID.String(), + UpdateTime: obj1.LastUpdateTimeUnix, + }, + { + ID: obj2.ID.String(), + UpdateTime: obj2.LastUpdateTimeUnix, + }, + } + + res, err := idx.DigestObjects(context.Background(), shd, input) + require.Nil(t, err) + assert.Equal(t, expected, res) + }) +} + +func findID(list []search.Result, id strfmt.UUID) (search.Result, bool) { + for _, item := range list { + if item.ID == id { + return item, true + } + } + + return search.Result{}, false +} + +func ptFloat32(in float32) *float32 { + return &in +} + +func ptFloat64(in float64) *float64 { + return &in +} + +func randomVector(r *rand.Rand, dim int) []float32 { + out := make([]float32, dim) + for i := range out { + out[i] = r.Float32() + } + + return out +} + +func TestIndexDifferentVectorLength(t *testing.T) { + logger, _ := test.NewNullLogger() + class := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Class: "SomeClass", + Properties: []*models.Property{ + { + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + } + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + 
mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: t.TempDir(), + QueryMaximumResults: 10, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, + &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + require.Nil(t, migrator.AddClass(context.Background(), class)) + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + obj1ID := strfmt.UUID("ae48fda2-866a-4c90-94fc-fce40d5f3767") + objNilID := strfmt.UUID("b71ffac9-6534-4368-9718-5410ca89ce16") + + t.Run("Add object with nil vector", func(t *testing.T) { + objNil := &models.Object{ + ID: objNilID, + Class: class.Class, + Vector: nil, + } + require.Nil(t, repo.PutObject(context.Background(), objNil, objNil.Vector, nil, nil, nil, 0)) + found, err := repo.Object(context.Background(), class.Class, objNil.ID, nil, + additional.Properties{}, nil, "") + require.Nil(t, err) + require.Equal(t, found.Vector, []float32{}) + require.Equal(t, objNil.ID, found.ID) + }) + + t.Run("Add object with non-nil vector after nil vector", func(t *testing.T) { + obj1 := &models.Object{ + ID: 
obj1ID, + Class: class.Class, + Vector: []float32{1, 2, 3}, + } + require.Nil(t, repo.PutObject(context.Background(), obj1, obj1.Vector, nil, nil, nil, 0)) + }) + + t.Run("Add object with different vector length", func(t *testing.T) { + obj2 := &models.Object{ + ID: "b71ffac8-6534-4368-9718-5410ca89ce16", + Class: class.Class, + Vector: []float32{1, 2, 3, 4}, + } + require.NotNil(t, repo.PutObject(context.Background(), obj2, obj2.Vector, nil, nil, nil, 0)) + found, err := repo.Object(context.Background(), class.Class, obj2.ID, nil, + additional.Properties{}, nil, "") + require.Nil(t, err) + require.Nil(t, found) + }) + + t.Run("Update object with different vector length", func(t *testing.T) { + err = repo.Merge(context.Background(), objects.MergeDocument{ + ID: obj1ID, + Class: class.Class, + PrimitiveSchema: map[string]interface{}{}, + Vector: []float32{1, 2, 3, 4}, + UpdateTime: time.Now().UnixNano() / int64(time.Millisecond), + }, nil, "", 0) + require.NotNil(t, err) + found, err := repo.Object(context.Background(), class.Class, + obj1ID, nil, additional.Properties{}, nil, "") + require.Nil(t, err) + require.Len(t, found.Vector, 3) + }) + + t.Run("Update nil object with fitting vector", func(t *testing.T) { + err = repo.Merge(context.Background(), objects.MergeDocument{ + ID: objNilID, + Class: class.Class, + PrimitiveSchema: map[string]interface{}{}, + Vector: []float32{1, 2, 3}, + UpdateTime: time.Now().UnixNano() / int64(time.Millisecond), + }, nil, "", 0) + require.Nil(t, err) + found, err := repo.Object(context.Background(), class.Class, objNilID, nil, + additional.Properties{}, nil, "") + require.Nil(t, err) + require.Len(t, found.Vector, 3) + }) + + t.Run("Add nil object after objects with vector", func(t *testing.T) { + obj2Nil := &models.Object{ + ID: "b71ffac8-6534-4368-9718-5410ca89ce16", + Class: class.Class, + Vector: nil, + } + require.Nil(t, repo.PutObject(context.Background(), obj2Nil, obj2Nil.Vector, nil, nil, nil, 0)) + found, err := 
repo.Object(context.Background(), class.Class, obj2Nil.ID, nil, + additional.Properties{}, nil, "") + require.Nil(t, err) + require.Equal(t, obj2Nil.ID, found.ID) + require.Equal(t, []float32{}, found.Vector) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_noindex_property_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_noindex_property_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..899cae8e825336f9ed318c19c36798dd23866d15 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_noindex_property_integration_test.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +func TestCRUD_NoIndexProp(t *testing.T) { + dirName := t.TempDir() + + vFalse := false + logger, _ := 
test.NewNullLogger() + thingclass := &models.Class{ + Class: "ThingClassWithNoIndexProps", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{{ + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, { + Name: "hiddenStringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + }}, + } + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(thingclass, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + 
MemtablesFlushDirtyAfter: 60, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the thing class", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), thingclass)) + + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{thingclass}, + }, + } + }) + + thingID := strfmt.UUID("9f119c4f-80da-4ae5-bfd1-e4b63054125f") + + t.Run("adding a thing", func(t *testing.T) { + thing := &models.Object{ + CreationTimeUnix: 1565612833955, + LastUpdateTimeUnix: 1000001, + ID: thingID, + Class: "ThingClassWithNoIndexProps", + Properties: map[string]interface{}{ + "stringProp": "some value", + "hiddenStringProp": "some hidden value", + }, + } + vector := []float32{1, 3, 5, 0.4} + err := repo.PutObject(context.Background(), thing, vector, nil, nil, nil, 0) + + assert.Nil(t, err) + }) + + t.Run("all props are present when getting by id", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), thingID, search.SelectProperties{}, additional.Properties{}, "") + expectedSchema := map[string]interface{}{ + "stringProp": "some value", + "hiddenStringProp": "some hidden value", + "id": thingID, + } + + require.Nil(t, err) + assert.Equal(t, expectedSchema, res.Schema) + }) + + // Same as above, but with Object() + t.Run("all props are present when getting by id and class", func(t *testing.T) { + res, err := repo.Object(context.Background(), "ThingClassWithNoIndexProps", thingID, + search.SelectProperties{}, additional.Properties{}, nil, "") + expectedSchema := map[string]interface{}{ + "stringProp": 
"some value", + "hiddenStringProp": "some hidden value", + "id": thingID, + } + + require.Nil(t, err) + assert.Equal(t, expectedSchema, res.Schema) + }) + + t.Run("class search on the noindex prop errors", func(t *testing.T) { + _, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "ThingClassWithNoIndexProps", + Pagination: &filters.Pagination{ + Limit: 10, + }, + Filters: buildFilter("hiddenStringProp", "hidden", eq, schema.DataTypeText), + }) + + require.NotNil(t, err) + assert.Contains(t, err.Error(), "Filtering by property 'hiddenStringProp' requires inverted index. "+ + "Is `indexFilterable` option of property 'hiddenStringProp' enabled? "+ + "Set it to `true` or leave empty") + }) + + t.Run("class search on timestamp prop with no timestamp indexing error", func(t *testing.T) { + _, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "ThingClassWithNoIndexProps", + Pagination: &filters.Pagination{ + Limit: 10, + }, + Filters: buildFilter("_creationTimeUnix", "1234567891011", eq, schema.DataTypeText), + }) + + require.NotNil(t, err) + assert.Contains(t, err.Error(), + "Timestamps must be indexed to be filterable! Add `IndexTimestamps: true` to the InvertedIndexConfig in") + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_null_objects_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_null_objects_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..14d9209ac5f36653bbdcc4d26e8da2d67381dcc7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_null_objects_integration_test.go @@ -0,0 +1,257 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "testing" + + schema2 "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/objects" +) + +// Cannot filter for null state without enabling in the InvertedIndexConfig +func TestFilterNullStateError(t *testing.T) { + class := createClassWithEverything(false, false) + migrator, repo, schemaGetter := createRepo(t) + defer repo.Shutdown(context.Background()) + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + nilFilter := &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorIsNull, + On: &filters.Path{ + Class: schema.ClassName(carClass.Class), + Property: schema.PropertyName(class.Properties[0].Name), + }, + Value: &filters.Value{ + Value: true, + Type: schema.DataTypeBoolean, + }, + }, + } + + params := dto.GetParams{ + ClassName: class.Class, + Pagination: &filters.Pagination{Limit: 5}, + Filters: nilFilter, + } + _, 
err = repo.Search(context.Background(), params) + require.NotNil(t, err) +} + +func TestNullArrayClass(t *testing.T) { + arrayClass := createClassWithEverything(true, false) + + names := []string{"elements", "batches"} + for _, name := range names { + t.Run("add nil object via "+name, func(t *testing.T) { + migrator, repo, schemaGetter := createRepo(t) + defer repo.Shutdown(context.Background()) + err := migrator.AddClass(context.Background(), arrayClass) + require.Nil(t, err) + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{arrayClass}, + }, + } + + ObjectUuid1 := uuid.New() + arrayObjNil := &models.Object{ + ID: strfmt.UUID(ObjectUuid1.String()), + Class: "EverythingClass", + Properties: map[string]interface{}{ + "strings": nil, + "ints": nil, + "datesAsStrings": nil, + "numbers": nil, + "booleans": nil, + "texts": nil, + "number": nil, + "boolean": nil, + "int": nil, + "string": nil, + "text": nil, + "phoneNumber": nil, + "phoneNumbers": nil, + }, + } + + ObjectUuid2 := uuid.New() + arrayObjEmpty := &models.Object{ + ID: strfmt.UUID(ObjectUuid2.String()), + Class: "EverythingClass", + Properties: map[string]interface{}{}, + } + + if name == names[0] { + assert.Nil(t, repo.PutObject(context.Background(), arrayObjNil, []float32{1}, nil, nil, nil, 0)) + assert.Nil(t, repo.PutObject(context.Background(), arrayObjEmpty, []float32{1}, nil, nil, nil, 0)) + + } else { + batch := make([]objects.BatchObject, 2) + batch[0] = objects.BatchObject{Object: arrayObjNil, UUID: arrayObjNil.ID} + batch[1] = objects.BatchObject{Object: arrayObjEmpty, UUID: arrayObjEmpty.ID} + _, err := repo.BatchPutObjects(context.Background(), batch, nil, 0) + assert.Nil(t, err) + } + + item1, err := repo.ObjectByID(context.Background(), arrayObjNil.ID, nil, additional.Properties{}, "") + assert.Nil(t, err) + item2, err := repo.ObjectByID(context.Background(), arrayObjEmpty.ID, nil, 
additional.Properties{}, "") + assert.Nil(t, err) + + item1Schema := item1.Schema.(map[string]interface{}) + item2Schema := item2.Schema.(map[string]interface{}) + delete(item1Schema, "id") + delete(item2Schema, "id") + assert.Equal(t, item1Schema, item2Schema) + }) + } +} + +func createRepo(t *testing.T) (*Migrator, *DB, *fakeSchemaGetter) { + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + logger, _ := test.NewNullLogger() + dirName := t.TempDir() + mockSchemaReader := schema2.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, 
&fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + return NewMigrator(repo, logger, "node1"), repo, schemaGetter +} + +func createClassWithEverything(IndexNullState bool, IndexPropertyLength bool) *models.Class { + return &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: &models.InvertedIndexConfig{ + CleanupIntervalSeconds: 60, + Stopwords: &models.StopwordConfig{ + Preset: "none", + }, + IndexNullState: IndexNullState, + IndexPropertyLength: IndexPropertyLength, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + Class: "EverythingClass", + Properties: []*models.Property{ + { + Name: "strings", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "texts", + DataType: []string{"text[]"}, + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "numbers", + DataType: []string{"number[]"}, + }, + { + Name: "ints", + DataType: []string{"int[]"}, + }, + { + Name: "booleans", + DataType: []string{"boolean[]"}, + }, + { + Name: "datesAsStrings", + DataType: []string{"date[]"}, + }, + { + Name: "number", + DataType: []string{"number"}, + }, + { + Name: "bool", + DataType: []string{"boolean"}, + }, + { + Name: "int", + DataType: []string{"int"}, + }, + { + Name: "string", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "text", + DataType: []string{"text"}, + }, + { + Name: "phoneNumber", + DataType: []string{"phoneNumber"}, + }, + { + Name: "phoneNumbers", + DataType: []string{"phoneNumber[]"}, + }, + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_references_integration_test.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_references_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..580e03c64440824416c906fa9dae1e820923579c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_references_integration_test.go @@ -0,0 +1,753 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "log" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +func TestNestedReferences(t *testing.T) { + dirName := t.TempDir() + + refSchema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "Planet", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + { + Class: "Continent", + 
VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "onPlanet", + DataType: []string{"Planet"}, + }, + }, + }, + { + Class: "Country", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "onContinent", + DataType: []string{"Continent"}, + }, + }, + }, + { + Class: "City", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "inCountry", + DataType: []string{"Country"}, + }, + }, + }, + { + Class: "Place", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "inCity", + DataType: []string{"City"}, + }, + }, + }, + }, + }, + } + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + 
mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + + t.Run("adding all classes to the schema", func(t *testing.T) { + for _, class := range refSchema.Objects.Classes { + t.Run(fmt.Sprintf("add %s", class.Class), func(t *testing.T) { + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + }) + } + }) + + // update schema getter so it's in sync with class + schemaGetter.schema = refSchema + + t.Run("importing some thing objects with references", func(t *testing.T) { + objects := []models.Object{ + { + Class: "Planet", + Properties: map[string]interface{}{ + "name": "Earth", + }, + ID: "32c69af9-cbbe-4ec9-bf6c-365cd6c22fdf", + CreationTimeUnix: 1566464889, + }, + { + Class: "Continent", + Properties: map[string]interface{}{ 
+ "name": "North America", + "onPlanet": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/32c69af9-cbbe-4ec9-bf6c-365cd6c22fdf", + }, + }, + }, + ID: "4aad8154-e7f3-45b8-81a6-725171419e55", + CreationTimeUnix: 1566464892, + }, + { + Class: "Country", + Properties: map[string]interface{}{ + "name": "USA", + "onContinent": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/4aad8154-e7f3-45b8-81a6-725171419e55", + }, + }, + }, + ID: "18c80a16-346a-477d-849d-9d92e5040ac9", + CreationTimeUnix: 1566464896, + }, + { + Class: "City", + Properties: map[string]interface{}{ + "name": "San Francisco", + "inCountry": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/18c80a16-346a-477d-849d-9d92e5040ac9", + }, + }, + }, + ID: "2297e094-6218-43d4-85b1-3d20af752f23", + CreationTimeUnix: 1566464899, + }, + { + Class: "Place", + Properties: map[string]interface{}{ + "name": "Tim Apple's Fruit Bar", + "inCity": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/2297e094-6218-43d4-85b1-3d20af752f23", + }, + }, + }, + ID: "4ef47fb0-3cf5-44fc-b378-9e217dff13ac", + CreationTimeUnix: 1566464904, + }, + } + + for _, thing := range objects { + t.Run(fmt.Sprintf("add %s", thing.ID), func(t *testing.T) { + err := repo.PutObject(context.Background(), &thing, []float32{1, 2, 3, 4, 5, 6, 7}, nil, nil, nil, 0) + require.Nil(t, err) + }) + } + }) + + t.Run("fully resolving the place", func(t *testing.T) { + expectedSchema := map[string]interface{}{ + "inCity": []interface{}{ + search.LocalRef{ + Class: "City", + Fields: map[string]interface{}{ + "inCountry": []interface{}{ + search.LocalRef{ + Class: "Country", + Fields: map[string]interface{}{ + "onContinent": []interface{}{ + search.LocalRef{ + Class: "Continent", + Fields: map[string]interface{}{ + "onPlanet": []interface{}{ + search.LocalRef{ + Class: "Planet", + Fields: map[string]interface{}{ + "name": "Earth", + "id": 
strfmt.UUID("32c69af9-cbbe-4ec9-bf6c-365cd6c22fdf"), + }, + }, + }, + "name": "North America", + "id": strfmt.UUID("4aad8154-e7f3-45b8-81a6-725171419e55"), + }, + }, + }, + "name": "USA", + "id": strfmt.UUID("18c80a16-346a-477d-849d-9d92e5040ac9"), + }, + }, + }, + "name": "San Francisco", + "id": strfmt.UUID("2297e094-6218-43d4-85b1-3d20af752f23"), + }, + }, + }, + "name": "Tim Apple's Fruit Bar", + "id": strfmt.UUID("4ef47fb0-3cf5-44fc-b378-9e217dff13ac"), + } + + res, err := repo.ObjectByID(context.Background(), "4ef47fb0-3cf5-44fc-b378-9e217dff13ac", fullyNestedSelectProperties(), additional.Properties{}, "") + require.Nil(t, err) + assert.Equal(t, expectedSchema, res.Schema) + }) + + t.Run("fully resolving the place with vectors", func(t *testing.T) { + expectedSchema := map[string]interface{}{ + "inCity": []interface{}{ + search.LocalRef{ + Class: "City", + Fields: map[string]interface{}{ + "inCountry": []interface{}{ + search.LocalRef{ + Class: "Country", + Fields: map[string]interface{}{ + "onContinent": []interface{}{ + search.LocalRef{ + Class: "Continent", + Fields: map[string]interface{}{ + "onPlanet": []interface{}{ + search.LocalRef{ + Class: "Planet", + Fields: map[string]interface{}{ + "name": "Earth", + "id": strfmt.UUID("32c69af9-cbbe-4ec9-bf6c-365cd6c22fdf"), + "vector": []float32{1, 2, 3, 4, 5, 6, 7}, + }, + }, + }, + "name": "North America", + "id": strfmt.UUID("4aad8154-e7f3-45b8-81a6-725171419e55"), + "vector": []float32{1, 2, 3, 4, 5, 6, 7}, + }, + }, + }, + "name": "USA", + "id": strfmt.UUID("18c80a16-346a-477d-849d-9d92e5040ac9"), + "vector": []float32{1, 2, 3, 4, 5, 6, 7}, + }, + }, + }, + "name": "San Francisco", + "id": strfmt.UUID("2297e094-6218-43d4-85b1-3d20af752f23"), + "vector": []float32{1, 2, 3, 4, 5, 6, 7}, + }, + }, + }, + "name": "Tim Apple's Fruit Bar", + "id": strfmt.UUID("4ef47fb0-3cf5-44fc-b378-9e217dff13ac"), + } + + res, err := repo.ObjectByID(context.Background(), "4ef47fb0-3cf5-44fc-b378-9e217dff13ac", 
fullyNestedSelectPropertiesWithVector(), additional.Properties{}, "") + require.Nil(t, err) + assert.Equal(t, expectedSchema, res.Schema) + }) + + t.Run("partially resolving the place", func(t *testing.T) { + expectedSchema := map[string]interface{}{ + "inCity": []interface{}{ + search.LocalRef{ + Class: "City", + Fields: map[string]interface{}{ + "name": "San Francisco", + "id": strfmt.UUID("2297e094-6218-43d4-85b1-3d20af752f23"), + // why is inCountry present here? We didn't specify it our select + // properties. Note it is "inCountry" with a lowercase letter + // (meaning unresolved) whereas "inCountry" would mean it was + // resolved. In GraphQL this property would simply be hidden (as + // the GQL is unaware of unresolved properties) + // However, for caching and other queries it is helpful that this + // info is still present, the important thing is that we're + // avoiding the costly resolving of it, if we don't need it. + "inCountry": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/18c80a16-346a-477d-849d-9d92e5040ac9", + }, + }, + }, + }, + }, + "name": "Tim Apple's Fruit Bar", + "id": strfmt.UUID("4ef47fb0-3cf5-44fc-b378-9e217dff13ac"), + } + + res, err := repo.ObjectByID(context.Background(), "4ef47fb0-3cf5-44fc-b378-9e217dff13ac", partiallyNestedSelectProperties(), additional.Properties{}, "") + require.Nil(t, err) + assert.Equal(t, expectedSchema, res.Schema) + }) + + t.Run("resolving without any refs", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), "4ef47fb0-3cf5-44fc-b378-9e217dff13ac", search.SelectProperties{}, additional.Properties{}, "") + + expectedSchema := map[string]interface{}{ + "id": strfmt.UUID("4ef47fb0-3cf5-44fc-b378-9e217dff13ac"), + "inCity": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/2297e094-6218-43d4-85b1-3d20af752f23", + }, + }, + "name": "Tim Apple's Fruit Bar", + } + + require.Nil(t, err) + + assert.Equal(t, expectedSchema, res.Schema, "does not 
contain any resolved refs") + }) + + t.Run("adding a new place to verify idnexing is constantly happening in the background", func(t *testing.T) { + newPlace := models.Object{ + Class: "Place", + Properties: map[string]interface{}{ + "name": "John Oliver's Avocados", + "inCity": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/2297e094-6218-43d4-85b1-3d20af752f23", + }, + }, + }, + ID: "0f02d525-902d-4dc0-8052-647cb420c1a6", + CreationTimeUnix: 1566464912, + } + + err := repo.PutObject(context.Background(), &newPlace, []float32{1, 2, 3, 4, 5, 6, 7}, nil, nil, nil, 0) + require.Nil(t, err) + }) +} + +func fullyNestedSelectProperties() search.SelectProperties { + return search.SelectProperties{ + search.SelectProperty{ + Name: "inCity", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "City", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "inCountry", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "Country", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "onContinent", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "Continent", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "onPlanet", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "Planet", + RefProperties: nil, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func fullyNestedSelectPropertiesWithVector() search.SelectProperties { + return search.SelectProperties{ + search.SelectProperty{ + Name: "inCity", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "City", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "inCountry", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "Country", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "onContinent", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + 
ClassName: "Continent", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "onPlanet", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "Planet", + RefProperties: nil, + AdditionalProperties: additional.Properties{ + Vector: true, + }, + }, + }, + }, + }, + AdditionalProperties: additional.Properties{ + Vector: true, + }, + }, + }, + }, + }, + AdditionalProperties: additional.Properties{ + Vector: true, + }, + }, + }, + }, + }, + AdditionalProperties: additional.Properties{ + Vector: true, + }, + }, + }, + }, + } +} + +func partiallyNestedSelectProperties() search.SelectProperties { + return search.SelectProperties{ + search.SelectProperty{ + Name: "inCity", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "City", + RefProperties: search.SelectProperties{}, + }, + }, + }, + } +} + +func getDimensionsFromRepo(ctx context.Context, repo *DB, className string) int { + if !repo.config.TrackVectorDimensions { + log.Printf("Vector dimensions tracking is disabled, returning 0") + return 0 + } + index := repo.GetIndex(schema.ClassName(className)) + sum := 0 + index.ForEachShard(func(name string, shard ShardLike) error { + dim, err := shard.Dimensions(ctx, "") + if err != nil { + return err + } + sum += dim + return nil + }) + return sum +} + +func GetQuantizedDimensionsFromRepo(ctx context.Context, repo *DB, className string, segments int) int { + if !repo.config.TrackVectorDimensions { + log.Printf("Vector dimensions tracking is disabled, returning 0") + return 0 + } + index := repo.GetIndex(schema.ClassName(className)) + sum := 0 + index.ForEachShard(func(name string, shard ShardLike) error { + sum += shard.QuantizedDimensions(ctx, "", segments) + return nil + }) + return sum +} + +func Test_AddingReferenceOneByOne(t *testing.T) { + dirName := t.TempDir() + + sch := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "AddingReferencesTestTarget", + VectorIndexConfig: 
enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + { + Class: "AddingReferencesTestSource", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "toTarget", + DataType: []string{"AddingReferencesTestTarget"}, + }, + }, + }, + }, + }, + } + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + 
mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + + t.Run("add required classes", func(t *testing.T) { + for _, class := range sch.Objects.Classes { + t.Run(fmt.Sprintf("add %s", class.Class), func(t *testing.T) { + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + }) + } + }) + + schemaGetter.schema = sch + targetID := strfmt.UUID("a4a92239-e748-4e55-bbbd-f606926619a7") + target2ID := strfmt.UUID("325084e7-4faa-43a5-b2b1-56e207be169a") + sourceID := strfmt.UUID("0826c61b-85c1-44ac-aebb-cfd07ace6a57") + + t.Run("add objects", func(t *testing.T) { + err := repo.PutObject(context.Background(), &models.Object{ + ID: sourceID, + Class: "AddingReferencesTestSource", + Properties: map[string]interface{}{ + "name": "source item", + }, + }, []float32{0.5}, nil, nil, nil, 0) + require.Nil(t, err) + + err = repo.PutObject(context.Background(), &models.Object{ + ID: targetID, + Class: "AddingReferencesTestTarget", + Properties: map[string]interface{}{ + "name": "target item", + }, + }, []float32{0.5}, nil, nil, nil, 0) + require.Nil(t, err) + + err = repo.PutObject(context.Background(), &models.Object{ + ID: target2ID, + Class: "AddingReferencesTestTarget", + Properties: map[string]interface{}{ + "name": "another target item", + }, + }, []float32{0.5}, nil, nil, nil, 0) + require.Nil(t, err) + }) + + t.Run("add reference between them", func(t *testing.T) { + // Get 
dimensions before adding reference + sourceShardDimension := getDimensionsFromRepo(context.Background(), repo, "AddingReferencesTestSource") + targetShardDimension := getDimensionsFromRepo(context.Background(), repo, "AddingReferencesTestTarget") + + source := crossref.NewSource("AddingReferencesTestSource", "toTarget", sourceID) + target := crossref.New("localhost", "", targetID) + + err := repo.AddReference(context.Background(), source, target, nil, "", 0) + assert.Nil(t, err) + + // Check dimensions after adding reference + sourceDimensionAfter := getDimensionsFromRepo(context.Background(), repo, "AddingReferencesTestSource") + targetDimensionAfter := getDimensionsFromRepo(context.Background(), repo, "AddingReferencesTestTarget") + + require.Equalf(t, sourceShardDimension, sourceDimensionAfter, "dimensions of source should not change") + require.Equalf(t, targetShardDimension, targetDimensionAfter, "dimensions of target should not change") + }) + + t.Run("check reference was added", func(t *testing.T) { + source, err := repo.ObjectByID(context.Background(), sourceID, nil, additional.Properties{}, "") + require.Nil(t, err) + require.NotNil(t, source) + require.NotNil(t, source.Object()) + require.NotNil(t, source.Object().Properties) + + refs := source.Object().Properties.(map[string]interface{})["toTarget"] + refsSlice, ok := refs.(models.MultipleRef) + require.True(t, ok, + fmt.Sprintf("toTarget must be models.MultipleRef, but got %#v", refs)) + + foundBeacons := []string{} + for _, ref := range refsSlice { + foundBeacons = append(foundBeacons, ref.Beacon.String()) + } + expectedBeacons := []string{ + fmt.Sprintf("weaviate://localhost/%s", targetID), + } + + assert.ElementsMatch(t, foundBeacons, expectedBeacons) + }) + + t.Run("reference a second target", func(t *testing.T) { + source := crossref.NewSource("AddingReferencesTestSource", "toTarget", sourceID) + target := crossref.New("localhost", "", target2ID) + + err := repo.AddReference(context.Background(), 
source, target, nil, "", 0) + assert.Nil(t, err) + }) + + t.Run("check both references are now present", func(t *testing.T) { + source, err := repo.ObjectByID(context.Background(), sourceID, nil, additional.Properties{}, "") + require.Nil(t, err) + require.NotNil(t, source) + require.NotNil(t, source.Object()) + require.NotNil(t, source.Object().Properties) + + refs := source.Object().Properties.(map[string]interface{})["toTarget"] + refsSlice, ok := refs.(models.MultipleRef) + require.True(t, ok, + fmt.Sprintf("toTarget must be models.MultipleRef, but got %#v", refs)) + + foundBeacons := []string{} + for _, ref := range refsSlice { + foundBeacons = append(foundBeacons, ref.Beacon.String()) + } + expectedBeacons := []string{ + fmt.Sprintf("weaviate://localhost/%s", targetID), + fmt.Sprintf("weaviate://localhost/%s", target2ID), + } + + assert.ElementsMatch(t, foundBeacons, expectedBeacons) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_references_multiple_types_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_references_multiple_types_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..41168f67d3304e695cfdc43c235ca2b9d99486a7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_references_multiple_types_integration_test.go @@ -0,0 +1,692 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +func TestMultipleCrossRefTypes(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, 
mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + + t.Run("adding all classes to the schema", func(t *testing.T) { + for _, class := range parkingGaragesSchema().Objects.Classes { + t.Run(fmt.Sprintf("add %s", class.Class), func(t *testing.T) { + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + }) + } + }) + + // update schema getter so it's in sync with class + schemaGetter.schema = parkingGaragesSchema() + + t.Run("importing with various combinations of props", func(t *testing.T) { + objects := []models.Object{ + { + Class: "MultiRefParkingGarage", + Properties: map[string]interface{}{ + "name": "Luxury Parking Garage", + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(48.864716), + Longitude: ptFloat32(2.349014), + }, + }, + ID: "a7e10b55-1ac4-464f-80df-82508eea1951", + CreationTimeUnix: 1566469890, + }, + { + Class: "MultiRefParkingGarage", + Properties: map[string]interface{}{ + "name": "Crappy Parking Garage", + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(42.331429), + Longitude: ptFloat32(-83.045753), + }, + }, + ID: "ba2232cf-bb0e-413d-b986-6aa996d34d2e", + CreationTimeUnix: 1566469892, + }, + { + Class: "MultiRefParkingLot", + Properties: map[string]interface{}{ + "name": "Fancy 
Parking Lot", + }, + ID: "1023967b-9512-475b-8ef9-673a110b695d", + CreationTimeUnix: 1566469894, + }, + { + Class: "MultiRefParkingLot", + Properties: map[string]interface{}{ + "name": "The worst parking lot youve ever seen", + }, + ID: "901859d8-69bf-444c-bf43-498963d798d2", + CreationTimeUnix: 1566469897, + }, + { + Class: "MultiRefCar", + Properties: map[string]interface{}{ + "name": "Car which is parked no where", + }, + ID: "329c306b-c912-4ec7-9b1d-55e5e0ca8dea", + CreationTimeUnix: 1566469899, + }, + { + Class: "MultiRefCar", + Properties: map[string]interface{}{ + "name": "Car which is parked in a garage", + "parkedAt": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/a7e10b55-1ac4-464f-80df-82508eea1951", + }, + }, + }, + ID: "fe3ca25d-8734-4ede-9a81-bc1ed8c3ea43", + CreationTimeUnix: 1566469902, + }, + { + Class: "MultiRefCar", + Properties: map[string]interface{}{ + "name": "Car which is parked in a lot", + "parkedAt": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/1023967b-9512-475b-8ef9-673a110b695d", + }, + }, + }, + ID: "21ab5130-627a-4268-baef-1a516bd6cad4", + CreationTimeUnix: 1566469906, + }, + { + Class: "MultiRefCar", + Properties: map[string]interface{}{ + "name": "Car which is parked in two places at the same time (magic!)", + "parkedAt": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/a7e10b55-1ac4-464f-80df-82508eea1951", + }, + &models.SingleRef{ + Beacon: "weaviate://localhost/1023967b-9512-475b-8ef9-673a110b695d", + }, + }, + }, + ID: "533673a7-2a5c-4e1c-b35d-a3809deabace", + CreationTimeUnix: 1566469909, + }, + { + Class: "MultiRefDriver", + Properties: map[string]interface{}{ + "name": "Johny Drivemuch", + "drives": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/533673a7-2a5c-4e1c-b35d-a3809deabace", + }, + }, + }, + ID: "9653ab38-c16b-4561-80df-7a7e19300dd0", + CreationTimeUnix: 1566469912, + }, + { + Class: "MultiRefPerson", + 
Properties: map[string]interface{}{ + "name": "Jane Doughnut", + "friendsWith": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/9653ab38-c16b-4561-80df-7a7e19300dd0", + }, + }, + }, + ID: "91ad23a3-07ba-4d4c-9836-76c57094f734", + CreationTimeUnix: 1566469915, + }, + { + Class: "MultiRefSociety", + Properties: map[string]interface{}{ + "name": "Cool People", + "hasMembers": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/91ad23a3-07ba-4d4c-9836-76c57094f734", + }, + }, + }, + ID: "5cd9afa6-f3df-4f57-a204-840d6b256dba", + CreationTimeUnix: 1566469918, + }, + } + + for _, thing := range objects { + t.Run(fmt.Sprintf("add %s", thing.ID), func(t *testing.T) { + err := repo.PutObject(context.Background(), &thing, []float32{1, 2, 3, 4, 5, 6, 7}, nil, nil, nil, 0) + require.Nil(t, err) + }) + } + }) + + t.Run("car with no refs", func(t *testing.T) { + var id strfmt.UUID = "329c306b-c912-4ec7-9b1d-55e5e0ca8dea" + expectedSchema := map[string]interface{}{ + "name": "Car which is parked no where", + "id": id, + } + + t.Run("asking for no refs", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, nil, additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchema, res.Schema) + }) + + t.Run("asking for refs of type garage", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtGarage(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchema, res.Schema) + }) + + t.Run("asking for refs of type lot", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtLot(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchema, res.Schema) + }) + + t.Run("asking for refs of both types", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtEither(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchema, 
res.Schema) + }) + }) + + t.Run("car with single ref to garage", func(t *testing.T) { + var id strfmt.UUID = "fe3ca25d-8734-4ede-9a81-bc1ed8c3ea43" + expectedSchemaUnresolved := map[string]interface{}{ + "name": "Car which is parked in a garage", + "id": id, + // ref is present, but unresolved, therefore the lowercase letter + "parkedAt": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/a7e10b55-1ac4-464f-80df-82508eea1951", + }, + }, + } + + getExpectedSchema := func(withVector bool) map[string]interface{} { + fields := map[string]interface{}{ + "name": "Luxury Parking Garage", + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(48.864716), + Longitude: ptFloat32(2.349014), + }, + "id": strfmt.UUID("a7e10b55-1ac4-464f-80df-82508eea1951"), + } + if withVector { + fields["vector"] = []float32{1, 2, 3, 4, 5, 6, 7} + } + return map[string]interface{}{ + "name": "Car which is parked in a garage", + "id": id, + "parkedAt": []interface{}{ + search.LocalRef{ + Class: "MultiRefParkingGarage", + Fields: fields, + }, + }, + } + } + + expectedSchemaWithRefs := getExpectedSchema(false) + expectedSchemaWithRefsWithVector := getExpectedSchema(true) + + t.Run("asking for no refs", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, nil, additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaUnresolved, res.Schema) + }) + + t.Run("asking for refs of type garage", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtGarage(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaWithRefs, res.Schema) + }) + + t.Run("asking for refs of type garage with vector", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtGarageWithVector(true), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaWithRefsWithVector, res.Schema) + }) + + t.Run("asking for refs of type lot", func(t 
*testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtLot(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaUnresolved, res.Schema) + }) + + t.Run("asking for refs of both types", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtEither(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaWithRefs, res.Schema) + }) + }) + + t.Run("car with single ref to lot", func(t *testing.T) { + var id strfmt.UUID = "21ab5130-627a-4268-baef-1a516bd6cad4" + expectedSchemaUnresolved := map[string]interface{}{ + "name": "Car which is parked in a lot", + "id": id, + // ref is present, but unresolved, therefore the lowercase letter + "parkedAt": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/1023967b-9512-475b-8ef9-673a110b695d", + }, + }, + } + + getSchemaWithRefs := func(withVector bool) map[string]interface{} { + fields := map[string]interface{}{ + "name": "Fancy Parking Lot", + "id": strfmt.UUID("1023967b-9512-475b-8ef9-673a110b695d"), + } + if withVector { + fields["vector"] = []float32{1, 2, 3, 4, 5, 6, 7} + } + return map[string]interface{}{ + "name": "Car which is parked in a lot", + "id": id, + "parkedAt": []interface{}{ + search.LocalRef{ + Class: "MultiRefParkingLot", + Fields: fields, + }, + }, + } + } + + expectedSchemaWithRefs := getSchemaWithRefs(false) + expectedSchemaWithRefsWithVector := getSchemaWithRefs(true) + + t.Run("asking for no refs", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, nil, additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaUnresolved, res.Schema) + }) + + t.Run("asking for refs of type garage", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtGarage(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaUnresolved, res.Schema) + }) + + t.Run("asking for refs 
of type lot", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtLot(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaWithRefs, res.Schema) + }) + + t.Run("asking for refs with vector of type lot", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtLotWithVector(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaWithRefsWithVector, res.Schema) + }) + + t.Run("asking for refs of both types", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtEither(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaWithRefs, res.Schema) + }) + }) + + t.Run("car with refs to both", func(t *testing.T) { + var id strfmt.UUID = "533673a7-2a5c-4e1c-b35d-a3809deabace" + expectedSchemaUnresolved := map[string]interface{}{ + "name": "Car which is parked in two places at the same time (magic!)", + "id": id, + // ref is present, but unresolved, therefore the lowercase letter + "parkedAt": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/a7e10b55-1ac4-464f-80df-82508eea1951", + }, + &models.SingleRef{ + Beacon: "weaviate://localhost/1023967b-9512-475b-8ef9-673a110b695d", + }, + }, + } + getExpectedSchemaWithLotRef := func(withVector bool) map[string]interface{} { + fields := map[string]interface{}{ + "name": "Fancy Parking Lot", + "id": strfmt.UUID("1023967b-9512-475b-8ef9-673a110b695d"), + } + if withVector { + fields["vector"] = []float32{1, 2, 3, 4, 5, 6, 7} + } + return map[string]interface{}{ + "name": "Car which is parked in two places at the same time (magic!)", + "id": id, + "parkedAt": []interface{}{ + search.LocalRef{ + Class: "MultiRefParkingLot", + Fields: fields, + }, + }, + } + } + expectedSchemaWithLotRef := getExpectedSchemaWithLotRef(false) + expectedSchemaWithLotRefWithVector := getExpectedSchemaWithLotRef(true) + 
getExpectedSchemaWithGarageRef := func(withVector bool) map[string]interface{} { + fields := map[string]interface{}{ + "name": "Luxury Parking Garage", + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(48.864716), + Longitude: ptFloat32(2.349014), + }, + "id": strfmt.UUID("a7e10b55-1ac4-464f-80df-82508eea1951"), + } + if withVector { + fields["vector"] = []float32{1, 2, 3, 4, 5, 6, 7} + } + return map[string]interface{}{ + "name": "Car which is parked in two places at the same time (magic!)", + "id": id, + "parkedAt": []interface{}{ + search.LocalRef{ + Class: "MultiRefParkingGarage", + Fields: fields, + }, + }, + } + } + expectedSchemaWithGarageRef := getExpectedSchemaWithGarageRef(false) + expectedSchemaWithGarageRefWithVector := getExpectedSchemaWithGarageRef(true) + getExpectedSchemaWithAllRefs := func(withVector bool) map[string]interface{} { + fieldsParkingLot := map[string]interface{}{ + "name": "Fancy Parking Lot", + "id": strfmt.UUID("1023967b-9512-475b-8ef9-673a110b695d"), + } + if withVector { + fieldsParkingLot["vector"] = []float32{1, 2, 3, 4, 5, 6, 7} + } + fieldsParkingGarage := map[string]interface{}{ + "name": "Luxury Parking Garage", + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(48.864716), + Longitude: ptFloat32(2.349014), + }, + "id": strfmt.UUID("a7e10b55-1ac4-464f-80df-82508eea1951"), + } + if withVector { + fieldsParkingGarage["vector"] = []float32{1, 2, 3, 4, 5, 6, 7} + } + return map[string]interface{}{ + "name": "Car which is parked in two places at the same time (magic!)", + "id": id, + "parkedAt": []interface{}{ + search.LocalRef{ + Class: "MultiRefParkingLot", + Fields: fieldsParkingLot, + }, + search.LocalRef{ + Class: "MultiRefParkingGarage", + Fields: fieldsParkingGarage, + }, + }, + } + } + expectedSchemaWithAllRefs := getExpectedSchemaWithAllRefs(false) + expectedSchemaWithAllRefsWithVector := getExpectedSchemaWithAllRefs(true) + + t.Run("asking for no refs", func(t *testing.T) { + res, err := 
repo.ObjectByID(context.Background(), id, nil, additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaUnresolved, res.Schema) + }) + + t.Run("asking for refs of type garage", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtGarage(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaWithGarageRef, res.Schema) + }) + + t.Run("asking for refs with vector of type garage", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtGarageWithVector(true), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaWithGarageRefWithVector, res.Schema) + }) + + t.Run("asking for refs of type lot", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtLot(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaWithLotRef, res.Schema) + }) + + t.Run("asking for refs with vector of type lot", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtLotWithVector(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaWithLotRefWithVector, res.Schema) + }) + + t.Run("asking for refs of both types", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtEither(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaWithAllRefs, res.Schema) + }) + + t.Run("asking for refs with vectors of both types", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), id, parkedAtEitherWithVector(), additional.Properties{}, "") + require.Nil(t, err) + + assert.Equal(t, expectedSchemaWithAllRefsWithVector, res.Schema) + }) + }) +} + +func parkedAtGarage() search.SelectProperties { + return parkedAtGarageWithVector(false) +} + +func parkedAtGarageWithVector(withVector bool) search.SelectProperties { + return search.SelectProperties{ + 
search.SelectProperty{ + Name: "parkedAt", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "MultiRefParkingGarage", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "name", + IsPrimitive: true, + }, + }, + AdditionalProperties: additional.Properties{ + Vector: withVector, + }, + }, + }, + }, + } +} + +func parkedAtLot() search.SelectProperties { + return search.SelectProperties{ + search.SelectProperty{ + Name: "parkedAt", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "MultiRefParkingLot", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "name", + IsPrimitive: true, + }, + }, + }, + }, + }, + } +} + +func parkedAtLotWithVector() search.SelectProperties { + return search.SelectProperties{ + search.SelectProperty{ + Name: "parkedAt", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "MultiRefParkingLot", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "name", + IsPrimitive: true, + }, + }, + AdditionalProperties: additional.Properties{ + Vector: true, + }, + }, + }, + }, + } +} + +func parkedAtEither() search.SelectProperties { + return search.SelectProperties{ + search.SelectProperty{ + Name: "parkedAt", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "MultiRefParkingLot", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "name", + IsPrimitive: true, + }, + }, + }, + { + ClassName: "MultiRefParkingGarage", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "name", + IsPrimitive: true, + }, + }, + }, + }, + }, + } +} + +func parkedAtEitherWithVector() search.SelectProperties { + return search.SelectProperties{ + search.SelectProperty{ + Name: "parkedAt", + IsPrimitive: false, + Refs: []search.SelectClass{ + { + ClassName: "MultiRefParkingLot", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "name", + IsPrimitive: true, + }, + }, + 
AdditionalProperties: additional.Properties{ + Vector: true, + }, + }, + { + ClassName: "MultiRefParkingGarage", + RefProperties: search.SelectProperties{ + search.SelectProperty{ + Name: "name", + IsPrimitive: true, + }, + }, + AdditionalProperties: additional.Properties{ + Vector: true, + }, + }, + }, + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_update_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_update_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..61de37db6676f84952d67b357f24a518efaa24ee --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/crud_update_integration_test.go @@ -0,0 +1,415 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + libschema "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/config" + 
"github.com/weaviate/weaviate/usecases/memwatch" +) + +// Updates are non trivial, because vector indices are built under the +// assumption that items are immutable (this is true for HNSW, the assumption +// is that this is generally true in the majority of cases). Therefore an +// update is essentially a delete and a new import with a new doc ID. This +// needs to be tested extensively because there's a lot of room for error +// regarding the clean up of Doc ID pointers in the inverted indices, etc. +func TestUpdateJourney(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: libschema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + 
MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + + schema := libschema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{updateTestClass()}, + }, + } + + t.Run("add schema", func(t *testing.T) { + err := migrator.AddClass(context.Background(), updateTestClass()) + require.Nil(t, err) + }) + schemaGetter.schema = schema + + t.Run("import some objects", func(t *testing.T) { + for _, res := range updateTestData() { + err := repo.PutObject(context.Background(), res.Object(), res.Vector, nil, nil, nil, 0) + require.Nil(t, err) + } + + tracker := getTracker(repo, "UpdateTestClass") + + require.Nil(t, err) + + sum, count, mean, err := tracker.PropertyTally("name") + require.Nil(t, err) + assert.Equal(t, 4, sum) + assert.Equal(t, 4, count) + assert.InEpsilon(t, 1, mean, 0.1) + }) + + searchVector := []float32{0.1, 0.1, 0.1} + + t.Run("verify vector search results are initially as expected", + func(t *testing.T) { + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: "UpdateTestClass", + Pagination: &filters.Pagination{ + Limit: 100, + }, + Properties: search.SelectProperties{{Name: "name"}}, + }, []string{""}, []models.Vector{searchVector}) + + expectedInAnyOrder := []interface{}{ + "element-0", "element-1", "element-2", "element-3", + } + + require.Nil(t, err) + require.Len(t, res, 4) + assert.ElementsMatch(t, expectedInAnyOrder, extractPropValues(res, "name")) + }) + + searchInv := func(t *testing.T, op filters.Operator, value int) []interface{} { + res, err := 
repo.ObjectSearch(context.Background(), 0, 100, + &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: op, + On: &filters.Path{ + Class: "UpdateTestClass", + Property: libschema.PropertyName("intProp"), + }, + Value: &filters.Value{ + Type: libschema.DataTypeInt, + Value: value, + }, + }, + }, nil, additional.Properties{}, "") + require.Nil(t, err) + return extractPropValues(res, "name") + } + + t.Run("verify invert index results are initially as expected", + func(t *testing.T) { + expectedInAnyOrder := []interface{}{ + "element-0", "element-1", "element-2", "element-3", + } + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorGreaterThanEqual, 0)) + + expectedInAnyOrder = []interface{}{"element-0"} + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorEqual, 0)) + + expectedInAnyOrder = []interface{}{"element-1"} + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorEqual, 10)) + + expectedInAnyOrder = []interface{}{"element-2"} + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorEqual, 20)) + + expectedInAnyOrder = []interface{}{"element-3"} + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorEqual, 30)) + }) + + t.Run("update vector position of one item to move it into a different direction", + func(t *testing.T) { + // updating element-0 to be very far away from our search vector + updatedVec := []float32{-0.1, -0.12, -0.105} + id := updateTestData()[0].ID + + old, err := repo.ObjectByID(context.Background(), id, search.SelectProperties{}, additional.Properties{}, "") + require.Nil(t, err) + + err = repo.PutObject(context.Background(), old.Object(), updatedVec, nil, nil, nil, 0) + require.Nil(t, err) + + tracker := getTracker(repo, "UpdateTestClass") + + require.Nil(t, err) + + sum, count, mean, err := tracker.PropertyTally("name") + require.Nil(t, err) + assert.Equal(t, 4, sum) + assert.Equal(t, 4, count) + assert.InEpsilon(t, 1, mean, 0.1) + }) 
+ + t.Run("verify new vector search results are as expected", func(t *testing.T) { + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: "UpdateTestClass", + Pagination: &filters.Pagination{ + Limit: 100, + }, + Properties: search.SelectProperties{{Name: "name"}}, + }, []string{""}, []models.Vector{searchVector}) + + expectedInAnyOrder := []interface{}{ + "element-0", "element-1", "element-2", "element-3", + } + + require.Nil(t, err) + require.Len(t, res, 4) + assert.ElementsMatch(t, expectedInAnyOrder, extractPropValues(res, "name")) + }) + + t.Run("verify invert results still work properly", func(t *testing.T) { + expectedInAnyOrder := []interface{}{ + "element-0", "element-1", "element-2", "element-3", + } + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorGreaterThanEqual, 0)) + + expectedInAnyOrder = []interface{}{"element-0"} + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorEqual, 0)) + + expectedInAnyOrder = []interface{}{"element-1"} + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorEqual, 10)) + + expectedInAnyOrder = []interface{}{"element-2"} + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorEqual, 20)) + + expectedInAnyOrder = []interface{}{"element-3"} + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorEqual, 30)) + }) + + t.Run("update a second object and modify vector and invert props at the same time", + func(t *testing.T) { + // this time we are updating element-2 and move it away from the search + // vector, as well as updating an invert prop + + updatedVec := []float32{-0.1, -0.12, -0.105123} + id := updateTestData()[2].ID + + old, err := repo.ObjectByID(context.Background(), id, search.SelectProperties{}, additional.Properties{}, "") + require.Nil(t, err) + + old.Schema.(map[string]interface{})["intProp"] = int64(21) + err = repo.PutObject(context.Background(), old.Object(), updatedVec, nil, nil, 
nil, 0) + require.Nil(t, err) + + tracker := getTracker(repo, "UpdateTestClass") + + require.Nil(t, err) + + sum, count, mean, err := tracker.PropertyTally("name") + require.Nil(t, err) + assert.Equal(t, 4, sum) + assert.Equal(t, 4, count) + assert.InEpsilon(t, 1, mean, 0.1) + }) + + t.Run("verify new vector search results are as expected", func(t *testing.T) { + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + ClassName: "UpdateTestClass", + Pagination: &filters.Pagination{ + Limit: 100, + }, + Properties: search.SelectProperties{{Name: "name"}}, + }, []string{""}, []models.Vector{searchVector}) + + expectedInAnyOrder := []interface{}{ + "element-0", "element-1", "element-2", "element-3", + } + + require.Nil(t, err) + require.Len(t, res, 4) + assert.ElementsMatch(t, expectedInAnyOrder, extractPropValues(res, "name")) + }) + + t.Run("verify invert results have been updated correctly", func(t *testing.T) { + expectedInAnyOrder := []interface{}{ + "element-0", "element-1", "element-2", "element-3", + } + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorGreaterThanEqual, 0)) + + expectedInAnyOrder = []interface{}{"element-0"} + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorEqual, 0)) + + expectedInAnyOrder = []interface{}{"element-1"} + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorEqual, 10)) + + expectedInAnyOrder = []interface{}{} // value is no longer 20, but 21 + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorEqual, 20)) + + expectedInAnyOrder = []interface{}{"element-2"} + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorEqual, 21)) + + expectedInAnyOrder = []interface{}{"element-3"} + assert.ElementsMatch(t, expectedInAnyOrder, searchInv(t, filters.OperatorEqual, 30)) + }) + + t.Run("test recount", func(t *testing.T) { + tracker := getTracker(repo, "UpdateTestClass") + + require.Nil(t, err) + + sum, count, mean, err 
:= tracker.PropertyTally("name") + require.Nil(t, err) + assert.Equal(t, 4, sum) + assert.Equal(t, 4, count) + assert.InEpsilon(t, 1, mean, 0.1) + + tracker.Clear() + sum, count, mean, err = tracker.PropertyTally("name") + require.Nil(t, err) + assert.Equal(t, 0, sum) + assert.Equal(t, 0, count) + assert.Equal(t, float64(0), mean) + + logger := logrus.New() + migrator := NewMigrator(repo, logger, "node1") + migrator.RecountProperties(context.Background()) + + sum, count, mean, err = tracker.PropertyTally("name") + require.Nil(t, err) + assert.Equal(t, 4, sum) + assert.Equal(t, 4, count) + assert.Equal(t, float64(1), mean) + }) +} + +func updateTestClass() *models.Class { + return &models.Class{ + Class: "UpdateTestClass", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: &models.InvertedIndexConfig{ + CleanupIntervalSeconds: 3, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + Properties: []*models.Property{ + { + DataType: []string{string(libschema.DataTypeInt)}, + Name: "intProp", + }, + { + DataType: libschema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + Name: "name", + }, + }, + } +} + +func updateTestData() search.Results { + return search.Results{ + search.Result{ + ClassName: "UpdateTestClass", + ID: "426b0b29-9ded-40b6-b786-da3d1fec412f", + Schema: map[string]interface{}{ + "intProp": int64(0), + "name": "element-0", + }, + Vector: []float32{0.89379513, 0.67022973, 0.57360715}, + }, + search.Result{ + ClassName: "UpdateTestClass", + ID: "a1560f12-f0f0-4439-b5b8-b7bcecf5fed7", + + Schema: map[string]interface{}{ + "intProp": int64(10), + "name": "element-1", + }, + Vector: []float32{0.9660323, 0.35887036, 0.6072966}, + }, + search.Result{ + ClassName: "UpdateTestClass", + ID: "0c73f145-5dc4-49a9-bd58-82725f8b13fa", + + Schema: map[string]interface{}{ + "intProp": int64(20), + "name": "element-2", + }, + Vector: []float32{0.8194746, 0.56142205, 0.5130103}, + }, + search.Result{ + 
ClassName: "UpdateTestClass", + ID: "aec8462e-276a-4989-a612-8314c35d163a", + Schema: map[string]interface{}{ + "intProp": int64(30), + "name": "element-3", + }, + Vector: []float32{0.42401955, 0.8278863, 0.5952888}, + }, + } +} + +func extractPropValues(in search.Results, propName string) []interface{} { + out := make([]interface{}, len(in)) + + for i, res := range in { + out[i] = res.Schema.(map[string]interface{})[propName] + } + + return out +} + +func getTracker(repo *DB, className string) *inverted.JsonShardMetaData { + index := repo.GetIndex("UpdateTestClass") + var shard ShardLike + index.ForEachShard(func(name string, shardv ShardLike) error { + shard = shardv + return nil + }) + + tracker := shard.GetPropertyLengthTracker() + + return tracker +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/delete_filter_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/delete_filter_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c5623256d3af7baa75873c177ece9148150ec601 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/delete_filter_integration_test.go @@ -0,0 +1,305 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "testing" + "time" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +// This test aims to prevent a regression on +// https://github.com/weaviate/weaviate/issues/1308 where we +// discovered that if the first n doc ids are deleted and a filter would return +// <= n doc ids, it would return no results instead of skipping the deleted ids +// and returning the next ones +func Test_FilterSearchesOnDeletedDocIDsWithLimits(t *testing.T) { + className := "DeletedDocIDLimitTestClass" + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + thingclass := &models.Class{ + Class: className, + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{{ + Name: "unrelatedProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, { + Name: "boolProp", + DataType: []string{string(schema.DataTypeBoolean)}, + }}, + } + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + 
mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + migrator := NewMigrator(repo, logger, "node1") + defer repo.Shutdown(testCtx()) + + t.Run("creating the thing class", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), thingclass)) + + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: 
[]*models.Class{thingclass}, + }, + } + }) + + var things []*models.Object + t.Run("importing 10 initial items", func(t *testing.T) { + things = make([]*models.Object, 10) + for i := 0; i < 10; i++ { + things[i] = &models.Object{ + Class: className, + ID: mustNewUUID(), + Properties: map[string]interface{}{ + "boolProp": i < 5, + "unrelatedProp": "initialValue", + }, + Vector: []float32{0.1}, + } + + err := repo.PutObject(context.Background(), things[i], things[i].Vector, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + + t.Run("updating the first 5 elements", func(t *testing.T) { + // The idea is that the first 5 elements can be found with a boolProp==true + // search, however, the bug occurred if those items all had received an + // update + + for i := 0; i < 5; i++ { + things[i].Properties.(map[string]interface{})["unrelatedProp"] = "updatedValue" + + err := repo.PutObject(context.Background(), things[i], things[i].Vector, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + + t.Run("searching for boolProp == true with a strict limit", func(t *testing.T) { + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: className, + Pagination: &filters.Pagination{ + // important as the first 5 doc ids we encounter now should all be + // deleted + Limit: 5, + }, + Filters: buildFilter("boolProp", true, eq, dtBool), + }) + expectedIDs := []strfmt.UUID{ + things[0].ID, things[1].ID, things[2].ID, things[3].ID, things[4].ID, + } + + require.Nil(t, err) + + require.Len(t, res, 5) + actualIDs := extractIDs(res) + assert.Equal(t, expectedIDs, actualIDs) + }) +} + +func mustNewUUID() strfmt.UUID { + id, err := uuid.NewRandom() + if err != nil { + panic(err) + } + + return strfmt.UUID(id.String()) +} + +func extractIDs(in []search.Result) []strfmt.UUID { + out := make([]strfmt.UUID, len(in)) + for i, res := range in { + out[i] = res.ID + } + + return out +} + +// This bug aims to prevent a regression on +// https://github.com/weaviate/weaviate/issues/1765 
+func TestLimitOneAfterDeletion(t *testing.T) { + className := "Test" + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + class := &models.Class{ + Class: className, + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{{ + Name: "author", + DataType: []string{string(schema.DataTypeText)}, + Tokenization: "word", + }}, + } + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, 
memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(testCtx()) + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the class", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), class)) + + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + }) + + firstID := strfmt.UUID("114c8f57-f244-4419-b5c1-cb2f635b76d0") + + t.Run("import single object", func(t *testing.T) { + err := repo.PutObject(context.Background(), &models.Object{ + Class: "Test", + ID: firstID, + Properties: map[string]interface{}{ + "author": "Simon", + }, + }, []float32{0, 1}, nil, nil, nil, 0) + + require.Nil(t, err) + }) + + t.Run("delete first object", func(t *testing.T) { + err := repo.DeleteObject(context.Background(), "Test", firstID, time.Now(), nil, "", 0) + require.Nil(t, err) + }) + + t.Run("create another object", func(t *testing.T) { + // new object has a different ID, but the same inverted props as the + // previously deleted one + err := repo.PutObject(context.Background(), &models.Object{ + Class: "Test", + ID: "74776bbd-2de0-421d-8cef-757e16466dd9", + Properties: map[string]interface{}{ + "author": "Simon", + }, + }, []float32{0, 1}, nil, nil, nil, 0) + + require.Nil(t, err) + }) + + t.Run("query with high limit", func(t *testing.T) { + res, err := repo.Search(context.Background(), dto.GetParams{ + Filters: buildFilter("author", "Simon", eq, dtText), + ClassName: "Test", + Pagination: &filters.Pagination{ + Offset: 0, + Limit: 100, + }, + Properties: search.SelectProperties{{Name: "author"}}, + }) + + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, "Simon", res[0].Object().Properties.(map[string]interface{})["author"]) + }) + + t.Run("query with 
limit 1", func(t *testing.T) { + res, err := repo.Search(context.Background(), dto.GetParams{ + Filters: buildFilter("author", "Simon", eq, dtText), + ClassName: "Test", + Pagination: &filters.Pagination{ + Offset: 0, + Limit: 1, + }, + Properties: search.SelectProperties{{Name: "author"}}, + }) + + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, "Simon", res[0].Object().Properties.(map[string]interface{})["author"]) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/disk_use_unix.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/disk_use_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..6ec65ab4d5d59b4f68b7e467751e5820cbc61cd4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/disk_use_unix.go @@ -0,0 +1,35 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build !windows + +package db + +import ( + "syscall" +) + +func (db *DB) getDiskUse(diskPath string) diskUse { + fs := syscall.Statfs_t{} + + err := syscall.Statfs(diskPath, &fs) + if err != nil { + db.logger.WithField("action", "read_disk_use"). + WithField("path", diskPath). 
+ Errorf("failed to read disk usage: %s", err) + } + + return diskUse{ + total: fs.Blocks * uint64(fs.Bsize), + free: fs.Bfree * uint64(fs.Bsize), + avail: fs.Bfree * uint64(fs.Bsize), + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/disk_use_windows.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/disk_use_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..a54d2d03895a0da79c3a285da51e3bb23ba66f44 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/disk_use_windows.go @@ -0,0 +1,44 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build windows + +package db + +import ( + "syscall" + + "golang.org/x/sys/windows" +) + +func (db *DB) getDiskUse(diskPath string) diskUse { + var freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes uint64 + + _, _ = syscall.UTF16PtrFromString(diskPath) + + err := windows.GetDiskFreeSpaceEx( + syscall.StringToUTF16Ptr(diskPath), + &freeBytesAvailable, + &totalNumberOfBytes, + &totalNumberOfFreeBytes, + ) + if err != nil { + db.logger.WithField("action", "read_disk_use"). + WithField("path", diskPath). 
+ Errorf("failed to read disk usage: %s", err) + } + + return diskUse{ + total: totalNumberOfBytes, + free: totalNumberOfFreeBytes, + avail: freeBytesAvailable, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/docid/scan.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/docid/scan.go new file mode 100644 index 0000000000000000000000000000000000000000..3a078dd95b5e82780466001d26ffb01c6a0b0760 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/docid/scan.go @@ -0,0 +1,125 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package docid + +import ( + "encoding/binary" + + "github.com/weaviate/weaviate/entities/storobj" + + "github.com/weaviate/weaviate/entities/models" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" +) + +// ObjectScanFn is called once per object, if false or an error is returned, +// the scanning will stop +type ObjectScanFn func(prop *models.PropertySchema, docID uint64) (bool, error) + +// ScanObjectsLSM calls the provided scanFn on each object for the +// specified pointer. If a pointer does not resolve to an object-id, the item +// will be skipped. The number of times scanFn is called can therefore be +// smaller than the input length of pointers. 
+func ScanObjectsLSM(store *lsmkv.Store, pointers []uint64, scan ObjectScanFn, properties []string) error { + return newObjectScannerLSM(store, pointers, scan, properties).Do() +} + +type objectScannerLSM struct { + store *lsmkv.Store + pointers []uint64 + scanFn ObjectScanFn + objectsBucket *lsmkv.Bucket + properties []string +} + +func newObjectScannerLSM(store *lsmkv.Store, pointers []uint64, + scan ObjectScanFn, properties []string, +) *objectScannerLSM { + return &objectScannerLSM{ + store: store, + pointers: pointers, + scanFn: scan, + properties: properties, + } +} + +func (os *objectScannerLSM) Do() error { + if err := os.init(); err != nil { + return errors.Wrap(err, "init object scanner") + } + + if err := os.scan(); err != nil { + return errors.Wrap(err, "scan") + } + + return nil +} + +func (os *objectScannerLSM) init() error { + bucket := os.store.Bucket(helpers.ObjectsBucketLSM) + if bucket == nil { + return errors.Errorf("objects bucket not found") + } + os.objectsBucket = bucket + + return nil +} + +func (os *objectScannerLSM) scan() error { + // each object is scanned one after the other, so we can reuse the same memory allocations for all objects + docIDBytes := make([]byte, 8) + + // Preallocate property paths needed for json unmarshalling + propertyPaths := make([][]string, len(os.properties)) + for i := range os.properties { + propertyPaths[i] = []string{os.properties[i]} + } + + var ( + properties models.PropertySchema + + // used for extraction from json + propertiesTyped = make(map[string]interface{}, len(os.properties)) + ) + for _, id := range os.pointers { + binary.LittleEndian.PutUint64(docIDBytes, id) + res, err := os.objectsBucket.GetBySecondary(0, docIDBytes) + if err != nil { + return err + } + + if res == nil { + continue + } + + if len(propertyPaths) > 0 { + err = storobj.UnmarshalPropertiesFromObject(res, propertiesTyped, propertyPaths) + if err != nil { + return errors.Wrapf(err, "unmarshal data object") + } + properties = 
propertiesTyped + } + + continueScan, err := os.scanFn(&properties, id) + if err != nil { + return errors.Wrapf(err, "scan") + } + + if !continueScan { + break + } + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/fakes_for_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/fakes_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a66fc5f335c13ba4ef4166f4259456b0863d2f96 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/fakes_for_test.go @@ -0,0 +1,554 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/cluster/mocks" + "github.com/weaviate/weaviate/usecases/file" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/replica" + "github.com/weaviate/weaviate/usecases/replica/hashtree" + "github.com/weaviate/weaviate/usecases/sharding" + shardingConfig "github.com/weaviate/weaviate/usecases/sharding/config" +) + +type fakeSchemaGetter struct { + schema schema.Schema + shardState *sharding.State +} + +func (f *fakeSchemaGetter) 
GetSchemaSkipAuth() schema.Schema { + return f.schema +} + +func (f *fakeSchemaGetter) ReadOnlyClass(class string) *models.Class { + return f.schema.GetClass(class) +} + +func (f *fakeSchemaGetter) ResolveAlias(string) string { + return "" +} + +func (f *fakeSchemaGetter) GetAliasesForClass(string) []*models.Alias { + return nil +} + +func (f *fakeSchemaGetter) CopyShardingState(class string) *sharding.State { + return f.shardState +} + +func (f *fakeSchemaGetter) ShardOwner(class, shard string) (string, error) { + ss := f.shardState + x, ok := ss.Physical[shard] + if !ok { + return "", fmt.Errorf("shard not found") + } + if len(x.BelongsToNodes) < 1 || x.BelongsToNodes[0] == "" { + return "", fmt.Errorf("owner node not found") + } + return x.BelongsToNodes[0], nil +} + +func (f *fakeSchemaGetter) ShardReplicas(class, shard string) ([]string, error) { + ss := f.shardState + x, ok := ss.Physical[shard] + if !ok { + return nil, fmt.Errorf("shard not found") + } + return x.BelongsToNodes, nil +} + +func (f *fakeSchemaGetter) TenantsShards(_ context.Context, class string, tenants ...string) (map[string]string, error) { + res := map[string]string{} + for _, t := range tenants { + res[t] = models.TenantActivityStatusHOT + } + return res, nil +} + +func (f *fakeSchemaGetter) OptimisticTenantStatus(_ context.Context, class string, tenant string) (map[string]string, error) { + res := map[string]string{} + res[tenant] = models.TenantActivityStatusHOT + return res, nil +} + +func (f *fakeSchemaGetter) ShardFromUUID(class string, uuid []byte) string { + ss := f.shardState + return ss.Shard("", string(uuid)) +} + +func (f *fakeSchemaGetter) Nodes() []string { + return []string{"node1"} +} + +func (f *fakeSchemaGetter) NodeName() string { + return "node1" +} + +func (f *fakeSchemaGetter) ClusterHealthScore() int { + return 0 +} + +func (f fakeSchemaGetter) ResolveParentNodes(_ string, shard string) (map[string]string, error) { + return nil, nil +} + +func (f fakeSchemaGetter) 
Statistics() map[string]any { + return nil +} + +func singleShardState() *sharding.State { + config, err := shardingConfig.ParseConfig(nil, 1) + if err != nil { + panic(err) + } + + selector := mocks.NewMockNodeSelector("node1") + s, err := sharding.InitState("test-index", config, selector.LocalName(), selector.StorageCandidates(), 1, false) + if err != nil { + panic(err) + } + + return s +} + +func multiShardState() *sharding.State { + config, err := shardingConfig.ParseConfig(map[string]interface{}{ + "desiredCount": json.Number("3"), + }, 1) + if err != nil { + panic(err) + } + + selector := mocks.NewMockNodeSelector("node1") + s, err := sharding.InitState("multi-shard-test-index", config, selector.LocalName(), selector.StorageCandidates(), 1, false) + if err != nil { + panic(err) + } + + return s +} + +// MultiTenantShardingStateBuilder provides a fluent interface for creating multi-tenant sharding states +// with partition-based sharding and a configurable set of tenants, each with its own name and status. +type MultiTenantShardingStateBuilder struct { + tenants []tenantConfig + replicationFactor int64 + nodePrefix string + indexName string +} + +type tenantConfig struct { + name string + status string +} + +// NewMultiTenantStateBuilder creates a new builder with default values. +// Default configuration: +// - replicationFactor: 1 +// - nodePrefix: "node" +// - indexName: "multi-tenant-test-index" +func NewMultiTenantShardingStateBuilder() *MultiTenantShardingStateBuilder { + return &MultiTenantShardingStateBuilder{ + tenants: make([]tenantConfig, 0), + replicationFactor: 1, + nodePrefix: "node", + indexName: "multi-tenant-test-index", + } +} + +// AddTenant adds a tenant with the specified status to the builder. 
+func (b *MultiTenantShardingStateBuilder) AddTenant(name, status string) *MultiTenantShardingStateBuilder { + b.tenants = append(b.tenants, tenantConfig{name: name, status: status}) + return b +} + +// WithReplicationFactor sets the number of replicas per tenant shard. +func (b *MultiTenantShardingStateBuilder) WithReplicationFactor(factor int64) *MultiTenantShardingStateBuilder { + b.replicationFactor = factor + return b +} + +// WithNodePrefix sets the prefix for generated node names (default: "node"). +// Nodes will be named as "{prefix}1", "{prefix}2", etc. +func (b *MultiTenantShardingStateBuilder) WithNodePrefix(prefix string) *MultiTenantShardingStateBuilder { + b.nodePrefix = prefix + return b +} + +// WithIndexName sets the index name for the sharding state (default: "multi-tenant-test-index"). +func (b *MultiTenantShardingStateBuilder) WithIndexName(name string) *MultiTenantShardingStateBuilder { + b.indexName = name + return b +} + +func (b *MultiTenantShardingStateBuilder) WithTenant(name, status string) *MultiTenantShardingStateBuilder { + if b.tenants == nil { + b.tenants = make([]tenantConfig, 0) + } + b.tenants = append(b.tenants, tenantConfig{name: name, status: status}) + return b +} + +// Build creates and returns the configured sharding state. +// Panics if no tenants have been added or if configuration is invalid. +func (b *MultiTenantShardingStateBuilder) Build() *sharding.State { + if len(b.tenants) <= 0 { + panic(fmt.Sprintf("invalid number of tenants: %d", len(b.tenants))) + } + + nodeCount := max(int(b.replicationFactor), len(b.tenants)) + nodeNames := b.generateNodeNames(nodeCount) + config := b.createConfig() + selector := mocks.NewMockNodeSelector(nodeNames...) 
+ + s, err := sharding.InitState( + b.indexName, + config, + selector.LocalName(), + nodeNames, + b.replicationFactor, + true, + ) + if err != nil { + panic(fmt.Sprintf("failed to initialize sharding state: %v", err)) + } + + for i, tenant := range b.tenants { + replicaNodes := selectNodesForTenant(nodeNames, i, int(b.replicationFactor)) + _, err = s.AddPartition(tenant.name, replicaNodes, tenant.status) + if err != nil { + panic(fmt.Sprintf("failed to add partition for tenant %s: %v", tenant.name, err)) + } + } + + return s +} + +func (b *MultiTenantShardingStateBuilder) generateNodeNames(count int) []string { + names := make([]string, count) + for i := 0; i < count; i++ { + names[i] = fmt.Sprintf("%s%d", b.nodePrefix, i+1) + } + return names +} + +func (b *MultiTenantShardingStateBuilder) createConfig() shardingConfig.Config { + // Virtual shard configuration values are unused in partition-based sharding. + // Key, Strategy, and Function are the only required properties. + return shardingConfig.Config{ + Key: "_id", + Strategy: "hash", + Function: "murmur3", + } +} + +// selectNodesForTenant selects replica nodes for a tenant using round-robin distribution. +// Starting from position tenantIndex in the node list, it selects replicationFactor consecutive +// nodes with wraparound. This ensures even distribution of tenant replicas across the cluster. 
+func selectNodesForTenant(allNodes []string, tenantIndex int, replicationFactor int) []string { + if replicationFactor > len(allNodes) { + panic(fmt.Sprintf("replication factor %d exceeds available nodes %d", replicationFactor, len(allNodes))) + } + + selectedNodes := make([]string, replicationFactor) + startIndex := tenantIndex % len(allNodes) + + for i := 0; i < replicationFactor; i++ { + nodeIndex := (startIndex + i) % len(allNodes) + selectedNodes[i] = allNodes[nodeIndex] + } + + return selectedNodes +} + +func fixedMultiShardState() *sharding.State { + raw := []byte(`{"indexID":"multi-shard-test-index","config":{"virtualPerPhysical":128,"desiredCount":3,"actualCount":0,"desiredVirtualCount":384,"actualVirtualCount":0,"key":"_id","strategy":"hash","function":"murmur3"},"physical":{"10HAiobndwqQ":{"name":"10HAiobndwqQ","ownsVirtual":["P1ib1Jb4kcVl","cJnMLj5AELbO","OXKlgrTvR2oT","Wwn8XeDuTnEq","aFlA2RXHaTZq","oeibhOxhslJ4","xNWB5azVw7oD","wXr2DKiiO4rQ","6IcKuxi0brIT","Ib0bC21DkrSY","xLBqexoUmcoS","k7s6GojJDHw4","Ij8HLoZKUIGh","cYXwzjMjpL2p","18xE7smej6jn","wKvtkC7ISgqS","JQoplEib8uSu","CXmIRbTMCrVg","xeqIBs9YD2cA","48RZ8vWnLnYd","N2liOQZHORRd","FLYoUFplOmxQ","yPsCEegMk4Q2","MDVwNcNC9Gra","2PgGKvp3Nl9s","NKgE3HgdeGQm","NEObTyTfjbkD","MKp04TysWuOK","aV4XlXJRhzIX","jcaPi3kHStaK","8BgRFjMgSMe4","z6T8lD2o39SQ","ioHJiTM0L2b0","rblINp3n1sZ0","c5elvzxRiyRR","3W0h1o0dwd3u","JSuWUzk63Inq","2AmfpIT0zs3p","XS08CGL3mnm6","Uvrp2Des8Ab3","L215BrEJZez6","8FlXfnU8F53r","mYSXTeMJNg5X","LE68IkTZ1KBq","3DpBKBAyrh49","comyKg1nNYkV","VEUsNjgJ1M4k","nMusHd6e2Fkf","mbwwycSRznHa","UdvpOIbYGk3E","w54br5SJaUNd","glARwN18WtJG","DUFmLbD5AQdB","OyAthRrzjE9C","aRIS8UTR2sMs","dtl4DVfMpjM2","YceXk2KF4xnV","ntXhJG4ehh97","6JQHfQJXq9tC","81UYRgmxCxvy","UUvwR3z3SjqT","hVoXGtgS5V4b","pJQ37OFnd0h0","7jZlJg79Tx3C","81Tr7kFVuWFO","uoVfvn3j1vVs","si39KY4GMFCu","opTmmfmor5Xi","jUJHmkXSdjJM","taj02QCfJJ2v","vCn9VOgxbsvC","BhoBBmk2O5Fx","0Ym87WlhMWSH","8v1uPEAZANss","KcyNwToNc5uf","uTwHaNDD5s4i","nN3HYdDnN
FvW","wBKHrHzpqkXI","DSeQOWKAAfPy","Dr8dRry1VNRG","qvDj32cx0hSO","FrFb8tNKNyUW","pgxcn0O1NKoW","LQdu95vAEz5N","TR2Tz7fHYqme","KoUT5JzcEigR","EAnJxQirsjKw","R8oWsRMA8Z9G","Q8PJeKnlygDF","KpWXuBwKB3Np","BpVhOJVXrM9b","zPbNJqGLV31c","nXTYGKAWzMD3","DfDu4yl3r1qn","VExbdXg0Ek0R","KISjTsevB5iE","3qa8pPUU95kc","iWYrenDncRMZ","Cqju2FPfqDzN","X4f5TfQ2Zh3w","Ol4Jrf6D4hI8","sTxMnH5hC27G","f1r8QwYMUnF2","M8Xxly7aXuVg","JprYyU27D3SD","1pZrOBR3Mp0n","7dAlRglk4gUq","meZAtRpcJSGj","RAKQqUg9tkVy","evXAOdPjRLLO","ZhRSfF5kiST2","K2KoDH0smQno","97FKS8uAgBGZ","A6uDhUMxgGzC","JcvKnGUxrjbi","7oYBFzAQhylH","0qi2orWv2yBQ","HruSBzNO6D2X","Pw1ZyGgzqr4w","Ua0wIIuK7MVF","0yiZLUGHfCfg","aTgCTGMPZTPv","04uFLfQjFQ5M","MaWgxbZRgif0","SPWsrRGsxBtQ","CMG5X2zuOP8T","rjKzLkrjnadI","3zdVkbUS6vRy"],"ownsPercentage":0.3115328681081298},"5UnDHhImGGus":{"name":"5UnDHhImGGus","ownsVirtual":["kk3uy33qESSP","AYNqxnC70vWf","1L72JpPhK3AH","N5YYIOLBJsT5","Kdpf7oo9rHRd","2zc4yEUgcsxU","04flFfZJX9y4","siSzkkL8ch9H","vSuf6j765jqp","PT37knJ3yCVo","IfrQL0OScc3u","CGR9oTerTPfV","saP105LXdjTN","wJbYdeQm8NxO","tHpmYN4knZgr","7KyVTpba2SB8","cghDTFg7bkKV","9SKvNyPOO16k","mpyKh0Uvq9pK","RBxCMlncsWP4","wzCL9BumCknu","SliMd6ggZIIT","MRj5CdiSSHkh","A9NHb9n0ZxAu","o0I69xAYCy8l","cOB2G6rQQDMp","fq4nrOTRJSFl","MVHggjKS2WIh","f7KnlZipyMoU","kC8TDVP2qpDU","RSCYtxYOBl4u","FRl5xH4a7yhi","Pn8nYsbPoQXd","VawtjdDTNeP4","xWK39CVQOq7c","IiRbFljPDBMc","DInND1HhGkNp","odhRcafbqKku","wMSpj5IJ6Jr2","urILYmbzLgFM","f8XCDnms7AuS","85iBn44HwzLG","4sh42JIRZ5x4","RlZ9POMVYbei","9DuGADJSRpBx","zY4rPaerd1ys","PA9vlkamSA8b","wgvpTSabR2Gp","lUb5dLRn4Q7i","CIohP98pYrEU","uydQQfk9x5dv","U0547wfcqSsu","OejkNdX5mxtz","ndz1d2vCwoIq","uImQxXnTgMUt","73XdZtGlJTnb","jTVVZexNxBtp","w0MlBKADJImt","0s0krALwyqS4","tq85nEKxsXd6","itKBOdSzdcD2","T1LLWfgdGEtq","vBUqaK2WYdna","t0KeI7GeoLhD","rohnBjf5J9D8","SQ4Xh9ZRBYnc","VhJWJyF49q7N","hsdU1XjrtTL0","EgXtER7wworu","xp4mWs2aVMbB","hK5JiEF1Odsf","vxlhQDXcCDoc","Ytl1gLqLrmUF","7JIKqhHrk8gs","fnaBlG1IB2tO","UtdVz5Nl280P"
,"S2fMEGZ6FK5b","HYBYL0oNGwfa","jPIF7y5qI3fh","H6BtD1FRUcEb","PeiLrsaz0A1Z","58AEK168LCOI","b2T01LA7C14D","ofCP6twUePhY","gLBhIXTrzmM5","ZdNfpA9QwBPW","I6lAuYCU2JkE","6BoSzEVYohm2","iH2Nxldim4CZ","WCrjXqYEOGO3","3CBHlZozw0Z1","KnwHXiKEEMD5","u2NQ8vav4GGA","AJexcI7np4a6","xY2JLJ1fxlZP","aGQsUOxlSSI7","6mcznERQIGgE","WqDu1P0hsKi2","vQ8Nr3MMIvqX","cCuxaZc2WPTR","e2bHBdxkqCWf","AVApjgCPtUh6","h03hg1MFr8UK","bd7afitI4wfb","mA1JyNKe3vCB","94dJ1MbkYawY","rkAx8oOGiGp5","5dkf0GDkoFS3","ULSfcu1j23rT","IXPatqSYHjfd","Od8MgXRVCkCj","qJn82gKq9WyS","0suDv1qp3ndv","ptSPQhrhvHUx","bFNikojhtkrf","EVmTAbpjGR6I","wAmFbdhLWtEt","3XGWS3W2ZWY1","JPenxeL41DVm","WYWMO6T7W33P","gzKXC2EEuP0y","wrXomqZRYWlP","3xb5qWUIdy3l","q6uOaqi6vhFp","1tkGefsVhdMZ","VxASALmWn2ch","hiizguisdpTg","tfavKaxeSt8e"],"ownsPercentage":0.34078263030984246},"qc4oYcGtIr3y":{"name":"qc4oYcGtIr3y","ownsVirtual":["ZpSrDlljAmhU","PoixZZemlqGZ","uq0LzAAY5iuw","hDhqptRj4wek","f2c8q7U2y0Wc","zO4HfORWFYQx","E4OaMLfzLI3l","qjwhfQLQXRtk","2iX5gmw2cSrS","uMkCwD02ZjTb","UixcolZ7TGh7","PzFLgH6tNojh","a0D06UHaaNTQ","EfHluzACLDad","zLp00SBesBQM","MWsu1YNw7GyZ","EcEpTU5Z1N2m","FVhhe4CrETSN","oAGIG611zPBS","93O50DIDBjvT","psuAMG40uLmP","4ZKvAdv7y3W3","2hY6C71VJZa6","TsFT7FF0NLc9","mdW6a1iyMMQB","Pqolehn1Kgzx","6lS4e7DEBTSK","cTPggh41RFbY","IeTvnejJYwTE","shwsWnArjWTe","NPiIKetDYl31","fgTWGQMwfW8m","AmEGOYK67uL5","3KocyAIgLFK9","NtPJZ0DCROIt","qPo0bL1DanzJ","S7pN5St5UtGx","WmgtIhiclMxv","iZr9zB8YgfBS","Ec4HWsyJ98Pj","Vbw2kZ8Pzb10","W1p8DeaOGQwa","tf20ZJKNTYuN","VYlWIOMzTG6Q","E7qcPMaKvpuY","HnNcLsgB4Qle","9UENJD0jVfQJ","QEzoqtGH5L3q","c3Fmgjy0kiGM","nPdFzMMAl8e1","vPsB4hMU3u1Q","zcHMGe7BCirf","jlzvsigHmfto","6t35oHrP3E89","Un0x4I8BmBxf","ENBKLdnBVJMj","PPBVWwBeAt88","6OVVNvUPztYL","4mW6gvrMBHur","lGwgDbeyOBH7","nQF4tc5aCEj6","YhUygFdgZypN","bYeSTo07NtUQ","8Di0Ol5egcop","k0raV8wB89r3","YZq35jzzcmLB","IFXmevuG7Zrh","3C5qZO15G38a","2RvYTa9iW6Om","Xgg7Xi7IPaxF","KoRSyfj9NHxw","ngQi7Ikjaotp","pZxtjHxpnAP7","gbdKhYN535Uf","Xaqm44zLtfaI","q
T0JE0lB5nbG","iHFJaeDAyc3y","VQjkAf5kPk7D","6DXk6xSSav8y","M3kWMrkyMrGT","XZG9YcJJQOQB","6MSMwtkaDKYl","Nk6OCjX40weL","uLUgLpz1cxlj","PquK03kCX2s6","gtoqy7jnEc10","rX2vYIXPVa18","8irbpjVkihDh","OH4vVMfkcJJp","tcrH84kQAesz","qhFDFwxgRnVT","IiO65JAUXrTl","g7Up5jauHf3w","aOm3ikcxu3yr","s4qjXxgUNJsT","gtk7Xsi7whtY","qcgtLYQ2qEet","HKmvdMeJ7ZUM","wkkemoIF2SPl","jv8aLeLUQAs3","XqtbcXDoer5y","6QrhJx5gbnzD","a6g520corlxT","UXRmLgtMDrXc","10sk2R5hdJ3w","7dRVmMSfU9TF","BVUpofeiRXJ0","Fbu1Q7R1XE6Q","KhlfU1eDGz8J","asfBwHMBj3DG","h10KwfryF0Hs","gpqm1I3ZXfFw","Jrt8yXZwyfFi","l7bTeARR9sxT","SF2wURCU9LAf","TEVjUfLfnSxk","WadYBzIWkqkJ","MPs72GrXF80t","XIK3cSdZyVhk","5G1XAnbIxrsI","THznNzcBVet8","scRBGW6E4EA4","maRmPi8FWELR","LKWfqTsgLJ2h","ThChEv0oUVF8","BwQaadmEuOWL","xQhsuMpqsB9Y","90E1fTakTcmi"],"ownsPercentage":0.3476845015820278}},"virtual":[{"name":"wMSpj5IJ6Jr2","upper":890026480952456,"ownsPercentage":0.0009179861551301902,"assignedToPhysical":"5UnDHhImGGus"},{"name":"DInND1HhGkNp","upper":20777246139474581,"ownsPercentage":0.001078088337923306,"assignedToPhysical":"5UnDHhImGGus"},{"name":"c5elvzxRiyRR","upper":160478001236453307,"ownsPercentage":0.007573193108700487,"assignedToPhysical":"10HAiobndwqQ"},{"name":"aOm3ikcxu3yr","upper":169380448435422008,"ownsPercentage":0.00048260262967796797,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"gtoqy7jnEc10","upper":196373783656105797,"ownsPercentage":0.0014633116344447423,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"3qa8pPUU95kc","upper":212860879729245124,"ownsPercentage":0.0008937672690237444,"assignedToPhysical":"10HAiobndwqQ"},{"name":"wrXomqZRYWlP","upper":214005801208299024,"ownsPercentage":0.00006206631774577777,"assignedToPhysical":"5UnDHhImGGus"},{"name":"psuAMG40uLmP","upper":251004648148060861,"ownsPercentage":0.002005711511577422,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"BVUpofeiRXJ0","upper":299839797586403218,"ownsPercentage":0.0026473587557352524,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"PoixZZemlqGZ","uppe
r":312733734053195943,"ownsPercentage":0.000698981696459337,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"SPWsrRGsxBtQ","upper":332381381418126825,"ownsPercentage":0.0010651010978643578,"assignedToPhysical":"10HAiobndwqQ"},{"name":"kk3uy33qESSP","upper":348216885392409783,"ownsPercentage":0.0008584443905660211,"assignedToPhysical":"5UnDHhImGGus"},{"name":"qT0JE0lB5nbG","upper":454179755158415031,"ownsPercentage":0.005744258680155073,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"1pZrOBR3Mp0n","upper":493292167589083547,"ownsPercentage":0.0021202881264239925,"assignedToPhysical":"10HAiobndwqQ"},{"name":"RSCYtxYOBl4u","upper":560609404341315928,"ownsPercentage":0.0036492747166245697,"assignedToPhysical":"5UnDHhImGGus"},{"name":"HruSBzNO6D2X","upper":577899831252128784,"ownsPercentage":0.0009373159209952564,"assignedToPhysical":"10HAiobndwqQ"},{"name":"ENBKLdnBVJMj","upper":638066638177577744,"ownsPercentage":0.003261649139004383,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"2RvYTa9iW6Om","upper":685076307063247990,"ownsPercentage":0.0025483992566833953,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"UXRmLgtMDrXc","upper":698279650281856233,"ownsPercentage":0.0007157546700843405,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"PA9vlkamSA8b","upper":723564314835311292,"ownsPercentage":0.0013706844119711601,"assignedToPhysical":"5UnDHhImGGus"},{"name":"7JIKqhHrk8gs","upper":806465637273563321,"ownsPercentage":0.004494089694473707,"assignedToPhysical":"5UnDHhImGGus"},{"name":"b2T01LA7C14D","upper":833505543382924548,"ownsPercentage":0.0014658362473786754,"assignedToPhysical":"5UnDHhImGGus"},{"name":"odhRcafbqKku","upper":859042461960569242,"ownsPercentage":0.0013843591300233909,"assignedToPhysical":"5UnDHhImGGus"},{"name":"73XdZtGlJTnb","upper":909598291510966844,"ownsPercentage":0.002740637011517397,"assignedToPhysical":"5UnDHhImGGus"},{"name":"U0547wfcqSsu","upper":925074112165663730,"ownsPercentage":0.0008389459187409203,"assignedToPhysical":"5UnDHhImGGus"},{"name":"oAGIG611
zPBS","upper":960831498707513441,"ownsPercentage":0.0019384118085538698,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"aFlA2RXHaTZq","upper":986327436288144592,"ownsPercentage":0.0013821375457237555,"assignedToPhysical":"10HAiobndwqQ"},{"name":"nQF4tc5aCEj6","upper":1025597420805531024,"ownsPercentage":0.0021288301263611246,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"Xgg7Xi7IPaxF","upper":1085798861384560310,"ownsPercentage":0.003263526633127028,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"2zc4yEUgcsxU","upper":1157579081722514043,"ownsPercentage":0.0038912135415948815,"assignedToPhysical":"5UnDHhImGGus"},{"name":"WYWMO6T7W33P","upper":1203198582761505852,"ownsPercentage":0.0024730381067089822,"assignedToPhysical":"5UnDHhImGGus"},{"name":"rjKzLkrjnadI","upper":1270538940193839560,"ownsPercentage":0.00365052809120433,"assignedToPhysical":"10HAiobndwqQ"},{"name":"shwsWnArjWTe","upper":1370658063932621614,"ownsPercentage":0.005427468573246627,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"Un0x4I8BmBxf","upper":1399326713180590975,"ownsPercentage":0.0015541305898436652,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"MaWgxbZRgif0","upper":1403710789048654944,"ownsPercentage":0.00023766122902481144,"assignedToPhysical":"10HAiobndwqQ"},{"name":"si39KY4GMFCu","upper":1452671883616463620,"ownsPercentage":0.0026541862548843196,"assignedToPhysical":"10HAiobndwqQ"},{"name":"EcEpTU5Z1N2m","upper":1457938633566877424,"ownsPercentage":0.00028551108690882845,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"MKp04TysWuOK","upper":1489663018037007097,"ownsPercentage":0.0017197823281639994,"assignedToPhysical":"10HAiobndwqQ"},{"name":"5G1XAnbIxrsI","upper":1500937357544703297,"ownsPercentage":0.0006111831693791686,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"A6uDhUMxgGzC","upper":1534480789179842443,"ownsPercentage":0.0018183930725718428,"assignedToPhysical":"10HAiobndwqQ"},{"name":"R8oWsRMA8Z9G","upper":1636736621003898917,"ownsPercentage":0.005543299750647721,"assignedToPhysical":"10HAi
obndwqQ"},{"name":"6mcznERQIGgE","upper":1672314219256841825,"ownsPercentage":0.0019286654658828594,"assignedToPhysical":"5UnDHhImGGus"},{"name":"iHFJaeDAyc3y","upper":1824597043576342080,"ownsPercentage":0.00825526844797153,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"jv8aLeLUQAs3","upper":1845224475960403555,"ownsPercentage":0.001118215350179865,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"uoVfvn3j1vVs","upper":1849612883575629432,"ownsPercentage":0.00023789605350899137,"assignedToPhysical":"10HAiobndwqQ"},{"name":"fnaBlG1IB2tO","upper":2005466986221275098,"ownsPercentage":0.008448867833959392,"assignedToPhysical":"5UnDHhImGGus"},{"name":"Ec4HWsyJ98Pj","upper":2032295005301287389,"ownsPercentage":0.0014543498285015945,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"iZr9zB8YgfBS","upper":2141312756521615279,"ownsPercentage":0.005909864135628187,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"evXAOdPjRLLO","upper":2198349852896446041,"ownsPercentage":0.00309198719009283,"assignedToPhysical":"10HAiobndwqQ"},{"name":"BpVhOJVXrM9b","upper":2253577169221936466,"ownsPercentage":0.002993878817032044,"assignedToPhysical":"10HAiobndwqQ"},{"name":"9DuGADJSRpBx","upper":2257868273525488385,"ownsPercentage":0.0002326212304136444,"assignedToPhysical":"5UnDHhImGGus"},{"name":"asfBwHMBj3DG","upper":2290812012486862222,"ownsPercentage":0.0017858836675858434,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"hiizguisdpTg","upper":2311656255664796327,"ownsPercentage":0.0011299686868666155,"assignedToPhysical":"5UnDHhImGGus"},{"name":"Ib0bC21DkrSY","upper":2330298822826987746,"ownsPercentage":0.0010106155908977431,"assignedToPhysical":"10HAiobndwqQ"},{"name":"I6lAuYCU2JkE","upper":2364963278411676435,"ownsPercentage":0.0018791639026473376,"assignedToPhysical":"5UnDHhImGGus"},{"name":"uLUgLpz1cxlj","upper":2368186543570048126,"ownsPercentage":0.00017473355436017104,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"MVHggjKS2WIh","upper":2388232167176925479,"ownsPercentage":0.001086675433170157,"a
ssignedToPhysical":"5UnDHhImGGus"},{"name":"qhFDFwxgRnVT","upper":2399872610948814081,"ownsPercentage":0.0006310297213088491,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"meZAtRpcJSGj","upper":2471390138583843854,"ownsPercentage":0.003876972941634569,"assignedToPhysical":"10HAiobndwqQ"},{"name":"K2KoDH0smQno","upper":2490612946772682019,"ownsPercentage":0.0010420705199805241,"assignedToPhysical":"10HAiobndwqQ"},{"name":"6OVVNvUPztYL","upper":2564700834413600126,"ownsPercentage":0.0040163124367572686,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"zLp00SBesBQM","upper":2599202103553865989,"ownsPercentage":0.001870317547769167,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"UtdVz5Nl280P","upper":2610648603275380327,"ownsPercentage":0.0006205159932710283,"assignedToPhysical":"5UnDHhImGGus"},{"name":"4ZKvAdv7y3W3","upper":2766853347457801461,"ownsPercentage":0.008467876149756172,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"HYBYL0oNGwfa","upper":2768980890630006328,"ownsPercentage":0.00011533434646806092,"assignedToPhysical":"5UnDHhImGGus"},{"name":"Kdpf7oo9rHRd","upper":2818247286096185879,"ownsPercentage":0.002670736649748094,"assignedToPhysical":"5UnDHhImGGus"},{"name":"bd7afitI4wfb","upper":2886637236964887853,"ownsPercentage":0.0037074266654011797,"assignedToPhysical":"5UnDHhImGGus"},{"name":"XIK3cSdZyVhk","upper":2964061555958206534,"ownsPercentage":0.004197180742788341,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"wkkemoIF2SPl","upper":2978179529135431872,"ownsPercentage":0.0007653368594919895,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"l7bTeARR9sxT","upper":3050093338750210702,"ownsPercentage":0.0038984554308026083,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"A9NHb9n0ZxAu","upper":3055274346561046711,"ownsPercentage":0.0002808629962086384,"assignedToPhysical":"5UnDHhImGGus"},{"name":"8BgRFjMgSMe4","upper":3057496754161327476,"ownsPercentage":0.0001204769574186351,"assignedToPhysical":"10HAiobndwqQ"},{"name":"fq4nrOTRJSFl","upper":3062424065410077862,"ownsPercenta
ge":0.0002671100780203716,"assignedToPhysical":"5UnDHhImGGus"},{"name":"uImQxXnTgMUt","upper":3219508588015719193,"ownsPercentage":0.008515569033644234,"assignedToPhysical":"5UnDHhImGGus"},{"name":"3KocyAIgLFK9","upper":3284826999312512617,"ownsPercentage":0.0035409181715642573,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"t0KeI7GeoLhD","upper":3299284401243240197,"ownsPercentage":0.0007837373290895484,"assignedToPhysical":"5UnDHhImGGus"},{"name":"Vbw2kZ8Pzb10","upper":3328043137210732710,"ownsPercentage":0.0015590142006946198,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"ZpSrDlljAmhU","upper":3351786810141955740,"ownsPercentage":0.0012871470887408638,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"UixcolZ7TGh7","upper":3412825665246073188,"ownsPercentage":0.003308922965495603,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"jUJHmkXSdjJM","upper":3467954473586604049,"ownsPercentage":0.002988538688467028,"assignedToPhysical":"10HAiobndwqQ"},{"name":"VawtjdDTNeP4","upper":3475360162886164233,"ownsPercentage":0.00040146322136679026,"assignedToPhysical":"5UnDHhImGGus"},{"name":"gtk7Xsi7whtY","upper":3528953607002574204,"ownsPercentage":0.002905306427099608,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"nMusHd6e2Fkf","upper":3574259978294764044,"ownsPercentage":0.0024560633091213558,"assignedToPhysical":"10HAiobndwqQ"},{"name":"iH2Nxldim4CZ","upper":3603054592879595105,"ownsPercentage":0.0015609591844378313,"assignedToPhysical":"5UnDHhImGGus"},{"name":"7dAlRglk4gUq","upper":3675694468025527315,"ownsPercentage":0.003937815522114775,"assignedToPhysical":"10HAiobndwqQ"},{"name":"6MSMwtkaDKYl","upper":3762304686931209074,"ownsPercentage":0.004695149374849264,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"Ytl1gLqLrmUF","upper":3823342486002833461,"ownsPercentage":0.003308865717859443,"assignedToPhysical":"5UnDHhImGGus"},{"name":"8Di0Ol5egcop","upper":3881547047175440066,"ownsPercentage":0.003155275583595276,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"maRmPi8FWELR","upper":39553641
29023922481,"ownsPercentage":0.0040016320253332465,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"wJbYdeQm8NxO","upper":4044968890060555644,"ownsPercentage":0.004857483829048108,"assignedToPhysical":"5UnDHhImGGus"},{"name":"aTgCTGMPZTPv","upper":4049380745841614516,"ownsPercentage":0.00023916718112583805,"assignedToPhysical":"10HAiobndwqQ"},{"name":"lGwgDbeyOBH7","upper":4111968510537073276,"ownsPercentage":0.003392889522691397,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"DSeQOWKAAfPy","upper":4204527092302830545,"ownsPercentage":0.005017610771630561,"assignedToPhysical":"10HAiobndwqQ"},{"name":"10sk2R5hdJ3w","upper":4224041044327154580,"ownsPercentage":0.0010578534589275013,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"IeTvnejJYwTE","upper":4235756052132322162,"ownsPercentage":0.0006350718456523667,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"nPdFzMMAl8e1","upper":4236420495176881018,"ownsPercentage":0.000036019529620179725,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"LKWfqTsgLJ2h","upper":4263385304921123528,"ownsPercentage":0.0014617652652683015,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"vPsB4hMU3u1Q","upper":4292354302930656194,"ownsPercentage":0.0015704125288331784,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"FRl5xH4a7yhi","upper":4322318686813382506,"ownsPercentage":0.001624372505142075,"assignedToPhysical":"5UnDHhImGGus"},{"name":"3zdVkbUS6vRy","upper":4343664613540309444,"ownsPercentage":0.0011571650065525288,"assignedToPhysical":"10HAiobndwqQ"},{"name":"FrFb8tNKNyUW","upper":4496485165952040014,"ownsPercentage":0.008284418746261658,"assignedToPhysical":"10HAiobndwqQ"},{"name":"u2NQ8vav4GGA","upper":4532019006561781396,"ownsPercentage":0.0019262933592917625,"assignedToPhysical":"5UnDHhImGGus"},{"name":"itKBOdSzdcD2","upper":4537357993340643413,"ownsPercentage":0.0002894270532256792,"assignedToPhysical":"5UnDHhImGGus"},{"name":"f2c8q7U2y0Wc","upper":4555516035749608803,"ownsPercentage":0.00098434945139421,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"EV
mTAbpjGR6I","upper":4572129041729378275,"ownsPercentage":0.0009005928587390369,"assignedToPhysical":"5UnDHhImGGus"},{"name":"xNWB5azVw7oD","upper":4613011128090221427,"ownsPercentage":0.0022162223424083078,"assignedToPhysical":"10HAiobndwqQ"},{"name":"Pqolehn1Kgzx","upper":4651985171511437890,"ownsPercentage":0.0021127871273913636,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"Ua0wIIuK7MVF","upper":4696630627126192084,"ownsPercentage":0.002420234998456083,"assignedToPhysical":"10HAiobndwqQ"},{"name":"3C5qZO15G38a","upper":4784907021305312069,"ownsPercentage":0.004785472917409431,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"h03hg1MFr8UK","upper":4796223604197399554,"ownsPercentage":0.0006134731878356771,"assignedToPhysical":"5UnDHhImGGus"},{"name":"THznNzcBVet8","upper":4915944216646993407,"ownsPercentage":0.006490067405457239,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"ioHJiTM0L2b0","upper":4957842579607055296,"ownsPercentage":0.0022713148072442645,"assignedToPhysical":"10HAiobndwqQ"},{"name":"aV4XlXJRhzIX","upper":4993935340105560932,"ownsPercentage":0.00195659246717394,"assignedToPhysical":"10HAiobndwqQ"},{"name":"0qi2orWv2yBQ","upper":5024894406509825712,"ownsPercentage":0.0016782943526813434,"assignedToPhysical":"10HAiobndwqQ"},{"name":"CGR9oTerTPfV","upper":5158458025247331609,"ownsPercentage":0.007240498280011476,"assignedToPhysical":"5UnDHhImGGus"},{"name":"97FKS8uAgBGZ","upper":5174323415549741130,"ownsPercentage":0.0008600645316601428,"assignedToPhysical":"10HAiobndwqQ"},{"name":"BhoBBmk2O5Fx","upper":5187434264602339602,"ownsPercentage":0.000710740551297839,"assignedToPhysical":"10HAiobndwqQ"},{"name":"nXTYGKAWzMD3","upper":5195193606611309874,"ownsPercentage":0.00042063477315918037,"assignedToPhysical":"10HAiobndwqQ"},{"name":"WqDu1P0hsKi2","upper":5224777598831476475,"ownsPercentage":0.0016037514317949445,"assignedToPhysical":"5UnDHhImGGus"},{"name":"PT37knJ3yCVo","upper":5375620358405560754,"ownsPercentage":0.00817720238169654,"assignedToPhysical":"5U
nDHhImGGus"},{"name":"f8XCDnms7AuS","upper":5495283831850589643,"ownsPercentage":0.00648696989381309,"assignedToPhysical":"5UnDHhImGGus"},{"name":"7jZlJg79Tx3C","upper":5506419823422963041,"ownsPercentage":0.0006036833127773753,"assignedToPhysical":"10HAiobndwqQ"},{"name":"Uvrp2Des8Ab3","upper":5542895169334733013,"ownsPercentage":0.001977332463985063,"assignedToPhysical":"10HAiobndwqQ"},{"name":"k0raV8wB89r3","upper":5552370617218929180,"ownsPercentage":0.0005136650590659331,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"MWsu1YNw7GyZ","upper":5663161801830126264,"ownsPercentage":0.0060060021523851235,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"qjwhfQLQXRtk","upper":5698664742506073073,"ownsPercentage":0.0019246182705242756,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"opTmmfmor5Xi","upper":5721258211780544165,"ownsPercentage":0.0012247944235683026,"assignedToPhysical":"10HAiobndwqQ"},{"name":"gLBhIXTrzmM5","upper":5774022938275618813,"ownsPercentage":0.0028603815548281695,"assignedToPhysical":"5UnDHhImGGus"},{"name":"dtl4DVfMpjM2","upper":5819402277572133063,"ownsPercentage":0.0024600189125618785,"assignedToPhysical":"10HAiobndwqQ"},{"name":"3CBHlZozw0Z1","upper":5827388583406330893,"ownsPercentage":0.0004329385067785473,"assignedToPhysical":"5UnDHhImGGus"},{"name":"PzFLgH6tNojh","upper":5831627539924522089,"ownsPercentage":0.00022979429330472422,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"pZxtjHxpnAP7","upper":5872740343819337424,"ownsPercentage":0.0022287295649864648,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"81Tr7kFVuWFO","upper":5894639010637646722,"ownsPercentage":0.0011871291069473584,"assignedToPhysical":"10HAiobndwqQ"},{"name":"WadYBzIWkqkJ","upper":5920889332354674332,"ownsPercentage":0.0014230327917022375,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"9UENJD0jVfQJ","upper":5950482002594647221,"ownsPercentage":0.001604221868191287,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"cJnMLj5AELbO","upper":5962987215025360822,"ownsPercentage":0.000677908924238
621,"assignedToPhysical":"10HAiobndwqQ"},{"name":"KISjTsevB5iE","upper":5986387043408926511,"ownsPercentage":0.0012685072384624944,"assignedToPhysical":"10HAiobndwqQ"},{"name":"IfrQL0OScc3u","upper":6084330577947103338,"ownsPercentage":0.005309529646360018,"assignedToPhysical":"5UnDHhImGGus"},{"name":"tfavKaxeSt8e","upper":6101649858778365729,"ownsPercentage":0.0009388800951570619,"assignedToPhysical":"5UnDHhImGGus"},{"name":"Q8PJeKnlygDF","upper":6120544210147852964,"ownsPercentage":0.0010242648401251264,"assignedToPhysical":"10HAiobndwqQ"},{"name":"1L72JpPhK3AH","upper":6139009236851798467,"ownsPercentage":0.0010009911033710284,"assignedToPhysical":"5UnDHhImGGus"},{"name":"58AEK168LCOI","upper":6203537455014676060,"ownsPercentage":0.0034980817159405236,"assignedToPhysical":"5UnDHhImGGus"},{"name":"ntXhJG4ehh97","upper":6250479863873075726,"ownsPercentage":0.002544753083298985,"assignedToPhysical":"10HAiobndwqQ"},{"name":"EfHluzACLDad","upper":6257881409784994672,"ownsPercentage":0.00040123860787268626,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"Pw1ZyGgzqr4w","upper":6276168982882809802,"ownsPercentage":0.0009913713241069314,"assignedToPhysical":"10HAiobndwqQ"},{"name":"hDhqptRj4wek","upper":6281860672615602714,"ownsPercentage":0.00030854711867037577,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"Nk6OCjX40weL","upper":6284740492439475368,"ownsPercentage":0.0001561153454704777,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"IiRbFljPDBMc","upper":6322365517697796882,"ownsPercentage":0.002039656706244708,"assignedToPhysical":"5UnDHhImGGus"},{"name":"81UYRgmxCxvy","upper":6374289897584535219,"ownsPercentage":0.0028148262739082167,"assignedToPhysical":"10HAiobndwqQ"},{"name":"ULSfcu1j23rT","upper":6442652778775914548,"ownsPercentage":0.003705959215253095,"assignedToPhysical":"5UnDHhImGGus"},{"name":"aGQsUOxlSSI7","upper":6628874745657227247,"ownsPercentage":0.010095113052862145,"assignedToPhysical":"5UnDHhImGGus"},{"name":"xY2JLJ1fxlZP","upper":6635203822280934241,"ownsP
ercentage":0.0003430999312625172,"assignedToPhysical":"5UnDHhImGGus"},{"name":"zO4HfORWFYQx","upper":6699485210543020577,"ownsPercentage":0.0034847010402069105,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"DfDu4yl3r1qn","upper":6727354121803122774,"ownsPercentage":0.001510776706650427,"assignedToPhysical":"10HAiobndwqQ"},{"name":"M3kWMrkyMrGT","upper":6740502144735913698,"ownsPercentage":0.0007127557513810577,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"PquK03kCX2s6","upper":6747123146248742074,"ownsPercentage":0.00035892521121191684,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"CMG5X2zuOP8T","upper":6980966254492504618,"ownsPercentage":0.012676660298932517,"assignedToPhysical":"10HAiobndwqQ"},{"name":"zPbNJqGLV31c","upper":6987529689123558289,"ownsPercentage":0.00035580450429774927,"assignedToPhysical":"10HAiobndwqQ"},{"name":"85iBn44HwzLG","upper":7008758635743647494,"ownsPercentage":0.001150823502253976,"assignedToPhysical":"5UnDHhImGGus"},{"name":"jTVVZexNxBtp","upper":7184765512514992474,"ownsPercentage":0.009541351908394035,"assignedToPhysical":"5UnDHhImGGus"},{"name":"scRBGW6E4EA4","upper":7288564078426028941,"ownsPercentage":0.005626931533081278,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"E7qcPMaKvpuY","upper":7317924845818590019,"ownsPercentage":0.0015916503896428141,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"VxASALmWn2ch","upper":7320272934945058874,"ownsPercentage":0.00012729016660535615,"assignedToPhysical":"5UnDHhImGGus"},{"name":"W1p8DeaOGQwa","upper":7329379115336767830,"ownsPercentage":0.0004936470281867876,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"siSzkkL8ch9H","upper":7365032735264167895,"ownsPercentage":0.0019327866091129811,"assignedToPhysical":"5UnDHhImGGus"},{"name":"h10KwfryF0Hs","upper":7437357468956214082,"ownsPercentage":0.003920731669667602,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"JSuWUzk63Inq","upper":7474332566751363072,"ownsPercentage":0.002004424067868226,"assignedToPhysical":"10HAiobndwqQ"},{"name":"nN3HYdDnNFvW","upper
":7485158231506008231,"ownsPercentage":0.0005868604622793019,"assignedToPhysical":"10HAiobndwqQ"},{"name":"BwQaadmEuOWL","upper":7507090743021187083,"ownsPercentage":0.0011889638316410127,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"Wwn8XeDuTnEq","upper":7528762709048575525,"ownsPercentage":0.0011748396324463297,"assignedToPhysical":"10HAiobndwqQ"},{"name":"f1r8QwYMUnF2","upper":7530763667960730121,"ownsPercentage":0.00010847219998061223,"assignedToPhysical":"10HAiobndwqQ"},{"name":"4mW6gvrMBHur","upper":7545234120228279501,"ownsPercentage":0.0007844447892662416,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"urILYmbzLgFM","upper":7773164834661528343,"ownsPercentage":0.012356148788235075,"assignedToPhysical":"5UnDHhImGGus"},{"name":"TsFT7FF0NLc9","upper":7773394258423625280,"ownsPercentage":0.000012437087064264831,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"04flFfZJX9y4","upper":7854961957136350003,"ownsPercentage":0.004421793807448961,"assignedToPhysical":"5UnDHhImGGus"},{"name":"qvDj32cx0hSO","upper":7883310802927429864,"ownsPercentage":0.0015367940097072666,"assignedToPhysical":"10HAiobndwqQ"},{"name":"xLBqexoUmcoS","upper":7905257767824779877,"ownsPercentage":0.0011897473510584994,"assignedToPhysical":"10HAiobndwqQ"},{"name":"JprYyU27D3SD","upper":7910815885169914432,"ownsPercentage":0.00030130614502621244,"assignedToPhysical":"10HAiobndwqQ"},{"name":"jlzvsigHmfto","upper":7933413828155010817,"ownsPercentage":0.0012250369439072532,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"93O50DIDBjvT","upper":7993784483485729679,"ownsPercentage":0.0032726997831969496,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"YceXk2KF4xnV","upper":7996374436592219689,"ownsPercentage":0.0001404016392346025,"assignedToPhysical":"10HAiobndwqQ"},{"name":"VYlWIOMzTG6Q","upper":8006107196738942655,"ownsPercentage":0.0005276139847678688,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"vSuf6j765jqp","upper":8017216651818234722,"ownsPercentage":0.0006022447666049291,"assignedToPhysical":"5UnDHhImGG
us"},{"name":"NEObTyTfjbkD","upper":8050138084024281148,"ownsPercentage":0.0017846744159564893,"assignedToPhysical":"10HAiobndwqQ"},{"name":"ndz1d2vCwoIq","upper":8066848342316863678,"ownsPercentage":0.0009058649171805948,"assignedToPhysical":"5UnDHhImGGus"},{"name":"vQ8Nr3MMIvqX","upper":8069602844311102736,"ownsPercentage":0.00014932185231348206,"assignedToPhysical":"5UnDHhImGGus"},{"name":"uTwHaNDD5s4i","upper":8130579194898972165,"ownsPercentage":0.00330553458888029,"assignedToPhysical":"10HAiobndwqQ"},{"name":"mdW6a1iyMMQB","upper":8250566081955192821,"ownsPercentage":0.006504502180806364,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"S7pN5St5UtGx","upper":8340682427773445817,"ownsPercentage":0.004885216895630245,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"ofCP6twUePhY","upper":8447921142937480046,"ownsPercentage":0.005813422397770006,"assignedToPhysical":"5UnDHhImGGus"},{"name":"RAKQqUg9tkVy","upper":8504599523728357916,"ownsPercentage":0.0030725411793215236,"assignedToPhysical":"10HAiobndwqQ"},{"name":"6QrhJx5gbnzD","upper":8513094344746041558,"ownsPercentage":0.0004605051701124064,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"OejkNdX5mxtz","upper":8539594040314563465,"ownsPercentage":0.0014365513752797974,"assignedToPhysical":"5UnDHhImGGus"},{"name":"6lS4e7DEBTSK","upper":8575073699044782028,"ownsPercentage":0.001923356153717363,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"xp4mWs2aVMbB","upper":8602429679782843757,"ownsPercentage":0.001482970687333907,"assignedToPhysical":"5UnDHhImGGus"},{"name":"tHpmYN4knZgr","upper":8635392688536345460,"ownsPercentage":0.0017869282851102623,"assignedToPhysical":"5UnDHhImGGus"},{"name":"2AmfpIT0zs3p","upper":8670860499000032734,"ownsPercentage":0.0019227138579016923,"assignedToPhysical":"10HAiobndwqQ"},{"name":"mA1JyNKe3vCB","upper":8676513197675841415,"ownsPercentage":0.0003064334092358853,"assignedToPhysical":"5UnDHhImGGus"},{"name":"jPIF7y5qI3fh","upper":8706429952052375065,"ownsPercentage":0.0016217905044376502,"assig
nedToPhysical":"5UnDHhImGGus"},{"name":"gbdKhYN535Uf","upper":8799657348866550157,"ownsPercentage":0.005053867308054841,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"8FlXfnU8F53r","upper":8899248676446159866,"ownsPercentage":0.005398856686126419,"assignedToPhysical":"10HAiobndwqQ"},{"name":"M8Xxly7aXuVg","upper":9005461565059215222,"ownsPercentage":0.005757812229011776,"assignedToPhysical":"10HAiobndwqQ"},{"name":"2hY6C71VJZa6","upper":9067556512448386258,"ownsPercentage":0.003366173842985617,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"comyKg1nNYkV","upper":9070826503298640431,"ownsPercentage":0.0001772665591926648,"assignedToPhysical":"10HAiobndwqQ"},{"name":"XS08CGL3mnm6","upper":9127646470634449826,"ownsPercentage":0.003080216601301997,"assignedToPhysical":"10HAiobndwqQ"},{"name":"CXmIRbTMCrVg","upper":9160013201600391890,"ownsPercentage":0.0017546040014764118,"assignedToPhysical":"10HAiobndwqQ"},{"name":"JQoplEib8uSu","upper":9242936734742863962,"ownsPercentage":0.004495293739162097,"assignedToPhysical":"10HAiobndwqQ"},{"name":"3XGWS3W2ZWY1","upper":9275480562589952859,"ownsPercentage":0.0017642044426404019,"assignedToPhysical":"5UnDHhImGGus"},{"name":"SliMd6ggZIIT","upper":9291129844433481316,"ownsPercentage":0.0008483492686295756,"assignedToPhysical":"5UnDHhImGGus"},{"name":"3W0h1o0dwd3u","upper":9351332456188418356,"ownsPercentage":0.0032635901227002053,"assignedToPhysical":"10HAiobndwqQ"},{"name":"z6T8lD2o39SQ","upper":9370501884911054932,"ownsPercentage":0.0010391768133194302,"assignedToPhysical":"10HAiobndwqQ"},{"name":"ptSPQhrhvHUx","upper":9557106538402328786,"ownsPercentage":0.010115858535557194,"assignedToPhysical":"5UnDHhImGGus"},{"name":"Fbu1Q7R1XE6Q","upper":9581667180131361575,"ownsPercentage":0.0013314350560127742,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"fgTWGQMwfW8m","upper":9616806305989840223,"ownsPercentage":0.0019048958297502058,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"WCrjXqYEOGO3","upper":9633001025346879568,"ownsPercentage":0.0
008779174954847554,"assignedToPhysical":"5UnDHhImGGus"},{"name":"tq85nEKxsXd6","upper":9753345111991751138,"ownsPercentage":0.006523866009307677,"assignedToPhysical":"5UnDHhImGGus"},{"name":"zY4rPaerd1ys","upper":9769847995880917641,"ownsPercentage":0.0008946231282455177,"assignedToPhysical":"5UnDHhImGGus"},{"name":"OXKlgrTvR2oT","upper":9784001926533888562,"ownsPercentage":0.0007672861181580123,"assignedToPhysical":"10HAiobndwqQ"},{"name":"qJn82gKq9WyS","upper":9804919859287427473,"ownsPercentage":0.001133963406764629,"assignedToPhysical":"5UnDHhImGGus"},{"name":"AmEGOYK67uL5","upper":9837139302196143865,"ownsPercentage":0.0017466194998951496,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"cYXwzjMjpL2p","upper":9844833667801973668,"ownsPercentage":0.00041711239528692084,"assignedToPhysical":"10HAiobndwqQ"},{"name":"Ol4Jrf6D4hI8","upper":10005914080522808168,"ownsPercentage":0.008732186670839522,"assignedToPhysical":"10HAiobndwqQ"},{"name":"EAnJxQirsjKw","upper":10253565889542214993,"ownsPercentage":0.013425231467940306,"assignedToPhysical":"10HAiobndwqQ"},{"name":"UdvpOIbYGk3E","upper":10281661603144781072,"ownsPercentage":0.0015230716862716341,"assignedToPhysical":"10HAiobndwqQ"},{"name":"uMkCwD02ZjTb","upper":10369513924269020804,"ownsPercentage":0.004762483871039744,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"wzCL9BumCknu","upper":10436218359642224243,"ownsPercentage":0.003616054687302305,"assignedToPhysical":"5UnDHhImGGus"},{"name":"xeqIBs9YD2cA","upper":10556580804488844452,"ownsPercentage":0.006524861209418617,"assignedToPhysical":"10HAiobndwqQ"},{"name":"rohnBjf5J9D8","upper":10561798726114405432,"ownsPercentage":0.00028286409811461547,"assignedToPhysical":"5UnDHhImGGus"},{"name":"5dkf0GDkoFS3","upper":10565577061354284927,"ownsPercentage":0.0002048239637727944,"assignedToPhysical":"5UnDHhImGGus"},{"name":"1tkGefsVhdMZ","upper":10640136392647408019,"ownsPercentage":0.004041869448353525,"assignedToPhysical":"5UnDHhImGGus"},{"name":"E4OaMLfzLI3l","upper":10734
061622421226437,"ownsPercentage":0.0050916969085987055,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"FLYoUFplOmxQ","upper":10799040812602198121,"ownsPercentage":0.0035225289580279127,"assignedToPhysical":"10HAiobndwqQ"},{"name":"LE68IkTZ1KBq","upper":10816649793406241553,"ownsPercentage":0.0009545847621499717,"assignedToPhysical":"10HAiobndwqQ"},{"name":"vBUqaK2WYdna","upper":10915645950426407919,"ownsPercentage":0.005366592425449025,"assignedToPhysical":"5UnDHhImGGus"},{"name":"k7s6GojJDHw4","upper":11001855601817753716,"ownsPercentage":0.004673434566385755,"assignedToPhysical":"10HAiobndwqQ"},{"name":"DUFmLbD5AQdB","upper":11008974609608993012,"ownsPercentage":0.00038592218566014386,"assignedToPhysical":"10HAiobndwqQ"},{"name":"a6g520corlxT","upper":11021271569193417168,"ownsPercentage":0.0006666195148199558,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"RBxCMlncsWP4","upper":11052047453477516654,"ownsPercentage":0.001668364030049158,"assignedToPhysical":"5UnDHhImGGus"},{"name":"90E1fTakTcmi","upper":11188766378182331430,"ownsPercentage":0.007411547759242115,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"wBKHrHzpqkXI","upper":11243260275836301483,"ownsPercentage":0.0029541201111818534,"assignedToPhysical":"10HAiobndwqQ"},{"name":"YZq35jzzcmLB","upper":11356751102912206819,"ownsPercentage":0.006152350063643664,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"6BoSzEVYohm2","upper":11467132392555628345,"ownsPercentage":0.005983781701657466,"assignedToPhysical":"5UnDHhImGGus"},{"name":"RlZ9POMVYbei","upper":11534987860118061834,"ownsPercentage":0.003678452267310503,"assignedToPhysical":"5UnDHhImGGus"},{"name":"mYSXTeMJNg5X","upper":11584157949447943989,"ownsPercentage":0.0026655158836382275,"assignedToPhysical":"10HAiobndwqQ"},{"name":"a0D06UHaaNTQ","upper":11686754521353283792,"ownsPercentage":0.0055617713074667344,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"uydQQfk9x5dv","upper":11719597803869881397,"ownsPercentage":0.0017804379128025154,"assignedToPhysical":"5UnDHhImG
Gus"},{"name":"WmgtIhiclMxv","upper":11783499227615313778,"ownsPercentage":0.0034641031224857295,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"yPsCEegMk4Q2","upper":11785036598019323645,"ownsPercentage":0.00008334101659712077,"assignedToPhysical":"10HAiobndwqQ"},{"name":"hVoXGtgS5V4b","upper":11822411649275226527,"ownsPercentage":0.002026105588420349,"assignedToPhysical":"10HAiobndwqQ"},{"name":"rX2vYIXPVa18","upper":11839341004889018262,"ownsPercentage":0.0009177422067626335,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"VEUsNjgJ1M4k","upper":11891663786092253089,"ownsPercentage":0.0028364236525515456,"assignedToPhysical":"10HAiobndwqQ"},{"name":"7KyVTpba2SB8","upper":12025520187007287877,"ownsPercentage":0.007256370033658569,"assignedToPhysical":"5UnDHhImGGus"},{"name":"0yiZLUGHfCfg","upper":12054389308461989689,"ownsPercentage":0.0015649982099467795,"assignedToPhysical":"10HAiobndwqQ"},{"name":"w54br5SJaUNd","upper":12070914180643214570,"ownsPercentage":0.0008958151159464646,"assignedToPhysical":"10HAiobndwqQ"},{"name":"KhlfU1eDGz8J","upper":12112182337465455065,"ownsPercentage":0.002237151264057282,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"w0MlBKADJImt","upper":12241122797128861048,"ownsPercentage":0.006989876324417216,"assignedToPhysical":"5UnDHhImGGus"},{"name":"TR2Tz7fHYqme","upper":12287552841407224373,"ownsPercentage":0.0025169777437599843,"assignedToPhysical":"10HAiobndwqQ"},{"name":"jcaPi3kHStaK","upper":12488886731702542352,"ownsPercentage":0.010914332062657099,"assignedToPhysical":"10HAiobndwqQ"},{"name":"L215BrEJZez6","upper":12528467364563959494,"ownsPercentage":0.0021456704068349808,"assignedToPhysical":"10HAiobndwqQ"},{"name":"SF2wURCU9LAf","upper":12560487892291319550,"ownsPercentage":0.0017358362863068052,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"VhJWJyF49q7N","upper":12605819633176064544,"ownsPercentage":0.002457438597489524,"assignedToPhysical":"5UnDHhImGGus"},{"name":"8irbpjVkihDh","upper":12607449777738158082,"ownsPercentage":0.00008837
031378436226,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"vxlhQDXcCDoc","upper":12623878231076967528,"ownsPercentage":0.000890588240025697,"assignedToPhysical":"5UnDHhImGGus"},{"name":"tf20ZJKNTYuN","upper":12632610065963450389,"ownsPercentage":0.0004733537176854718,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"18xE7smej6jn","upper":12818842642211020687,"ownsPercentage":0.010095688187759404,"assignedToPhysical":"10HAiobndwqQ"},{"name":"Xaqm44zLtfaI","upper":12839153738477675750,"ownsPercentage":0.001101066734893482,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"AVApjgCPtUh6","upper":12939045257719913228,"ownsPercentage":0.005415130108765572,"assignedToPhysical":"5UnDHhImGGus"},{"name":"kC8TDVP2qpDU","upper":12941012840812649932,"ownsPercentage":0.00010666289318454411,"assignedToPhysical":"5UnDHhImGGus"},{"name":"lUb5dLRn4Q7i","upper":12975402347204344357,"ownsPercentage":0.0018642588770289618,"assignedToPhysical":"5UnDHhImGGus"},{"name":"VQjkAf5kPk7D","upper":13015759331929071073,"ownsPercentage":0.0021877565256756513,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"6DXk6xSSav8y","upper":13034693447105073367,"ownsPercentage":0.0010264204403956224,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"vCn9VOgxbsvC","upper":13049209090978829828,"ownsPercentage":0.0007868946311476329,"assignedToPhysical":"10HAiobndwqQ"},{"name":"0s0krALwyqS4","upper":13077149257960491748,"ownsPercentage":0.0015146394870562806,"assignedToPhysical":"5UnDHhImGGus"},{"name":"pgxcn0O1NKoW","upper":13176230253235278906,"ownsPercentage":0.0053711915164475115,"assignedToPhysical":"10HAiobndwqQ"},{"name":"gpqm1I3ZXfFw","upper":13232741920389130436,"ownsPercentage":0.0030635036149491775,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"04uFLfQjFQ5M","upper":13283819730558978214,"ownsPercentage":0.0027689336375975575,"assignedToPhysical":"10HAiobndwqQ"},{"name":"OH4vVMfkcJJp","upper":13344918619775115132,"ownsPercentage":0.0033121774212293404,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"xQhsuMpqsB9Y","upper":13
434661038335714055,"ownsPercentage":0.004864946258375241,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"cTPggh41RFbY","upper":13540465127958032066,"ownsPercentage":0.00573565119131841,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"ThChEv0oUVF8","upper":13608899141929009629,"ownsPercentage":0.0037098153309618616,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"tcrH84kQAesz","upper":13751466221448629825,"ownsPercentage":0.007728576867004294,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"bFNikojhtkrf","upper":13791329741974145281,"ownsPercentage":0.0021610057778342177,"assignedToPhysical":"5UnDHhImGGus"},{"name":"AJexcI7np4a6","upper":13794193112533578183,"ownsPercentage":0.00015522362905840933,"assignedToPhysical":"5UnDHhImGGus"},{"name":"KoUT5JzcEigR","upper":13800391291566166029,"ownsPercentage":0.00033600395862929224,"assignedToPhysical":"10HAiobndwqQ"},{"name":"c3Fmgjy0kiGM","upper":13807991780268228783,"ownsPercentage":0.0004120233181363985,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"hK5JiEF1Odsf","upper":13822883361746399217,"ownsPercentage":0.0008072742495188642,"assignedToPhysical":"5UnDHhImGGus"},{"name":"bYeSTo07NtUQ","upper":14015079807354872646,"ownsPercentage":0.01041899019363495,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"aRIS8UTR2sMs","upper":14090228842807991479,"ownsPercentage":0.004073837374923082,"assignedToPhysical":"10HAiobndwqQ"},{"name":"rkAx8oOGiGp5","upper":14101487892057637593,"ownsPercentage":0.0006103542828293803,"assignedToPhysical":"5UnDHhImGGus"},{"name":"cOB2G6rQQDMp","upper":14213755249803946573,"ownsPercentage":0.006086025658387776,"assignedToPhysical":"5UnDHhImGGus"},{"name":"ngQi7Ikjaotp","upper":14228350724118087400,"ownsPercentage":0.0007912222479923932,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"sTxMnH5hC27G","upper":14241811106918739693,"ownsPercentage":0.0007296888137476867,"assignedToPhysical":"10HAiobndwqQ"},{"name":"hsdU1XjrtTL0","upper":14243251849196632867,"ownsPercentage":0.00007810279538417468,"assignedToPhysical":"5UnDH
hImGGus"},{"name":"xWK39CVQOq7c","upper":14260628115736981572,"ownsPercentage":0.0009419692966366622,"assignedToPhysical":"5UnDHhImGGus"},{"name":"N2liOQZHORRd","upper":14295405224153487552,"ownsPercentage":0.0018852708248969852,"assignedToPhysical":"10HAiobndwqQ"},{"name":"gzKXC2EEuP0y","upper":14373776598404986114,"ownsPercentage":0.004248520711207463,"assignedToPhysical":"5UnDHhImGGus"},{"name":"HnNcLsgB4Qle","upper":14436355817013137488,"ownsPercentage":0.0033924262383701513,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"g7Up5jauHf3w","upper":14449061804126616310,"ownsPercentage":0.0006887929416003281,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"qPo0bL1DanzJ","upper":14466288443411053553,"ownsPercentage":0.0009338579868405498,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"e2bHBdxkqCWf","upper":14522703947576021700,"ownsPercentage":0.0030582906088761743,"assignedToPhysical":"5UnDHhImGGus"},{"name":"SQ4Xh9ZRBYnc","upper":14646673551193670540,"ownsPercentage":0.006720405678221086,"assignedToPhysical":"5UnDHhImGGus"},{"name":"zcHMGe7BCirf","upper":14679858498593792044,"ownsPercentage":0.0017989596032514463,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"wKvtkC7ISgqS","upper":14689830528749515078,"ownsPercentage":0.0005405848379462938,"assignedToPhysical":"10HAiobndwqQ"},{"name":"PeiLrsaz0A1Z","upper":14714795221021913078,"ownsPercentage":0.0013533386798582998,"assignedToPhysical":"5UnDHhImGGus"},{"name":"Dr8dRry1VNRG","upper":14775254950618810772,"ownsPercentage":0.0032775285088421317,"assignedToPhysical":"10HAiobndwqQ"},{"name":"0Ym87WlhMWSH","upper":14833826949240357903,"ownsPercentage":0.0031751944076149684,"assignedToPhysical":"10HAiobndwqQ"},{"name":"pJQ37OFnd0h0","upper":14860665055930654367,"ownsPercentage":0.0014548966789508589,"assignedToPhysical":"10HAiobndwqQ"},{"name":"3xb5qWUIdy3l","upper":14949938709884717460,"ownsPercentage":0.004839534478135717,"assignedToPhysical":"5UnDHhImGGus"},{"name":"P1ib1Jb4kcVl","upper":14974180759437663598,"ownsPercentage":0.00
13141641395402726,"assignedToPhysical":"10HAiobndwqQ"},{"name":"6t35oHrP3E89","upper":15157689149190409377,"ownsPercentage":0.009948009741962183,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"IFXmevuG7Zrh","upper":15179330466362727829,"ownsPercentage":0.001173178154683776,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"Ij8HLoZKUIGh","upper":15222784680299792327,"ownsPercentage":0.0023556576577107606,"assignedToPhysical":"10HAiobndwqQ"},{"name":"ZhRSfF5kiST2","upper":15269456053544292690,"ownsPercentage":0.002530060213228457,"assignedToPhysical":"10HAiobndwqQ"},{"name":"AYNqxnC70vWf","upper":15311513209867171019,"ownsPercentage":0.0022799230126913576,"assignedToPhysical":"5UnDHhImGGus"},{"name":"YhUygFdgZypN","upper":15313859598534298546,"ownsPercentage":0.0001271979845197516,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"0suDv1qp3ndv","upper":15378307640662597300,"ownsPercentage":0.003493735364396941,"assignedToPhysical":"5UnDHhImGGus"},{"name":"PPBVWwBeAt88","upper":15385273392099262806,"ownsPercentage":0.00037761414203133826,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"wXr2DKiiO4rQ","upper":15398475090070389854,"ownsPercentage":0.0007156654810396711,"assignedToPhysical":"10HAiobndwqQ"},{"name":"HKmvdMeJ7ZUM","upper":15401238888200171185,"ownsPercentage":0.00014982579683101466,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"48RZ8vWnLnYd","upper":15410079619103447657,"ownsPercentage":0.00047925698258460435,"assignedToPhysical":"10HAiobndwqQ"},{"name":"oeibhOxhslJ4","upper":15414157861976474291,"ownsPercentage":0.0002210819891429501,"assignedToPhysical":"10HAiobndwqQ"},{"name":"Cqju2FPfqDzN","upper":15424911264355003597,"ownsPercentage":0.0005829431110206132,"assignedToPhysical":"10HAiobndwqQ"},{"name":"H6BtD1FRUcEb","upper":15510388510461004250,"ownsPercentage":0.004633730796310202,"assignedToPhysical":"5UnDHhImGGus"},{"name":"94dJ1MbkYawY","upper":15520430127780613983,"ownsPercentage":0.000544357165659447,"assignedToPhysical":"5UnDHhImGGus"},{"name":"KcyNwToNc5uf","uppe
r":15531135481528689161,"ownsPercentage":0.0005803383895444473,"assignedToPhysical":"10HAiobndwqQ"},{"name":"IXPatqSYHjfd","upper":15577751823830575066,"ownsPercentage":0.002527076979851631,"assignedToPhysical":"5UnDHhImGGus"},{"name":"Jrt8yXZwyfFi","upper":15630618935322638641,"ownsPercentage":0.0028659318566364353,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"OyAthRrzjE9C","upper":15636043160678880161,"ownsPercentage":0.00029404784576440076,"assignedToPhysical":"10HAiobndwqQ"},{"name":"LQdu95vAEz5N","upper":15694615554822090470,"ownsPercentage":0.003175215848887293,"assignedToPhysical":"10HAiobndwqQ"},{"name":"2iX5gmw2cSrS","upper":15834882014397728570,"ownsPercentage":0.0076038600099378505,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"wAmFbdhLWtEt","upper":15923976449109514542,"ownsPercentage":0.004829818983544314,"assignedToPhysical":"5UnDHhImGGus"},{"name":"JPenxeL41DVm","upper":16062655725361741270,"ownsPercentage":0.007517818629569082,"assignedToPhysical":"5UnDHhImGGus"},{"name":"wgvpTSabR2Gp","upper":16095714196141917796,"ownsPercentage":0.001792103291945798,"assignedToPhysical":"5UnDHhImGGus"},{"name":"q6uOaqi6vhFp","upper":16096660802391646118,"ownsPercentage":0.00005131562762219013,"assignedToPhysical":"5UnDHhImGGus"},{"name":"QEzoqtGH5L3q","upper":16119600637102882747,"ownsPercentage":0.0012435709315190568,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"ZdNfpA9QwBPW","upper":16184079002913190227,"ownsPercentage":0.003495379214492522,"assignedToPhysical":"5UnDHhImGGus"},{"name":"o0I69xAYCy8l","upper":16202714695155389065,"ownsPercentage":0.0010102429007381622,"assignedToPhysical":"5UnDHhImGGus"},{"name":"NtPJZ0DCROIt","upper":16335259737813065899,"ownsPercentage":0.007185281160081854,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"FVhhe4CrETSN","upper":16363533504010903771,"ownsPercentage":0.001532723936802152,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"Od8MgXRVCkCj","upper":16372836885229567459,"ownsPercentage":0.0005043373064368005,"assignedToPhysical":"5
UnDHhImGGus"},{"name":"iWYrenDncRMZ","upper":16399843699729802176,"ownsPercentage":0.001464042347653375,"assignedToPhysical":"10HAiobndwqQ"},{"name":"JcvKnGUxrjbi","upper":16445456059619812276,"ownsPercentage":0.0024726509842469815,"assignedToPhysical":"10HAiobndwqQ"},{"name":"qcgtLYQ2qEet","upper":16479360271815554726,"ownsPercentage":0.0018379510259516748,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"rblINp3n1sZ0","upper":16487866384862819249,"ownsPercentage":0.0004611173122625745,"assignedToPhysical":"10HAiobndwqQ"},{"name":"XZG9YcJJQOQB","upper":16521085219067707559,"ownsPercentage":0.0018007966106187846,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"7dRVmMSfU9TF","upper":16595274498779891691,"ownsPercentage":0.0040218089119542395,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"f7KnlZipyMoU","upper":16684900004595745633,"ownsPercentage":0.0048586084057830525,"assignedToPhysical":"5UnDHhImGGus"},{"name":"KoRSyfj9NHxw","upper":16691189783006387724,"ownsPercentage":0.0003409695708635289,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"NKgE3HgdeGQm","upper":16839333850387189639,"ownsPercentage":0.00803090598475522,"assignedToPhysical":"10HAiobndwqQ"},{"name":"4sh42JIRZ5x4","upper":16853708534942197307,"ownsPercentage":0.000779253211166657,"assignedToPhysical":"5UnDHhImGGus"},{"name":"N5YYIOLBJsT5","upper":16912253308197006536,"ownsPercentage":0.0031737185175267713,"assignedToPhysical":"5UnDHhImGGus"},{"name":"MDVwNcNC9Gra","upper":16913676074504809068,"ownsPercentage":0.00007712831609293425,"assignedToPhysical":"10HAiobndwqQ"},{"name":"KnwHXiKEEMD5","upper":16942230975172303110,"ownsPercentage":0.001547964266940241,"assignedToPhysical":"5UnDHhImGGus"},{"name":"6JQHfQJXq9tC","upper":16984712941502358546,"ownsPercentage":0.002302952009325108,"assignedToPhysical":"10HAiobndwqQ"},{"name":"7oYBFzAQhylH","upper":17037846590624227824,"ownsPercentage":0.0028803808905006594,"assignedToPhysical":"10HAiobndwqQ"},{"name":"6IcKuxi0brIT","upper":17068513180173499251,"ownsPercentage":0.
0016624391506020674,"assignedToPhysical":"10HAiobndwqQ"},{"name":"CIohP98pYrEU","upper":17175059447197700381,"ownsPercentage":0.005775884708892977,"assignedToPhysical":"5UnDHhImGGus"},{"name":"X4f5TfQ2Zh3w","upper":17191937029992434765,"ownsPercentage":0.0009149355966177495,"assignedToPhysical":"10HAiobndwqQ"},{"name":"XqtbcXDoer5y","upper":17295841694861764475,"ownsPercentage":0.005632683169135277,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"mpyKh0Uvq9pK","upper":17296806499840116196,"ownsPercentage":0.0000523021826776883,"assignedToPhysical":"5UnDHhImGGus"},{"name":"mbwwycSRznHa","upper":17300383715923936993,"ownsPercentage":0.00019392127247642982,"assignedToPhysical":"10HAiobndwqQ"},{"name":"glARwN18WtJG","upper":17305101748749341512,"ownsPercentage":0.0002557650719580751,"assignedToPhysical":"10HAiobndwqQ"},{"name":"taj02QCfJJ2v","upper":17404497849161967856,"ownsPercentage":0.005388273400197841,"assignedToPhysical":"10HAiobndwqQ"},{"name":"KpWXuBwKB3Np","upper":17414490882508683665,"ownsPercentage":0.0005417234232114685,"assignedToPhysical":"10HAiobndwqQ"},{"name":"s4qjXxgUNJsT","upper":17416265334764561276,"ownsPercentage":0.0000961932495397155,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"8v1uPEAZANss","upper":17467684639236524850,"ownsPercentage":0.002787446080809826,"assignedToPhysical":"10HAiobndwqQ"},{"name":"NPiIKetDYl31","upper":17513502977661685481,"ownsPercentage":0.002483817103011761,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"Pn8nYsbPoQXd","upper":17545583939057489619,"ownsPercentage":0.0017391124020377222,"assignedToPhysical":"5UnDHhImGGus"},{"name":"cCuxaZc2WPTR","upper":17551748894130738275,"ownsPercentage":0.00033420288418458625,"assignedToPhysical":"5UnDHhImGGus"},{"name":"saP105LXdjTN","upper":17666884407730275401,"ownsPercentage":0.006241508698742624,"assignedToPhysical":"5UnDHhImGGus"},{"name":"3DpBKBAyrh49","upper":17697646293978287087,"ownsPercentage":0.0016676051949923117,"assignedToPhysical":"10HAiobndwqQ"},{"name":"MPs72GrXF80t","upp
er":17857969619417060675,"ownsPercentage":0.008691144887040944,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"VExbdXg0Ek0R","upper":17904914075552982977,"ownsPercentage":0.0025448640664358715,"assignedToPhysical":"10HAiobndwqQ"},{"name":"TEVjUfLfnSxk","upper":17954419263366483034,"ownsPercentage":0.002683681608834984,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"uq0LzAAY5iuw","upper":18090967361863579046,"ownsPercentage":0.007402287251965807,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"S2fMEGZ6FK5b","upper":18099625189226614089,"ownsPercentage":0.000469341761800352,"assignedToPhysical":"5UnDHhImGGus"},{"name":"9SKvNyPOO16k","upper":18126877605085664304,"ownsPercentage":0.001477356423993033,"assignedToPhysical":"5UnDHhImGGus"},{"name":"UUvwR3z3SjqT","upper":18281219283787153712,"ownsPercentage":0.008366879167660726,"assignedToPhysical":"10HAiobndwqQ"},{"name":"MRj5CdiSSHkh","upper":18295367407612657153,"ownsPercentage":0.0007669713294102378,"assignedToPhysical":"5UnDHhImGGus"},{"name":"2PgGKvp3Nl9s","upper":18318403431528121668,"ownsPercentage":0.0012487853587287332,"assignedToPhysical":"10HAiobndwqQ"},{"name":"cghDTFg7bkKV","upper":18341280274520652486,"ownsPercentage":0.001240156143605585,"assignedToPhysical":"5UnDHhImGGus"},{"name":"IiO65JAUXrTl","upper":18364923769868434758,"ownsPercentage":0.0012817164510608228,"assignedToPhysical":"qc4oYcGtIr3y"},{"name":"EgXtER7wworu","upper":18388782832305629848,"ownsPercentage":0.0012934023663937104,"assignedToPhysical":"5UnDHhImGGus"},{"name":"T1LLWfgdGEtq","upper":18430700244523608817,"ownsPercentage":0.002272347469585161,"assignedToPhysical":"5UnDHhImGGus"}]} +`) + + s, err := sharding.StateFromJSON(raw, mocks.NewMockNodeSelector("node1")) + if err != nil { + panic(err) + } + + for name, shard := range s.Physical { + shard.BelongsToNodes = []string{"node1"} + s.Physical[name] = shard + } + return s +} + +type fakeRemoteClient struct{} + +func (f *fakeRemoteClient) BatchPutObjects(ctx context.Context, hostName, indexName, 
shardName string, objs []*storobj.Object, repl *additional.ReplicationProperties, schemaVersion uint64) []error { + return nil +} + +func (f *fakeRemoteClient) PutObject(ctx context.Context, hostName, indexName, + shardName string, obj *storobj.Object, schemaVersion uint64, +) error { + return nil +} + +func (f *fakeRemoteClient) GetObject(ctx context.Context, hostName, indexName, + shardName string, id strfmt.UUID, props search.SelectProperties, + additional additional.Properties, +) (*storobj.Object, error) { + return nil, nil +} + +func (f *fakeRemoteClient) Exists(ctx context.Context, hostName, indexName, + shardName string, id strfmt.UUID, +) (bool, error) { + return false, nil +} + +func (f *fakeRemoteClient) DeleteObject(ctx context.Context, hostName, indexName, + shardName string, id strfmt.UUID, deletionTime time.Time, schemaVersion uint64, +) error { + return nil +} + +func (f *fakeRemoteClient) MergeObject(ctx context.Context, hostName, indexName, + shardName string, mergeDoc objects.MergeDocument, schemaVersion uint64, +) error { + return nil +} + +func (f *fakeRemoteClient) MultiGetObjects(ctx context.Context, hostName, indexName, + shardName string, ids []strfmt.UUID, +) ([]*storobj.Object, error) { + return nil, nil +} + +func (f *fakeRemoteClient) SearchShard(ctx context.Context, hostName, indexName, + shardName string, vector []models.Vector, targetVector []string, distance float32, limit int, + filters *filters.LocalFilter, _ *searchparams.KeywordRanking, sort []filters.Sort, + cursor *filters.Cursor, groupBy *searchparams.GroupBy, additional additional.Properties, targetCombination *dto.TargetCombination, + properties []string, +) ([]*storobj.Object, []float32, error) { + return nil, nil, nil +} + +func (f *fakeRemoteClient) Aggregate(ctx context.Context, hostName, indexName, + shardName string, params aggregation.Params, +) (*aggregation.Result, error) { + return nil, nil +} + +func (f *fakeRemoteClient) BatchAddReferences(ctx context.Context, 
hostName, + indexName, shardName string, refs objects.BatchReferences, schemaVersion uint64, +) []error { + return nil +} + +func (f *fakeRemoteClient) FindUUIDs(ctx context.Context, hostName, indexName, shardName string, + filters *filters.LocalFilter, +) ([]strfmt.UUID, error) { + return nil, nil +} + +func (f *fakeRemoteClient) DeleteObjectBatch(ctx context.Context, hostName, indexName, shardName string, + uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64, +) objects.BatchSimpleObjects { + return nil +} + +func (f *fakeRemoteClient) GetShardQueueSize(ctx context.Context, + hostName, indexName, shardName string, +) (int64, error) { + return 0, nil +} + +func (f *fakeRemoteClient) GetShardStatus(ctx context.Context, + hostName, indexName, shardName string, +) (string, error) { + return "", nil +} + +func (f *fakeRemoteClient) UpdateShardStatus(ctx context.Context, hostName, indexName, shardName, + targetStatus string, schemaVersion uint64, +) error { + return nil +} + +func (f *fakeRemoteClient) PutFile(ctx context.Context, hostName, indexName, shardName, + fileName string, payload io.ReadSeekCloser, +) error { + return nil +} + +func (f *fakeRemoteClient) PauseFileActivity(ctx context.Context, hostName, indexName, shardName string, schemaVersion uint64) error { + return nil +} + +func (f *fakeRemoteClient) ResumeFileActivity(ctx context.Context, hostName, indexName, shardName string) error { + return nil +} + +func (f *fakeRemoteClient) ListFiles(ctx context.Context, hostName, indexName, shardName string) ([]string, error) { + return nil, nil +} + +func (f *fakeRemoteClient) GetFileMetadata(ctx context.Context, hostName, indexName, shardName, + fileName string, +) (file.FileMetadata, error) { + return file.FileMetadata{}, nil +} + +func (f *fakeRemoteClient) GetFile(ctx context.Context, hostName, indexName, shardName, + fileName string, +) (io.ReadCloser, error) { + return nil, nil +} + +func (f *fakeRemoteClient) 
AddAsyncReplicationTargetNode(ctx context.Context, hostName, indexName, shardName string, targetNodeOverride additional.AsyncReplicationTargetNodeOverride, schemaVersion uint64) error { + return nil +} + +func (f *fakeRemoteClient) RemoveAsyncReplicationTargetNode(ctx context.Context, hostName, indexName, shardName string, targetNodeOverride additional.AsyncReplicationTargetNodeOverride) error { + return nil +} + +type fakeNodeResolver struct{} + +func (f *fakeNodeResolver) AllHostnames() []string { + return nil +} + +func (f *fakeNodeResolver) NodeHostname(string) (string, bool) { + return "", false +} + +type fakeRemoteNodeClient struct{} + +func (f *fakeRemoteNodeClient) GetNodeStatus(ctx context.Context, hostName, className, shardName, output string) (*models.NodeStatus, error) { + return &models.NodeStatus{}, nil +} + +func (f *fakeRemoteNodeClient) GetStatistics(ctx context.Context, hostName string) (*models.Statistics, error) { + return &models.Statistics{}, nil +} + +type fakeReplicationClient struct{} + +var _ replica.Client = (*fakeReplicationClient)(nil) + +func (f *fakeReplicationClient) PutObject(ctx context.Context, host, index, shard, requestID string, + obj *storobj.Object, schemaVersion uint64, +) (replica.SimpleResponse, error) { + return replica.SimpleResponse{}, nil +} + +func (f *fakeReplicationClient) DeleteObject(ctx context.Context, host, index, shard, requestID string, + id strfmt.UUID, deletionTime time.Time, schemaVersion uint64, +) (replica.SimpleResponse, error) { + return replica.SimpleResponse{}, nil +} + +func (f *fakeReplicationClient) PutObjects(ctx context.Context, host, index, shard, requestID string, + objs []*storobj.Object, schemaVersion uint64, +) (replica.SimpleResponse, error) { + return replica.SimpleResponse{}, nil +} + +func (f *fakeReplicationClient) MergeObject(ctx context.Context, host, index, shard, requestID string, + mergeDoc *objects.MergeDocument, schemaVersion uint64, +) (replica.SimpleResponse, error) { + 
return replica.SimpleResponse{}, nil +} + +func (f *fakeReplicationClient) DeleteObjects(ctx context.Context, host, index, shard, requestID string, + uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64, +) (replica.SimpleResponse, error) { + return replica.SimpleResponse{}, nil +} + +func (f *fakeReplicationClient) AddReferences(ctx context.Context, host, index, shard, requestID string, + refs []objects.BatchReference, schemaVersion uint64, +) (replica.SimpleResponse, error) { + return replica.SimpleResponse{}, nil +} + +func (f *fakeReplicationClient) Commit(ctx context.Context, host, index, shard, requestID string, resp interface{}) error { + return nil +} + +func (f *fakeReplicationClient) Abort(ctx context.Context, host, index, shard, requestID string) (replica.SimpleResponse, error) { + return replica.SimpleResponse{}, nil +} + +func (fakeReplicationClient) Exists(ctx context.Context, hostName, indexName, + shardName string, id strfmt.UUID, +) (bool, error) { + return false, nil +} + +func (*fakeReplicationClient) FetchObject(ctx context.Context, hostName, indexName, + shardName string, id strfmt.UUID, props search.SelectProperties, + additional additional.Properties, numRetries int, +) (replica.Replica, error) { + return replica.Replica{}, nil +} + +func (*fakeReplicationClient) DigestObjects(ctx context.Context, + hostName, indexName, shardName string, ids []strfmt.UUID, numRetries int, +) (result []types.RepairResponse, err error) { + return nil, nil +} + +func (*fakeReplicationClient) FetchObjects(ctx context.Context, host, + index, shard string, ids []strfmt.UUID, +) ([]replica.Replica, error) { + return nil, nil +} + +func (*fakeReplicationClient) OverwriteObjects(ctx context.Context, + host, index, shard string, objects []*objects.VObject, +) ([]types.RepairResponse, error) { + return nil, nil +} + +func (*fakeReplicationClient) FindUUIDs(ctx context.Context, + hostName, indexName, shardName string, filters 
*filters.LocalFilter, +) (result []strfmt.UUID, err error) { + return nil, nil +} + +func (c *fakeReplicationClient) DigestObjectsInRange(ctx context.Context, host, index, shard string, + initialUUID, finalUUID strfmt.UUID, limit int, +) ([]types.RepairResponse, error) { + return nil, nil +} + +func (c *fakeReplicationClient) HashTreeLevel(ctx context.Context, host, index, shard string, level int, + discriminant *hashtree.Bitset, +) (digests []hashtree.Digest, err error) { + return nil, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/file_structure_migration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/file_structure_migration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f2170561df2b8278113e0520ce03f63a28277649 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/file_structure_migration_test.go @@ -0,0 +1,339 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "math/rand" + "os" + "path" + "strings" + "testing" + + "github.com/stretchr/testify/mock" + schema2 "github.com/weaviate/weaviate/usecases/schema" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/sharding" +) + +const ( + numClasses = 100 + numShards = 10 + uppercase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + lowercase = "abcdefghijklmnopqrstuvwxyz" + digits = "0123456789" + chars = uppercase + lowercase + digits + localNode = "node1" +) + +var ( + rootFiles = []string{ + "classifications.db", + "modules.db", + "schema.db", + } + indexDirExts = []string{ + ".hnsw.commitlog.d", + "_someGeoProp.hnsw.commitlog.d", + "_lsm", + } + indexFileExts = []string{ + ".indexcount", + ".proplengths", + ".version", + } + migratedRootFiles = append(rootFiles, + "migration1.22.fs.hierarchy") +) + +func TestFileStructureMigration(t *testing.T) { + shardsByClass := make(map[string][]string, numClasses) + + t.Run("generate index and shard names", func(t *testing.T) { + for i := 0; i < numClasses; i++ { + c := randClassName() + shardsByClass[c] = make([]string, numShards) + for j := 0; j < numShards; j++ { + s := randShardName() + shardsByClass[c][j] = s + } + } + }) + + root := t.TempDir() + + t.Run("write test db files", func(t *testing.T) { + for _, f := range rootFiles { + require.Nil(t, os.WriteFile(path.Join(root, f), nil, os.ModePerm)) + } + + for class, shards := range shardsByClass { + for _, shard := range shards { + idx := path.Join(root, fmt.Sprintf("%s_%s", strings.ToLower(class), shard)) + for _, ext := range indexDirExts { + require.Nil(t, os.MkdirAll(idx+ext, os.ModePerm)) + } + for _, ext := range indexFileExts { + 
require.Nil(t, os.WriteFile(idx+ext, nil, os.ModePerm)) + } + + pqDir := path.Join(root, class, shard, "compressed_objects") + require.Nil(t, os.MkdirAll(pqDir, os.ModePerm)) + } + } + }) + + files, err := os.ReadDir(root) + require.Nil(t, err) + + t.Run("assert expected flat contents length", func(t *testing.T) { + // Flat structure root contains: + // - (3 dirs + 3 files) per shard per index + // - dirs: main commilog, geo prop commitlog, lsm store + // - files: indexcount, proplengths, version + // - 1 dir per index; shards dirs are nested + // - pq store + // - 3 root db files + expectedLen := numClasses*(numShards*(len(indexDirExts)+len(indexFileExts))+1) + len(rootFiles) + require.Len(t, files, expectedLen) + }) + + t.Run("migrate the db", func(t *testing.T) { + classes := make([]*models.Class, numClasses) + states := make(map[string]*sharding.State, numClasses) + + i := 0 + for class, shards := range shardsByClass { + classes[i] = &models.Class{ + Class: class, + Properties: []*models.Property{{ + Name: "someGeoProp", + DataType: schema.DataTypeGeoCoordinates.PropString(), + }}, + } + states[class] = &sharding.State{ + Physical: make(map[string]sharding.Physical), + } + states[class].SetLocalName(localNode) + + for _, shard := range shards { + states[class].Physical[shard] = sharding.Physical{ + Name: shard, + BelongsToNodes: []string{localNode}, + } + } + + i++ + } + + db := testDB(t, root, classes, states) + require.Nil(t, db.migrateFileStructureIfNecessary()) + }) + + files, err = os.ReadDir(root) + require.Nil(t, err) + + t.Run("assert expected hierarchical contents length", func(t *testing.T) { + // After migration, the hierarchical structure root contains: + // - one dir per index + // - 3 original root db files, and one additional which is the FS migration indicator + expectedLen := numClasses + len(migratedRootFiles) + require.Len(t, files, expectedLen) + }) + + t.Run("assert all db files were migrated", func(t *testing.T) { + var foundRootFiles 
[]string + for _, f := range files { + if f.IsDir() { + idx := f + shardsRoot, err := os.ReadDir(path.Join(root, idx.Name())) + require.Nil(t, err) + for _, shard := range shardsRoot { + assertShardRootContents(t, shardsByClass, root, idx, shard) + } + } else { + foundRootFiles = append(foundRootFiles, f.Name()) + } + } + + assert.ElementsMatch(t, migratedRootFiles, foundRootFiles) + }) +} + +func assertShardRootContents(t *testing.T, shardsByClass map[string][]string, root string, idx, shard os.DirEntry) { + assert.True(t, shard.IsDir()) + + // Whatever we find in this shard directory, it should be able to + // be mapped back to the original flat structure root contents + lowercasedClasses := make(map[string]string, len(shardsByClass)) + for class := range shardsByClass { + lowercasedClasses[strings.ToLower(class)] = class + } + require.Contains(t, lowercasedClasses, idx.Name()) + assert.Contains(t, shardsByClass[lowercasedClasses[idx.Name()]], shard.Name()) + + // Now we will get a set of all expected files within the shard dir. + // Check to see if all of these files are found. 
+ expected := expectedShardContents() + shardFiles, err := os.ReadDir(path.Join(root, idx.Name(), shard.Name())) + require.Nil(t, err) + for _, sf := range shardFiles { + expected[sf.Name()] = true + } + expected.assert(t) + + // Check if pq store was migrated to main store as "vectors_compressed" subdir + pqDir := path.Join(root, idx.Name(), shard.Name(), "lsm", helpers.VectorsCompressedBucketLSM) + info, err := os.Stat(pqDir) + require.NoError(t, err) + assert.True(t, info.IsDir()) +} + +func testDB(t *testing.T, root string, classes []*models.Class, states map[string]*sharding.State) *DB { + logger, _ := test.NewNullLogger() + mockSchemaReader := schema2.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).RunAndReturn(func(className string) ([]string, error) { + return states[className].AllPhysicalShards(), nil + }).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + for _, class := range classes { + if className == class.Class { + return readFunc(class, states[className]) + } + } + return nil + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: classes}).Maybe() + return &DB{ + config: Config{RootPath: root}, + logger: logger, + schemaGetter: &fakeMigrationSchemaGetter{ + sch: schema.Schema{Objects: &models.Schema{Classes: classes}}, + states: states, + }, + schemaReader: mockSchemaReader, + } +} + +func randClassName() string { + return randStringBytes(16) +} + +func randShardName() string { + return randStringBytes(8) +} + +func randStringBytes(n int) string { + b := make([]byte, n) + for i := range b { + switch i { + case 0: + b[i] = randChar(uppercase) + case n / 2: + b[i] = []byte("_")[0] + default: + b[i] = randChar(chars) + } + } + return string(b) +} + +func randChar(str string) byte { + return str[rand.Intn(len(str))] +} + +type shardContents map[string]bool + +func 
expectedShardContents() shardContents { + return shardContents{ + "main.hnsw.commitlog.d": false, + "geo.someGeoProp.hnsw.commitlog.d": false, + "lsm": false, + "indexcount": false, + "proplengths": false, + "version": false, + } +} + +func (c shardContents) assert(t *testing.T) { + for name, found := range c { + assert.True(t, found, "didn't find %q in shard contents", name) + } +} + +type fakeMigrationSchemaGetter struct { + sch schema.Schema + states map[string]*sharding.State +} + +func (sg *fakeMigrationSchemaGetter) GetSchemaSkipAuth() schema.Schema { + return sg.sch +} + +func (sg *fakeMigrationSchemaGetter) ReadOnlyClass(class string) *models.Class { + return sg.sch.GetClass(class) +} + +func (sg *fakeMigrationSchemaGetter) ResolveAlias(string) string { + return "" +} + +func (sg *fakeMigrationSchemaGetter) GetAliasesForClass(string) []*models.Alias { + return nil +} + +func (sg *fakeMigrationSchemaGetter) Nodes() []string { + return nil +} + +func (sg *fakeMigrationSchemaGetter) NodeName() string { + return "" +} + +func (sg *fakeMigrationSchemaGetter) ClusterHealthScore() int { + return 0 +} + +func (sg *fakeMigrationSchemaGetter) ResolveParentNodes(string, string) (map[string]string, error) { + return nil, nil +} + +func (sg *fakeMigrationSchemaGetter) Statistics() map[string]any { + return nil +} + +func (sg *fakeMigrationSchemaGetter) ShardOwner(class, shard string) (string, error) { + return "", nil +} + +func (sg *fakeMigrationSchemaGetter) TenantsShards(_ context.Context, class string, tenants ...string) (map[string]string, error) { + return nil, nil +} + +func (f *fakeMigrationSchemaGetter) OptimisticTenantStatus(_ context.Context, class string, tenant string) (map[string]string, error) { + return nil, nil +} + +func (sg *fakeMigrationSchemaGetter) ShardFromUUID(class string, uuid []byte) string { + return "" +} + +func (sg *fakeMigrationSchemaGetter) ShardReplicas(class, shard string) ([]string, error) { + return nil, nil +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/filters_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/filters_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3b2cf8d4356e73e5ea87434c58d33cc001d8cdca --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/filters_integration_test.go @@ -0,0 +1,1750 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "testing" + "time" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/search" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +func TestFilters(t *testing.T) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + 
mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(testCtx()) + + migrator := NewMigrator(repo, logger, "node1") + t.Run("prepare test schema and data ", prepareCarTestSchemaAndData(repo, migrator, schemaGetter)) + + t.Run("primitive props without nesting", testPrimitiveProps(repo)) + + t.Run("primitive props with limit", testPrimitivePropsWithLimit(repo)) + + t.Run("chained primitive props", testChainedPrimitiveProps(repo, migrator)) + + 
t.Run("sort props", testSortProperties(repo)) +} + +func TestFiltersNoLengthIndex(t *testing.T) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer 
repo.Shutdown(testCtx()) + migrator := NewMigrator(repo, logger, "node1") + t.Run("prepare test schema and data ", prepareCarTestSchemaAndDataNoLength(repo, migrator, schemaGetter)) + t.Run("primitive props without nesting", testPrimitivePropsWithNoLengthIndex(repo)) +} + +var ( + // operators + eq = filters.OperatorEqual + neq = filters.OperatorNotEqual + lt = filters.OperatorLessThan + lte = filters.OperatorLessThanEqual + like = filters.OperatorLike + gt = filters.OperatorGreaterThan + gte = filters.OperatorGreaterThanEqual + wgr = filters.OperatorWithinGeoRange + and = filters.OperatorAnd + null = filters.OperatorIsNull + + // datatypes + dtInt = schema.DataTypeInt + dtBool = schema.DataTypeBoolean + dtNumber = schema.DataTypeNumber + dtText = schema.DataTypeText + dtDate = schema.DataTypeDate + dtGeoCoordinates = schema.DataTypeGeoCoordinates +) + +func prepareCarTestSchemaAndData(repo *DB, + migrator *Migrator, schemaGetter *fakeSchemaGetter, +) func(t *testing.T) { + return func(t *testing.T) { + t.Run("creating the class", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), carClass)) + schemaGetter.schema.Objects = &models.Schema{ + Classes: []*models.Class{ + carClass, + }, + } + }) + + for i, fixture := range cars { + t.Run(fmt.Sprintf("importing car %d", i), func(t *testing.T) { + require.Nil(t, + repo.PutObject(context.Background(), &fixture, carVectors[i], nil, nil, nil, 0)) + }) + } + } +} + +func prepareCarTestSchemaAndDataNoLength(repo *DB, + migrator *Migrator, schemaGetter *fakeSchemaGetter, +) func(t *testing.T) { + return func(t *testing.T) { + t.Run("creating the class", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), carClassNoLengthIndex)) + schemaGetter.schema.Objects = &models.Schema{ + Classes: []*models.Class{ + carClassNoLengthIndex, + }, + } + }) + + for i, fixture := range cars { + t.Run(fmt.Sprintf("importing car %d", i), func(t *testing.T) { + require.Nil(t, + 
repo.PutObject(context.Background(), &fixture, carVectors[i], nil, nil, nil, 0)) + }) + } + } +} + +func testPrimitivePropsWithNoLengthIndex(repo *DB) func(t *testing.T) { + return func(t *testing.T) { + type test struct { + name string + filter *filters.LocalFilter + expectedIDs []strfmt.UUID + limit int + ErrMsg string + } + + tests := []test{ + { + name: "Filter by unsupported geo-coordinates", + filter: buildFilter("len(parkedAt)", 0, eq, dtInt), + expectedIDs: []strfmt.UUID{}, + ErrMsg: "Property length must be indexed to be filterable! add `IndexPropertyLength: true` to the invertedIndexConfig in", + }, + { + name: "Filter by unsupported number", + filter: buildFilter("len(horsepower)", 1, eq, dtInt), + expectedIDs: []strfmt.UUID{}, + ErrMsg: "Property length must be indexed to be filterable", + }, + { + name: "Filter by unsupported date", + filter: buildFilter("len(released)", 1, eq, dtInt), + expectedIDs: []strfmt.UUID{}, + ErrMsg: "Property length must be indexed to be filterable! 
add `IndexPropertyLength: true` to the invertedIndexConfig in", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.limit == 0 { + test.limit = 100 + } + params := dto.GetParams{ + ClassName: carClass.Class, + Pagination: &filters.Pagination{Limit: test.limit}, + Filters: test.filter, + } + res, err := repo.Search(context.Background(), params) + if len(test.ErrMsg) > 0 { + require.Contains(t, err.Error(), test.ErrMsg) + } else { + require.Nil(t, err) + require.Len(t, res, len(test.expectedIDs)) + + ids := make([]strfmt.UUID, len(test.expectedIDs)) + for pos, concept := range res { + ids[pos] = concept.ID + } + assert.ElementsMatch(t, ids, test.expectedIDs, "ids don't match") + + } + }) + } + } +} + +func testPrimitiveProps(repo *DB) func(t *testing.T) { + return func(t *testing.T) { + type test struct { + name string + filter *filters.LocalFilter + expectedIDs []strfmt.UUID + limit int + ErrMsg string + } + + tests := []test{ + { + name: "horsepower == 130", + filter: buildFilter("horsepower", 130, eq, dtInt), + expectedIDs: []strfmt.UUID{carSprinterID}, + }, + { + name: "horsepower < 200", + filter: buildFilter("horsepower", 200, lt, dtInt), + expectedIDs: []strfmt.UUID{carSprinterID, carPoloID}, + }, + { + name: "horsepower <= 130", + filter: buildFilter("horsepower", 130, lte, dtInt), + expectedIDs: []strfmt.UUID{carSprinterID, carPoloID}, + }, + { + name: "horsepower > 200", + filter: buildFilter("horsepower", 200, gt, dtInt), + expectedIDs: []strfmt.UUID{carE63sID}, + }, + { + name: "horsepower >= 612", + filter: buildFilter("horsepower", 612, gte, dtInt), + expectedIDs: []strfmt.UUID{carE63sID}, + }, + { + name: "modelName != sprinter", + filter: buildFilter("modelName", "sprinter", neq, dtText), + expectedIDs: []strfmt.UUID{carE63sID, carPoloID, carNilID, carEmpty}, + }, + { + name: "modelName = spr*er (optimizable) dtText", + filter: buildFilter("modelName", "spr*er", like, dtText), + expectedIDs: 
[]strfmt.UUID{carSprinterID}, + }, + { + name: "modelName = *rinte? (non-optimizable) dtText", + filter: buildFilter("modelName", "*rinte?", like, dtText), + expectedIDs: []strfmt.UUID{carSprinterID}, + }, + { + name: "modelName = spr*er (optimizable) dtText", + filter: buildFilter("modelName", "spr*er", like, dtText), + expectedIDs: []strfmt.UUID{carSprinterID}, + }, + { + name: "modelName = *rinte? (non-optimizable) dtText", + filter: buildFilter("modelName", "*rinte?", like, dtText), + expectedIDs: []strfmt.UUID{carSprinterID}, + }, + { + name: "weight == 3499.90", + filter: buildFilter("weight", 3499.90, eq, dtNumber), + expectedIDs: []strfmt.UUID{carSprinterID}, + }, + { + name: "weight <= 3499.90", + filter: buildFilter("weight", 3499.90, lte, dtNumber), + expectedIDs: []strfmt.UUID{carSprinterID, carE63sID, carPoloID}, + }, + { + name: "weight < 3499.90", + filter: buildFilter("weight", 3499.90, lt, dtNumber), + expectedIDs: []strfmt.UUID{carE63sID, carPoloID}, + }, + { + name: "weight > 3000", + filter: buildFilter("weight", 3000.0, gt, dtNumber), + expectedIDs: []strfmt.UUID{carSprinterID}, + }, + { + name: "weight == 2069.4", + filter: buildFilter("weight", 2069.4, eq, dtNumber), + expectedIDs: []strfmt.UUID{}, + }, + { + name: "weight == 2069.5", + filter: buildFilter("weight", 2069.5, eq, dtNumber), + expectedIDs: []strfmt.UUID{carE63sID}, + }, + { + name: "weight >= 2069.5", + filter: buildFilter("weight", 2069.5, gte, dtNumber), + expectedIDs: []strfmt.UUID{carSprinterID, carE63sID}, + }, + { + name: "before or equal 2017", + filter: buildFilter("released", mustParseTime("2017-02-17T09:47:00+02:00"), lte, dtDate), + expectedIDs: []strfmt.UUID{carPoloID, carE63sID, carSprinterID}, + }, + { + name: "before 1980", + filter: buildFilter("released", mustParseTime("1980-01-01T00:00:00+02:00"), lt, dtDate), + expectedIDs: []strfmt.UUID{carPoloID}, + }, + { + name: "from or equal 1995 on", + filter: buildFilter("released", 
mustParseTime("1995-08-17T12:47:00+02:00"), gte, dtDate), + expectedIDs: []strfmt.UUID{carSprinterID, carE63sID}, + }, + { + name: "from 1995 on", + filter: buildFilter("released", mustParseTime("1995-08-17T12:47:00+02:00"), gt, dtDate), + expectedIDs: []strfmt.UUID{carE63sID}, + }, + { + name: "equal to 1995-08-17T12:47:00+02:00", + filter: buildFilter("released", mustParseTime("1995-08-17T12:47:00+02:00"), eq, dtDate), + expectedIDs: []strfmt.UUID{carSprinterID}, + }, + { + name: "not equal to 1995-08-17T12:47:00+02:00", + filter: buildFilter("released", mustParseTime("1995-08-17T12:47:00+02:00"), neq, dtDate), + expectedIDs: []strfmt.UUID{carPoloID, carE63sID, carEmpty, carNilID}, + }, + { + name: "exactly matching a specific contact email", + filter: buildFilter("contact", "john@heavycars.example.com", eq, dtText), + expectedIDs: []strfmt.UUID{carSprinterID}, + }, + { + name: "matching an email from within a text (not string) field", + filter: buildFilter("description", "john@heavycars.example.com", eq, dtText), + expectedIDs: []strfmt.UUID{carSprinterID}, + }, + { + name: "full-text matching the word engine", + filter: buildFilter("description", "engine", eq, dtText), + expectedIDs: []strfmt.UUID{carPoloID}, + }, + { + name: "matching two words", + filter: buildFilter("description", "this car", eq, dtText), + expectedIDs: []strfmt.UUID{carSprinterID, carPoloID, carE63sID}, + }, + { + name: "matching three words", + filter: buildFilter("description", "but car has", eq, dtText), + expectedIDs: []strfmt.UUID{carPoloID, carE63sID}, + }, + { + name: "matching words with special characters", + filter: buildFilter("description", "it's also not exactly lightweight.", eq, dtText), + expectedIDs: []strfmt.UUID{carE63sID}, + }, + { + name: "matching words without special characters", + filter: buildFilter("description", "also not exactly lightweight", eq, dtText), + expectedIDs: []strfmt.UUID{carE63sID}, + }, + { + name: "by id", + filter: buildFilter("id", 
carPoloID.String(), eq, dtText), + expectedIDs: []strfmt.UUID{carPoloID}, + }, + { + name: "by id not equal", + filter: buildFilter("id", carE63sID.String(), neq, dtText), + expectedIDs: []strfmt.UUID{carPoloID, carSprinterID, carNilID, carEmpty}, + }, + { + name: "by id less then equal", + filter: buildFilter("id", carPoloID.String(), lte, dtText), + expectedIDs: []strfmt.UUID{carPoloID, carE63sID}, + }, + { + name: "by id less then", + filter: buildFilter("id", carPoloID.String(), lt, dtText), + expectedIDs: []strfmt.UUID{carE63sID}, + }, + { + name: "by id greater then equal", + filter: buildFilter("id", carPoloID.String(), gte, dtText), + expectedIDs: []strfmt.UUID{carPoloID, carSprinterID, carNilID, carEmpty}, + }, + { + name: "by id greater then", + filter: buildFilter("id", carPoloID.String(), gt, dtText), + expectedIDs: []strfmt.UUID{carSprinterID, carNilID, carEmpty}, + }, + { + name: "within 600km of San Francisco", + filter: buildFilter("parkedAt", filters.GeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + Latitude: ptFloat32(37.733795), + Longitude: ptFloat32(-122.446747), + }, + Distance: 600000, + }, wgr, dtGeoCoordinates), + expectedIDs: []strfmt.UUID{carSprinterID}, + }, + // { + // name: "by id like", + // filter: buildFilter("id", carPoloID.String(), like, dtText), + // expectedIDs: []strfmt.UUID{carPoloID}, + // }, + { + name: "by color with word tokenization", + filter: buildFilter("colorWhitespace", "grey", eq, dtText), + expectedIDs: []strfmt.UUID{carE63sID, carSprinterID, carPoloID}, + }, + { + name: "by color with word tokenization multiword (1)", + filter: buildFilter("colorWhitespace", "light grey", eq, dtText), + expectedIDs: []strfmt.UUID{carE63sID, carSprinterID}, + }, + { + name: "by color with word tokenization multiword (2)", + filter: buildFilter("colorWhitespace", "dark grey", eq, dtText), + expectedIDs: []strfmt.UUID{carPoloID}, + }, + { + name: "by color with field tokenization", + filter: buildFilter("colorField", "grey", eq, 
dtText), + expectedIDs: []strfmt.UUID{}, + }, + { + name: "by color with field tokenization multiword (1)", + filter: buildFilter("colorField", "light grey", eq, dtText), + expectedIDs: []strfmt.UUID{carSprinterID}, + }, + { + name: "by color with field tokenization multiword (2)", + filter: buildFilter("colorField", "dark grey", eq, dtText), + expectedIDs: []strfmt.UUID{carPoloID}, + }, + { + name: "by color array with word tokenization", + filter: buildFilter("colorArrayWhitespace", "grey", eq, dtText), + expectedIDs: []strfmt.UUID{carE63sID, carSprinterID, carPoloID}, + }, + { + name: "by color array with word tokenization multiword (1)", + filter: buildFilter("colorArrayWhitespace", "light grey", eq, dtText), + expectedIDs: []strfmt.UUID{carE63sID, carSprinterID}, + }, + { + name: "by color array with word tokenization multiword (2)", + filter: buildFilter("colorArrayWhitespace", "dark grey", eq, dtText), + expectedIDs: []strfmt.UUID{carPoloID}, + }, + { + name: "by color array with field tokenization", + filter: buildFilter("colorArrayField", "grey", eq, dtText), + expectedIDs: []strfmt.UUID{carE63sID, carPoloID}, + }, + { + name: "by color with array field tokenization multiword (1)", + filter: buildFilter("colorArrayField", "light grey", eq, dtText), + expectedIDs: []strfmt.UUID{carSprinterID}, + }, + { + name: "by color with array field tokenization multiword (2)", + filter: buildFilter("colorArrayField", "dark grey", eq, dtText), + expectedIDs: []strfmt.UUID{}, + }, + { + name: "by null value", + filter: buildFilter("colorArrayField", true, null, dtBool), + expectedIDs: []strfmt.UUID{carNilID, carEmpty}, + }, + { + name: "by value not null", + filter: buildFilter("colorArrayField", false, null, dtBool), + expectedIDs: []strfmt.UUID{carE63sID, carPoloID, carSprinterID}, + }, + { + name: "by string length", + filter: buildFilter("len(colorField)", 10, eq, dtInt), + expectedIDs: []strfmt.UUID{carSprinterID}, + }, + { + name: "by array length", + filter: 
buildFilter("len(colorArrayField)", 2, eq, dtInt), + expectedIDs: []strfmt.UUID{carE63sID, carPoloID}, + }, + { + name: "by text length (equal)", + filter: buildFilter("len(description)", 65, eq, dtInt), + expectedIDs: []strfmt.UUID{carE63sID}, + }, + { + name: "by text length (lte)", + filter: buildFilter("len(description)", 65, lte, dtInt), + expectedIDs: []strfmt.UUID{carE63sID, carNilID, carEmpty}, + }, + { + name: "by text length (gte)", + filter: buildFilter("len(description)", 65, gte, dtInt), + expectedIDs: []strfmt.UUID{carE63sID, carPoloID, carSprinterID}, + }, + { + name: "length 0 (not added and empty)", + filter: buildFilter("len(colorArrayWhitespace)", 0, eq, dtInt), + expectedIDs: []strfmt.UUID{carNilID, carEmpty}, + }, + { + name: "Filter unicode strings", + filter: buildFilter("len(contact)", 30, eq, dtInt), + expectedIDs: []strfmt.UUID{carE63sID}, + }, + { + name: "Filter unicode texts", + filter: buildFilter("len(description)", 110, eq, dtInt), + expectedIDs: []strfmt.UUID{carPoloID}, + }, + { + name: "Empty string properties", + filter: buildFilter("modelName", true, null, dtBool), + expectedIDs: []strfmt.UUID{carEmpty}, + }, + { + name: "Empty string by length", + filter: buildFilter("len(description)", 0, eq, dtInt), + expectedIDs: []strfmt.UUID{carEmpty, carNilID}, + }, + { + name: "Empty array by length", + filter: buildFilter("len(colorArrayWhitespace)", 0, eq, dtInt), + expectedIDs: []strfmt.UUID{carEmpty, carNilID}, + }, + { + name: "made by Mercedes ... 
I mean manufacturer1", + filter: buildFilter("manufacturerId", manufacturer1.String(), eq, dtText), + expectedIDs: []strfmt.UUID{carE63sID, carSprinterID}, + }, + { + name: "made by manufacturer2", + filter: buildFilter("manufacturerId", manufacturer2.String(), eq, dtText), + expectedIDs: []strfmt.UUID{carPoloID}, + }, + { + name: "available at the north dealership", + filter: buildFilter("availableAtDealerships", dealershipNorth.String(), eq, dtText), + expectedIDs: []strfmt.UUID{carE63sID, carSprinterID}, + }, + { + name: "available at the south dealership", + filter: buildFilter("availableAtDealerships", dealershipSouth.String(), eq, dtText), + expectedIDs: []strfmt.UUID{carPoloID, carSprinterID}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.limit == 0 { + test.limit = 100 + } + params := dto.GetParams{ + ClassName: carClass.Class, + Pagination: &filters.Pagination{Limit: test.limit}, + Filters: test.filter, + } + res, err := repo.Search(context.Background(), params) + if len(test.ErrMsg) > 0 { + require.Contains(t, err.Error(), test.ErrMsg) + } else { + require.Nil(t, err) + + require.Len(t, res, len(test.expectedIDs)) + + ids := make([]strfmt.UUID, len(test.expectedIDs)) + for pos, concept := range res { + ids[pos] = concept.ID + } + assert.ElementsMatch(t, ids, test.expectedIDs, "ids don't match") + + } + }) + } + } +} + +func testPrimitivePropsWithLimit(repo *DB) func(t *testing.T) { + return func(t *testing.T) { + t.Run("greater than", func(t *testing.T) { + limit := 1 + + params := dto.GetParams{ + ClassName: carClass.Class, + Pagination: &filters.Pagination{Limit: limit}, + Filters: buildFilter("horsepower", 2, gt, dtInt), // would otherwise return 3 results + } + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + assert.Len(t, res, limit) + }) + + t.Run("less than", func(t *testing.T) { + limit := 1 + + params := dto.GetParams{ + ClassName: carClass.Class, + Pagination: 
&filters.Pagination{Limit: limit}, + Filters: buildFilter("horsepower", 20000, lt, dtInt), // would otherwise return 3 results + } + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + assert.Len(t, res, limit) + }) + } +} + +func testChainedPrimitiveProps(repo *DB, + migrator *Migrator, +) func(t *testing.T) { + return func(t *testing.T) { + type test struct { + name string + filter *filters.LocalFilter + expectedIDs []strfmt.UUID + } + + tests := []test{ + { + name: "modelName == sprinter AND weight > 3000", + filter: filterAnd( + buildFilter("modelName", "sprinter", eq, dtText), + buildFilter("weight", float64(3000), gt, dtNumber), + ), + expectedIDs: []strfmt.UUID{carSprinterID}, + }, + { + name: "modelName == sprinter OR modelName == e63s", + filter: filterOr( + buildFilter("modelName", "sprinter", eq, dtText), + buildFilter("modelName", "e63s", eq, dtText), + ), + expectedIDs: []strfmt.UUID{carSprinterID, carE63sID}, + }, + { + name: "NOT (modelName == sprinter OR modelName == e63s)", + filter: filterNot(filterOr( + buildFilter("modelName", "sprinter", eq, dtText), + buildFilter("modelName", "e63s", eq, dtText), + )), + expectedIDs: []strfmt.UUID{carPoloID, carNilID, carEmpty}, + }, + { + name: "NOT (horsepower < 200 OR weight > 3000)", + filter: filterNot(filterOr( + buildFilter("horsepower", 200, lt, dtInt), + buildFilter("weight", float64(3000), gt, dtNumber), + )), + expectedIDs: []strfmt.UUID{carE63sID, carNilID, carEmpty}, + }, + { + name: "(heavy AND powerful) OR light", + filter: filterOr( + filterAnd( + buildFilter("horsepower", 200, gt, dtInt), + buildFilter("weight", float64(1500), gt, dtNumber), + ), + buildFilter("weight", float64(1500), lt, dtNumber), + ), + expectedIDs: []strfmt.UUID{carE63sID, carPoloID}, + }, + + // this test prevents a regression on + // https://github.com/weaviate/weaviate/issues/1638 + { + name: "Like ca* AND Like eng*", + filter: filterAnd( + buildFilter("description", "ca*", like, dtText), + 
buildFilter("description", "eng*", like, dtText), + ), + expectedIDs: []strfmt.UUID{carPoloID}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + params := dto.GetParams{ + ClassName: carClass.Class, + Pagination: &filters.Pagination{Limit: 100}, + Filters: test.filter, + } + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + require.Len(t, res, len(test.expectedIDs)) + + ids := make([]strfmt.UUID, len(test.expectedIDs)) + for pos, concept := range res { + ids[pos] = concept.ID + } + assert.ElementsMatch(t, ids, test.expectedIDs, "ids dont match") + }) + } + } +} + +func buildFilter(propName string, value interface{}, operator filters.Operator, schemaType schema.DataType) *filters.LocalFilter { + return &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: operator, + On: &filters.Path{ + Class: schema.ClassName(carClass.Class), + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: value, + Type: schemaType, + }, + }, + } +} + +func buildSortFilter(path []string, order string) filters.Sort { + return filters.Sort{Path: path, Order: order} +} + +func compoundFilter(operator filters.Operator, + operands ...*filters.LocalFilter, +) *filters.LocalFilter { + clauses := make([]filters.Clause, len(operands)) + for i, filter := range operands { + clauses[i] = *filter.Root + } + + return &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: operator, + Operands: clauses, + }, + } +} + +func filterAnd(operands ...*filters.LocalFilter) *filters.LocalFilter { + return compoundFilter(filters.OperatorAnd, operands...) +} + +func filterOr(operands ...*filters.LocalFilter) *filters.LocalFilter { + return compoundFilter(filters.OperatorOr, operands...) 
+} + +func filterNot(operand *filters.LocalFilter) *filters.LocalFilter { + return compoundFilter(filters.OperatorNot, operand) +} + +// test data +var carClass = &models.Class{ + Class: "FilterTestCar", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + DataType: schema.DataTypeText.PropString(), + Name: "modelName", + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + DataType: schema.DataTypeText.PropString(), + Name: "contact", + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + DataType: schema.DataTypeText.PropString(), + Name: "description", + Tokenization: models.PropertyTokenizationWord, + }, + { + DataType: []string{string(schema.DataTypeInt)}, + Name: "horsepower", + }, + { + DataType: []string{string(schema.DataTypeNumber)}, + Name: "weight", + }, + { + DataType: []string{string(schema.DataTypeGeoCoordinates)}, + Name: "parkedAt", + }, + { + DataType: []string{string(schema.DataTypeDate)}, + Name: "released", + }, + { + DataType: schema.DataTypeText.PropString(), + Name: "colorWhitespace", + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + DataType: schema.DataTypeText.PropString(), + Name: "colorField", + Tokenization: models.PropertyTokenizationField, + }, + { + DataType: schema.DataTypeTextArray.PropString(), + Name: "colorArrayWhitespace", + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + DataType: schema.DataTypeTextArray.PropString(), + Name: "colorArrayField", + Tokenization: models.PropertyTokenizationField, + }, + { + DataType: []string{string(schema.DataTypeUUID)}, + Name: "manufacturerId", + }, + { + DataType: []string{string(schema.DataTypeUUIDArray)}, + Name: "availableAtDealerships", + }, + }, +} + +// test data +var carClassNoLengthIndex = &models.Class{ + Class: "FilterTestCar", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: &models.InvertedIndexConfig{ + 
CleanupIntervalSeconds: 60, + Stopwords: &models.StopwordConfig{ + Preset: "none", + }, + IndexNullState: true, + IndexPropertyLength: false, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + Properties: []*models.Property{ + { + DataType: schema.DataTypeText.PropString(), + Name: "modelName", + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + DataType: schema.DataTypeText.PropString(), + Name: "contact", + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + DataType: schema.DataTypeText.PropString(), + Name: "description", + Tokenization: models.PropertyTokenizationWord, + }, + { + DataType: []string{string(schema.DataTypeInt)}, + Name: "horsepower", + }, + { + DataType: []string{string(schema.DataTypeNumber)}, + Name: "weight", + }, + { + DataType: []string{string(schema.DataTypeGeoCoordinates)}, + Name: "parkedAt", + }, + { + DataType: []string{string(schema.DataTypeDate)}, + Name: "released", + }, + { + DataType: schema.DataTypeText.PropString(), + Name: "colorWhitespace", + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + DataType: schema.DataTypeText.PropString(), + Name: "colorField", + Tokenization: models.PropertyTokenizationField, + }, + { + DataType: schema.DataTypeTextArray.PropString(), + Name: "colorArrayWhitespace", + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + DataType: schema.DataTypeTextArray.PropString(), + Name: "colorArrayField", + Tokenization: models.PropertyTokenizationField, + }, + { + DataType: []string{string(schema.DataTypeUUID)}, + Name: "manufacturerId", + }, + { + DataType: []string{string(schema.DataTypeUUIDArray)}, + Name: "availableAtDealerships", + }, + }, +} + +var ( + carSprinterID strfmt.UUID = "d4c48788-7798-4bdd-bca9-5cd5012a5271" + carE63sID strfmt.UUID = "62906c61-f92f-4f2c-874f-842d4fb9d80b" + carPoloID strfmt.UUID = "b444e1d8-d73a-4d53-a417-8d6501c27f2e" + carNilID strfmt.UUID = "b444e1d8-d73a-4d53-a417-8d6501c27f3e" + carEmpty strfmt.UUID = 
"b444e1d8-d73a-4d53-a417-8d6501c27f4e" + + // these UUIDs are not primary IDs of objects, but rather values for uuid and + // uuid[] fields + manufacturer1 = uuid.MustParse("11111111-2222-3333-4444-000000000001") + manufacturer2 = uuid.MustParse("11111111-2222-3333-4444-000000000002") + dealershipNorth = uuid.MustParse("99999999-9999-9999-9999-000000000001") + dealershipSouth = uuid.MustParse("99999999-9999-9999-9999-000000000002") +) + +func mustParseTime(in string) time.Time { + asTime, err := time.Parse(time.RFC3339, in) + if err != nil { + panic(err) + } + return asTime +} + +var cars = []models.Object{ + { + Class: carClass.Class, + ID: carSprinterID, + Properties: map[string]interface{}{ + "modelName": "sprinter", + "horsepower": int64(130), + "weight": 3499.90, + "released": mustParseTime("1995-08-17T12:47:00+02:00"), + "parkedAt": &models.GeoCoordinates{ + Latitude: ptFloat32(34.052235), + Longitude: ptFloat32(-118.243683), + }, + "contact": "john@heavycars.example.com", + "description": "This car resembles a large van that can still be driven with a regular license. 
Contact john@heavycars.example.com for details", + "colorWhitespace": "light grey", + "colorField": "light grey", + "colorArrayWhitespace": []interface{}{"light grey"}, + "colorArrayField": []interface{}{"light grey"}, + "manufacturerId": manufacturer1, + "availableAtDealerships": []uuid.UUID{dealershipNorth, dealershipSouth}, + }, + }, + { + Class: carClass.Class, + ID: carE63sID, + Properties: map[string]interface{}{ + "modelName": "e63s", + "horsepower": int64(612), + "weight": 2069.5, + "released": mustParseTime("2017-02-17T09:47:00+02:00"), + "parkedAt": &models.GeoCoordinates{ + Latitude: ptFloat32(40.730610), + Longitude: ptFloat32(-73.935242), + }, + "contact": "jessica-世界@unicode.example.com", + "description": "This car has a huge motor, but it's also not exactly lightweight.", + "colorWhitespace": "very light grey", + "colorField": "very light grey", + "colorArrayWhitespace": []interface{}{"very light", "grey"}, + "colorArrayField": []interface{}{"very light", "grey"}, + "manufacturerId": manufacturer1, + "availableAtDealerships": []uuid.UUID{dealershipNorth}, + }, + }, + { + Class: carClass.Class, + ID: carPoloID, + Properties: map[string]interface{}{ + "released": mustParseTime("1975-01-01T10:12:00+02:00"), + "modelName": "polo", + "horsepower": int64(100), + "weight": 1200.0, + "contact": "sandra@efficientcars.example.com", + "description": "This small car has a small engine and unicode labels (ąę), but it's very light, so it feels faster than it is.", + "colorWhitespace": "dark grey", + "colorField": "dark grey", + "colorArrayWhitespace": []interface{}{"dark", "grey"}, + "colorArrayField": []interface{}{"dark", "grey"}, + "manufacturerId": manufacturer2, + "availableAtDealerships": []uuid.UUID{dealershipSouth}, + }, + }, + { + Class: carClass.Class, + ID: carNilID, + Properties: map[string]interface{}{ + "modelName": "NilCar", + }, + }, + { + Class: carClass.Class, + ID: carEmpty, + Properties: map[string]interface{}{ + "modelName": "", + "contact": 
"", + "description": "", + "colorWhitespace": "", + "colorField": "", + "colorArrayWhitespace": []interface{}{}, + "colorArrayField": []interface{}{}, + }, + }, +} + +var carVectors = [][]float32{ + {1.1, 0, 0, 0, 0}, + {0, 1.1, 0, 0, 0}, + {0, 0, 1.1, 0, 0}, + {0, 0, 0, 1.1, 0}, + {0, 0, 0, 0, 1.1}, +} + +func TestGeoPropUpdateJourney(t *testing.T) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, 
&fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + + migrator := NewMigrator(repo, logger, "node1") + + t.Run("import schema", func(t *testing.T) { + class := &models.Class{ + Class: "GeoUpdateTestClass", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "location", + DataType: []string{string(schema.DataTypeGeoCoordinates)}, + }, + }, + } + + migrator.AddClass(context.Background(), class) + schemaGetter.schema.Objects = &models.Schema{ + Classes: []*models.Class{class}, + } + }) + + ids := []strfmt.UUID{ + "4002609e-ee57-4404-a0ad-798af7da0004", + "1477aed8-f677-4131-a3ad-4deef6176066", + } + + searchQuery := filters.GeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + Latitude: ptFloat32(6.0), + Longitude: ptFloat32(-2.0), + }, + Distance: 400000, // distance to filter only 1 closest object in both test cases + } + + upsertFn := func(coordinates [][]float32) func(t *testing.T) { + return func(t *testing.T) { + for i, id := range ids { + repo.PutObject(context.Background(), &models.Object{ + Class: "GeoUpdateTestClass", + ID: id, + Properties: map[string]interface{}{ + "location": &models.GeoCoordinates{ + Latitude: &coordinates[i][0], + Longitude: &coordinates[i][1], + }, + }, + }, []float32{0.5}, nil, nil, nil, 0) + } + } + } + + t.Run("import items", upsertFn([][]float32{ + {7, 1}, + {8, 2}, + })) + + t.Run("verify 1st object found", func(t *testing.T) { + res, err := repo.Search(context.Background(), + getParamsWithFilter("GeoUpdateTestClass", buildFilter( + "location", searchQuery, wgr, schema.DataTypeGeoCoordinates, + ))) + + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, ids[0], res[0].ID) + }) + + t.Run("import items", 
upsertFn([][]float32{ + // move item 0 farther away from the search query and item 1 closer to it + {23, 14}, + {6.5, -1}, + })) + + t.Run("verify 2nd object found", func(t *testing.T) { + res, err := repo.Search(context.Background(), + getParamsWithFilter("GeoUpdateTestClass", buildFilter( + "location", searchQuery, wgr, schema.DataTypeGeoCoordinates, + ))) + + require.Nil(t, err) + require.Len(t, res, 1) + + // notice the opposite order + assert.Equal(t, ids[1], res[0].ID) + }) +} + +// This test prevents a regression on +// https://github.com/weaviate/weaviate/issues/1426 +func TestCasingOfOperatorCombinations(t *testing.T) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + 
mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + + migrator := NewMigrator(repo, logger, "node1") + + class := &models.Class{ + Class: "FilterCasingBug", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "textPropWord", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "textPropWhitespace", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + } + + objects := []*models.Object{ + { + Class: class.Class, + ID: strfmt.UUID(uuid.New().String()), + Properties: map[string]interface{}{ + "name": "all lowercase", + "textPropWhitespace": "apple banana orange", + "textPropWord": "apple banana orange", + }, + Vector: []float32{0.1}, + }, + { + Class: class.Class, + ID: strfmt.UUID(uuid.New().String()), + Properties: map[string]interface{}{ + "name": "mixed case", + "textPropWhitespace": "apple Banana ORANGE", + "textPropWord": "apple Banana ORANGE", + }, + Vector: []float32{0.1}, + }, + { + Class: class.Class, + ID: strfmt.UUID(uuid.New().String()), + Properties: map[string]interface{}{ + "name": "first letter uppercase", + "textPropWhitespace": "Apple Banana Orange", + "textPropWord": "Apple Banana Orange", + 
}, + Vector: []float32{0.1}, + }, + { + Class: class.Class, + ID: strfmt.UUID(uuid.New().String()), + Properties: map[string]interface{}{ + "name": "all uppercase", + "textPropWhitespace": "APPLE BANANA ORANGE", + "textPropWord": "APPLE BANANA ORANGE", + }, + Vector: []float32{0.1}, + }, + } + + t.Run("creating the class", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), class)) + schemaGetter.schema.Objects = &models.Schema{ + Classes: []*models.Class{ + class, + }, + } + }) + + t.Run("importing the objects", func(t *testing.T) { + for i, obj := range objects { + t.Run(fmt.Sprintf("importing object %d", i), func(t *testing.T) { + require.Nil(t, + repo.PutObject(context.Background(), obj, obj.Vector, nil, nil, nil, 0)) + }) + } + }) + + t.Run("verifying combinations", func(t *testing.T) { + type test struct { + name string + filter *filters.LocalFilter + expectedNames []string + limit int + } + + tests := []test{ + { + name: "text word, lowercase, single word, should match all", + filter: buildFilter("textPropWord", "apple", eq, dtText), + expectedNames: []string{ + "all uppercase", "all lowercase", "mixed case", + "first letter uppercase", + }, + }, + { + name: "text word, lowercase, multiple words, should match all", + filter: buildFilter("textPropWord", "apple banana orange", eq, dtText), + expectedNames: []string{ + "all uppercase", "all lowercase", "mixed case", + "first letter uppercase", + }, + }, + { + name: "text word, mixed case, single word, should match all", + filter: buildFilter("textPropWord", "Apple", eq, dtText), + expectedNames: []string{ + "all uppercase", "all lowercase", "mixed case", + "first letter uppercase", + }, + }, + { + name: "text word, mixed case, multiple words, should match all", + filter: buildFilter("textPropWord", "Apple Banana Orange", eq, dtText), + expectedNames: []string{ + "all uppercase", "all lowercase", "mixed case", + "first letter uppercase", + }, + }, + { + name: "text word, uppercase, 
single word, should match all", + filter: buildFilter("textPropWord", "APPLE", eq, dtText), + expectedNames: []string{ + "all uppercase", "all lowercase", "mixed case", + "first letter uppercase", + }, + }, + { + name: "text word, uppercase, multiple words, should match all", + filter: buildFilter("textPropWord", "APPLE BANANA ORANGE", eq, dtText), + expectedNames: []string{ + "all uppercase", "all lowercase", "mixed case", + "first letter uppercase", + }, + }, + { + name: "text whitespace, lowercase, single word, should match exact casing", + filter: buildFilter("textPropWhitespace", "apple", eq, dtText), + expectedNames: []string{ + "all lowercase", "mixed case", // mixed matches because the first word is all lowercase + }, + }, + { + name: "text whitespace, lowercase, multiple words, should match all-lowercase", + filter: buildFilter("textPropWhitespace", "apple banana orange", eq, dtText), + expectedNames: []string{"all lowercase"}, + }, + { + name: "text whitespace, mixed case, single word, should match exact matches", + filter: buildFilter("textPropWhitespace", "Banana", eq, dtText), + expectedNames: []string{ + "mixed case", "first letter uppercase", + }, + }, + { + name: "text whitespace, mixed case, multiple words, should match exact matches", + filter: buildFilter("textPropWhitespace", "apple Banana ORANGE", eq, dtText), + expectedNames: []string{ + "mixed case", + }, + }, + { + name: "text whitespace, uppercase, single word, should match all upper", + filter: buildFilter("textPropWhitespace", "APPLE", eq, dtText), + expectedNames: []string{ + "all uppercase", + }, + }, + { + name: "text whitespace, uppercase, multiple words, should match only all upper", + filter: buildFilter("textPropWhitespace", "APPLE BANANA ORANGE", eq, dtText), + expectedNames: []string{ + "all uppercase", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.limit == 0 { + test.limit = 100 + } + params := dto.GetParams{ + ClassName: 
class.Class, + Pagination: &filters.Pagination{Limit: test.limit}, + Filters: test.filter, + Properties: search.SelectProperties{{Name: "name"}}, + } + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + require.Len(t, res, len(test.expectedNames)) + + names := make([]string, len(test.expectedNames)) + for pos, obj := range res { + names[pos] = obj.Schema.(map[string]interface{})["name"].(string) + } + assert.ElementsMatch(t, names, test.expectedNames, "names don't match") + }) + } + }) +} + +func testSortProperties(repo *DB) func(t *testing.T) { + return func(t *testing.T) { + type test struct { + name string + sort []filters.Sort + expectedIDs []strfmt.UUID + wantErr bool + errMessage string + } + tests := []test{ + { + name: "modelName asc", + sort: []filters.Sort{ + buildSortFilter([]string{"modelName"}, "asc"), + }, + expectedIDs: []strfmt.UUID{carEmpty, carE63sID, carNilID, carPoloID, carSprinterID}, + }, + { + name: "modelName desc", + sort: []filters.Sort{ + buildSortFilter([]string{"modelName"}, "desc"), + }, + expectedIDs: []strfmt.UUID{carSprinterID, carPoloID, carNilID, carE63sID, carEmpty}, + }, + { + name: "horsepower asc", + sort: []filters.Sort{ + buildSortFilter([]string{"horsepower"}, "asc"), + }, + expectedIDs: []strfmt.UUID{carNilID, carEmpty, carPoloID, carSprinterID, carE63sID}, + }, + { + name: "horsepower desc", + sort: []filters.Sort{ + buildSortFilter([]string{"horsepower"}, "desc"), + }, + expectedIDs: []strfmt.UUID{carE63sID, carSprinterID, carPoloID, carNilID, carEmpty}, + }, + { + name: "weight asc", + sort: []filters.Sort{ + buildSortFilter([]string{"weight"}, "asc"), + }, + expectedIDs: []strfmt.UUID{carNilID, carEmpty, carPoloID, carE63sID, carSprinterID}, + }, + { + name: "weight desc", + sort: []filters.Sort{ + buildSortFilter([]string{"weight"}, "desc"), + }, + expectedIDs: []strfmt.UUID{carSprinterID, carE63sID, carPoloID, carNilID, carEmpty}, + }, + { + name: "released asc", + sort: []filters.Sort{ + 
buildSortFilter([]string{"released"}, "asc"), + }, + expectedIDs: []strfmt.UUID{carNilID, carEmpty, carPoloID, carSprinterID, carE63sID}, + }, + { + name: "released desc", + sort: []filters.Sort{ + buildSortFilter([]string{"released"}, "desc"), + }, + expectedIDs: []strfmt.UUID{carE63sID, carSprinterID, carPoloID, carNilID, carEmpty}, + }, + { + name: "parkedAt asc", + sort: []filters.Sort{ + buildSortFilter([]string{"parkedAt"}, "asc"), + }, + expectedIDs: []strfmt.UUID{carPoloID, carNilID, carEmpty, carSprinterID, carE63sID}, + }, + { + name: "parkedAt desc", + sort: []filters.Sort{ + buildSortFilter([]string{"parkedAt"}, "desc"), + }, + expectedIDs: []strfmt.UUID{carE63sID, carSprinterID, carPoloID, carNilID, carEmpty}, + }, + { + name: "contact asc", + sort: []filters.Sort{ + buildSortFilter([]string{"contact"}, "asc"), + }, + expectedIDs: []strfmt.UUID{carNilID, carEmpty, carE63sID, carSprinterID, carPoloID}, + }, + { + name: "contact desc", + sort: []filters.Sort{ + buildSortFilter([]string{"contact"}, "desc"), + }, + expectedIDs: []strfmt.UUID{carPoloID, carSprinterID, carE63sID, carEmpty, carNilID}, + }, + { + name: "description asc", + sort: []filters.Sort{ + buildSortFilter([]string{"description"}, "asc"), + }, + expectedIDs: []strfmt.UUID{carNilID, carEmpty, carE63sID, carSprinterID, carPoloID}, + }, + { + name: "description desc", + sort: []filters.Sort{ + buildSortFilter([]string{"description"}, "desc"), + }, + expectedIDs: []strfmt.UUID{carPoloID, carSprinterID, carE63sID, carEmpty, carNilID}, + }, + { + name: "colorArrayWhitespace asc", + sort: []filters.Sort{ + buildSortFilter([]string{"colorArrayWhitespace"}, "asc"), + }, + expectedIDs: []strfmt.UUID{carNilID, carEmpty, carPoloID, carSprinterID, carE63sID}, + }, + { + name: "colorArrayWhitespace desc", + sort: []filters.Sort{ + buildSortFilter([]string{"colorArrayWhitespace"}, "desc"), + }, + expectedIDs: []strfmt.UUID{carE63sID, carSprinterID, carPoloID, carNilID, carEmpty}, + }, + { + name: 
"modelName and horsepower asc", + sort: []filters.Sort{ + buildSortFilter([]string{"modelName"}, "asc"), + buildSortFilter([]string{"horsepower"}, "asc"), + }, + expectedIDs: []strfmt.UUID{carEmpty, carE63sID, carNilID, carPoloID, carSprinterID}, + }, + { + name: "horsepower and modelName asc", + sort: []filters.Sort{ + buildSortFilter([]string{"horsepower"}, "asc"), + buildSortFilter([]string{"modelName"}, "asc"), + }, + expectedIDs: []strfmt.UUID{carEmpty, carNilID, carPoloID, carSprinterID, carE63sID}, + }, + { + name: "horsepower and modelName asc invalid sort", + sort: []filters.Sort{ + buildSortFilter([]string{"horsepower", "modelName"}, "asc"), + }, + expectedIDs: nil, + wantErr: true, + errMessage: "sort object list: sorting by reference not supported, path must have exactly one argument", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + params := dto.GetParams{ + ClassName: carClass.Class, + Pagination: &filters.Pagination{Limit: 100}, + Sort: test.sort, + } + res, err := repo.Search(context.Background(), params) + if test.wantErr { + require.NotNil(t, err) + require.Contains(t, err.Error(), test.errMessage) + } else { + require.Nil(t, err) + require.Len(t, res, len(test.expectedIDs)) + + ids := make([]strfmt.UUID, len(test.expectedIDs)) + for pos, concept := range res { + ids[pos] = concept.ID + } + assert.EqualValues(t, test.expectedIDs, ids, "ids dont match") + } + }) + } + } +} + +func TestFilteringAfterDeletion(t *testing.T) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc 
func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + + migrator := NewMigrator(repo, logger, "node1") + class := &models.Class{ + Class: "DeletionClass", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "other", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + } + UUID1 := strfmt.UUID(uuid.New().String()) + UUID2 := 
strfmt.UUID(uuid.New().String()) + objects := []*models.Object{ + { + Class: class.Class, + ID: UUID1, + Properties: map[string]interface{}{ + "name": "otherthing", + "other": "not nil", + }, + }, + { + Class: class.Class, + ID: UUID2, + Properties: map[string]interface{}{ + "name": "something", + "other": nil, + }, + }, + } + + t.Run("creating the class and add objects", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), class)) + schemaGetter.schema.Objects = &models.Schema{ + Classes: []*models.Class{ + class, + }, + } + for i, obj := range objects { + t.Run(fmt.Sprintf("importing object %d", i), func(t *testing.T) { + require.Nil(t, + repo.PutObject(context.Background(), obj, obj.Vector, nil, nil, nil, 0)) + }) + } + }) + + t.Run("Filter before deletion", func(t *testing.T) { + filterNil := buildFilter("other", true, null, dtBool) + paramsNil := dto.GetParams{ + ClassName: class.Class, + Pagination: &filters.Pagination{Limit: 2}, + Filters: filterNil, + } + resNil, err := repo.Search(context.Background(), paramsNil) + assert.Nil(t, err) + assert.Equal(t, 1, len(resNil)) + assert.Equal(t, UUID2, resNil[0].ID) + + filterLen := buildFilter("len(name)", 9, eq, dtInt) + paramsLen := dto.GetParams{ + ClassName: class.Class, + Pagination: &filters.Pagination{Limit: 2}, + Filters: filterLen, + } + resLen, err := repo.Search(context.Background(), paramsLen) + assert.Nil(t, err) + assert.Equal(t, 1, len(resLen)) + assert.Equal(t, UUID2, resLen[0].ID) + }) + + t.Run("Delete object and filter again", func(t *testing.T) { + repo.DeleteObject(context.Background(), "DeletionClass", UUID2, time.Now(), nil, "", 0) + + filterNil := buildFilter("other", true, null, dtBool) + paramsNil := dto.GetParams{ + ClassName: class.Class, + Pagination: &filters.Pagination{Limit: 2}, + Filters: filterNil, + } + resNil, err := repo.Search(context.Background(), paramsNil) + assert.Nil(t, err) + assert.Equal(t, 0, len(resNil)) + + filterLen := 
buildFilter("len(name)", 9, eq, dtInt) + paramsLen := dto.GetParams{ + ClassName: class.Class, + Pagination: &filters.Pagination{Limit: 2}, + Filters: filterLen, + } + resLen, err := repo.Search(context.Background(), paramsLen) + assert.Nil(t, err) + assert.Equal(t, 0, len(resLen)) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/filters_limits_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/filters_limits_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c6530373a9689fb1a205a2ce440cc2b01f7e0dfe --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/filters_limits_integration_test.go @@ -0,0 +1,428 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/search" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +// This test aims to prevent a regression on +// https://github.com/weaviate/weaviate/issues/1352 +// +// It reuses the company-schema from the regular 
filters test, but runs them in +// isolation as to not interfere with the existing tests +func Test_LimitsOnChainedFilters(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) 
+ defer repo.Shutdown(context.Background()) + + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the class", func(t *testing.T) { + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + productClass, + companyClass, + }, + }, + } + + require.Nil(t, + migrator.AddClass(context.Background(), productClass)) + require.Nil(t, + migrator.AddClass(context.Background(), companyClass)) + + schemaGetter.schema = schema + }) + + data := chainedFilterCompanies(100) + + t.Run("import companies", func(t *testing.T) { + for i, company := range data { + t.Run(fmt.Sprintf("importing product %d", i), func(t *testing.T) { + require.Nil(t, + repo.PutObject(context.Background(), company, []float32{0.1, 0.2, 0.01, 0.2}, nil, nil, nil, 0)) + }) + } + }) + + t.Run("combine two filters with a strict limit", func(t *testing.T) { + limit := 20 + + filter := filterAnd( + buildFilter("price", 20, gte, dtInt), + buildFilter("price", 100, lt, dtInt), + ) + + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: companyClass.Class, + Filters: filter, + Pagination: &filters.Pagination{ + Limit: limit, + }, + Properties: search.SelectProperties{{Name: "price"}}, + }) + + require.Nil(t, err) + assert.Len(t, res, limit) + + for _, obj := range res { + assert.Less(t, obj.Schema.(map[string]interface{})["price"].(float64), + float64(100)) + assert.GreaterOrEqual(t, + obj.Schema.(map[string]interface{})["price"].(float64), float64(20)) + } + }) +} + +func chainedFilterCompanies(size int) []*models.Object { + out := make([]*models.Object, size) + + for i := range out { + out[i] = &models.Object{ + ID: mustNewUUID(), + Class: companyClass.Class, + Properties: map[string]interface{}{ + "price": int64(i), + }, + } + } + + return out +} + +// This test aims to prevent a regression on +// https://github.com/weaviate/weaviate/issues/1355 +// +// It reuses the company-schema from the regular filters test, but runs them in +// isolation as to not 
interfere with the existing tests +func Test_FilterLimitsAfterUpdates(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + + 
migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the class", func(t *testing.T) { + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + productClass, + companyClass, + }, + }, + } + + require.Nil(t, + migrator.AddClass(context.Background(), productClass)) + require.Nil(t, + migrator.AddClass(context.Background(), companyClass)) + + schemaGetter.schema = schema + }) + + data := chainedFilterCompanies(100) + + t.Run("import companies", func(t *testing.T) { + for i, company := range data { + t.Run(fmt.Sprintf("importing product %d", i), func(t *testing.T) { + require.Nil(t, + repo.PutObject(context.Background(), company, []float32{0.1, 0.2, 0.01, 0.2}, nil, nil, nil, 0)) + }) + } + }) + + t.Run("verify all with ref count 0 are found", func(t *testing.T) { + limit := 100 + filter := buildFilter("makesProduct", 0, eq, dtInt) + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: companyClass.Class, + Filters: filter, + Pagination: &filters.Pagination{ + Limit: limit, + }, + }) + + require.Nil(t, err) + assert.Len(t, res, limit) + }) + + t.Run("verify a non refcount prop", func(t *testing.T) { + limit := 100 + filter := buildFilter("price", float64(0), gte, dtNumber) + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: companyClass.Class, + Filters: filter, + Pagination: &filters.Pagination{ + Limit: limit, + }, + }) + + require.Nil(t, err) + assert.Len(t, res, limit) + }) + + t.Run("perform updates on each company", func(t *testing.T) { + // in this case we're altering the vector position, but it doesn't really + // matter - what we want to provoke is to fill up our index with deleted + // doc ids + for i, company := range data { + t.Run(fmt.Sprintf("importing product %d", i), func(t *testing.T) { + require.Nil(t, + repo.PutObject(context.Background(), company, []float32{0.1, 0.21, 0.01, 0.2}, nil, nil, nil, 0)) + }) + } + }) + + t.Run("verify all with ref count 0 are found", 
func(t *testing.T) { + limit := 100 + filter := buildFilter("makesProduct", 0, eq, dtInt) + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: companyClass.Class, + Filters: filter, + Pagination: &filters.Pagination{ + Limit: limit, + }, + }) + + require.Nil(t, err) + assert.Len(t, res, limit) + }) + + t.Run("verify a non refcount prop", func(t *testing.T) { + limit := 100 + filter := buildFilter("price", float64(0), gte, dtNumber) + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: companyClass.Class, + Filters: filter, + Pagination: &filters.Pagination{ + Limit: limit, + }, + }) + + require.Nil(t, err) + assert.Len(t, res, limit) + }) +} + +// This test aims to prevent a regression on +// https://github.com/weaviate/weaviate/issues/1356 +// +// It reuses the company-schema from the regular filters test, but runs them in +// isolation as to not interfere with the existing tests +func Test_AggregationsAfterUpdates(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, 
mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the class", func(t *testing.T) { + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + productClass, + companyClass, + }, + }, + } + + require.Nil(t, + migrator.AddClass(context.Background(), productClass)) + require.Nil(t, + migrator.AddClass(context.Background(), companyClass)) + + schemaGetter.schema = schema + }) + + data := chainedFilterCompanies(100) + + t.Run("import companies", func(t *testing.T) { + for i, company := range data { + t.Run(fmt.Sprintf("importing product %d", i), func(t *testing.T) { + require.Nil(t, + repo.PutObject(context.Background(), company, []float32{0.1, 0.2, 0.01, 0.2}, nil, nil, nil, 0)) + }) + } + }) + + t.Run("verify all with ref count 0 are correctly aggregated", + func(t *testing.T) { + filter := buildFilter("makesProduct", 0, eq, dtInt) + res, err := repo.Aggregate(context.Background(), + aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + Filters: filter, + IncludeMetaCount: true, + }, nil) + + require.Nil(t, err) + 
require.Len(t, res.Groups, 1) + assert.Equal(t, res.Groups[0].Count, 100) + }) + + t.Run("perform updates on each company", func(t *testing.T) { + // in this case we're altering the vector position, but it doesn't really + // matter - what we want to provoke is to fill up our index with deleted + // doc ids + for i, company := range data { + t.Run(fmt.Sprintf("importing product %d", i), func(t *testing.T) { + require.Nil(t, + repo.PutObject(context.Background(), company, []float32{0.1, 0.21, 0.01, 0.2}, nil, nil, nil, 0)) + }) + } + }) + + t.Run("verify all with ref count 0 are correctly aggregated", + func(t *testing.T) { + filter := buildFilter("makesProduct", 0, eq, dtInt) + res, err := repo.Aggregate(context.Background(), + aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + Filters: filter, + IncludeMetaCount: true, + }, nil) + + require.Nil(t, err) + require.Len(t, res.Groups, 1) + assert.Equal(t, res.Groups[0].Count, 100) + }) + + t.Run("verify all with ref count 0 are correctly aggregated", + func(t *testing.T) { + filter := buildFilter("makesProduct", 0, eq, dtInt) + res, err := repo.Aggregate(context.Background(), + aggregation.Params{ + ClassName: schema.ClassName(companyClass.Class), + Filters: filter, + IncludeMetaCount: true, + }, nil) + + require.Nil(t, err) + require.Len(t, res.Groups, 1) + assert.Equal(t, 100, res.Groups[0].Count) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/filters_on_refs_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/filters_on_refs_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ddb785034563f24e454dc6fe2334605e6f5875a2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/filters_on_refs_integration_test.go @@ -0,0 +1,843 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +func TestRefFilters(t *testing.T) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + 
mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryLimit: 20, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(testCtx()) + migrator := NewMigrator(repo, logger, "node1") + + t.Run("adding all classes to the schema", func(t *testing.T) { + schemaGetter.schema.Objects = &models.Schema{} + for _, class := range parkingGaragesSchema().Objects.Classes { + t.Run(fmt.Sprintf("add %s", class.Class), func(t *testing.T) { + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + schemaGetter.schema.Objects.Classes = append(schemaGetter.schema.Objects.Classes, class) + }) + } + }) + + t.Run("importing with various combinations of props", func(t *testing.T) { + objects := []models.Object{ + { + Class: "MultiRefParkingGarage", + Properties: map[string]interface{}{ + "name": "Luxury Parking Garage", + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(48.864716), + Longitude: ptFloat32(2.349014), + }, + }, + ID: "a7e10b55-1ac4-464f-80df-82508eea1951", + CreationTimeUnix: 1566469890, + }, + { + Class: "MultiRefParkingGarage", + Properties: map[string]interface{}{ + "name": "Crappy 
Parking Garage", + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(42.331429), + Longitude: ptFloat32(-83.045753), + }, + }, + ID: "ba2232cf-bb0e-413d-b986-6aa996d34d2e", + CreationTimeUnix: 1566469892, + }, + { + Class: "MultiRefParkingLot", + Properties: map[string]interface{}{ + "name": "Fancy Parking Lot", + }, + ID: "1023967b-9512-475b-8ef9-673a110b695d", + CreationTimeUnix: 1566469894, + }, + { + Class: "MultiRefParkingLot", + Properties: map[string]interface{}{ + "name": "The worst parking lot youve ever seen", + }, + ID: "901859d8-69bf-444c-bf43-498963d798d2", + CreationTimeUnix: 1566469897, + }, + { + Class: "MultiRefCar", + Properties: map[string]interface{}{ + "name": "Car which is parked no where", + }, + ID: "329c306b-c912-4ec7-9b1d-55e5e0ca8dea", + CreationTimeUnix: 1566469899, + }, + { + Class: "MultiRefCar", + Properties: map[string]interface{}{ + "name": "Car which is parked in a garage", + "parkedAt": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/MultiRefParkingGarage/a7e10b55-1ac4-464f-80df-82508eea1951", + }, + }, + }, + ID: "fe3ca25d-8734-4ede-9a81-bc1ed8c3ea43", + CreationTimeUnix: 1566469902, + }, + { + Class: "MultiRefCar", + Properties: map[string]interface{}{ + "name": "Car which is parked in a lot", + "parkedAt": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/1023967b-9512-475b-8ef9-673a110b695d", + }, + }, + }, + ID: "21ab5130-627a-4268-baef-1a516bd6cad4", + CreationTimeUnix: 1566469906, + }, + { + Class: "MultiRefCar", + Properties: map[string]interface{}{ + "name": "Car which is parked in two places at the same time (magic!)", + "parkedAt": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/a7e10b55-1ac4-464f-80df-82508eea1951", + }, + &models.SingleRef{ + Beacon: "weaviate://localhost/MultiRefParkingLot/1023967b-9512-475b-8ef9-673a110b695d", + }, + }, + }, + ID: "533673a7-2a5c-4e1c-b35d-a3809deabace", + CreationTimeUnix: 1566469909, + }, + { + Class: 
"MultiRefDriver", + Properties: map[string]interface{}{ + "name": "Johny Drivemuch", + "drives": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/533673a7-2a5c-4e1c-b35d-a3809deabace", + }, + }, + }, + ID: "9653ab38-c16b-4561-80df-7a7e19300dd0", + CreationTimeUnix: 1566469912, + }, + { + Class: "MultiRefPerson", + Properties: map[string]interface{}{ + "name": "Jane Doughnut", + "friendsWith": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/9653ab38-c16b-4561-80df-7a7e19300dd0", + }, + }, + }, + ID: "91ad23a3-07ba-4d4c-9836-76c57094f734", + CreationTimeUnix: 1566469915, + }, + { + Class: "MultiRefSociety", + Properties: map[string]interface{}{ + "name": "Cool People", + "hasMembers": models.MultipleRef{ + &models.SingleRef{ + Beacon: "weaviate://localhost/91ad23a3-07ba-4d4c-9836-76c57094f734", + }, + }, + }, + ID: "5cd9afa6-f3df-4f57-a204-840d6b256dba", + CreationTimeUnix: 1566469918, + }, + } + + for _, thing := range objects { + t.Run(fmt.Sprintf("add %s", thing.ID), func(t *testing.T) { + err := repo.PutObject(context.Background(), &thing, []float32{1, 2, 3, 4, 5, 6, 7}, nil, nil, nil, 0) + require.Nil(t, err) + }) + } + }) + + t.Run("filtering", func(t *testing.T) { + t.Run("one level deep", func(t *testing.T) { + t.Run("ref name matches", func(t *testing.T) { + filter := filterCarParkedAtGarage(schema.DataTypeText, + "name", filters.OperatorEqual, "Luxury Parking Garage") + params := getParamsWithFilter("MultiRefCar", filter) + + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + require.Len(t, res, 2) + }) + + t.Run("ref id matches", func(t *testing.T) { + filter := filterCarParkedAtGarage(schema.DataTypeText, + "id", filters.OperatorEqual, "a7e10b55-1ac4-464f-80df-82508eea1951") + params := getParamsWithFilter("MultiRefCar", filter) + + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + require.Len(t, res, 2) + }) + + t.Run("ref name doesn't match", func(t 
*testing.T) { + filter := filterCarParkedAtGarage(schema.DataTypeText, + "name", filters.OperatorEqual, "There is no parking garage with this name") + params := getParamsWithFilter("MultiRefCar", filter) + + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + require.Len(t, res, 0) + }) + + t.Run("within geo range", func(t *testing.T) { + filter := filterCarParkedAtGarage(schema.DataTypeGeoCoordinates, + "location", filters.OperatorWithinGeoRange, filters.GeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + Latitude: ptFloat32(48.801407), + Longitude: ptFloat32(2.130122), + }, + Distance: 100000, + }) + params := getParamsWithFilter("MultiRefCar", filter) + + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + require.Len(t, res, 2) + + names := extractNames(res) + expectedNames := []string{ + "Car which is parked in a garage", + "Car which is parked in two places at the same time (magic!)", + } + + assert.ElementsMatch(t, names, expectedNames) + }) + + t.Run("outside of geo range", func(t *testing.T) { + filter := filterCarParkedAtGarage(schema.DataTypeGeoCoordinates, + "location", filters.OperatorWithinGeoRange, filters.GeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + Latitude: ptFloat32(42.279594), + Longitude: ptFloat32(-83.732124), + }, + Distance: 100000, + }) + params := getParamsWithFilter("MultiRefCar", filter) + + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + require.Len(t, res, 0) + }) + + t.Run("combining ref filter with primitive root filter", func(t *testing.T) { + parkedAtFilter := filterCarParkedAtGarage(schema.DataTypeGeoCoordinates, + "location", filters.OperatorWithinGeoRange, filters.GeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + Latitude: ptFloat32(48.801407), + Longitude: ptFloat32(2.130122), + }, + Distance: 100000, + }) + + filter := &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + 
*(parkedAtFilter.Root), + { + On: &filters.Path{ + Class: schema.ClassName("MultiRefCar"), + Property: schema.PropertyName("name"), + }, + Value: &filters.Value{ + Value: "Car which is parked in a garage", + Type: schema.DataTypeText, + }, + Operator: filters.OperatorEqual, + }, + }, + }, + } + params := getParamsWithFilter("MultiRefCar", filter) + + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + require.Len(t, res, 1) + + names := extractNames(res) + expectedNames := []string{ + "Car which is parked in a garage", + } + + assert.ElementsMatch(t, names, expectedNames) + }) + }) + + t.Run("multiple levels deep", func(t *testing.T) { + t.Run("ref name matches", func(t *testing.T) { + filter := filterDrivesCarParkedAtGarage(schema.DataTypeText, + "name", filters.OperatorEqual, "Luxury Parking Garage") + params := getParamsWithFilter("MultiRefDriver", filter) + + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + require.Len(t, res, 1) + + assert.Equal(t, "Johny Drivemuch", res[0].Schema.(map[string]interface{})["name"]) + }) + + t.Run("ref name doesn't match", func(t *testing.T) { + filter := filterDrivesCarParkedAtGarage(schema.DataTypeText, + "name", filters.OperatorEqual, "There is no parking garage with this name") + params := getParamsWithFilter("MultiRefDriver", filter) + + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + require.Len(t, res, 0) + }) + + t.Run("within geo range", func(t *testing.T) { + filter := filterDrivesCarParkedAtGarage(schema.DataTypeGeoCoordinates, + "location", filters.OperatorWithinGeoRange, filters.GeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + Latitude: ptFloat32(48.801407), + Longitude: ptFloat32(2.130122), + }, + Distance: 100000, + }) + params := getParamsWithFilter("MultiRefDriver", filter) + + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + require.Len(t, res, 1) + + assert.Equal(t, "Johny Drivemuch", 
res[0].Schema.(map[string]interface{})["name"]) + }) + + t.Run("outside of geo range", func(t *testing.T) { + filter := filterDrivesCarParkedAtGarage(schema.DataTypeGeoCoordinates, + "location", filters.OperatorWithinGeoRange, filters.GeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + Latitude: ptFloat32(42.279594), + Longitude: ptFloat32(-83.732124), + }, + Distance: 100000, + }) + params := getParamsWithFilter("MultiRefDriver", filter) + + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + require.Len(t, res, 0) + }) + }) + + t.Run("by reference count", func(t *testing.T) { + t.Run("equal to zero", func(t *testing.T) { + filter := filterCarParkedCount(filters.OperatorEqual, 0) + params := getParamsWithFilter("MultiRefCar", filter) + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + require.Len(t, res, 1) // there is just one car parked nowhere + assert.Equal(t, "Car which is parked no where", res[0].Schema.(map[string]interface{})["name"]) + }) + + t.Run("equal to one", func(t *testing.T) { + filter := filterCarParkedCount(filters.OperatorEqual, 1) + params := getParamsWithFilter("MultiRefCar", filter) + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + expectedNames := []string{ + "Car which is parked in a garage", + "Car which is parked in a lot", + } + assert.ElementsMatch(t, expectedNames, extractNames(res)) + }) + + t.Run("equal to more than one", func(t *testing.T) { + filter := filterCarParkedCount(filters.OperatorGreaterThan, 1) + params := getParamsWithFilter("MultiRefCar", filter) + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + expectedNames := []string{ + "Car which is parked in two places at the same time (magic!)", + } + assert.ElementsMatch(t, expectedNames, extractNames(res)) + }) + + t.Run("greater or equal one", func(t *testing.T) { + filter := filterCarParkedCount(filters.OperatorGreaterThanEqual, 1) + params := 
getParamsWithFilter("MultiRefCar", filter) + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + expectedNames := []string{ + "Car which is parked in a garage", + "Car which is parked in a lot", + "Car which is parked in two places at the same time (magic!)", + } + assert.ElementsMatch(t, expectedNames, extractNames(res)) + }) + + t.Run("less than one", func(t *testing.T) { + filter := filterCarParkedCount(filters.OperatorLessThan, 1) + params := getParamsWithFilter("MultiRefCar", filter) + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + expectedNames := []string{ + "Car which is parked no where", + } + assert.ElementsMatch(t, expectedNames, extractNames(res)) + }) + + t.Run("less than or equal one", func(t *testing.T) { + filter := filterCarParkedCount(filters.OperatorLessThanEqual, 1) + params := getParamsWithFilter("MultiRefCar", filter) + res, err := repo.Search(context.Background(), params) + require.Nil(t, err) + expectedNames := []string{ + "Car which is parked in a garage", + "Car which is parked in a lot", + "Car which is parked no where", + } + assert.ElementsMatch(t, expectedNames, extractNames(res)) + }) + }) + }) +} + +func TestRefFilters_MergingWithAndOperator(t *testing.T) { + // This test is to prevent a regression where checksums get lost on an AND + // operator, which was discovered through a journey test as part of gh-1286. 
+ // The schema is modelled after the journey test, as the regular tests suites + // above do not seem to run into this issue on their own + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, 
repo.WaitForStartup(testCtx())) + defer repo.Shutdown(testCtx()) + migrator := NewMigrator(repo, logger, "node1") + + t.Run("adding all classes to the schema", func(t *testing.T) { + schemaGetter.schema.Objects = &models.Schema{} + for _, class := range cityCountryAirportSchema().Objects.Classes { + t.Run(fmt.Sprintf("add %s", class.Class), func(t *testing.T) { + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + schemaGetter.schema.Objects.Classes = append(schemaGetter.schema.Objects.Classes, class) + }) + } + }) + + const ( + netherlands strfmt.UUID = "67b79643-cf8b-4b22-b206-6e63dbb4e57a" + germany strfmt.UUID = "561eea29-b733-4079-b50b-cfabd78190b7" + amsterdam strfmt.UUID = "8f5f8e44-d348-459c-88b1-c1a44bb8f8be" + rotterdam strfmt.UUID = "660db307-a163-41d2-8182-560782cd018f" + berlin strfmt.UUID = "9b9cbea5-e87e-4cd0-89af-e2f424fd52d6" + dusseldorf strfmt.UUID = "6ffb03f8-a853-4ec5-a5d8-302e45aaaf13" + nullisland strfmt.UUID = "823abeca-eef3-41c7-b587-7a6977b08003" + airport1 strfmt.UUID = "4770bb19-20fd-406e-ac64-9dac54c27a0f" + airport2 strfmt.UUID = "cad6ab9b-5bb9-4388-a933-a5bdfd23db37" + airport3 strfmt.UUID = "55a4dbbb-e2af-4b2a-901d-98146d1eeca7" + airport4 strfmt.UUID = "62d15920-b546-4844-bc87-3ae33268fab5" + ) + + t.Run("import all data objects", func(t *testing.T) { + objects := []*models.Object{ + { + Class: "Country", + ID: netherlands, + Properties: map[string]interface{}{ + "name": "Netherlands", + }, + }, + { + Class: "Country", + ID: germany, + Properties: map[string]interface{}{ + "name": "Germany", + }, + }, + + // cities + { + Class: "City", + ID: amsterdam, + Properties: map[string]interface{}{ + "name": "Amsterdam", + "population": int64(1800000), + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(52.366667), + Longitude: ptFloat32(4.9), + }, + "inCountry": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI( + strfmt.URI(crossref.NewLocalhost("", netherlands).String()), + ), + }, + }, + }, + 
}, + { + Class: "City", + ID: rotterdam, + Properties: map[string]interface{}{ + "name": "Rotterdam", + "population": int64(600000), + "inCountry": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(crossref.NewLocalhost("", netherlands).String()), + }, + }, + }, + }, + { + Class: "City", + ID: berlin, + Properties: map[string]interface{}{ + "name": "Berlin", + "population": int64(3470000), + "inCountry": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(crossref.NewLocalhost("", germany).String()), + }, + }, + }, + }, + { + Class: "City", + ID: dusseldorf, + Properties: map[string]interface{}{ + "name": "Dusseldorf", + "population": int64(600000), + "inCountry": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(crossref.NewLocalhost("", germany).String()), + }, + }, + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(51.225556), + Longitude: ptFloat32(6.782778), + }, + }, + }, + + { + Class: "City", + ID: nullisland, + Properties: map[string]interface{}{ + "name": "Null Island", + "population": 0, + "location": &models.GeoCoordinates{ + Latitude: ptFloat32(0), + Longitude: ptFloat32(0), + }, + }, + }, + + // airports + { + Class: "Airport", + ID: airport1, + Properties: map[string]interface{}{ + "code": "10000", + "phone": map[string]interface{}{ + "input": "+311234567", + }, + "inCity": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(crossref.NewLocalhost("", amsterdam).String()), + }, + }, + }, + }, + { + Class: "Airport", + ID: airport2, + Properties: map[string]interface{}{ + "code": "20000", + "inCity": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(crossref.NewLocalhost("", rotterdam).String()), + }, + }, + }, + }, + { + Class: "Airport", + ID: airport3, + Properties: map[string]interface{}{ + "code": "30000", + "inCity": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(crossref.NewLocalhost("", dusseldorf).String()), + }, + }, + }, + }, + { + Class: "Airport", + ID: 
airport4, + Properties: map[string]interface{}{ + "code": "40000", + "inCity": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(crossref.NewLocalhost("", berlin).String()), + }, + }, + }, + }, + } + + for _, obj := range objects { + require.Nil(t, repo.PutObject(context.Background(), obj, []float32{0.1}, nil, nil, nil, 0)) + } + }) + + t.Run("combining multi-level ref filters with AND", func(t *testing.T) { + // In gh-1286 we discovered that on this query the checksum was missing and + // we somehow didn't perform a merge, but rather always took the first set + // of ids + + filter := filterAirportsInGermanCitiesOver600k() + res, err := repo.Search(context.Background(), + getParamsWithFilter("Airport", filter)) + require.Nil(t, err) + + expectedCodes := []string{"40000"} + actualCodes := extractCodes(res) + + assert.Equal(t, expectedCodes, actualCodes) + }) +} + +func filterCarParkedAtGarage(dataType schema.DataType, + prop string, operator filters.Operator, value interface{}, +) *filters.LocalFilter { + return &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: operator, + On: &filters.Path{ + Class: schema.ClassName("MultiRefCar"), + Property: schema.PropertyName("parkedAt"), + Child: &filters.Path{ + Class: schema.ClassName("MultiRefParkingGarage"), + Property: schema.PropertyName(prop), + }, + }, + Value: &filters.Value{ + Value: value, + Type: dataType, + }, + }, + } +} + +func filterCarParkedCount(operator filters.Operator, value int) *filters.LocalFilter { + return &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: operator, + On: &filters.Path{ + Class: schema.ClassName("MultiRefCar"), + Property: schema.PropertyName("parkedAt"), + }, + Value: &filters.Value{ + Value: value, + Type: schema.DataTypeInt, + }, + }, + } +} + +func filterDrivesCarParkedAtGarage(dataType schema.DataType, + prop string, operator filters.Operator, value interface{}, +) *filters.LocalFilter { + return &filters.LocalFilter{ + Root: &filters.Clause{ + 
Operator: operator, + On: &filters.Path{ + Class: schema.ClassName("MultiRefDriver"), + Property: schema.PropertyName("drives"), + Child: filterCarParkedAtGarage(dataType, prop, operator, value).Root.On, + }, + Value: &filters.Value{ + Value: value, + Type: dataType, + }, + }, + } +} + +func filterAirportsInGermanCitiesOver600k() *filters.LocalFilter { + return &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: and, + Operands: []filters.Clause{ + { + Operator: gt, + On: &filters.Path{ + Class: schema.ClassName("Airport"), + Property: schema.PropertyName("inCity"), + Child: &filters.Path{ + Class: schema.ClassName("City"), + Property: schema.PropertyName("population"), + }, + }, + Value: &filters.Value{ + Value: 600000, + Type: dtInt, + }, + }, + { + Operator: eq, + On: &filters.Path{ + Class: schema.ClassName("Airport"), + Property: schema.PropertyName("inCity"), + Child: &filters.Path{ + Class: schema.ClassName("City"), + Property: schema.PropertyName("inCountry"), + Child: &filters.Path{ + Class: schema.ClassName("Country"), + Property: schema.PropertyName("name"), + }, + }, + }, + Value: &filters.Value{ + Value: "Germany", + Type: schema.DataTypeText, + }, + }, + }, + }, + } +} + +func getParamsWithFilter(className string, filter *filters.LocalFilter) dto.GetParams { + return dto.GetParams{ + Filters: filter, + // we don't care about actually resolving the ref as long as filtering + // on it worked + Properties: search.SelectProperties{{Name: "name"}, {Name: "code"}}, + Pagination: &filters.Pagination{ + Offset: 0, + Limit: 10, + }, + ClassName: className, + } +} + +func extractNames(in []search.Result) []string { + out := make([]string, len(in)) + for i, res := range in { + out[i] = res.Schema.(map[string]interface{})["name"].(string) + } + + return out +} + +func extractCodes(in []search.Result) []string { + out := make([]string, len(in)) + for i, res := range in { + out[i] = res.Schema.(map[string]interface{})["code"].(string) + } + + return out +} 
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/group_merger.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/group_merger.go new file mode 100644 index 0000000000000000000000000000000000000000..b82f95fda99b269c594180e20673c23e40692d27 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/group_merger.go @@ -0,0 +1,122 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "fmt" + "sort" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/storobj" +) + +type groupMerger struct { + objects []*storobj.Object + dists []float32 + groupBy *searchparams.GroupBy +} + +func newGroupMerger(objects []*storobj.Object, dists []float32, + groupBy *searchparams.GroupBy, +) *groupMerger { + return &groupMerger{objects, dists, groupBy} +} + +func (gm *groupMerger) Do() ([]*storobj.Object, []float32, error) { + groups := map[string][]*additional.Group{} + objects := map[string][]int{} + + for i, obj := range gm.objects { + g, ok := obj.AdditionalProperties()["group"] + if !ok { + return nil, nil, fmt.Errorf("group not found for object: %v", obj.ID()) + } + group, ok := g.(*additional.Group) + if !ok { + return nil, nil, fmt.Errorf("wrong group type for object: %v", obj.ID()) + } + groups[group.GroupedBy.Value] = append(groups[group.GroupedBy.Value], group) + objects[group.GroupedBy.Value] = append(objects[group.GroupedBy.Value], i) + } + + getMinDistance := func(groups []*additional.Group) float32 { + min := groups[0].MinDistance + for i := range groups { + if groups[i].MinDistance < min { + min = groups[i].MinDistance + } + } + return min + } + + type groupMinDistance struct 
{ + value string + distance float32 + } + + groupDistances := []groupMinDistance{} + for val, group := range groups { + groupDistances = append(groupDistances, groupMinDistance{ + value: val, distance: getMinDistance(group), + }) + } + + sort.Slice(groupDistances, func(i, j int) bool { + return groupDistances[i].distance < groupDistances[j].distance + }) + + desiredLength := len(groups) + if desiredLength > gm.groupBy.Groups { + desiredLength = gm.groupBy.Groups + } + + objs := make([]*storobj.Object, desiredLength) + dists := make([]float32, desiredLength) + for i, groupDistance := range groupDistances[:desiredLength] { + val := groupDistance.value + group := groups[groupDistance.value] + count := 0 + hits := []map[string]interface{}{} + for _, g := range group { + count += g.Count + hits = append(hits, g.Hits...) + } + + sort.Slice(hits, func(i, j int) bool { + return hits[i]["_additional"].(*additional.GroupHitAdditional).Distance < + hits[j]["_additional"].(*additional.GroupHitAdditional).Distance + }) + + if len(hits) > gm.groupBy.ObjectsPerGroup { + hits = hits[:gm.groupBy.ObjectsPerGroup] + count = len(hits) + } + + indx := objects[val][0] + obj, dist := gm.objects[indx], gm.dists[indx] + obj.AdditionalProperties()["group"] = &additional.Group{ + ID: i, + GroupedBy: &additional.GroupedBy{ + Value: val, + Path: []string{gm.groupBy.Property}, + }, + Count: count, + Hits: hits, + MaxDistance: hits[0]["_additional"].(*additional.GroupHitAdditional).Distance, + MinDistance: hits[len(hits)-1]["_additional"].(*additional.GroupHitAdditional).Distance, + } + objs[i], dists[i] = obj, dist + } + + return objs, dists, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/helper_for_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helper_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6b68e166bb40ca06146d15a5d6e62258b21c95f0 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helper_for_test.go @@ -0,0 +1,458 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "log" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/mock" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/usecases/cluster" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/adapters/repos/db/indexcheckpoint" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/stopwords" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + "github.com/weaviate/weaviate/entities/storobj" + esync "github.com/weaviate/weaviate/entities/sync" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/monitoring" + schemaUC "github.com/weaviate/weaviate/usecases/schema" +) + +func parkingGaragesSchema() schema.Schema { + return schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "MultiRefParkingGarage", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: 
models.PropertyTokenizationWhitespace, + }, + { + Name: "location", + DataType: []string{string(schema.DataTypeGeoCoordinates)}, + }, + }, + }, + { + Class: "MultiRefParkingLot", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + { + Class: "MultiRefCar", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "parkedAt", + DataType: []string{"MultiRefParkingGarage", "MultiRefParkingLot"}, + }, + }, + }, + { + Class: "MultiRefDriver", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "drives", + DataType: []string{"MultiRefCar"}, + }, + }, + }, + { + Class: "MultiRefPerson", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "friendsWith", + DataType: []string{"MultiRefDriver"}, + }, + }, + }, + { + Class: "MultiRefSociety", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "hasMembers", + DataType: []string{"MultiRefPerson"}, + }, + }, + }, + + // for classifications test + { + Class: "ExactCategory", + 
VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + { + Class: "MainCategory", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + }, + }, + } +} + +func cityCountryAirportSchema() schema.Schema { + return schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "Country", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + {Name: "name", DataType: schema.DataTypeText.PropString(), Tokenization: models.PropertyTokenizationWhitespace}, + }, + }, + { + Class: "City", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + {Name: "name", DataType: schema.DataTypeText.PropString(), Tokenization: models.PropertyTokenizationWhitespace}, + {Name: "inCountry", DataType: []string{"Country"}}, + {Name: "population", DataType: []string{"int"}}, + {Name: "location", DataType: []string{"geoCoordinates"}}, + }, + }, + { + Class: "Airport", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + {Name: "code", DataType: schema.DataTypeText.PropString(), Tokenization: models.PropertyTokenizationWhitespace}, + {Name: "phone", DataType: []string{"phoneNumber"}}, + {Name: "inCity", DataType: []string{"City"}}, + }, + }, + }, + }, + } +} + +func testCtx() context.Context { + //nolint:govet + ctx, _ := context.WithTimeout(context.Background(), 30*time.Second) + return ctx +} + +func getRandomSeed() *rand.Rand { + return 
rand.New(rand.NewSource(time.Now().UnixNano())) +} + +func testShard(t *testing.T, ctx context.Context, className string, indexOpts ...func(*Index)) (ShardLike, *Index) { + return testShardWithSettings(t, ctx, &models.Class{Class: className}, enthnsw.UserConfig{Skip: true}, + false, false, indexOpts...) +} + +func testShardMultiTenant(t *testing.T, ctx context.Context, className string, indexOpts ...func(*Index)) (ShardLike, *Index) { + return testShardWithMultiTenantSettings(t, ctx, &models.Class{Class: className}, enthnsw.UserConfig{Skip: true}, + false, false, indexOpts...) +} + +func createTestDatabaseWithClass(t *testing.T, metrics *monitoring.PrometheusMetrics, classes ...*models.Class) *DB { + t.Helper() + + require.NotNil(t, metrics, "metrics parameter cannot be nil") + metricsCopy := *metrics + metricsCopy.Registerer = monitoring.NoopRegisterer + + shardState := singleShardState() + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + for _, class := range classes { + if className == class.Class { + return readFunc(class, shardState) + } + } + return nil + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + 
mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + db, err := New(logrus.New(), "node1", Config{ + RootPath: t.TempDir(), + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, &metricsCopy, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + + db.SetSchemaGetter(&fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: classes}}, + shardState: shardState, + }) + + require.Nil(t, db.WaitForStartup(t.Context())) + t.Cleanup(func() { + require.NoError(t, db.Shutdown(context.Background())) + }) + + return db +} + +func publishVectorMetricsFromDB(t *testing.T, db *DB) { + t.Helper() + + if !db.config.TrackVectorDimensions { + t.Logf("Vector dimensions tracking is disabled, returning 0") + return + } + db.metricsObserver.publishVectorMetrics(t.Context()) +} + +func getSingleShardNameFromRepo(repo *DB, className string) string { + shardName := "" + if !repo.config.TrackVectorDimensions { + log.Printf("Vector dimensions tracking is disabled, returning 0") + return shardName + } + index := repo.GetIndex(schema.ClassName(className)) + index.ForEachShard(func(name string, shard ShardLike) error { + shardName = shard.Name() + return nil + }) + return shardName +} + +func setupTestShardWithSettings(t *testing.T, ctx context.Context, class *models.Class, + vic schemaConfig.VectorIndexConfig, withStopwords, withCheckpoints, multiTenant bool, indexOpts ...func(*Index), +) (ShardLike, *Index) { + tmpDir := t.TempDir() + logger, _ := test.NewNullLogger() + maxResults := int64(10_000) + + var shardState *sharding.State + if multiTenant { + shardState = NewMultiTenantShardingStateBuilder(). + WithIndexName("multi-tenant-index"). + WithNodePrefix("node"). 
+ WithReplicationFactor(1). + WithTenant("foo-tenant", "HOT"). + Build() + } else { + shardState = singleShardState() + } + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: tmpDir, + QueryMaximumResults: maxResults, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + sch := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + schemaGetter := &fakeSchemaGetter{shardState: shardState, schema: sch} + + iic := schema.InvertedIndexConfig{} + if class.InvertedIndexConfig != nil { + iic = inverted.ConfigFromModel(class.InvertedIndexConfig) + } + var sd *stopwords.Detector + if withStopwords { + sd, err = 
stopwords.NewDetectorFromConfig(iic.Stopwords) + require.NoError(t, err) + } + var checkpts *indexcheckpoint.Checkpoints + if withCheckpoints { + checkpts, err = indexcheckpoint.New(tmpDir, logger) + require.NoError(t, err) + } + + idx := &Index{ + Config: IndexConfig{ + RootPath: tmpDir, + ClassName: schema.ClassName(class.Class), + QueryMaximumResults: maxResults, + ReplicationFactor: 1, + }, + metrics: NewMetrics(logger, nil, class.Class, ""), + partitioningEnabled: shardState.PartitioningEnabled, + invertedIndexConfig: iic, + vectorIndexUserConfig: vic, + vectorIndexUserConfigs: map[string]schemaConfig.VectorIndexConfig{}, + logger: logger, + getSchema: schemaGetter, + centralJobQueue: repo.jobQueueCh, + stopwords: sd, + indexCheckpoints: checkpts, + allocChecker: memwatch.NewDummyMonitor(), + shardCreateLocks: esync.NewKeyLocker(), + scheduler: repo.scheduler, + shardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + shardReindexer: NewShardReindexerV3Noop(), + } + idx.closingCtx, idx.closingCancel = context.WithCancel(context.Background()) + idx.initCycleCallbacksNoop() + for _, opt := range indexOpts { + opt(idx) + } + + shardName := shardState.AllPhysicalShards()[0] + + shard, err := idx.initShard(ctx, shardName, class, nil, idx.Config.DisableLazyLoadShards, true) + require.NoError(t, err) + + idx.shards.Store(shardName, shard) + + return idx.shards.Load(shardName), idx +} + +// Simplified functions that delegate to the common helper +func testShardWithMultiTenantSettings(t *testing.T, ctx context.Context, class *models.Class, + vic schemaConfig.VectorIndexConfig, withStopwords, withCheckpoints bool, indexOpts ...func(*Index), +) (ShardLike, *Index) { + return setupTestShardWithSettings(t, ctx, class, vic, withStopwords, withCheckpoints, true, indexOpts...) 
+} + +func testShardWithSettings(t *testing.T, ctx context.Context, class *models.Class, + vic schemaConfig.VectorIndexConfig, withStopwords, withCheckpoints bool, indexOpts ...func(*Index), +) (ShardLike, *Index) { + return setupTestShardWithSettings(t, ctx, class, vic, withStopwords, withCheckpoints, false, indexOpts...) +} + +func testObject(className string) *storobj.Object { + return &storobj.Object{ + MarshallerVersion: 1, + Object: models.Object{ + ID: strfmt.UUID(uuid.NewString()), + Class: className, + }, + } +} + +func createRandomObjects(r *rand.Rand, className string, numObj int, vectorDim int) []*storobj.Object { + obj := make([]*storobj.Object, numObj) + + for i := 0; i < numObj; i++ { + obj[i] = &storobj.Object{ + MarshallerVersion: 1, + Object: models.Object{ + ID: strfmt.UUID(uuid.NewString()), + Class: className, + }, + Vector: make([]float32, vectorDim), + } + + for d := 0; d < vectorDim; d++ { + obj[i].Vector[d] = r.Float32() + } + } + return obj +} + +func invertedConfig() *models.InvertedIndexConfig { + return &models.InvertedIndexConfig{ + CleanupIntervalSeconds: 60, + Stopwords: &models.StopwordConfig{ + Preset: "none", + }, + IndexNullState: true, + IndexPropertyLength: true, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/allow_list.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/allow_list.go new file mode 100644 index 0000000000000000000000000000000000000000..1dc98c12056006c3aa24a61d2490110e678838cd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/allow_list.go @@ -0,0 +1,158 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package helpers + +import ( + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" +) + +type AllowList interface { + Close() + + Insert(ids ...uint64) + Contains(id uint64) bool + DeepCopy() AllowList + WrapOnWrite() AllowList + Slice() []uint64 + + IsEmpty() bool + Len() int + Min() uint64 + Max() uint64 + Size() uint64 + Truncate(uint64) AllowList + + Iterator() AllowListIterator + LimitedIterator(limit int) AllowListIterator +} + +type AllowListIterator interface { + Next() (uint64, bool) + Len() int +} + +func NewAllowList(ids ...uint64) AllowList { + return NewAllowListFromBitmap(roaringset.NewBitmap(ids...)) +} + +func NewAllowListFromBitmap(bm *sroar.Bitmap) AllowList { + return NewAllowListCloseableFromBitmap(bm, func() {}) +} + +func NewAllowListCloseableFromBitmap(bm *sroar.Bitmap, release func()) AllowList { + return &BitmapAllowList{Bm: bm, release: release} +} + +func NewAllowListFromBitmapDeepCopy(bm *sroar.Bitmap) AllowList { + return NewAllowListFromBitmap(bm.Clone()) +} + +// this was changed to be public to allow for accessing the underlying bitmap and intersecting it with other *sroar.Bitmap for faster keyword retrieval +// We should consider making this private again and adding a method to intersect two AllowLists, but at the same time, it would also make the interface bloated +// and add the burden of supporting this method in all (future, if any) implementations of AllowList +type BitmapAllowList struct { + Bm *sroar.Bitmap + release func() +} + +func (al *BitmapAllowList) Close() { + al.release() +} + +func (al *BitmapAllowList) Insert(ids ...uint64) { + al.Bm.SetMany(ids) +} + +func (al *BitmapAllowList) Contains(id uint64) bool { + return al.Bm.Contains(id) +} + +func (al *BitmapAllowList) DeepCopy() AllowList { + return NewAllowListFromBitmapDeepCopy(al.Bm) +} + +func (al *BitmapAllowList) WrapOnWrite() AllowList { + return newWrappedAllowList(al) +} + +func 
(al *BitmapAllowList) Slice() []uint64 { + return al.Bm.ToArray() +} + +func (al *BitmapAllowList) IsEmpty() bool { + return al.Bm.IsEmpty() +} + +func (al *BitmapAllowList) Len() int { + return al.Bm.GetCardinality() +} + +func (al *BitmapAllowList) Min() uint64 { + return al.Bm.Minimum() +} + +func (al *BitmapAllowList) Max() uint64 { + return al.Bm.Maximum() +} + +func (al *BitmapAllowList) Size() uint64 { + // TODO provide better size estimation + return uint64(1.5 * float64(len(al.Bm.ToBuffer()))) +} + +func (al *BitmapAllowList) Truncate(upTo uint64) AllowList { + card := al.Bm.GetCardinality() + if upTo < uint64(card) { + al.Bm.RemoveRange(upTo, uint64(al.Bm.GetCardinality()+1)) + } + return al +} + +func (al *BitmapAllowList) Iterator() AllowListIterator { + return al.LimitedIterator(0) +} + +func (al *BitmapAllowList) LimitedIterator(limit int) AllowListIterator { + return newBitmapAllowListIterator(al.Bm, limit) +} + +type bitmapAllowListIterator struct { + len int + counter int + it *sroar.Iterator +} + +func newBitmapAllowListIterator(bm *sroar.Bitmap, limit int) AllowListIterator { + len := bm.GetCardinality() + if limit > 0 && limit < len { + len = limit + } + + return &bitmapAllowListIterator{ + len: len, + counter: 0, + it: bm.NewIterator(), + } +} + +func (i *bitmapAllowListIterator) Next() (uint64, bool) { + if i.counter >= i.len { + return 0, false + } + i.counter++ + return i.it.Next(), true +} + +func (i *bitmapAllowListIterator) Len() int { + return i.len +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/allow_list_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/allow_list_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f2025cb4c2894847469da6eea7ca15ac5a9858cb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/allow_list_test.go @@ -0,0 +1,284 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | 
__/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package helpers + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" +) + +func TestAllowList(t *testing.T) { + t.Run("allowlist created with no values", func(t *testing.T) { + al := NewAllowList() + + assert.Equal(t, 0, al.Len()) + assert.True(t, al.IsEmpty()) + + assert.Equal(t, uint64(0), al.Min()) + assert.Equal(t, uint64(0), al.Max()) + }) + + t.Run("allowlist created with initial values", func(t *testing.T) { + al := NewAllowList(1, 2, 3) + + assert.Equal(t, 3, al.Len()) + assert.False(t, al.IsEmpty()) + + assert.True(t, al.Contains(1)) + assert.True(t, al.Contains(2)) + assert.True(t, al.Contains(3)) + + assert.Equal(t, uint64(1), al.Min()) + assert.Equal(t, uint64(3), al.Max()) + }) + + t.Run("allowlist with inserted values", func(t *testing.T) { + al := NewAllowList(1, 2, 3) + al.Insert(4, 5) + + assert.Equal(t, 5, al.Len()) + assert.False(t, al.IsEmpty()) + + assert.True(t, al.Contains(1)) + assert.True(t, al.Contains(2)) + assert.True(t, al.Contains(3)) + assert.True(t, al.Contains(4)) + assert.True(t, al.Contains(5)) + + assert.Equal(t, uint64(1), al.Min()) + assert.Equal(t, uint64(5), al.Max()) + }) + + t.Run("allowlist exported to slice", func(t *testing.T) { + al := NewAllowList(1, 2, 3) + al.Insert(4, 5) + + assert.ElementsMatch(t, []uint64{1, 2, 3, 4, 5}, al.Slice()) + }) + + t.Run("allowlist deepcopy", func(t *testing.T) { + al := NewAllowList(1, 2, 3) + copy := al.DeepCopy() + al.Insert(4, 5) + + assert.Equal(t, 5, al.Len()) + assert.False(t, al.IsEmpty()) + + assert.True(t, al.Contains(1)) + assert.True(t, al.Contains(2)) + assert.True(t, al.Contains(3)) + assert.True(t, al.Contains(4)) + assert.True(t, al.Contains(5)) + + assert.Equal(t, uint64(1), al.Min()) + 
assert.Equal(t, uint64(5), al.Max()) + + assert.Equal(t, 3, copy.Len()) + assert.False(t, copy.IsEmpty()) + + assert.True(t, copy.Contains(1)) + assert.True(t, copy.Contains(2)) + assert.True(t, copy.Contains(3)) + + assert.Equal(t, uint64(1), copy.Min()) + assert.Equal(t, uint64(3), copy.Max()) + }) + + t.Run("allowlist created from bitmap", func(t *testing.T) { + bm := roaringset.NewBitmap(1, 2, 3) + + al := NewAllowListFromBitmap(bm) + bm.SetMany([]uint64{4, 5}) + + assert.Equal(t, 5, al.Len()) + assert.False(t, al.IsEmpty()) + + assert.True(t, al.Contains(1)) + assert.True(t, al.Contains(2)) + assert.True(t, al.Contains(3)) + assert.True(t, al.Contains(4)) + assert.True(t, al.Contains(5)) + + assert.Equal(t, uint64(1), al.Min()) + assert.Equal(t, uint64(5), al.Max()) + }) + + t.Run("allowlist created from bitmap deepcopy", func(t *testing.T) { + bm := roaringset.NewBitmap(1, 2, 3) + + al := NewAllowListFromBitmapDeepCopy(bm) + bm.SetMany([]uint64{4, 5}) + + assert.Equal(t, 3, al.Len()) + assert.False(t, al.IsEmpty()) + + assert.True(t, al.Contains(1)) + assert.True(t, al.Contains(2)) + assert.True(t, al.Contains(3)) + + assert.Equal(t, uint64(1), al.Min()) + assert.Equal(t, uint64(3), al.Max()) + }) +} + +func TestAllowList_Iterator(t *testing.T) { + t.Run("empty bitmap iterator", func(t *testing.T) { + it := NewAllowList().Iterator() + + id1, ok1 := it.Next() + id2, ok2 := it.Next() + + assert.Equal(t, 0, it.Len()) + assert.False(t, ok1) + assert.Equal(t, uint64(0), id1) + assert.False(t, ok2) + assert.Equal(t, uint64(0), id2) + }) + + t.Run("iterating step by step", func(t *testing.T) { + it := NewAllowList(3, 2, 1).Iterator() + + id1, ok1 := it.Next() + id2, ok2 := it.Next() + id3, ok3 := it.Next() + id4, ok4 := it.Next() + + assert.Equal(t, 3, it.Len()) + assert.True(t, ok1) + assert.Equal(t, uint64(1), id1) + assert.True(t, ok2) + assert.Equal(t, uint64(2), id2) + assert.True(t, ok3) + assert.Equal(t, uint64(3), id3) + assert.False(t, ok4) + 
assert.Equal(t, uint64(0), id4) + }) + + t.Run("iterating in loop", func(t *testing.T) { + it := NewAllowList(3, 2, 1).Iterator() + ids := []uint64{} + + for id, ok := it.Next(); ok; id, ok = it.Next() { + ids = append(ids, id) + } + + assert.Equal(t, 3, it.Len()) + assert.Equal(t, []uint64{1, 2, 3}, ids) + }) +} + +func TestAllowList_LimitedIterator(t *testing.T) { + t.Run("empty bitmap iterator", func(t *testing.T) { + it := NewAllowList().LimitedIterator(2) + + id1, ok1 := it.Next() + id2, ok2 := it.Next() + + assert.Equal(t, 0, it.Len()) + assert.False(t, ok1) + assert.Equal(t, uint64(0), id1) + assert.False(t, ok2) + assert.Equal(t, uint64(0), id2) + }) + + t.Run("iterating step by step (higher limit)", func(t *testing.T) { + it := NewAllowList(3, 2, 1).LimitedIterator(4) + + id1, ok1 := it.Next() + id2, ok2 := it.Next() + id3, ok3 := it.Next() + id4, ok4 := it.Next() + + assert.Equal(t, 3, it.Len()) + assert.True(t, ok1) + assert.Equal(t, uint64(1), id1) + assert.True(t, ok2) + assert.Equal(t, uint64(2), id2) + assert.True(t, ok3) + assert.Equal(t, uint64(3), id3) + assert.False(t, ok4) + assert.Equal(t, uint64(0), id4) + }) + + t.Run("iterating step by step (equal limit)", func(t *testing.T) { + it := NewAllowList(3, 2, 1).LimitedIterator(3) + + id1, ok1 := it.Next() + id2, ok2 := it.Next() + id3, ok3 := it.Next() + id4, ok4 := it.Next() + + assert.Equal(t, 3, it.Len()) + assert.True(t, ok1) + assert.Equal(t, uint64(1), id1) + assert.True(t, ok2) + assert.Equal(t, uint64(2), id2) + assert.True(t, ok3) + assert.Equal(t, uint64(3), id3) + assert.False(t, ok4) + assert.Equal(t, uint64(0), id4) + }) + + t.Run("iterating step by step (lower limit)", func(t *testing.T) { + it := NewAllowList(3, 2, 1).LimitedIterator(2) + + id1, ok1 := it.Next() + id2, ok2 := it.Next() + id3, ok3 := it.Next() + + assert.Equal(t, 2, it.Len()) + assert.True(t, ok1) + assert.Equal(t, uint64(1), id1) + assert.True(t, ok2) + assert.Equal(t, uint64(2), id2) + assert.False(t, ok3) + 
assert.Equal(t, uint64(0), id3) + }) + + t.Run("iterating in loop (higher limit)", func(t *testing.T) { + it := NewAllowList(3, 2, 1).LimitedIterator(4) + ids := []uint64{} + + for id, ok := it.Next(); ok; id, ok = it.Next() { + ids = append(ids, id) + } + + assert.Equal(t, 3, it.Len()) + assert.Equal(t, []uint64{1, 2, 3}, ids) + }) + + t.Run("iterating in loop (equal limit)", func(t *testing.T) { + it := NewAllowList(3, 2, 1).LimitedIterator(3) + ids := []uint64{} + + for id, ok := it.Next(); ok; id, ok = it.Next() { + ids = append(ids, id) + } + + assert.Equal(t, 3, it.Len()) + assert.Equal(t, []uint64{1, 2, 3}, ids) + }) + + t.Run("iterating in loop (lower limit)", func(t *testing.T) { + it := NewAllowList(3, 2, 1).LimitedIterator(2) + ids := []uint64{} + + for id, ok := it.Next(); ok; id, ok = it.Next() { + ids = append(ids, id) + } + + assert.Equal(t, 2, it.Len()) + assert.Equal(t, []uint64{1, 2}, ids) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/helpers.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/helpers.go new file mode 100644 index 0000000000000000000000000000000000000000..396915380a336a58a57117ed63e0564d7831bc37 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/helpers.go @@ -0,0 +1,113 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package helpers + +import ( + "fmt" + + "github.com/weaviate/weaviate/entities/filters" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + "github.com/weaviate/weaviate/entities/vectorindex/flat" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +var ( + ObjectsBucket = []byte("objects") + ObjectsBucketLSM = "objects" + VectorsCompressedBucketLSM = "vectors_compressed" + VectorsBucketLSM = "vectors" + DimensionsBucketLSM = "dimensions" +) + +const ObjectsBucketLSMDocIDSecondaryIndex int = 0 + +// MetaCountProp helps create an internally used propName for meta props that +// don't explicitly exist in the user schema, but are required for proper +// indexing, such as the count of arrays. +func MetaCountProp(propName string) string { + return fmt.Sprintf("%s__meta_count", propName) +} + +func PropLength(propName string) string { + return propName + filters.InternalPropertyLength +} + +func PropNull(propName string) string { + return propName + filters.InternalNullIndex +} + +// BucketFromPropNameLSM creates string used as the bucket name +// for a particular prop in the inverted index +func BucketFromPropNameLSM(propName string) string { + return fmt.Sprintf("property_%s", propName) +} + +func BucketFromPropNameLengthLSM(propName string) string { + return BucketFromPropNameLSM(PropLength(propName)) +} + +func BucketFromPropNameNullLSM(propName string) string { + return BucketFromPropNameLSM(PropNull(propName)) +} + +func BucketFromPropNameMetaCountLSM(propName string) string { + return BucketFromPropNameLSM(MetaCountProp(propName)) +} + +func TempBucketFromBucketName(bucketName string) string { + return bucketName + "_temp" +} + +func BucketSearchableFromPropNameLSM(propName string) string { + return BucketFromPropNameLSM(propName + "_searchable") +} + +func BucketRangeableFromPropNameLSM(propName string) string { + return BucketFromPropNameLSM(propName + "_rangeable") +} + +// 
CompressionRatioFromConfig calculates the compression ratio from vector index config +// This is used for inactive tenants where we don't have access to the actual vector index +func CompressionRatioFromConfig(config schemaConfig.VectorIndexConfig, dimensions int) float64 { + // Check for different compression types in config by type asserting + if hnswConfig, ok := config.(hnsw.UserConfig); ok { + // Check for different compression types in HNSW config + if hnswConfig.PQ.Enabled { + // PQ compression ratio depends on segments + segments := hnswConfig.PQ.Segments + if segments > 0 { + return float64(dimensions*4) / float64(segments) + } + } else if hnswConfig.BQ.Enabled { + // BQ compression ratio is approximately 32x + return 1.0 / 32.0 + } else if hnswConfig.SQ.Enabled { + // SQ compression ratio is approximately 4x + return 0.25 + } + } else if flatConfig, ok := config.(flat.UserConfig); ok { + // Check for different compression types in Flat config + if flatConfig.BQ.Enabled { + // BQ compression ratio is approximately 32x + return 1.0 / 32.0 + } else if flatConfig.PQ.Enabled { + // PQ compression ratio depends on segments (not supported in flat but handle gracefully) + return 0.25 + } else if flatConfig.SQ.Enabled { + // SQ compression ratio is approximately 4x + return 0.25 + } + } + + // Default to no compression + return 1.0 +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/slow_queries.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/slow_queries.go new file mode 100644 index 0000000000000000000000000000000000000000..1a52b8cff5276da4a91aef98d55016537d95407b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/slow_queries.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package helpers + +import ( + "context" + "fmt" + "maps" + "time" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/usecases/config/runtime" +) + +const ( + DefaultSlowLogThreshold = 5 * time.Second +) + +type SlowQueryReporter interface { + LogIfSlow(context.Context, time.Time, map[string]any) +} + +type BaseSlowReporter struct { + threshold *runtime.DynamicValue[time.Duration] + enabled *runtime.DynamicValue[bool] + logger logrus.FieldLogger +} + +func NewSlowQueryReporter( + enabled *runtime.DynamicValue[bool], + threshold *runtime.DynamicValue[time.Duration], + logger logrus.FieldLogger, +) *BaseSlowReporter { + logger.WithField("action", "slow_log_startup").Debugf("Starting SlowQueryReporter with %s threshold", threshold.Get()) + return &BaseSlowReporter{ + threshold: threshold, + enabled: enabled, + logger: logger, + } +} + +// LogIfSlow prints a warning log if the request takes longer than the threshold. +// Usage: +// +// startTime := time.Now() +// defer s.slowQueryReporter.LogIfSlow(startTime, map[string]any{ +// "key": "value" +// }) +// +// TODO (sebneira): Consider providing fields out of the box (e.g. shard info). Right now we're +// limited because of circular dependencies. 
+func (sq *BaseSlowReporter) LogIfSlow(ctx context.Context, startTime time.Time, fields map[string]any) { + if !sq.enabled.Get() { + return + } + + threshold := sq.threshold.Get() + if threshold <= 0 { + threshold = DefaultSlowLogThreshold + } + + took := time.Since(startTime) + if took > threshold { + if fields == nil { + fields = map[string]any{} + } + + detailFields := ExtractSlowQueryDetails(ctx) + if detailFields != nil { + maps.Copy(fields, detailFields) + } + fields["took"] = took + sq.logger.WithFields(fields).Warn(fmt.Sprintf("Slow query detected (%s)", took.Round(time.Millisecond))) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/slow_queries_details.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/slow_queries_details.go new file mode 100644 index 0000000000000000000000000000000000000000..2e76cd8462741ef11d7094388830361c64549d4d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/slow_queries_details.go @@ -0,0 +1,115 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package helpers + +import ( + "context" + "fmt" + "maps" + "sync" + "time" +) + +type SlowQueryDetails struct { + sync.Mutex + values map[string]any +} + +func NewSlowQueryDetails() *SlowQueryDetails { + return &SlowQueryDetails{ + values: make(map[string]any), + } +} + +func InitSlowQueryDetails(ctx context.Context) context.Context { + d := NewSlowQueryDetails() + return context.WithValue(ctx, "slow_query_details", d) +} + +func AnnotateSlowQueryLog(ctx context.Context, key string, value any) { + val := ctx.Value("slow_query_details") + if val == nil { + return + } + + details, ok := val.(*SlowQueryDetails) + if !ok { + return + } + + details.Lock() + defer details.Unlock() + + details.values[key] = value + + if asTime, ok := value.(time.Duration); ok { + details.values[key+"_string"] = asTime.String() + } +} + +func AnnotateSlowQueryLogAppend(ctx context.Context, key string, value any) { + val := ctx.Value("slow_query_details") + if val == nil { + return + } + + details, ok := val.(*SlowQueryDetails) + if !ok { + return + } + + details.Lock() + defer details.Unlock() + + prev, ok := details.values[key] + if !ok { + prev = make([]any, 0) + } + + asList, ok := prev.([]any) + if !ok { + return + } + + asList = append(asList, value) + details.values[key] = asList +} + +func SprintfWithNesting(nesting int, format string, args ...any) string { + nestingPrefix := " " + listItem := " - " + prefix := "" + for i := 0; i < nesting; i++ { + prefix += nestingPrefix + } + prefix += listItem + return fmt.Sprintf("%s%s", prefix, fmt.Sprintf(format, args...)) +} + +func ExtractSlowQueryDetails(ctx context.Context) map[string]any { + val := ctx.Value("slow_query_details") + if val == nil { + return nil + } + + details, ok := val.(*SlowQueryDetails) + if !ok { + return nil + } + + details.Lock() + defer details.Unlock() + + values := maps.Clone(details.values) + + return values +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/slow_queries_details_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/slow_queries_details_test.go new file mode 100644 index 0000000000000000000000000000000000000000..562ce4468a79cd377462962b129010a04abdb522 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/slow_queries_details_test.go @@ -0,0 +1,46 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package helpers + +import ( + "context" + "fmt" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSlowQueryDetailsJourney(t *testing.T) { + ctx := InitSlowQueryDetails(context.Background()) + + wg := &sync.WaitGroup{} + for i := 0; i < 100; i++ { + i := i + wg.Add(1) + go func() { + defer wg.Done() + AnnotateSlowQueryLog(ctx, fmt.Sprintf("key_%d", i), fmt.Sprintf("value_%d", i)) + }() + } + + wg.Wait() + + details := ExtractSlowQueryDetails(ctx) + require.Len(t, details, 100) + for i := 0; i < 100; i++ { + key := fmt.Sprintf("key_%d", i) + value := fmt.Sprintf("value_%d", i) + assert.Equal(t, value, details[key]) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/slow_queries_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/slow_queries_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8e46e76d66aa1a162dcbe5e3a7b39cd351a33746 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/slow_queries_test.go @@ -0,0 +1,77 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package helpers + +import ( + "context" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/usecases/config/runtime" +) + +func TestBaseSlowReporter_LogIfSlow(t *testing.T) { + tests := map[string]struct { + // input + enabled bool + threshold time.Duration + latencyMs int + expected any + fields map[string]any + + // output + expectLog bool + message string + }{ + "sanity": { + enabled: true, + threshold: 200 * time.Millisecond, + latencyMs: 2000, + fields: map[string]any{"foo": "bar"}, + + expectLog: true, + message: "Slow query detected (2s)", + }, + "fast query": { + enabled: true, + threshold: 100 * time.Millisecond, + latencyMs: 50, + fields: map[string]any{"foo": "bar"}, + + expectLog: false, + message: "", + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + logger, hook := test.NewNullLogger() + logger.Error("Helloerror") + sq := NewSlowQueryReporter(runtime.NewDynamicValue(tt.enabled), + runtime.NewDynamicValue(tt.threshold), logger) + ctx := context.Background() + + startTime := time.Now().Add(-time.Duration(tt.latencyMs) * time.Millisecond) + + // Call method + sq.LogIfSlow(ctx, startTime, tt.fields) + + // Assertions + if tt.expectLog { + assert.Equal(t, tt.message, hook.LastEntry().Message) + assert.Equal(t, logrus.Fields(tt.fields), hook.LastEntry().Data) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/wrapped_allow_list.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/wrapped_allow_list.go new file mode 100644 index 0000000000000000000000000000000000000000..85c62b91dfe4d8bcb2c8a2e22de187ecd3d9786f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/wrapped_allow_list.go @@ -0,0 +1,190 @@ +// _ _ 
+// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package helpers + +type wrappedAllowList struct { + wAllowList AllowList + allowList AllowList +} + +func newWrappedAllowList(al AllowList) AllowList { + return &wrappedAllowList{ + wAllowList: al, + } +} + +func (al *wrappedAllowList) Close() { + al.wAllowList.Close() +} + +func (al *wrappedAllowList) Insert(ids ...uint64) { + fids := make([]uint64, 0, len(ids)) + + for _, id := range ids { + if al.wAllowList.Contains(id) { + continue + } + + fids = append(fids, id) + } + + if len(fids) == 0 { + return + } + + if al.allowList == nil { + al.allowList = NewAllowList() + } + + al.allowList.Insert(fids...) +} + +func (al *wrappedAllowList) Contains(id uint64) bool { + if al.allowList != nil && al.allowList.Contains(id) { + return true + } + return al.wAllowList.Contains(id) +} + +func (al *wrappedAllowList) DeepCopy() AllowList { + var innerAllowListCopy AllowList + + if al.allowList != nil { + innerAllowListCopy = al.allowList.DeepCopy() + } + + return &wrappedAllowList{ + wAllowList: al.wAllowList.DeepCopy(), + allowList: innerAllowListCopy, + } +} + +func (al *wrappedAllowList) WrapOnWrite() AllowList { + return newWrappedAllowList(al) +} + +func (al *wrappedAllowList) Slice() []uint64 { + if al.allowList == nil { + return al.wAllowList.Slice() + } + + return append(al.wAllowList.Slice(), al.allowList.Slice()...) 
+} + +func (al *wrappedAllowList) IsEmpty() bool { + return (al.allowList == nil || al.allowList.IsEmpty()) && al.wAllowList.IsEmpty() +} + +func (al *wrappedAllowList) Len() int { + if al.allowList == nil { + return al.wAllowList.Len() + } + + return al.allowList.Len() + al.wAllowList.Len() +} + +func (al *wrappedAllowList) Min() uint64 { + if al.allowList == nil { + return al.wAllowList.Min() + } + + min := al.allowList.Min() + wmin := al.wAllowList.Min() + + if min <= wmin { + return min + } + + return wmin +} + +func (al *wrappedAllowList) Max() uint64 { + if al.allowList == nil { + return al.wAllowList.Max() + } + + max := al.allowList.Max() + wmax := al.wAllowList.Max() + + if max >= wmax { + return max + } + + return wmax +} + +func (al *wrappedAllowList) Size() uint64 { + if al.allowList == nil { + return al.wAllowList.Size() + } + + return al.allowList.Size() + al.wAllowList.Size() +} + +func (al *wrappedAllowList) Truncate(upTo uint64) AllowList { + if al.allowList != nil { + al.allowList = al.allowList.Truncate(upTo) + } + + al.wAllowList = al.wAllowList.Truncate(upTo) + return al +} + +func (al *wrappedAllowList) Iterator() AllowListIterator { + return al.LimitedIterator(0) +} + +func (al *wrappedAllowList) LimitedIterator(limit int) AllowListIterator { + if al.allowList == nil { + return al.wAllowList.LimitedIterator(limit) + } + + return newComposedAllowListIterator( + al.allowList.LimitedIterator(limit), + al.wAllowList.LimitedIterator(limit), + limit, + ) +} + +type composedAllowListIterator struct { + it1 AllowListIterator + it2 AllowListIterator + limit int + itCount int +} + +func newComposedAllowListIterator(it1, it2 AllowListIterator, limit int) AllowListIterator { + return &composedAllowListIterator{ + it1: it1, + it2: it2, + limit: limit, + } +} + +func (i *composedAllowListIterator) Next() (uint64, bool) { + if i.limit > 0 && i.itCount >= i.limit { + return 0, false + } + + id, ok := i.it1.Next() + if ok { + i.itCount++ + return id, ok + } + 
+ i.itCount++ + return i.it2.Next() +} + +func (i *composedAllowListIterator) Len() int { + return i.it1.Len() + i.it2.Len() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/wrapped_allow_list_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/wrapped_allow_list_test.go new file mode 100644 index 0000000000000000000000000000000000000000..fe324db48e891f458a7af0705fcdd849a3dbc692 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/helpers/wrapped_allow_list_test.go @@ -0,0 +1,284 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package helpers + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" +) + +func TestWrappedAllowList(t *testing.T) { + t.Run("wrapped allowlist created with no values", func(t *testing.T) { + al := newWrappedAllowList(NewAllowList()) + + assert.Equal(t, 0, al.Len()) + assert.True(t, al.IsEmpty()) + + assert.Equal(t, uint64(0), al.Min()) + assert.Equal(t, uint64(0), al.Max()) + }) + + t.Run("wrapped allowlist created with initial values", func(t *testing.T) { + al := newWrappedAllowList(NewAllowList(1, 2, 3)) + + assert.Equal(t, 3, al.Len()) + assert.False(t, al.IsEmpty()) + + assert.True(t, al.Contains(1)) + assert.True(t, al.Contains(2)) + assert.True(t, al.Contains(3)) + + assert.Equal(t, uint64(1), al.Min()) + assert.Equal(t, uint64(3), al.Max()) + }) + + t.Run("wrapped allowlist with inserted values", func(t *testing.T) { + al := newWrappedAllowList(NewAllowList(1, 2, 3)) + al.Insert(4, 5) + + assert.Equal(t, 5, al.Len()) + assert.False(t, al.IsEmpty()) + + assert.True(t, al.Contains(1)) + assert.True(t, al.Contains(2)) + assert.True(t, al.Contains(3)) + 
assert.True(t, al.Contains(4)) + assert.True(t, al.Contains(5)) + + assert.Equal(t, uint64(1), al.Min()) + assert.Equal(t, uint64(5), al.Max()) + }) + + t.Run("wrapped allowlist exported to slice", func(t *testing.T) { + al := newWrappedAllowList(NewAllowList(1, 2, 3)) + al.Insert(4, 5) + + assert.ElementsMatch(t, []uint64{1, 2, 3, 4, 5}, al.Slice()) + }) + + t.Run("wrapped allowlist deepcopy", func(t *testing.T) { + al := newWrappedAllowList(NewAllowList(1, 2, 3)) + copy := al.DeepCopy() + al.Insert(4, 5) + + assert.Equal(t, 5, al.Len()) + assert.False(t, al.IsEmpty()) + + assert.True(t, al.Contains(1)) + assert.True(t, al.Contains(2)) + assert.True(t, al.Contains(3)) + assert.True(t, al.Contains(4)) + assert.True(t, al.Contains(5)) + + assert.Equal(t, uint64(1), al.Min()) + assert.Equal(t, uint64(5), al.Max()) + + assert.Equal(t, 3, copy.Len()) + assert.False(t, copy.IsEmpty()) + + assert.True(t, copy.Contains(1)) + assert.True(t, copy.Contains(2)) + assert.True(t, copy.Contains(3)) + + assert.Equal(t, uint64(1), copy.Min()) + assert.Equal(t, uint64(3), copy.Max()) + }) + + t.Run("wrapped allowlist created from bitmap", func(t *testing.T) { + bm := roaringset.NewBitmap(1, 2, 3) + + al := newWrappedAllowList(NewAllowListFromBitmap(bm)) + bm.SetMany([]uint64{4, 5}) + + assert.Equal(t, 5, al.Len()) + assert.False(t, al.IsEmpty()) + + assert.True(t, al.Contains(1)) + assert.True(t, al.Contains(2)) + assert.True(t, al.Contains(3)) + assert.True(t, al.Contains(4)) + assert.True(t, al.Contains(5)) + + assert.Equal(t, uint64(1), al.Min()) + assert.Equal(t, uint64(5), al.Max()) + }) + + t.Run("wrapped allowlist created from bitmap deepcopy", func(t *testing.T) { + bm := roaringset.NewBitmap(1, 2, 3) + + al := newWrappedAllowList(NewAllowListFromBitmapDeepCopy(bm)) + bm.SetMany([]uint64{4, 5}) + + assert.Equal(t, 3, al.Len()) + assert.False(t, al.IsEmpty()) + + assert.True(t, al.Contains(1)) + assert.True(t, al.Contains(2)) + assert.True(t, al.Contains(3)) + + 
assert.Equal(t, uint64(1), al.Min()) + assert.Equal(t, uint64(3), al.Max()) + }) +} + +func TestWrappedAllowList_Iterator(t *testing.T) { + t.Run("empty bitmap iterator", func(t *testing.T) { + it := newWrappedAllowList(NewAllowList()).Iterator() + + id1, ok1 := it.Next() + id2, ok2 := it.Next() + + assert.Equal(t, 0, it.Len()) + assert.False(t, ok1) + assert.Equal(t, uint64(0), id1) + assert.False(t, ok2) + assert.Equal(t, uint64(0), id2) + }) + + t.Run("iterating step by step", func(t *testing.T) { + it := newWrappedAllowList(NewAllowList(3, 2, 1)).Iterator() + + id1, ok1 := it.Next() + id2, ok2 := it.Next() + id3, ok3 := it.Next() + id4, ok4 := it.Next() + + assert.Equal(t, 3, it.Len()) + assert.True(t, ok1) + assert.Equal(t, uint64(1), id1) + assert.True(t, ok2) + assert.Equal(t, uint64(2), id2) + assert.True(t, ok3) + assert.Equal(t, uint64(3), id3) + assert.False(t, ok4) + assert.Equal(t, uint64(0), id4) + }) + + t.Run("iterating in loop", func(t *testing.T) { + it := newWrappedAllowList(NewAllowList(3, 2, 1)).Iterator() + ids := []uint64{} + + for id, ok := it.Next(); ok; id, ok = it.Next() { + ids = append(ids, id) + } + + assert.Equal(t, 3, it.Len()) + assert.Equal(t, []uint64{1, 2, 3}, ids) + }) +} + +func TestWrappedAllowList_LimitedIterator(t *testing.T) { + t.Run("empty bitmap iterator", func(t *testing.T) { + it := newWrappedAllowList(NewAllowList()).LimitedIterator(2) + + id1, ok1 := it.Next() + id2, ok2 := it.Next() + + assert.Equal(t, 0, it.Len()) + assert.False(t, ok1) + assert.Equal(t, uint64(0), id1) + assert.False(t, ok2) + assert.Equal(t, uint64(0), id2) + }) + + t.Run("iterating step by step (higher limit)", func(t *testing.T) { + it := newWrappedAllowList(NewAllowList(3, 2, 1)).LimitedIterator(4) + + id1, ok1 := it.Next() + id2, ok2 := it.Next() + id3, ok3 := it.Next() + id4, ok4 := it.Next() + + assert.Equal(t, 3, it.Len()) + assert.True(t, ok1) + assert.Equal(t, uint64(1), id1) + assert.True(t, ok2) + assert.Equal(t, uint64(2), id2) + 
assert.True(t, ok3) + assert.Equal(t, uint64(3), id3) + assert.False(t, ok4) + assert.Equal(t, uint64(0), id4) + }) + + t.Run("iterating step by step (equal limit)", func(t *testing.T) { + it := newWrappedAllowList(NewAllowList(3, 2, 1)).LimitedIterator(3) + + id1, ok1 := it.Next() + id2, ok2 := it.Next() + id3, ok3 := it.Next() + id4, ok4 := it.Next() + + assert.Equal(t, 3, it.Len()) + assert.True(t, ok1) + assert.Equal(t, uint64(1), id1) + assert.True(t, ok2) + assert.Equal(t, uint64(2), id2) + assert.True(t, ok3) + assert.Equal(t, uint64(3), id3) + assert.False(t, ok4) + assert.Equal(t, uint64(0), id4) + }) + + t.Run("iterating step by step (lower limit)", func(t *testing.T) { + it := newWrappedAllowList(NewAllowList(3, 2, 1)).LimitedIterator(2) + + id1, ok1 := it.Next() + id2, ok2 := it.Next() + id3, ok3 := it.Next() + + assert.Equal(t, 2, it.Len()) + assert.True(t, ok1) + assert.Equal(t, uint64(1), id1) + assert.True(t, ok2) + assert.Equal(t, uint64(2), id2) + assert.False(t, ok3) + assert.Equal(t, uint64(0), id3) + }) + + t.Run("iterating in loop (higher limit)", func(t *testing.T) { + it := newWrappedAllowList(NewAllowList(3, 2, 1)).LimitedIterator(4) + ids := []uint64{} + + for id, ok := it.Next(); ok; id, ok = it.Next() { + ids = append(ids, id) + } + + assert.Equal(t, 3, it.Len()) + assert.Equal(t, []uint64{1, 2, 3}, ids) + }) + + t.Run("iterating in loop (equal limit)", func(t *testing.T) { + it := newWrappedAllowList(NewAllowList(3, 2, 1)).LimitedIterator(3) + ids := []uint64{} + + for id, ok := it.Next(); ok; id, ok = it.Next() { + ids = append(ids, id) + } + + assert.Equal(t, 3, it.Len()) + assert.Equal(t, []uint64{1, 2, 3}, ids) + }) + + t.Run("iterating in loop (lower limit)", func(t *testing.T) { + it := newWrappedAllowList(NewAllowList(3, 2, 1)).LimitedIterator(2) + ids := []uint64{} + + for id, ok := it.Next(); ok; id, ok = it.Next() { + ids = append(ids, id) + } + + assert.Equal(t, 2, it.Len()) + assert.Equal(t, []uint64{1, 2}, ids) + }) +} diff 
--git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/hybrid_search_offsets_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/hybrid_search_offsets_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4544e96606dd595e078636d1c0d3baf7ed6b00d9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/hybrid_search_offsets_test.go @@ -0,0 +1,270 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "math/rand" + "strings" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/vectorindex/flat" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/traverser" +) + +var ( + collectionSize = 1100 + queryMaximumResults = int64(1000) + queryHybridMaximumResults = []int64{10, 100, 1000} +) + +func 
SetupPaginationTestData(t require.TestingT, repo *DB, schemaGetter *fakeSchemaGetter, logger logrus.FieldLogger, k1, b float32) []string { + class := &models.Class{ + VectorIndexType: "flat", + VectorIndexConfig: flat.NewDefaultUserConfig(), + InvertedIndexConfig: BM25FinvertedConfig(k1, b, "none"), + Class: "PaginationTest", + Properties: []*models.Property{ + { + Name: "title", + DataType: []string{string(schema.DataTypeText)}, + Tokenization: "word", + }, + { + Name: "text", + DataType: []string{string(schema.DataTypeText)}, + Tokenization: "word", + }, + }, + } + + seed := getRandomSeed() + + props := make([]string, len(class.Properties)) + for i, prop := range class.Properties { + props[i] = prop.Name + } + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + schemaGetter.schema = schema + + migrator := NewMigrator(repo, logger, "node1") + migrator.AddClass(context.Background(), class) + + // generate different ratios + + words := []string{"a", "b"} + + text := strings.Repeat(words[0]+" ", collectionSize) + strings.Repeat(words[1]+" ", collectionSize) + + // n+1 is necessary to ensure that we have an object with all words[0] and an object with all words[1] + for i := 0; i < collectionSize+1; i++ { + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + + // text: words[0] * i and words[1] * (n - i) + data := map[string]interface{}{"title": fmt.Sprintf("%d", i), "text": text[i*2 : (i+collectionSize)*2]} + + // create a random vector + vector := generateVector(seed) + + obj := &models.Object{Class: "PaginationTest", ID: id, Properties: data} + err := repo.PutObject(context.Background(), obj, vector, nil, nil, nil, 0) + require.Nil(t, err) + } + return props +} + +func generateVector(seed *rand.Rand) []float32 { + // floatValue := float32(n) + // return distancer.Normalize([]float32{floatValue, floatValue, floatValue, floatValue, floatValue * floatValue}) + // random vector with 5 dimensions, ignore 
the n + return distancer.Normalize(randomVector(seed, 5)) +} + +func TestHybridOffsets(t *testing.T) { + seed := getRandomSeed() + + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 99999, + MemtablesMaxActiveSeconds: 99999, + MemtablesMaxSizeMB: 1000, + RootPath: dirName, + QueryMaximumResults: queryMaximumResults, + QueryHybridMaximumResults: queryHybridMaximumResults[0], + MaxImportGoroutinesFactor: 60, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, 
err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + SetupPaginationTestData(t, repo, schemaGetter, logger, 1.2, 0.75) + + idx := repo.GetIndex("PaginationTest") + require.NotNil(t, idx) + + queries := [][2]interface{}{ + {"a", generateVector(seed)}, + {"a b", generateVector(seed)}, + } + + for _, location := range []string{"memory", "disk"} { + for _, queryHybridMaximumResult := range queryHybridMaximumResults { + repo.config.QueryHybridMaximumResults = queryHybridMaximumResult + repo.config.QueryMaximumResults = queryMaximumResults + + myConfig := config.Config{ + QueryDefaults: config.QueryDefaults{ + Limit: queryMaximumResults, + }, + QueryMaximumResults: queryMaximumResults, + QueryHybridMaximumResults: queryHybridMaximumResult, + } + + pageSize := int(queryHybridMaximumResult / 10) + paginations := []filters.Pagination{ + // base case, no offset, limit is the maximum results. This is the ground truth for the other cases + // must be at paginations index 0, as it is the base case and used to compare the other cases against + {Offset: 0, Limit: int(queryHybridMaximumResult)}, + // normal pagination cases, offset is i*pageSize and limit is the page size + {Offset: 0, Limit: pageSize}, + {Offset: pageSize, Limit: pageSize}, + {Offset: pageSize * 9, Limit: pageSize}, + // will fail, as the offset + limit exceeds the maximum results + {Offset: pageSize * 10, Limit: pageSize}, + // "uneven" limit case + {Offset: 1, Limit: 7}, + // same as Offset: 0, Limit: int(queryHybridMaximumResult) + {Offset: 0, Limit: -1}, + // will NOT fail, as the offset is 0 and the limit is the maximum results. + // This is a special case, where we override the maximum results with an offset of zero. + // May return different results than the base case, as the offset is 0 and the limit is the maximum results. 
+ {Offset: 0, Limit: int(queryHybridMaximumResult) * 10}, + // will NOT fail, but may return results after queryHybridMaximumResult + // May return different results than the base case, and will not be evaluated against the ground trut + {Offset: 1, Limit: -1}, + // will fail with an error, as it exceeds the maximum results + {Offset: pageSize, Limit: int(queryHybridMaximumResult)}, + } + for _, queryAndVector := range queries { + log, _ := test.NewNullLogger() + explorer := traverser.NewExplorer(repo, log, nil, nil, myConfig) + explorer.SetSchemaGetter(schemaGetter) + for _, alpha := range []float64{0.0, 0.5, 1.0} { + query := queryAndVector[0].(string) + vector := queryAndVector[1].([]float32) + + gtResults := make(map[uint64]float32, 0) + for p, pagination := range paginations { + t.Run(fmt.Sprintf("hybrid search offset test (%s) (maximum hybrid %d) query '%s' alpha %.2f pagination %d:%d", location, queryHybridMaximumResult, query, alpha, pagination.Offset, pagination.Offset+pagination.Limit), func(t *testing.T) { + params := dto.GetParams{ + ClassName: "PaginationTest", + HybridSearch: &searchparams.HybridSearch{ + Query: query, + Vector: vector, + Alpha: alpha, + }, + Pagination: &pagination, + Properties: search.SelectProperties{search.SelectProperty{Name: "title"}, search.SelectProperty{Name: "text"}}, + AdditionalProperties: additional.Properties{ + ExplainScore: true, + }, + } + + hybridResults, err := explorer.Hybrid(context.TODO(), params) + if pagination.Offset+pagination.Limit > int(queryHybridMaximumResult) { + // t.Logf("Not validating the results for pagination as offset %d + limit %d > %d: results %d", pagination.Offset, pagination.Limit, int(queryHybridMaximumResult), len(hybridResults)) + return + } + require.Nil(t, err) + + if p == 0 { + for _, res := range hybridResults { + gtResults[*res.DocID] = res.Score + } + } else { + if pagination.Limit != -1 && pagination.Offset+pagination.Limit > int(queryHybridMaximumResult) { + // no need to check 
the results, as this is the exception where we override the maximum results with an offset of zero + // t.Logf("Skipping result check for pagination offset %d + limit %d > %d", pagination.Offset, pagination.Limit, int(queryHybridMaximumResult)) + return + } + + for rank, res := range hybridResults { + innerRank := rank + pagination.Offset + require.Equal(t, gtResults[*res.DocID], res.Score, "Score mismatch at rank %d", innerRank+1) + } + + } + }) + } + } + } + + } + idx.ForEachShard(func(name string, shard ShardLike) error { + err := shard.Store().FlushMemtables(context.Background()) + require.Nil(t, err) + return nil + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/hybrid_search_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/hybrid_search_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1e8e54934790aadcb04054fe4f5189c54720cdaf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/hybrid_search_test.go @@ -0,0 +1,1200 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "encoding/json" + "fmt" + "os" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/moduletools" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/storobj" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/modules" + "github.com/weaviate/weaviate/usecases/traverser" + "github.com/weaviate/weaviate/usecases/traverser/hybrid" +) + +type TestDoc struct { + DocID string + Document string +} + +type TestQuery struct { + QueryID string + Query string + MatchingDocIDs []string +} + +var defaultConfig = config.Config{ + QueryDefaults: config.QueryDefaults{ + Limit: 100, + }, + QueryMaximumResults: 100, + QueryHybridMaximumResults: 100, +} + +func SetupStandardTestData(t require.TestingT, repo *DB, schemaGetter *fakeSchemaGetter, logger logrus.FieldLogger, k1, b float32) []string { + class := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: BM25FinvertedConfig(k1, b, "none"), + 
Class: "StandardTest", + Properties: []*models.Property{ + { + Name: "document", + DataType: []string{string(schema.DataTypeText)}, + Tokenization: "word", + }, + }, + } + props := make([]string, len(class.Properties)) + for i, prop := range class.Properties { + props[i] = prop.Name + } + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + schemaGetter.schema = schema + + migrator := NewMigrator(repo, logger, "node1") + migrator.AddClass(context.Background(), class) + + // Load text from file standard_test_data.json + // This is a list of 1000 documents from the MEDLINE database + // Each document is a medical abstract + + data, _ := os.ReadFile("NFCorpus-Corpus.json") + var docs []TestDoc + json.Unmarshal(data, &docs) + + for i, doc := range docs { + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + + data := map[string]interface{}{"document": doc.Document, "code": doc.DocID} + obj := &models.Object{Class: "StandardTest", ID: id, Properties: data, CreationTimeUnix: 1565612833955, LastUpdateTimeUnix: 10000020} + err := repo.PutObject(context.Background(), obj, nil, nil, nil, nil, 0) + require.Nil(t, err) + } + return props +} + +func TestHybrid(t *testing.T) { + dirName := t.TempDir() + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := 
replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + props := SetupStandardTestData(t, repo, schemaGetter, logger, 1.2, 0.75) + + idx := repo.GetIndex("StandardTest") + require.NotNil(t, idx) + + // Load queries from file standard_test_queries.json + // This is a list of 100 queries from the MEDLINE database + + data, _ := os.ReadFile("NFCorpus-Query.json") + var queries []TestQuery + json.Unmarshal(data, &queries) + for _, query := range queries { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{}, Query: query.Query} + addit := additional.Properties{} + res, _, _ := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + + fmt.Printf("query for %s returned %d results\n", query.Query, len(res)) + + } +} + +func TestBIER(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := 
schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + props := SetupStandardTestData(t, repo, schemaGetter, logger, 1.2, 0.75) + + idx := repo.GetIndex("StandardTest") + require.NotNil(t, idx) + + // Load queries from file standard_test_queries.json + // This is a list of 100 queries from the MEDLINE database + + data, _ := os.ReadFile("NFCorpus-Query.json") + var queries []TestQuery + json.Unmarshal(data, &queries) + for _, query := range queries { + kwr := &searchparams.KeywordRanking{Type: "bm25", Properties: []string{}, Query: query.Query} + addit 
:= additional.Properties{} + res, _, _ := idx.objectSearch(context.TODO(), 1000, nil, kwr, nil, nil, addit, nil, "", 0, props) + + fmt.Printf("query for %s returned %d results\n", query.Query, len(res)) + // fmt.Printf("Results: %v\n", res) + + //for j, doc := range res { + // fmt.Printf("res %v, %v\n", j, doc.Object.GetAdditionalProperty("code")) + //} + + //Check the docIDs are the same + //for j, doc := range res[0:10] { + // fmt.Printf("Result: rank %v, docID %v, score %v (%v)\n", j, doc.Object.GetAdditionalProperty("code"), doc.Score(), doc.Object.GetAdditionalProperty("document")) + // fmt.Printf("Expected: rank %v, docID %v\n", j, query.MatchingDocIDs[j].Object.GetAdditionalProperty("code")) + // require.Equal(t, query.MatchingDocIDs[j], doc.Object.GetAdditionalProperty("code").(string)) + //} + + } +} + +func addObj(repo *DB, i int, props map[string]interface{}, vec []float32) error { + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + + obj := &models.Object{Class: "MyClass", ID: id, Properties: props, CreationTimeUnix: 1565612833955, LastUpdateTimeUnix: 10000020} + vector := vec + err := repo.PutObject(context.Background(), obj, vector, nil, nil, nil, 0) + return err +} + +func SetupFusionClass(t require.TestingT, repo *DB, schemaGetter *fakeSchemaGetter, logger logrus.FieldLogger, k1, b float32) *models.Class { + class := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: BM25FinvertedConfig(k1, b, "none"), + Class: "MyClass", + Vectorizer: "test-vectoriser", + Properties: []*models.Property{ + { + Name: "title", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "description", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + }, + }, + } + + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + schemaGetter.schema = schema + + migrator := 
NewMigrator(repo, logger, "node1") + migrator.AddClass(context.Background(), class) + + addObj(repo, 0, map[string]interface{}{"title": "Our journey to BM25F", "description": "This is how we get to BM25F"}, []float32{ + -0.04488207, -0.32971063, 0.23568298, 0.004971265, -0.12588727, 0.0036464946, -0.6209942, -0.23247992, -0.19338948, 0.3544481, 0.00050193403, 0.36604947, -0.13813384, -0.126298, 0.05640272, -0.36604303, -0.10976448, 0.196644, -0.02206351, -0.27649152, -0.0050981278, 0.020616557, 0.14605781, 0.093781, -0.25838777, -0.5038474, -0.46846977, 0.13360861, -0.15851927, 0.55075127, 0.34870508, 0.085248806, -0.02763206, -0.07068178, -0.26878145, 0.2814703, -0.33317414, 0.48958343, -0.39648432, 0.606744, 0.12882654, 0.07246548, 0.54059577, 0.19526751, 0.85892624, 0.1485534, 0.19790995, -0.34280643, 0.27512825, 0.105886005, 0.030610563, 0.3811836, 0.18384686, 0.29538825, -0.020791993, -0.31372088, -0.08811446, -0.12979206, 0.30209363, -0.14561261, 0.22077207, -0.40219122, -0.40567216, -0.08740993, -0.31625694, -0.18109407, 0.5411316, -0.09015073, 0.22272661, 0.13575949, -0.36186692, -0.02766613, -0.22463024, 0.15271285, 0.304631, -0.57313913, 0.31346974, -0.118818894, -0.36198893, 0.24609627, 0.47406524, -0.55662453, 0.37812573, -0.2959746, 0.6146945, -0.3934654, 0.30840993, -0.24944904, -0.2063059, -0.48078862, -0.08967737, -0.1273727, 0.1587198, -0.44776592, 0.0048942068, 0.18738478, -0.4544592, -0.4755225, 0.2156486, 0.39935833, 0.25160903, -0.35463294, 0.60699236, -0.12445828, -0.029569108, -0.4983043, 0.44752246, -0.2340386, -0.27559096, 0.67984164, 0.51470226, -0.5285723, 0.0024457057, -0.20425095, -0.4915065, -0.3221788, -0.15558766, 0.20102327, 0.23525643, 0.28365672, 0.58687097, 0.3190138, -0.31130886, 0.053733524, -0.10361888, -0.2598206, 1.5977107, 0.60224503, 0.0074084206, 0.17191416, 0.5619663, 0.41178456, 0.27006987, -0.5354418, -0.054428957, 0.6849038, -0.024342017, 0.43103293, -0.22892271, 0.036829337, -0.103084944, -0.2021301, -0.11352237, 
-0.17110321, 0.76075333, -0.1755375, 0.029183118, -0.34927735, 0.22040148, -0.18136469, 0.16048056, 0.34151044, -0.048658744, 0.03941434, 0.45190382, 0.103645615, 0.10437423, -0.086864054, 0.523172, -0.59672165, -0.1225319, -0.5800122, 0.2197229, 0.49325037, 0.30607533, 0.012414166, 0.1539727, -0.60095996, 0.05142522, 0.021675617, -0.54661363, -0.0050268047, -0.507448, -0.04522115, -0.77988946, 0.10536073, 0.099219516, 0.40711993, 0.27353838, 0.1728696, -0.4171313, -1.3076599, -0.19778727, -0.23201689, 0.40729725, -0.28640944, 0.06354561, -0.3877251, -0.7938625, 0.29908186, -0.24450836, -0.22622268, 0.32792783, 0.28376722, -0.3685573, 0.031423382, -0.012464195, 0.2254249, 0.26994115, -0.19821979, -0.24086252, 0.24454598, 0.30043048, -0.627896, -0.3355214, -0.14054148, 0.50488055, -0.073988594, -0.31053177, 0.36260405, -0.56093204, 0.12066587, -0.47301888, 0.88418764, 0.09010807, -0.10899238, 0.62317103, 0.27237964, -0.604178, 0.0067386073, 0.1370205, -0.094664395, 0.3479645, 0.25092986, 0.16948108, -0.20874223, -0.54980844, -0.100548536, 0.47177002, -0.4981452, 0.1815202, -0.80878633, -0.076736815, 0.43152434, -0.43210435, -0.28010413, 0.1249095, 0.385616, -0.2984289, -0.006841246, 0.3496464, -0.33298343, 0.06344994, 0.37393335, 0.18608452, -0.10631552, -0.40111285, -0.146849, -0.04161288, -0.31621853, -0.06889858, 0.13343252, -0.11599523, 0.5377954, 0.25938663, -0.43172404, -0.7476662, -0.54316807, 0.0029651164, 0.09958581, 0.0730254, 0.22785394, 0.3276773, 0.01816153, 0.094938636, 0.71604383, -0.09648144, -0.0035640472, -0.5383972, 0.28588042, 0.7625968, -0.22359839, 0.17167832, -0.06235203, -0.32480234, -0.18599075, 0.1570872, -0.06470149, -0.029198159, 0.23251827, 0.100047514, -0.06314679, 0.6390605, -0.06232509, 0.76272035, 0.2975126, 0.15871438, 0.18222457, -0.548036, 0.23633306, -0.17981203, 0.023965335, 0.24478278, -0.21601695, -0.108217336, 0.05834005, 0.3718355, 0.0970174, 0.04476983, -0.118143275, + }) + + addObj(repo, 1, map[string]interface{}{"title": 
"Our peanuts to BM25F", "description": "This is how we get to BM25F"}, []float32{0.11676752, -0.4837953, -0.06559026, 0.3242706, 0.08680799, -0.30777612, -0.22926088, 0.01667141, 0.31844103, 0.4666344, 0.417305, 0.06108997, -0.0740552, 0.14234918, 0.06823654, 0.16182217, -0.012199775, -0.17269811, -0.16104576, -0.09208117, 0.063624315, 0.3113634, -0.3830663, 0.05831715, -0.14125349, -0.26962206, -0.0696671, -0.013111545, 0.20097807, 0.033809602, -0.048573885, 0.46815604, 0.32582077, 0.32308698, 0.20355524, -0.08757271, 0.17099291, 0.31500003, -0.05445185, 0.7712824, -0.2096038, 0.28787872, 0.10871067, -0.3266944, -0.1633618, 0.34630018, -0.15387866, -0.45506623, -0.21508889, -0.19249445, -0.28801772, -0.2694916, -0.18476918, -0.12890251, -0.29947013, 0.0008435306, -0.06490287, -0.006560939, 0.24637267, -0.111215346, 0.3775517, -0.82433224, -0.3179537, 0.022306278, 0.19248968, -0.1701471, 0.052865, -0.044782564, -0.10222186, 0.09571932, -0.19251339, 0.241193, -0.13216764, -0.19301765, 0.46628228, -0.29973802, 0.0030274116, 0.01664786, 0.1216316, 0.12837356, -0.048461247, -0.56439394, 0.06110007, 0.102808535, 0.63137263, -0.13134736, 0.41365498, -0.113528065, -0.06924132, 0.1076709, -0.06833764, 0.31522226, 0.13445137, -0.16227263, -0.15102008, 0.23768687, -0.41108298, -0.473573, -0.35702798, 0.21465969, -0.30590045, -0.26616427, 0.7287231, -0.036261655, -0.34903425, -0.1396425, -0.022058574, -0.33956096, -0.3359471, -0.035496157, -0.1786069, -0.0857123, -0.0845917, 0.13232024, -0.02890402, -0.45281035, -0.026353035, -0.39124215, -0.15753527, -0.075793914, 0.35795033, 0.35925874, -0.1423145, -0.0969307, 0.08920737, 0.15772092, 1.3536518, 0.29779792, -0.05407743, -0.048793554, 0.12263066, -0.06248072, -0.49598575, -0.46484944, -0.31050035, 0.6283043, 0.5242193, 0.25987545, -0.2584134, 0.32898954, 0.014580286, 0.14016634, -0.010093123, -0.22610027, -0.029830063, 0.18112054, 0.020298548, -0.025797658, -0.40394786, -0.17097965, 0.11640611, 0.29304397, -0.27026933, 
-0.14832975, -0.099585906, 0.4554175, -0.0018298444, 0.23190805, -0.65866566, -0.09366216, -0.7000203, 0.004698127, -0.17523476, -0.34830904, -0.16284281, 0.15495956, 0.5772887, 0.048939474, -0.12923703, -0.236143, -0.03874896, 0.2960667, 0.029154046, 0.42814374, -0.4332385, -0.31293675, -0.10682973, -0.12069777, 0.071893886, 0.06644212, -0.46342105, -0.8599067, 0.017380634, -0.38347453, 0.14165273, -0.08906643, -0.06801824, 0.19660597, -0.06807183, 0.33882818, 0.044932134, 0.27550527, 0.2308957, -0.101730466, -0.19064885, -0.015364495, 0.0149245, 0.24177131, -0.15636654, -0.002376896, -0.6399841, 0.14845476, 0.46339074, 0.036926877, -0.067630276, 0.289784, 0.15529989, -0.5235124, 0.50196457, -0.004536148, -0.3716798, 0.047304608, -0.027990041, 0.15901157, 0.021176483, 0.35387334, 0.4457043, 0.094738215, 0.08722517, 0.0450516, 0.1739127, -0.2606226, 0.035999063, -0.12919275, -0.11809982, -0.20865, -0.6917279, 0.093973815, -0.38069052, -0.114874505, -0.3051481, -0.357749, 0.48254266, 0.31795567, 0.37491056, 0.0047062743, -0.1265727, 0.51655954, 0.1622121, 0.39811996, -0.002116253, -0.375531, 0.6347343, 0.14833164, 0.032251768, 0.021101426, -0.34346518, 0.22451165, 0.028649824, -0.04794777, 0.056036226, 0.14179966, 0.32724753, 0.17185552, 0.2504634, -0.05013007, -0.31430584, -0.22200464, -0.508279, -0.10017326, 0.16302426, -0.09568865, 0.05985463, -0.22916546, -0.084666654, -0.15271503, -0.24385636, -0.028514259, -0.33194387, -0.17132543, -0.1474212, -0.18526097, 0.2198915, -0.1689729, -0.19907063, 0.19941927, -0.47478884, 0.0695081, 0.3741401, 0.19423902, 0.085894205, -0.53214043, 0.33309302, 0.18701339, 0.23461546, -0.14038202, 0.07201847, 0.3462437, 0.1640635, 0.07200127, -0.09130982, 0.3868172, -0.09754013, 0.040958565, -0.18743117, 0.14117524, -0.18739408, 0.13669269, -0.09902989, -0.16762646}) + + addObj(repo, 2, map[string]interface{}{"title": "Elephant Parade", "description": "Elephants elephants elephant"}, []float32{-0.04488207, -0.32971063, 0.23568298, 
0.004971265, -0.12588727, 0.0036464946, -0.6209942, -0.23247992, -0.19338948, 0.3544481, 0.00050193403, 0.36604947, -0.13813384, -0.126298, 0.05640272, -0.36604303, -0.10976448, 0.196644, -0.02206351, -0.27649152, -0.0050981278, 0.020616557, 0.14605781, 0.093781, -0.25838777, -0.5038474, -0.46846977, 0.13360861, -0.15851927, 0.55075127, 0.34870508, 0.085248806, -0.02763206, -0.07068178, -0.26878145, 0.2814703, -0.33317414, 0.48958343, -0.39648432, 0.606744, 0.12882654, 0.07246548, 0.54059577, 0.19526751, 0.85892624, 0.1485534, 0.19790995, -0.34280643, 0.27512825, 0.105886005, 0.030610563, 0.3811836, 0.18384686, 0.29538825, -0.020791993, -0.31372088, -0.08811446, -0.12979206, 0.30209363, -0.14561261, 0.22077207, -0.40219122, -0.40567216, -0.08740993, -0.31625694, -0.18109407, 0.5411316, -0.09015073, 0.22272661, 0.13575949, -0.36186692, -0.02766613, -0.22463024, 0.15271285, 0.304631, -0.57313913, 0.31346974, -0.118818894, -0.36198893, 0.24609627, 0.47406524, -0.55662453, 0.37812573, -0.2959746, 0.6146945, -0.3934654, 0.30840993, -0.24944904, -0.2063059, -0.48078862, -0.08967737, -0.1273727, 0.1587198, -0.44776592, 0.0048942068, 0.18738478, -0.4544592, -0.4755225, 0.2156486, 0.39935833, 0.25160903, -0.35463294, 0.60699236, -0.12445828, -0.029569108, -0.4983043, 0.44752246, -0.2340386, -0.27559096, 0.67984164, 0.51470226, -0.5285723, 0.0024457057, -0.20425095, -0.4915065, -0.3221788, -0.15558766, 0.20102327, 0.23525643, 0.28365672, 0.58687097, 0.3190138, -0.31130886, 0.053733524, -0.10361888, -0.2598206, 1.5977107, 0.60224503, 0.0074084206, 0.17191416, 0.5619663, 0.41178456, 0.27006987, -0.5354418, -0.054428957, 0.6849038, -0.024342017, 0.43103293, -0.22892271, 0.036829337, -0.103084944, -0.2021301, -0.11352237, -0.17110321, 0.76075333, -0.1755375, 0.029183118, -0.34927735, 0.22040148, -0.18136469, 0.16048056, 0.34151044, -0.048658744, 0.03941434, 0.45190382, 0.103645615, 0.10437423, -0.086864054, 0.523172, -0.59672165, -0.1225319, -0.5800122, 0.2197229, 0.49325037, 
0.30607533, 0.012414166, 0.1539727, -0.60095996, 0.05142522, 0.021675617, -0.54661363, -0.0050268047, -0.507448, -0.04522115, -0.77988946, 0.10536073, 0.099219516, 0.40711993, 0.27353838, 0.1728696, -0.4171313, -1.3076599, -0.19778727, -0.23201689, 0.40729725, -0.28640944, 0.06354561, -0.3877251, -0.7938625, 0.29908186, -0.24450836, -0.22622268, 0.32792783, 0.28376722, -0.3685573, 0.031423382, -0.012464195, 0.2254249, 0.26994115, -0.19821979, -0.24086252, 0.24454598, 0.30043048, -0.627896, -0.3355214, -0.14054148, 0.50488055, -0.073988594, -0.31053177, 0.36260405, -0.56093204, 0.12066587, -0.47301888, 0.88418764, 0.09010807, -0.10899238, 0.62317103, 0.27237964, -0.604178, 0.0067386073, 0.1370205, -0.094664395, 0.3479645, 0.25092986, 0.16948108, -0.20874223, -0.54980844, -0.100548536, 0.47177002, -0.4981452, 0.1815202, -0.80878633, -0.076736815, 0.43152434, -0.43210435, -0.28010413, 0.1249095, 0.385616, -0.2984289, -0.006841246, 0.3496464, -0.33298343, 0.06344994, 0.37393335, 0.18608452, -0.10631552, -0.40111285, -0.146849, -0.04161288, -0.31621853, -0.06889858, 0.13343252, -0.11599523, 0.5377954, 0.25938663, -0.43172404, -0.7476662, -0.54316807, 0.0029651164, 0.09958581, 0.0730254, 0.22785394, 0.3276773, 0.01816153, 0.094938636, 0.71604383, -0.09648144, -0.0035640472, -0.5383972, 0.28588042, 0.7625968, -0.22359839, 0.17167832, -0.06235203, -0.32480234, -0.18599075, 0.1570872, -0.06470149, -0.029198159, 0.23251827, 0.100047514, -0.06314679, 0.6390605, -0.06232509, 0.76272035, 0.2975126, 0.15871438, 0.18222457, -0.548036, 0.23633306, -0.17981203, 0.023965335, 0.24478278, -0.21601695, -0.108217336, 0.05834005, 0.3718355, 0.0970174, 0.04476983, -0.118143275}) + + return class +} + +func TestRFJourney(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := 
schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + QueryLimit: 20, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + class := SetupFusionClass(t, repo, schemaGetter, logger, 1.2, 0.75) + idx := repo.GetIndex("MyClass") + require.NotNil(t, idx) + docId1 := uint64(1) + docId2 := uint64(2) + docId3 := uint64(3) + + doc1 := &search.Result{ + ID: strfmt.UUID("e6f7e8b1-ac53-48eb-b6e4-cbe67396bcfa"), + DocID: &docId1, + Schema: map[string]interface{}{ + "title": "peanuts", + }, + Vector: []float32{0.1, 
0.2, 0.3, 0.4, 0.5}, + Score: 0.1, + SecondarySortValue: 0.2, // Adding a secondary sort value reverses the order of the results, when the two sets are equally weighted + } + + doc2 := &search.Result{ + ID: strfmt.UUID("2b7a8bc9-29d9-4cc8-b145-a0baf5fc231d"), + DocID: &docId2, + Schema: map[string]interface{}{ + "title": "journey", + }, + Vector: []float32{0.5, 0.4, 0.3, 0.3, 0.1}, + Score: 0.2, + SecondarySortValue: 0.1, // Adding a secondary sort value reverses the order of the results, when the two sets are equally weighted + } + + doc3 := &search.Result{ + ID: strfmt.UUID("dddddddd-29d9-4cc8-b145-a0baf5fc231d"), + DocID: &docId3, + Schema: map[string]interface{}{ + "title": "alalala", + }, + Vector: []float32{0.5, 0.4, 0.3, 0.3, 0.1}, + Score: 0.2, + } + + resultSet1 := []*search.Result{doc1, doc2, doc3} + resultSet2 := []*search.Result{doc2, doc1, doc3} + + // If two results have the same score, the secondary sort value is used to determine the order + t.Run("check_secondary_sort", func(t *testing.T) { + hybridResults := hybrid.FusionRanked([]float64{0.5, 0.5}, + [][]*search.Result{resultSet1, resultSet2}, []string{"set1", "set2"}) + fmt.Println("--- Start results for Fusion Reciprocal secondary sort ---") + for _, result := range hybridResults { + schema := result.Schema.(map[string]interface{}) + fmt.Println(schema["title"], result.ID, result.Score) + } + require.Equal(t, 3, len(hybridResults)) + require.Equal(t, doc1.ID, hybridResults[0].ID) + require.Equal(t, doc2.ID, hybridResults[1].ID) + require.Equal(t, doc3.ID, hybridResults[2].ID) + require.Equal(t, float32(0.016530056), hybridResults[0].Score) + require.Equal(t, float32(0.016530056), hybridResults[1].Score) + }) + /* + // Check that we would fail without a secondary score + disabled as the order is different on the github test servers! 
+ t.Run("check_secondary_sort", func(t *testing.T) { + resultSet1[0].SecondarySortValue = 0.0 + resultSet1[1].SecondarySortValue = 0.0 + hybridResults := hybrid.FusionRanked([]float64{0.5, 0.5}, + [][]*search.Result{resultSet1, resultSet2}, []string{"set1", "set2"}) + fmt.Println("--- Start results for Fusion Reciprocal secondary sort fail ---") + for _, result := range hybridResults { + schema := result.Schema.(map[string]interface{}) + fmt.Println(schema["title"], result.ID, result.Score) + } + require.Equal(t, 3, len(hybridResults)) + require.Equal(t, doc2.ID, hybridResults[0].ID) + require.Equal(t, doc1.ID, hybridResults[1].ID) + require.Equal(t, doc3.ID, hybridResults[2].ID) + require.Equal(t, float32(0.016530056), hybridResults[0].Score) + require.Equal(t, float32(0.016530056), hybridResults[1].Score) + }) + */ + + t.Run("Fusion Reciprocal", func(t *testing.T) { + results := hybrid.FusionRanked([]float64{0.4, 0.6}, + [][]*search.Result{resultSet1, resultSet2}, []string{"set1", "set2"}) + fmt.Println("--- Start results for Fusion Reciprocal ---") + for _, result := range results { + schema := result.Schema.(map[string]interface{}) + fmt.Println(schema["title"], result.ID, result.Score) + } + require.Equal(t, 3, len(results)) + require.Equal(t, resultSet2[0].ID, results[0].ID) + require.Equal(t, resultSet2[1].ID, results[1].ID) + require.Equal(t, resultSet2[2].ID, results[2].ID) + require.Equal(t, float32(0.016557377), results[0].Score) + require.Equal(t, float32(0.016502732), results[1].Score) + }) + + t.Run("Fusion Reciprocal 2", func(t *testing.T) { + results := hybrid.FusionRanked([]float64{0.8, 0.2}, + [][]*search.Result{resultSet1, resultSet2}, []string{"set1", "set2"}) + fmt.Println("--- Start results for Fusion Reciprocal ---") + for _, result := range results { + schema := result.Schema.(map[string]interface{}) + fmt.Println(schema["title"], result.ID, result.Score) + } + require.Equal(t, 3, len(results)) + require.Equal(t, resultSet2[0].ID, 
results[1].ID) + require.Equal(t, resultSet2[1].ID, results[0].ID) + require.Equal(t, resultSet2[2].ID, results[2].ID) + require.Equal(t, float32(0.016612023), results[0].Score) + require.Equal(t, float32(0.016448088), results[1].Score) + }) + + t.Run("Vector Only", func(t *testing.T) { + results := hybrid.FusionRanked([]float64{0.0, 1.0}, + [][]*search.Result{resultSet1, resultSet2}, []string{"set1", "set2"}) + fmt.Println("--- Start results for Fusion Reciprocal ---") + for _, result := range results { + schema := result.Schema.(map[string]interface{}) + fmt.Println(schema["title"], result.ID, result.Score) + } + require.Equal(t, 3, len(results)) + require.Equal(t, resultSet2[0].ID, results[0].ID) + require.Equal(t, resultSet2[1].ID, results[1].ID) + require.Equal(t, resultSet2[2].ID, results[2].ID) + require.Equal(t, float32(0.016666668), results[0].Score) + require.Equal(t, float32(0.016393442), results[1].Score) + }) + + t.Run("BM25 only", func(t *testing.T) { + results := hybrid.FusionRanked([]float64{1.0, 0.0}, + [][]*search.Result{resultSet1, resultSet2}, []string{"set1", "set2"}) + fmt.Println("--- Start results for Fusion Reciprocal ---") + for _, result := range results { + schema := result.Schema.(map[string]interface{}) + fmt.Println(schema["title"], result.ID, result.Score) + } + require.Equal(t, 3, len(results)) + require.Equal(t, resultSet1[0].ID, results[0].ID) + require.Equal(t, resultSet1[1].ID, results[1].ID) + require.Equal(t, resultSet1[2].ID, results[2].ID) + require.Equal(t, float32(0.016666668), results[0].Score) + require.Equal(t, float32(0.016393442), results[1].Score) + }) + + t.Run("Check basic search with one property", func(t *testing.T) { + // Check basic search with one property + results_set_1, err := repo.VectorSearch( + context.TODO(), + dto.GetParams{ + ClassName: "MyClass", + Pagination: &filters.Pagination{ + Offset: 0, + Limit: 6, + }, + Properties: []search.SelectProperty{{Name: "title"}, {Name: "description"}}, + }, + 
[]string{""}, + []models.Vector{PeanutsVector()}, + ) + + require.Nil(t, err) + results_set_2, err := repo.VectorSearch( + context.TODO(), + dto.GetParams{ + ClassName: "MyClass", + Pagination: &filters.Pagination{ + Offset: 0, + Limit: 6, + }, + Properties: []search.SelectProperty{{Name: "title"}, {Name: "description"}}, + }, + []string{""}, + []models.Vector{JourneyVector()}, + ) + require.Nil(t, err) + + // convert search.Result to hybrid.Result + var results_set_1_hybrid []*search.Result + for i := range results_set_1 { + // parse the last 12 digits of the id to get the uint64 + + results_set_1_hybrid = append(results_set_1_hybrid, &results_set_1[i]) + } + + var results_set_2_hybrid []*search.Result + for i := range results_set_2 { + results_set_2_hybrid = append(results_set_2_hybrid, &results_set_1[i]) + } + + res := hybrid.FusionRanked([]float64{0.2, 0.8}, [][]*search.Result{results_set_1_hybrid, results_set_2_hybrid}, []string{"set1", "set2"}) + fmt.Println("--- Start results for Fusion Reciprocal (", len(res), ")---") + for _, r := range res { + + schema := r.Schema.(map[string]interface{}) + title := schema["title"].(string) + description := schema["description"].(string) + fmt.Printf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.ID, r.Score, title, description, r.AdditionalProperties) + } + + require.Equal(t, "00000000-0000-0000-0000-000000000001", string(res[0].ID)) + }) + + t.Run("Hybrid", func(t *testing.T) { + params := dto.GetParams{ + ClassName: "MyClass", + HybridSearch: &searchparams.HybridSearch{ + Query: "elephant", + Vector: elephantVector(), + Alpha: 0.5, + }, + Pagination: &filters.Pagination{ + Offset: 0, + Limit: 6, + }, + Properties: search.SelectProperties{search.SelectProperty{Name: "title"}, search.SelectProperty{Name: "description"}}, + } + + prov := modules.NewProvider(logger, config.Config{}) + prov.SetClassDefaults(class) + prov.SetSchemaGetter(schemaGetter) + testerModule := &TesterModule{} + 
testerModule.AddVector("elephant", elephantVector()) + testerModule.AddVector("journey", JourneyVector()) + prov.Register(testerModule) + + log, _ := test.NewNullLogger() + explorer := traverser.NewExplorer(repo, log, prov, nil, defaultConfig) + explorer.SetSchemaGetter(schemaGetter) + hybridResults, err := explorer.Hybrid(context.TODO(), params) + require.Nil(t, err) + + fmt.Println("--- Start results for hybrid ---") + for _, r := range hybridResults { + schema := r.Schema.(map[string]interface{}) + title := schema["title"].(string) + description := schema["description"].(string) + fmt.Printf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.ID, r.Score, title, description, r.AdditionalProperties) + } + }) + + t.Run("Hybrid with negative limit", func(t *testing.T) { + params := dto.GetParams{ + ClassName: "MyClass", + HybridSearch: &searchparams.HybridSearch{ + Query: "Elephant Parade", + Vector: elephantVector(), + Alpha: 0.5, + }, + Pagination: &filters.Pagination{ + Offset: 0, + Limit: -1, + }, + Properties: search.SelectProperties{search.SelectProperty{Name: "title"}, search.SelectProperty{Name: "description"}}, + } + + prov := modules.NewProvider(logger, config.Config{}) + prov.SetClassDefaults(class) + prov.SetSchemaGetter(schemaGetter) + testerModule := &TesterModule{} + testerModule.AddVector("elephant", elephantVector()) + testerModule.AddVector("Elephant Parade", elephantVector()) + testerModule.AddVector("journey", JourneyVector()) + prov.Register(testerModule) + + log, _ := test.NewNullLogger() + explorer := traverser.NewExplorer(repo, log, prov, nil, defaultConfig) + explorer.SetSchemaGetter(schemaGetter) + hybridResults, err := explorer.Hybrid(context.TODO(), params) + + fmt.Println("--- Start results for hybrid with negative limit ---") + for _, r := range hybridResults { + schema := r.Schema.(map[string]interface{}) + title := schema["title"].(string) + description := schema["description"].(string) + fmt.Printf("Result 
id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.ID, r.Score, title, description, r.AdditionalProperties) + } + require.Nil(t, err) + require.True(t, len(hybridResults) > 0) + }) + + t.Run("Hybrid with offset 1", func(t *testing.T) { + params := dto.GetParams{ + ClassName: "MyClass", + HybridSearch: &searchparams.HybridSearch{ + Query: "Elephant Parade", + Vector: elephantVector(), + Alpha: 0.5, + }, + Pagination: &filters.Pagination{ + Offset: 2, + Limit: 1, + }, + Properties: search.SelectProperties{search.SelectProperty{Name: "title"}, search.SelectProperty{Name: "description"}}, + } + + prov := modules.NewProvider(logger, config.Config{}) + prov.SetClassDefaults(class) + prov.SetSchemaGetter(schemaGetter) + testerModule := &TesterModule{} + testerModule.AddVector("elephant", elephantVector()) + testerModule.AddVector("Elephant Parade", elephantVector()) + testerModule.AddVector("journey", JourneyVector()) + prov.Register(testerModule) + + log, _ := test.NewNullLogger() + explorer := traverser.NewExplorer(repo, log, prov, nil, defaultConfig) + explorer.SetSchemaGetter(schemaGetter) + hybridResults, err := explorer.Hybrid(context.TODO(), params) + + fmt.Println("--- Start results for hybrid with offset 2 ---") + for _, r := range hybridResults { + schema := r.Schema.(map[string]interface{}) + title := schema["title"].(string) + description := schema["description"].(string) + fmt.Printf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.ID, r.Score, title, description, r.AdditionalProperties) + } + + require.Nil(t, err) + require.Equal(t, 1, len(hybridResults)) + require.Equal(t, strfmt.UUID("00000000-0000-0000-0000-000000000001"), hybridResults[0].ID) + }) + + t.Run("Hybrid with offset 2", func(t *testing.T) { + params := dto.GetParams{ + ClassName: "MyClass", + HybridSearch: &searchparams.HybridSearch{ + Query: "Elephant Parade", + Vector: elephantVector(), + Alpha: 0.5, + }, + Pagination: &filters.Pagination{ + 
Offset: 4, + Limit: 1, + }, + Properties: search.SelectProperties{search.SelectProperty{Name: "title"}, search.SelectProperty{Name: "description"}}, + } + + prov := modules.NewProvider(logger, config.Config{}) + prov.SetClassDefaults(class) + prov.SetSchemaGetter(schemaGetter) + testerModule := &TesterModule{} + testerModule.AddVector("elephant", elephantVector()) + testerModule.AddVector("Elephant Parade", elephantVector()) + testerModule.AddVector("journey", JourneyVector()) + prov.Register(testerModule) + + log, _ := test.NewNullLogger() + explorer := traverser.NewExplorer(repo, log, prov, nil, defaultConfig) + explorer.SetSchemaGetter(schemaGetter) + hybridResults, err := explorer.Hybrid(context.TODO(), params) + + fmt.Println("--- Start results for hybrid with offset 4 ---") + for _, r := range hybridResults { + schema := r.Schema.(map[string]interface{}) + title := schema["title"].(string) + description := schema["description"].(string) + fmt.Printf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.ID, r.Score, title, description, r.AdditionalProperties) + } + + require.Nil(t, err) + require.Equal(t, len(hybridResults), 0) + }) +} + +func TestRFJourneyWithFilters(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + 
mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + QueryLimit: 20, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + class := SetupFusionClass(t, repo, schemaGetter, logger, 1.2, 0.75) + idx := repo.GetIndex("MyClass") + require.NotNil(t, idx) + + filter := &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName("MyClass"), + Property: schema.PropertyName("title"), + }, + Value: &filters.Value{ + Value: "elephant", + Type: schema.DataTypeText, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName("MyClass"), + Property: schema.PropertyName("title"), + }, + Value: &filters.Value{ + Value: "elephant", + Type: schema.DataTypeText, + }, + }, + }, + }, + } + + filter1 := &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + 
Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName("MyClass"), + Property: schema.PropertyName("title"), + }, + Value: &filters.Value{ + Value: "My", + Type: schema.DataTypeText, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName("MyClass"), + Property: schema.PropertyName("title"), + }, + Value: &filters.Value{ + Value: "journeys", + Type: schema.DataTypeText, + }, + }, + }, + }, + } + + t.Run("Hybrid with filter - no results expected", func(t *testing.T) { + params := dto.GetParams{ + ClassName: "MyClass", + HybridSearch: &searchparams.HybridSearch{ + Query: "elephant", + Vector: elephantVector(), + Alpha: 0.5, + }, + Pagination: &filters.Pagination{ + Offset: 0, + Limit: 100, + }, + Filters: filter1, + Properties: search.SelectProperties{search.SelectProperty{Name: "title"}, search.SelectProperty{Name: "description"}}, + } + + prov := modules.NewProvider(logger, config.Config{}) + prov.SetClassDefaults(class) + prov.SetSchemaGetter(schemaGetter) + testerModule := &TesterModule{} + testerModule.AddVector("elephant", elephantVector()) + testerModule.AddVector("journey", JourneyVector()) + prov.Register(testerModule) + + log, _ := test.NewNullLogger() + explorer := traverser.NewExplorer(repo, log, prov, nil, defaultConfig) + explorer.SetSchemaGetter(schemaGetter) + hybridResults, err := explorer.Hybrid(context.TODO(), params) + require.Nil(t, err) + require.Equal(t, 0, len(hybridResults)) + }) + + t.Run("Hybrid", func(t *testing.T) { + params := dto.GetParams{ + ClassName: "MyClass", + HybridSearch: &searchparams.HybridSearch{ + Query: "elephant", + Vector: elephantVector(), + Alpha: 0.5, + }, + Pagination: &filters.Pagination{ + Offset: 0, + Limit: -1, + }, + Properties: search.SelectProperties{search.SelectProperty{Name: "title"}, search.SelectProperty{Name: "description"}}, + } + + prov := modules.NewProvider(logger, config.Config{}) + prov.SetClassDefaults(class) + 
prov.SetSchemaGetter(schemaGetter) + testerModule := &TesterModule{} + testerModule.AddVector("elephant", elephantVector()) + testerModule.AddVector("journey", JourneyVector()) + prov.Register(testerModule) + + log, _ := test.NewNullLogger() + explorer := traverser.NewExplorer(repo, log, prov, nil, defaultConfig) + explorer.SetSchemaGetter(schemaGetter) + hybridResults, err := explorer.Hybrid(context.TODO(), params) + require.Nil(t, err) + require.Equal(t, 3, len(hybridResults)) + + fmt.Println("--- Start results for hybrid vector ---") + for _, r := range hybridResults { + schema := r.Schema.(map[string]interface{}) + title := schema["title"].(string) + description := schema["description"].(string) + fmt.Printf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.ID, r.Score, title, description, r.AdditionalProperties) + } + require.Equal(t, strfmt.UUID("00000000-0000-0000-0000-000000000002"), hybridResults[0].ID) + }) + + t.Run("Hybrid with filter", func(t *testing.T) { + params := dto.GetParams{ + ClassName: "MyClass", + HybridSearch: &searchparams.HybridSearch{ + Query: "elephant", + Vector: elephantVector(), + Alpha: 0.5, + }, + Pagination: &filters.Pagination{ + Offset: 0, + Limit: -1, + }, + Filters: filter, + Properties: search.SelectProperties{search.SelectProperty{Name: "title"}, search.SelectProperty{Name: "description"}}, + } + + prov := modules.NewProvider(logger, config.Config{}) + prov.SetClassDefaults(class) + prov.SetSchemaGetter(schemaGetter) + testerModule := &TesterModule{} + testerModule.AddVector("elephant", elephantVector()) + testerModule.AddVector("journey", JourneyVector()) + prov.Register(testerModule) + + log, _ := test.NewNullLogger() + explorer := traverser.NewExplorer(repo, log, prov, nil, defaultConfig) + explorer.SetSchemaGetter(schemaGetter) + hybridResults, err := explorer.Hybrid(context.TODO(), params) + require.Nil(t, err) + require.Equal(t, 1, len(hybridResults)) + + fmt.Println("--- Start results for 
hybrid with filter---") + for _, r := range hybridResults { + schema := r.Schema.(map[string]interface{}) + title := schema["title"].(string) + description := schema["description"].(string) + fmt.Printf("Result id: %v, score: %v, title: %v, description: %v, additional %+v\n", r.ID, r.Score, title, description, r.AdditionalProperties) + } + require.Equal(t, strfmt.UUID("00000000-0000-0000-0000-000000000002"), hybridResults[0].ID) + }) +} + +func TestStability(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + RootPath: dirName, + QueryMaximumResults: 10000, + 
MaxImportGoroutinesFactor: 1, + QueryLimit: 20, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + SetupFusionClass(t, repo, schemaGetter, logger, 1.2, 0.75) + idx := repo.GetIndex("MyClass") + require.NotNil(t, idx) + + docId1 := uint64(1) + docId2 := uint64(2) + docId3 := uint64(3) + + doc1 := &search.Result{ + ID: strfmt.UUID("e6f7e8b1-ac53-48eb-b6e4-cbe67396bcfa"), + DocID: &docId1, + Schema: map[string]interface{}{ + "title": "peanuts", + }, + Vector: []float32{0.1, 0.2, 0.3, 0.4, 0.5}, + Score: 0.1, + } + + doc2 := &search.Result{ + ID: strfmt.UUID("e6f7e8b1-ac53-48eb-b6e4-cbe67396bcfb"), + DocID: &docId2, + Schema: map[string]interface{}{ + "title": "peanuts", + }, + Vector: []float32{0.1, 0.2, 0.3, 0.4, 0.5}, + Score: 0.1, + } + + doc3 := &search.Result{ + ID: strfmt.UUID("e6f7e8b1-ac53-48eb-b6e4-cbe67396bcfc"), + DocID: &docId3, + Schema: map[string]interface{}{ + "title": "peanuts", + }, + Vector: []float32{0.1, 0.2, 0.3, 0.4, 0.5}, + Score: 0.1, + } + + resultSet1 := []*search.Result{doc1, doc2, doc3} + resultSet2 := []*search.Result{doc2, doc1, doc3} + + t.Run("Fusion Reciprocal", func(t *testing.T) { + results := hybrid.FusionRanked([]float64{0.4, 0.6}, + [][]*search.Result{resultSet1, resultSet2}, []string{"set1", "set2"}) + fmt.Println("--- Start results for Fusion Reciprocal ---") + for _, result := range results { + schema := result.Schema.(map[string]interface{}) + fmt.Println(schema["title"], result.ID, result.Score) + } + require.Equal(t, 3, len(results)) + require.Equal(t, resultSet2[0].ID, results[0].ID) + require.Equal(t, resultSet2[1].ID, results[1].ID) + require.Equal(t, resultSet2[2].ID, results[2].ID) + }) +} + +func elephantVector() []float32 { + return []float32{ + -0.106136, 
-0.021716, 0.632442, 0.195315, -0.038854, -0.260533, -0.728847, -0.313725, -0.161967, 0.179243, -0.124185, 0.158839, 0.09563, -0.071267, 0.073928, -0.096735, 0.27266, -0.204127, -0.387028, -0.361406, -0.278027, 0.298766, 0.265405, 0.037477, -0.079904, -0.778953, -0.525643, -0.052346, -0.2174, 0.095746, 0.610937, 0.315672, -0.125526, 0.013475, -0.075578, -0.053183, -0.381475, 0.620278, -0.093857, 0.802608, -0.105773, -0.007902, 0.663528, 0.407708, 0.753832, 0.420718, 0.139289, -0.126864, 0.36345, -0.039222, 0.089002, 0.092151, 0.138025, 0.18881, 0.51416, -0.391045, -0.169528, -0.044023, 0.437196, -0.23917, 0.081247, -0.440846, -0.484764, 0.090495, 0.001852, -0.03441, 0.18548, -0.440182, 0.286827, -0.081451, 0.030155, -0.072746, -0.366531, 0.354118, 0.418432, -0.305682, 0.515893, -0.424999, -0.495273, 0.731375, 0.358407, -0.415989, 0.441337, -0.022167, 0.318837, -0.473018, 0.342046, -0.499794, -0.303161, -0.379234, -0.279082, -0.325648, 0.200613, -0.457396, 0.116745, 0.225836, -0.322175, -0.151425, 0.322014, 0.077097, 0.049998, -0.01005, 0.489028, -0.273297, 0.218896, -0.507729, 0.488891, -0.207774, -0.499136, 0.992803, 0.379556, -0.572352, -0.295821, -0.071392, -0.625823, -0.425159, 0.024593, 0.307965, 0.311686, 0.287844, 0.435028, 0.454474, -0.208158, -0.111947, -0.380334, -0.392014, 1.747561, 0.360315, 0.472088, 0.273835, 0.635424, 0.390057, -0.021349, -0.746944, 0.265353, 0.60709, -0.171053, 0.408823, -0.059646, 0.058306, -0.062817, -0.41064, -0.342016, -0.048077, 0.862758, -0.217101, -0.048961, -0.314094, 0.228395, -0.339353, 0.558551, 0.370054, -0.319855, 0.543137, 0.71334, 0.166296, 0.040412, -0.160482, 0.432088, -0.491292, 0.072819, -0.409627, 0.300197, 0.169077, 0.44379, 0.117131, 0.142459, -0.482226, -0.100245, 0.058273, -0.590567, -0.061971, -0.415718, -0.018105, -0.693528, -0.047609, -0.041873, 0.606186, 0.19767, -0.091001, -0.315381, -1.234111, 0.228805, -0.636861, 0.208757, -0.270024, -0.259684, -0.351592, -0.978549, 0.683986, -0.331669, -0.078729, 
0.385676, 0.390955, -0.901898, -0.071451, -0.103991, 0.206379, 0.469656, 0.071528, -0.152589, 0.282268, 0.539651, -0.856463, -0.344053, -0.40572, 0.771483, -0.065611, -0.408832, 0.303948, -0.565157, 0.153293, -0.699892, 1.112725, 0.259508, 0.135771, 0.484552, 0.151274, -0.743235, 0.069811, 0.137583, 0.212661, 0.376839, 0.136164, 0.145626, -0.466645, -0.474334, -0.365033, 0.251158, -0.313904, 0.210487, -1.016155, 0.262768, 0.432895, -0.291339, -0.221825, 0.513278, 0.659038, -0.401398, -0.164522, 0.395279, -0.449811, 0.076142, 0.389243, 0.076184, 0.05539, -0.597094, -0.149824, 0.206724, -0.477001, -0.315719, 0.166689, -0.357187, 0.34429, 0.256624, -0.236781, -0.713059, -0.440255, 0.27353, -0.032257, 0.06925, 0.359134, -0.088975, 0.112507, -0.071103, 0.880417, 0.528587, 0.155656, -0.720531, 0.3068, 0.754715, 0.009366, 0.067487, -0.11898, -0.471064, -0.396507, 0.298669, 0.038283, 0.057218, -0.075818, -0.01513, -0.319236, 0.692123, -0.122985, 0.875938, 0.378184, 0.427029, 0.315545, -0.549573, 0.389602, -0.017071, 0.160122, 0.368208, 0.060474, -0.199651, 0.087829, 0.447339, 0.012265, -0.095388, -0.07034, + } +} + +// "journey" +func JourneyVector() []float32 { + return []float32{ + -0.523002, 0.14169, 0.016461, -0.069062, 0.487908, -0.024193, -0.282436, 0.004778, -0.378135, 0.396011, 0.094045, -0.06584, 0.061162, -0.600018, -0.110189, 0.244562, 0.433501, 0.303775, -0.451004, -0.453709, 0.350324, 0.2047, -0.091615, -0.282805, -0.232953, -0.215143, 0.333113, -0.126952, -0.639225, 0.101498, 0.232343, 0.58831, 0.971, 0.494446, -0.483305, -0.873438, -0.483694, 0.406465, 0.342816, 1.253387, -0.24718, -0.046063, -0.660406, 0.103386, -0.06063, 0.3422, 0.322542, 0.026074, -0.623612, 0.489793, -0.632363, 0.448922, -0.370049, 0.212377, -0.315855, 0.364525, 0.056798, 0.805679, 0.145633, 0.850648, 0.432728, -1.431841, -0.226569, -0.315194, 0.560742, 0.261859, -0.001653, -0.068738, -0.662729, -0.049259, -0.380322, -0.374194, 0.363328, 0.341796, -0.077566, 0.503337, 0.353664, 
-0.045754, -0.499081, 0.198603, 0.038837, -0.460198, 0.00735, -0.270993, 0.950923, -0.085815, -0.52167, -0.10439, 0.31398, -0.560229, 0.411738, -0.129033, -0.009998, 0.443882, -0.045643, -0.078445, -0.259311, -0.08337, 0.232652, -0.015912, -0.229458, -0.474973, 1.265934, -0.204483, -0.293586, -0.619023, 0.158895, -0.730671, -0.163626, 0.411716, -0.000132, 0.069014, -0.682714, 0.303234, 0.299097, -0.484469, 0.608172, -0.163785, -0.419754, -0.160745, 0.278904, 0.550542, -0.008052, 0.160397, -0.211354, -0.19755, 1.182627, 0.705073, -0.461941, -0.235292, 0.534275, -0.096419, -0.405812, -0.157745, -0.335469, 0.200545, 0.406497, -0.05341, -0.009234, -0.029925, -0.394101, -0.060133, 0.182601, 0.615583, 0.212157, 0.363921, 0.41868, -0.652791, 0.657173, -0.131662, 0.269305, 0.381748, -0.827964, -0.452596, 0.201918, 0.0673, -0.020293, 0.486942, -0.72454, -0.435051, -0.615452, -0.218852, 0.090703, -0.471036, 0.032373, 0.569953, 0.098359, -0.570767, -0.21015, -0.53019, -0.227117, 0.327978, 0.087079, -0.115037, 0.09193, -0.922884, -0.165566, -0.353596, 0.535904, -0.328579, 0.029465, -1.508702, -0.320394, -0.596324, 0.290277, -0.272515, 0.104348, 0.062855, -0.236447, 0.388958, -0.186552, -0.156253, 0.355678, 0.53834, -0.321627, 0.486004, 0.301326, 0.786779, 0.430292, -0.012458, -0.164964, -0.072951, 0.746564, 0.19136, 0.003213, 0.53479, 0.511118, -0.559153, -0.088731, -0.436206, 0.421004, 0.193043, -0.656222, 0.133223, 0.00107, 0.037087, 0.263503, 0.378593, 0.158718, -0.401664, -0.10563, -0.111221, 0.018598, -0.036396, 0.189584, -0.347721, -0.544111, -0.018158, 0.134147, -0.362431, -0.702383, -0.375221, 0.365745, 0.118082, -0.19102, -0.150732, 0.638995, 0.070662, -0.054605, 0.221755, 0.23726, -0.274418, 0.294639, 0.221177, -0.012947, 0.08444, -0.486605, -0.225034, 0.774728, 0.167609, 0.766647, 0.381622, 0.241907, -0.196452, 0.245138, -0.203225, -0.701671, 0.236662, -0.627221, 0.143006, 0.055671, 0.564561, -0.114897, -0.542244, 0.464601, 0.201577, -0.177196, -0.795015, -0.580793, 
-0.134996, -0.579672, -0.399042, 0.008118, -0.458077, -0.43296, 0.074138, 0.328092, 0.02934, 0.406294, 0.330677, -0.138583, -0.676608, -0.099983, -0.137182, 0.713108, 0.248643, 0.153462, 0.56039, -0.109877, 0.260655, -0.529779, -0.13416, 0.067448, -0.139468, -0.179535, 0.372629, 0.287185, 0.100582, 0.093573, -0.208796, + } +} + +// "peanuts" +func PeanutsVector() []float32 { + return []float32{0.563772, -0.779601, -0.18491, 0.509093, 0.080691, -0.621506, -0.127855, -0.165435, 0.57496, 0.006945, 0.452967, -0.285534, -0.129205, 0.193883, 0.092732, 0.083284, 0.714696, 0.107078, -0.398886, -0.117344, -0.387671, 0.026748, -0.562581, -0.007178, -0.354846, -0.431299, -0.788228, 0.175199, 0.914486, 0.441425, 0.089804, 0.284472, 0.106916, -0.133174, 0.399299, 0.002177, 0.551474, 0.389343, -0.016404, 0.770212, -0.219833, 0.303322, 0.127598, -0.378037, -0.172971, 0.394854, -0.424415, -0.71173, 0.080323, -0.406372, 0.398395, -0.594257, -0.418287, 0.055755, -0.352343, -0.393373, -0.732443, 0.333113, 0.420378, -0.50231, 0.261863, -0.061356, -0.180985, 0.311916, -0.180207, -0.154169, 0.371969, 0.454717, 0.320499, -0.182448, 0.087347, 0.585272, 0.136098, 0.288909, -0.229571, -0.140278, 0.229644, -0.557327, -0.110147, 0.034364, -0.021627, -0.598707, 0.221168, -0.059591, -0.203555, -0.434876, 0.209634, -0.460895, -0.345391, -0.18248, -0.24853, 0.730295, -0.295402, -0.562237, 0.255922, 0.076661, -0.713794, -0.354747, -1.109888, -0.066694, -0.195747, -0.282781, 0.459869, -0.309599, -0.002211, -0.274471, -0.003621, 0.008228, 0.011961, -0.258772, -0.210687, -0.664148, -0.257968, 0.231335, 0.530392, -0.205764, -0.621055, -0.440582, 0.080335, 0.017367, 0.880771, 0.656272, -0.713248, -0.208629, 0.095346, 0.336802, 0.888765, 0.251927, 0.066473, 0.182678, -0.220494, 0.288927, -0.602036, 0.057106, -0.594172, 0.848978, 0.751973, 0.090758, -0.732184, 0.683475, -0.075085, 0.381326, -0.076531, -0.253831, 0.10311, -0.02988, -0.043583, 0.005746, -0.460183, -0.189048, 0.25792, 0.477565, 0.391953, 
0.08469, -0.10022, 0.454383, 0.170811, 0.196819, -0.760276, 0.045886, -0.743934, 0.190072, -0.216326, -0.624262, -0.22944, 0.066233, 1.024283, 0.044009, -0.373543, -0.243663, 0.204444, 0.402183, 0.043356, 0.31716, 0.302178, 0.369374, 0.36901, 0.02886, -0.26132, -0.234714, -0.791308, -0.433528, -0.098797, -0.447567, -0.124892, -0.119958, 0.31019, -0.096092, -0.259021, -0.078099, -0.178679, 0.14879, 0.106432, -0.450003, -0.294972, 0.044257, 0.402832, 0.263266, -0.309787, -0.17766, -0.399104, 0.577422, 0.30102, 0.05326, -0.271873, 0.204839, -0.019002, -0.743543, 0.739314, -0.115868, -0.504568, -0.115713, 0.042769, -0.123561, -0.057097, 0.407096, 0.770627, 0.372981, -0.321945, 0.349865, 0.437571, -0.77394, -0.090017, -0.011273, -0.468664, -0.735247, -0.745655, 0.018983, -0.248165, 0.215342, -0.136942, -0.458205, 0.4572, -0.032293, 0.654409, -0.024184, -0.392144, 0.634579, 0.222185, 0.471951, -0.063678, -0.473611, 0.796793, -0.295494, -0.157621, -0.103365, -0.564606, -0.092231, -0.517754, -0.369358, 0.137479, -0.214837, 0.11057, -0.095227, 0.726768, -0.079352, -0.065927, -0.846602, -0.317556, -0.344271, 0.201353, -0.367633, -0.004477, 0.157801, -0.249114, -0.549599, -0.147123, 0.308084, -0.175564, 0.306867, -0.071157, -0.588356, 0.450987, -0.184879, -0.096782, -0.006346, -0.017689, 0.005998, 0.200963, 0.225338, 0.189993, -1.105824, 0.520005, 0.129679, 0.198194, -0.254813, -0.127583, 0.326054, 0.009956, -0.016008, -0.483044, 0.801135, -0.517766, 0.067179, -0.372756, -0.511781, 0.058562, -0.082906, -0.28168, -0.285859} +} + +type fakeObjectSearcher struct{} + +func (f *fakeObjectSearcher) Search(context.Context, dto.GetParams) ([]search.Result, error) { + return nil, nil +} + +func (f *fakeObjectSearcher) VectorSearch(context.Context, dto.GetParams, []string, []models.Vector) ([]search.Result, error) { + return nil, nil +} + +func (f *fakeObjectSearcher) CrossClassVectorSearch(context.Context, models.Vector, string, int, int, *filters.LocalFilter) ([]search.Result, error) 
{ + return nil, nil +} + +func (f *fakeObjectSearcher) Object(ctx context.Context, className string, id strfmt.UUID, props search.SelectProperties, additional additional.Properties, properties *additional.ReplicationProperties, tenant string) (*search.Result, error) { + return nil, nil +} + +func (f *fakeObjectSearcher) ObjectsByID(ctx context.Context, id strfmt.UUID, props search.SelectProperties, additional additional.Properties, tenant string) (search.Results, error) { + return nil, nil +} + +func (f *fakeObjectSearcher) SparseObjectSearch(ctx context.Context, params dto.GetParams) ([]*storobj.Object, []float32, error) { + out := []*storobj.Object{ + { + Object: models.Object{ + ID: "9889a225-3b28-477d-b8fc-5f6071bb4731", + }, + + Vector: []float32{1, 2, 3}, + }, + { + Object: models.Object{ + ID: "0bcdef12-3314-442e-a4d1-e94d7c0afc3a", + }, + Vector: []float32{4, 5, 6}, + }, + } + lim := params.Pagination.Offset + params.Pagination.Limit + if lim > len(out) { + lim = len(out) + } + + return out[:lim], []float32{0.008, 0.001}[:lim], nil +} + +func CopyElems[T any](list1, list2 []T, pos int) bool { + if len(list1) != len(list2) { + return false + } + if pos < 0 || pos >= len(list1) { + return true + } + list1[pos] = list2[pos] + return CopyElems(list1, list2, pos+1) +} + +func (f *fakeObjectSearcher) ResolveReferences(ctx context.Context, objs search.Results, props search.SelectProperties, groupBy *searchparams.GroupBy, additional additional.Properties, tenant string) (search.Results, error) { + // Convert res1 to search.Results + out := make(search.Results, len(objs)) + CopyElems(out, objs, 0) + + return out, nil +} + +func TestHybridOverSearch(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + 
mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + QueryLimit: 20, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, nil, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(context.TODO())) + defer repo.Shutdown(context.Background()) + + fos := &fakeObjectSearcher{} + + class := SetupFusionClass(t, repo, schemaGetter, logger, 1.2, 0.75) + idx := repo.GetIndex("MyClass") + require.NotNil(t, idx) + + t.Run("Hybrid", func(t *testing.T) { + params := dto.GetParams{ + ClassName: "MyClass", + HybridSearch: &searchparams.HybridSearch{ + Query: "elephant", + Vector: elephantVector(), + Alpha: 0.5, + }, + Pagination: &filters.Pagination{ + Offset: 0, + 
Limit: 1, + }, + } + + prov := modules.NewProvider(logger, config.Config{}) + prov.SetClassDefaults(class) + prov.SetSchemaGetter(schemaGetter) + testerModule := &TesterModule{} + testerModule.AddVector("elephant", elephantVector()) + testerModule.AddVector("journey", JourneyVector()) + prov.Register(testerModule) + + log, _ := test.NewNullLogger() + explorer := traverser.NewExplorer(fos, log, prov, nil, defaultConfig) + explorer.SetSchemaGetter(schemaGetter) + hybridResults, err := explorer.Hybrid(context.TODO(), params) + require.Nil(t, err) + require.Equal(t, 1, len(hybridResults)) + require.Equal(t, strfmt.UUID("9889a225-3b28-477d-b8fc-5f6071bb4731"), hybridResults[0].ID) + // require.Equal(t, "79a636c2-3314-442e-a4d1-e94d7c0afc3a", hybridResults[1].ID) + }) +} + +type TesterModule struct { + vectors map[string][]float32 +} + +func (m *TesterModule) Name() string { + return "test-vectoriser" +} + +func (m *TesterModule) Type() modulecapabilities.ModuleType { + return modulecapabilities.Text2Vec +} + +func (m *TesterModule) Init(ctx context.Context, + params moduletools.ModuleInitParams, +) error { + return nil +} + +func (m *TesterModule) InitExtension(modules []modulecapabilities.Module) error { + return nil +} + +func (m *TesterModule) VectorizeObject(ctx context.Context, + obj *models.Object, objDiff *models.Object, cfg moduletools.ClassConfig, +) error { + return nil +} + +func (m *TesterModule) MetaInfo() (map[string]interface{}, error) { + return nil, nil +} + +func (m *TesterModule) AdditionalProperties() map[string]modulecapabilities.AdditionalProperty { + return nil +} + +func (m *TesterModule) VectorizeInput(ctx context.Context, + input string, cfg moduletools.ClassConfig, +) ([]float32, error) { + vec, ok := m.vectors[input] + if !ok { + return nil, fmt.Errorf("vector not found") + } + return vec, nil +} + +func (m *TesterModule) AddVector(text string, vector []float32) error { + if m.vectors == nil { + m.vectors = map[string][]float32{} + } + 
m.vectors[text] = vector + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/idempotent_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/idempotent_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4755e9c412ffa57e385cd03f4f59e11d89734c36 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/idempotent_integration_test.go @@ -0,0 +1,550 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "strings" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" +) + +func TestMigrator_UpdateIndex(t *testing.T) { + var ( + class1Name = "IdempotentClass" + class2Name = "MissingClass" + missingDataClass1 = "missing_data_class_1" + allDataClass1 = "all_data_class_1" + allDataClass2 = "all_data_class_2" + allDataClass1And2 = "all_data_class_1_and_2" + intProp = "someInt" + textProp = "someText" + numberProp = "someNumber" + boolProp = "someBool" + vectorConfig = map[string]models.VectorConfig{"vec1": { + VectorIndexConfig: hnsw.NewDefaultUserConfig(), + VectorIndexType: "hnsw", + }} + 
singleTenantTestClasses = map[string]*models.Class{ + missingDataClass1: { + Class: class1Name, + Properties: []*models.Property{ + {Name: intProp, DataType: []string{"int"}}, + {Name: textProp, DataType: []string{"text"}}, + }, + InvertedIndexConfig: invertedConfig(), + VectorConfig: vectorConfig, + }, + allDataClass1: { + Class: class1Name, + Properties: []*models.Property{ + {Name: intProp, DataType: []string{"int"}}, + {Name: textProp, DataType: []string{"text"}}, + {Name: numberProp, DataType: []string{"number"}}, + {Name: boolProp, DataType: []string{"boolean"}}, + }, + InvertedIndexConfig: invertedConfig(), + VectorConfig: vectorConfig, + }, + allDataClass2: { + Class: class2Name, + Properties: []*models.Property{ + {Name: intProp, DataType: []string{"int"}}, + }, + InvertedIndexConfig: invertedConfig(), + VectorConfig: vectorConfig, + }, + } + singleTenantShardingState = map[string]*sharding.State{ + missingDataClass1: func() *sharding.State { + ss := &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + OwnsVirtual: []string{"virtual1", "virtual2"}, + BelongsToNodes: []string{"node1"}, + }, + }, + } + ss.SetLocalName("node1") + return ss + }(), + allDataClass1: func() *sharding.State { + ss := &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + OwnsVirtual: []string{"virtual1", "virtual2"}, + BelongsToNodes: []string{"node1"}, + }, + "shard2": { + Name: "shard2", + OwnsVirtual: []string{"virtual3", "virtual4"}, + BelongsToNodes: []string{"node1"}, + }, + "shard3": { + Name: "shard3", + OwnsVirtual: []string{"virtual5", "virtual6"}, + // should not affect local repo, belongs to remote node + BelongsToNodes: []string{"node2"}, + }, + }, + } + ss.SetLocalName("node1") + return ss + }(), + allDataClass1And2: func() *sharding.State { + ss := &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + OwnsVirtual: []string{"virtual1", "virtual2"}, + 
BelongsToNodes: []string{"node1"}, + }, + "shard2": { + Name: "shard2", + OwnsVirtual: []string{"virtual3", "virtual4"}, + BelongsToNodes: []string{"node1"}, + }, + "shard3": { + Name: "shard3", + OwnsVirtual: []string{"virtual5", "virtual6"}, + // should not affect local repo, belongs to remote node + BelongsToNodes: []string{"node2"}, + }, + "shard4": { + Name: "shard4", + OwnsVirtual: []string{"virtual7", "virtual8"}, + BelongsToNodes: []string{"node1"}, + }, + "shard5": { + Name: "shard5", + OwnsVirtual: []string{"virtual9", "virtual10"}, + BelongsToNodes: []string{"node1"}, + }, + }, + } + ss.SetLocalName("node1") + return ss + }(), + } + multiTenantTestClasses = map[string]*models.Class{ + missingDataClass1: { + Class: class1Name, + Properties: []*models.Property{ + {Name: intProp, DataType: []string{"int"}}, + {Name: textProp, DataType: []string{"text"}}, + }, + InvertedIndexConfig: invertedConfig(), + VectorConfig: vectorConfig, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + }, + allDataClass1: { + Class: class1Name, + Properties: []*models.Property{ + {Name: intProp, DataType: []string{"int"}}, + {Name: textProp, DataType: []string{"text"}}, + {Name: numberProp, DataType: []string{"number"}}, + {Name: boolProp, DataType: []string{"boolean"}}, + }, + InvertedIndexConfig: invertedConfig(), + VectorConfig: vectorConfig, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + }, + } + multiTenantShardingState = map[string]*sharding.State{ + missingDataClass1: func() *sharding.State { + ss := &sharding.State{ + Physical: map[string]sharding.Physical{ + "tenant1": { + Name: "tenant1", + OwnsVirtual: []string{"virtual1", "virtual2"}, + BelongsToNodes: []string{"node1"}, + Status: models.TenantActivityStatusHOT, + }, + }, + PartitioningEnabled: true, + } + ss.SetLocalName("node1") + return ss + }(), + allDataClass1: func() *sharding.State { + ss := &sharding.State{ + Physical: map[string]sharding.Physical{ + "tenant1": 
{ + Name: "tenant1", + OwnsVirtual: []string{"virtual1", "virtual2"}, + BelongsToNodes: []string{"node1"}, + Status: models.TenantActivityStatusHOT, + }, + "tenant2": { + Name: "tenant2", + OwnsVirtual: []string{"virtual3", "virtual4"}, + BelongsToNodes: []string{"node1"}, + Status: models.TenantActivityStatusHOT, + }, + "tenant3": { + Name: "tenant3", + OwnsVirtual: []string{"virtual5", "virtual6"}, + BelongsToNodes: []string{"node1"}, + // should not affect local repo, not hot + Status: models.TenantActivityStatusCOLD, + }, + "tenant4": { + Name: "tenant4", + OwnsVirtual: []string{"virtual7", "virtual8"}, + // should not affect local repo, belongs to remote node + BelongsToNodes: []string{"node2"}, + Status: models.TenantActivityStatusHOT, + }, + }, + PartitioningEnabled: true, + } + ss.SetLocalName("node1") + return ss + }(), + } + ) + + t.Run("single tenant, run multiple updates with missing class", func(t *testing.T) { + var ( + ctx = context.Background() + iterations = 5 + remoteDirName = t.TempDir() + localDirName = t.TempDir() + existingClass = singleTenantTestClasses[allDataClass1] + missingClass = singleTenantTestClasses[allDataClass2] + localSS = singleTenantShardingState[allDataClass1] + remoteSS = singleTenantShardingState[allDataClass1And2] + localMigrator = setupTestMigrator(t, localDirName, localSS, existingClass) + remoteMigrator = setupTestMigrator(t, remoteDirName, remoteSS, existingClass, missingClass) + ) + + defer func() { + require.Nil(t, localMigrator.db.Shutdown(context.Background())) + require.Nil(t, remoteMigrator.db.Shutdown(context.Background())) + }() + + t.Run("before index update", func(t *testing.T) { + existing := localMigrator.db.GetIndex(schema.ClassName(existingClass.Class)) + assert.NotNil(t, existing) + missing := localMigrator.db.GetIndex(schema.ClassName(missingClass.Class)) + assert.Nil(t, missing) + }) + + t.Run("run update index", func(t *testing.T) { + // UpdateIndex should be able to run an arbitrary number + // of 
times without any changes to the internal DB state + for i := 0; i < iterations; i++ { + err := localMigrator.UpdateIndex(ctx, missingClass, remoteSS) + require.Nil(t, err) + } + }) + + t.Run("after index update", func(t *testing.T) { + existing := localMigrator.db.GetIndex(schema.ClassName(existingClass.Class)) + assert.NotNil(t, existing) + missing := localMigrator.db.GetIndex(schema.ClassName(missingClass.Class)) + assert.NotNil(t, missing) + }) + }) + + t.Run("single tenant, run multiple updates with multiple shards", func(t *testing.T) { + var ( + ctx = context.Background() + iterations = 5 + remoteDirName = t.TempDir() + localDirName = t.TempDir() + remoteClass = singleTenantTestClasses[allDataClass1] + localClass = singleTenantTestClasses[missingDataClass1] + localSS = singleTenantShardingState[missingDataClass1] + remoteSS = singleTenantShardingState[allDataClass1] + localMigrator = setupTestMigrator(t, localDirName, localSS, localClass) + remoteMigrator = setupTestMigrator(t, remoteDirName, remoteSS, remoteClass) + someBuckets = []string{ + helpers.BucketFromPropNameLSM(intProp), + helpers.BucketFromPropNameLSM(textProp), + } + missingBuckets = []string{ + helpers.BucketFromPropNameLSM(numberProp), + helpers.BucketFromPropNameLSM(boolProp), + } + allBuckets = append(someBuckets, missingBuckets...) 
+ ) + + defer func() { + require.Nil(t, localMigrator.db.Shutdown(context.Background())) + require.Nil(t, remoteMigrator.db.Shutdown(context.Background())) + }() + + t.Run("before index update", func(t *testing.T) { + idx, ok := localMigrator.db.indices[strings.ToLower(class1Name)] + require.True(t, ok) + shardCount := 0 + idx.ForEachShard(func(_ string, shard ShardLike) error { + for _, name := range someBuckets { + bucket := shard.Store().Bucket(name) + assert.NotNil(t, bucket) + } + for _, name := range missingBuckets { + bucket := shard.Store().Bucket(name) + assert.Nil(t, bucket) + } + shardCount++ + return nil + }) + assert.Equal(t, 1, shardCount) + }) + + t.Run("run update index", func(t *testing.T) { + // UpdateIndex should be able to run an arbitrary number + // of times without any changes to the internal DB state + for i := 0; i < iterations; i++ { + err := localMigrator.UpdateIndex(ctx, remoteClass, remoteSS) + require.Nil(t, err) + } + }) + + t.Run("after index update", func(t *testing.T) { + require.Len(t, localMigrator.db.indices, 1) + idx, ok := localMigrator.db.indices[strings.ToLower(class1Name)] + require.True(t, ok) + + shardCount := 0 + idx.ForEachShard(func(_ string, shard ShardLike) error { + for _, name := range allBuckets { + bucket := shard.Store().Bucket(name) + assert.NotNil(t, bucket) + } + shardCount++ + return nil + }) + assert.Equal(t, 2, shardCount) + }) + }) + + t.Run("multi-tenant, run multiple updates with tenants to add", func(t *testing.T) { + var ( + ctx = context.Background() + iterations = 5 + remoteDirName = t.TempDir() + localDirName = t.TempDir() + remoteClass = multiTenantTestClasses[allDataClass1] + localClass = multiTenantTestClasses[missingDataClass1] + localSS = multiTenantShardingState[missingDataClass1] + remoteSS = multiTenantShardingState[allDataClass1] + localMigrator = setupTestMigrator(t, localDirName, localSS, localClass) + remoteMigrator = setupTestMigrator(t, remoteDirName, remoteSS, remoteClass) + 
someBuckets = []string{ + helpers.BucketFromPropNameLSM(intProp), + helpers.BucketFromPropNameLSM(textProp), + } + missingBuckets = []string{ + helpers.BucketFromPropNameLSM(numberProp), + helpers.BucketFromPropNameLSM(boolProp), + } + allBuckets = append(someBuckets, missingBuckets...) + ) + + defer func() { + require.Nil(t, localMigrator.db.Shutdown(context.Background())) + require.Nil(t, remoteMigrator.db.Shutdown(context.Background())) + }() + + t.Run("add tenants", func(t *testing.T) { + err := localMigrator.NewTenants(ctx, localClass, []*schemaUC.CreateTenantPayload{ + {Name: "tenant1", Status: models.TenantActivityStatusHOT}, + }) + require.Nil(t, err) + err = remoteMigrator.NewTenants(ctx, localClass, []*schemaUC.CreateTenantPayload{ + {Name: "tenant1", Status: models.TenantActivityStatusHOT}, + {Name: "tenant2", Status: models.TenantActivityStatusHOT}, + {Name: "tenant3", Status: models.TenantActivityStatusCOLD}, + {Name: "tenant4", Status: models.TenantActivityStatusHOT}, + }) + require.Nil(t, err) + }) + + t.Run("before index update", func(t *testing.T) { + idx, ok := localMigrator.db.indices[strings.ToLower(class1Name)] + require.True(t, ok) + shardCount := 0 + idx.ForEachShard(func(_ string, shard ShardLike) error { + for _, name := range someBuckets { + bucket := shard.Store().Bucket(name) + assert.NotNil(t, bucket) + } + for _, name := range missingBuckets { + bucket := shard.Store().Bucket(name) + assert.Nil(t, bucket) + } + shardCount++ + return nil + }) + assert.Equal(t, 1, shardCount) + }) + + t.Run("run update index", func(t *testing.T) { + // UpdateIndex should be able to run an arbitrary number + // of times without any changes to the internal DB state + for i := 0; i < iterations; i++ { + err := localMigrator.UpdateIndex(ctx, remoteClass, remoteSS) + require.Nil(t, err) + } + }) + + t.Run("after index update", func(t *testing.T) { + require.Len(t, localMigrator.db.indices, 1) + idx, ok := localMigrator.db.indices[strings.ToLower(class1Name)] 
+ require.True(t, ok) + + shardCount := 0 + idx.ForEachShard(func(_ string, shard ShardLike) error { + for _, name := range allBuckets { + bucket := shard.Store().Bucket(name) + assert.NotNil(t, bucket) + } + shardCount++ + return nil + }) + assert.Equal(t, 2, shardCount) + }) + }) + + t.Run("multi-tenant, run multiple updates with tenants to delete", func(t *testing.T) { + var ( + ctx = context.Background() + iterations = 5 + remoteDirName = t.TempDir() + localDirName = t.TempDir() + remoteClass = multiTenantTestClasses[allDataClass1] + localClass = multiTenantTestClasses[allDataClass1] + localSS = multiTenantShardingState[allDataClass1] + remoteSS = multiTenantShardingState[missingDataClass1] + localMigrator = setupTestMigrator(t, localDirName, localSS, localClass) + remoteMigrator = setupTestMigrator(t, remoteDirName, remoteSS, remoteClass) + initialTenants = []string{"tenant1", "tenant2"} + remainingTenants = []string{"tenant1"} + ) + + defer func() { + require.Nil(t, localMigrator.db.Shutdown(context.Background())) + require.Nil(t, remoteMigrator.db.Shutdown(context.Background())) + }() + + t.Run("add tenants", func(t *testing.T) { + err := localMigrator.NewTenants(ctx, localClass, []*schemaUC.CreateTenantPayload{ + {Name: "tenant1", Status: models.TenantActivityStatusHOT}, + {Name: "tenant2", Status: models.TenantActivityStatusHOT}, + }) + require.Nil(t, err) + err = remoteMigrator.NewTenants(ctx, localClass, []*schemaUC.CreateTenantPayload{ + {Name: "tenant1", Status: models.TenantActivityStatusHOT}, + }) + require.Nil(t, err) + }) + + t.Run("before index update", func(t *testing.T) { + idx, ok := localMigrator.db.indices[strings.ToLower(class1Name)] + require.True(t, ok) + for _, tenant := range initialTenants { + require.NotNil(t, idx.shards.Load(tenant)) + } + }) + + t.Run("run update index", func(t *testing.T) { + // UpdateIndex should be able to run an arbitrary number + // of times without any changes to the internal DB state + for i := 0; i < 
iterations; i++ { + err := localMigrator.UpdateIndex(ctx, remoteClass, remoteSS) + require.Nil(t, err) + } + }) + + t.Run("after index update", func(t *testing.T) { + require.Len(t, localMigrator.db.indices, 1) + idx, ok := localMigrator.db.indices[strings.ToLower(class1Name)] + require.True(t, ok) + + shardCount := 0 + for _, tenant := range remainingTenants { + assert.NotNil(t, idx.shards.Load(tenant)) + } + idx.ForEachShard(func(_ string, shard ShardLike) error { + shardCount++ + return nil + }) + assert.Equal(t, 1, shardCount) + }) + }) +} + +func setupTestMigrator(t *testing.T, rootDir string, shardState *sharding.State, classes ...*models.Class) *Migrator { + logger, _ := test.NewNullLogger() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{ + Objects: &models.Schema{ + Classes: classes, + }, + }, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + for _, class := range classes { + if className == class.Class { + return readFunc(class, shardState) + } + } + return nil + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: classes}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() 
+ mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: rootDir, + QueryMaximumResults: 10, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, + &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + return NewMigrator(repo, logger, "node1") +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/index.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/index.go new file mode 100644 index 0000000000000000000000000000000000000000..f1358248f7d995a6a4b0bb1bab1aea9fa5f22898 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/index.go @@ -0,0 +1,3311 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "encoding/binary" + "fmt" + "os" + "path" + "path/filepath" + "runtime" + "runtime/debug" + "slices" + golangSort "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/weaviate/weaviate/usecases/multitenancy" + + "github.com/weaviate/weaviate/cluster/router/executor" + routerTypes "github.com/weaviate/weaviate/cluster/router/types" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/adapters/repos/db/aggregator" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/indexcheckpoint" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/stopwords" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/queue" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/adapters/repos/db/sorter" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw" + usagetypes "github.com/weaviate/weaviate/cluster/usage/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/autocut" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/errorcompounder" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/multi" + "github.com/weaviate/weaviate/entities/replication" + "github.com/weaviate/weaviate/entities/schema" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + 
"github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + entsentry "github.com/weaviate/weaviate/entities/sentry" + "github.com/weaviate/weaviate/entities/storagestate" + "github.com/weaviate/weaviate/entities/storobj" + esync "github.com/weaviate/weaviate/entities/sync" + authzerrors "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + "github.com/weaviate/weaviate/usecases/config" + configRuntime "github.com/weaviate/weaviate/usecases/config/runtime" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/modules" + "github.com/weaviate/weaviate/usecases/monitoring" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/replica" + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" +) + +var ( + + // Use runtime.GOMAXPROCS instead of runtime.NumCPU because NumCPU returns + // the physical CPU cores. However, in a containerization context, that might + // not be what we want. The physical node could have 128 cores, but we could + // be cgroup-limited to 2 cores. In that case, we want 2 to be our limit, not + // 128. It isn't guaranteed that MAXPROCS reflects the cgroup limit, but at + // least there is a chance that it was set correctly. If not, it defaults to + // NumCPU anyway, so we're not any worse off. + _NUMCPU = runtime.GOMAXPROCS(0) + ErrShardNotFound = errors.New("shard not found") +) + +// shardMap is a syn.Map which specialized in storing shards +type shardMap sync.Map + +// Range calls f sequentially for each key and value present in the map. 
+// If f returns an error, range stops the iteration +func (m *shardMap) Range(f func(name string, shard ShardLike) error) (err error) { + (*sync.Map)(m).Range(func(key, value any) bool { + // Safe type assertion for key + name, ok := key.(string) + if !ok { + // Skip invalid keys + return true + } + + // Safe type assertion for value + shard, ok := value.(ShardLike) + if !ok || shard == nil { + // Skip invalid or nil shards + return true + } + + err = f(name, shard) + return err == nil + }) + return err +} + +// RangeConcurrently calls f for each key and value present in the map with at +// most _NUMCPU executors running in parallel. As opposed to [Range] it does +// not guarantee an exit on the first error. +func (m *shardMap) RangeConcurrently(logger logrus.FieldLogger, f func(name string, shard ShardLike) error) (err error) { + eg := enterrors.NewErrorGroupWrapper(logger) + eg.SetLimit(_NUMCPU) + (*sync.Map)(m).Range(func(key, value any) bool { + name, ok := key.(string) + if !ok { + // Skip invalid keys + return true + } + + // Safe type assertion for value + shard, ok := value.(ShardLike) + if !ok || shard == nil { + // Skip invalid or nil shards + return true + } + + eg.Go(func() error { + return f(name, shard) + }, name, shard) + return true + }) + + return eg.Wait() +} + +// Load returns the shard or nil if no shard is present. +// NOTE: this method does not check if the shard is loaded or not and it could +// return a lazy shard that is not loaded which could result in loading it if +// the returned shard is used. +// Use Loaded if you want to check if the shard is loaded without loading it. +func (m *shardMap) Load(name string) ShardLike { + v, ok := (*sync.Map)(m).Load(name) + if !ok { + return nil + } + + shard, ok := v.(ShardLike) + if !ok { + return nil + } + return shard +} + +// Loaded returns the shard or nil if no shard is present. +// If it's a lazy shard, only return it if it's loaded. 
+func (m *shardMap) Loaded(name string) ShardLike { + v, ok := (*sync.Map)(m).Load(name) + if !ok { + return nil + } + + shard, ok := v.(ShardLike) + if !ok { + return nil + } + + // If it's a lazy shard, only return it if it's loaded + if lazyShard, ok := shard.(*LazyLoadShard); ok { + if !lazyShard.isLoaded() { + return nil + } + } + + return shard +} + +// Store sets a shard giving its name and value +func (m *shardMap) Store(name string, shard ShardLike) { + (*sync.Map)(m).Store(name, shard) +} + +// Swap swaps the shard for a key and returns the previous value if any. +// The loaded result reports whether the key was present. +func (m *shardMap) Swap(name string, shard ShardLike) (previous ShardLike, loaded bool) { + v, ok := (*sync.Map)(m).Swap(name, shard) + if v == nil || !ok { + return nil, ok + } + return v.(ShardLike), ok +} + +// CompareAndSwap swaps the old and new values for key if the value stored in the map is equal to old. +func (m *shardMap) CompareAndSwap(name string, old, new ShardLike) bool { + return (*sync.Map)(m).CompareAndSwap(name, old, new) +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. +func (m *shardMap) LoadAndDelete(name string) (ShardLike, bool) { + v, ok := (*sync.Map)(m).LoadAndDelete(name) + if v == nil || !ok { + return nil, ok + } + return v.(ShardLike), ok +} + +// Index is the logical unit which contains all the data for one particular +// class. 
An index can be further broken up into self-contained units, called +// Shards, to allow for easy distribution across Nodes +type Index struct { + classSearcher inverted.ClassSearcher // to allow for nested by-references searches + shards shardMap + Config IndexConfig + globalreplicationConfig *replication.GlobalConfig + + getSchema schemaUC.SchemaGetter + schemaReader schemaUC.SchemaReader + logger logrus.FieldLogger + remote *sharding.RemoteIndex + stopwords *stopwords.Detector + replicator *replica.Replicator + + vectorIndexUserConfigLock sync.Mutex + vectorIndexUserConfig schemaConfig.VectorIndexConfig + vectorIndexUserConfigs map[string]schemaConfig.VectorIndexConfig + + partitioningEnabled bool + + invertedIndexConfig schema.InvertedIndexConfig + invertedIndexConfigLock sync.Mutex + + // This lock should be used together with the db indexLock. + // + // The db indexlock locks the map that contains all indices against changes and should be used while iterating. + // This lock protects this specific index form being deleted while in use. Use Rlock to signal that it is in use. + // This way many goroutines can use a specific index in parallel. The delete-routine will try to acquire a RWlock. + // + // Usage: + // Lock the whole db using db.indexLock + // pick the indices you want and Rlock them + // unlock db.indexLock + // Use the indices + // RUnlock all picked indices + dropIndex sync.RWMutex + + metrics *Metrics + centralJobQueue chan job + scheduler *queue.Scheduler + indexCheckpoints *indexcheckpoint.Checkpoints + + cycleCallbacks *indexCycleCallbacks + + shardTransferMutex shardTransfer + lastBackup atomic.Pointer[BackupState] + + // canceled when either Shutdown or Drop called + closingCtx context.Context + closingCancel context.CancelFunc + + // always true if lazy shard loading is off, in the case of lazy shard + // loading will be set to true once the last shard was loaded. 
+ allShardsReady atomic.Bool + allocChecker memwatch.AllocChecker + shardCreateLocks *esync.KeyLocker + + replicationConfigLock sync.RWMutex + + shardLoadLimiter ShardLoadLimiter + + closeLock sync.RWMutex + closed bool + + shardReindexer ShardReindexerV3 + + router routerTypes.Router + bitmapBufPool roaringset.BitmapBufPool +} + +func (i *Index) ID() string { + return indexID(i.Config.ClassName) +} + +func (i *Index) path() string { + return path.Join(i.Config.RootPath, i.ID()) +} + +type nodeResolver interface { + AllHostnames() []string + NodeHostname(nodeName string) (string, bool) +} + +// NewIndex creates an index with the specified amount of shards, using only +// the shards that are local to a node +func NewIndex( + ctx context.Context, + cfg IndexConfig, + invertedIndexConfig schema.InvertedIndexConfig, + vectorIndexUserConfig schemaConfig.VectorIndexConfig, + vectorIndexUserConfigs map[string]schemaConfig.VectorIndexConfig, + router routerTypes.Router, + sg schemaUC.SchemaGetter, + schemaReader schemaUC.SchemaReader, + cs inverted.ClassSearcher, + logger logrus.FieldLogger, + nodeResolver nodeResolver, + remoteClient sharding.RemoteIndexClient, + replicaClient replica.Client, + globalReplicationConfig *replication.GlobalConfig, + promMetrics *monitoring.PrometheusMetrics, + class *models.Class, + jobQueueCh chan job, + scheduler *queue.Scheduler, + indexCheckpoints *indexcheckpoint.Checkpoints, + allocChecker memwatch.AllocChecker, + shardReindexer ShardReindexerV3, + bitmapBufPool roaringset.BitmapBufPool, +) (*Index, error) { + sd, err := stopwords.NewDetectorFromConfig(invertedIndexConfig.Stopwords) + if err != nil { + return nil, errors.Wrap(err, "failed to create new index") + } + + if cfg.QueryNestedRefLimit == 0 { + cfg.QueryNestedRefLimit = config.DefaultQueryNestedCrossReferenceLimit + } + + if vectorIndexUserConfigs == nil { + vectorIndexUserConfigs = map[string]schemaConfig.VectorIndexConfig{} + } + index := &Index{ + Config: cfg, + 
globalreplicationConfig: globalReplicationConfig, + getSchema: sg, + schemaReader: schemaReader, + logger: logger, + classSearcher: cs, + vectorIndexUserConfig: vectorIndexUserConfig, + invertedIndexConfig: invertedIndexConfig, + vectorIndexUserConfigs: vectorIndexUserConfigs, + stopwords: sd, + partitioningEnabled: multitenancy.IsMultiTenant(class.MultiTenancyConfig), + remote: sharding.NewRemoteIndex(cfg.ClassName.String(), sg, nodeResolver, remoteClient), + metrics: NewMetrics(logger, promMetrics, cfg.ClassName.String(), "n/a"), + centralJobQueue: jobQueueCh, + shardTransferMutex: shardTransfer{log: logger, retryDuration: mutexRetryDuration, notifyDuration: mutexNotifyDuration}, + scheduler: scheduler, + indexCheckpoints: indexCheckpoints, + allocChecker: allocChecker, + shardCreateLocks: esync.NewKeyLocker(), + shardLoadLimiter: cfg.ShardLoadLimiter, + shardReindexer: shardReindexer, + router: router, + bitmapBufPool: bitmapBufPool, + } + + getDeletionStrategy := func() string { + return index.DeletionStrategy() + } + + // TODO: Fix replica router instantiation to be at the top level + index.replicator = replica.NewReplicator(cfg.ClassName.String(), router, sg.NodeName(), getDeletionStrategy, replicaClient, logger) + + index.closingCtx, index.closingCancel = context.WithCancel(context.Background()) + + index.initCycleCallbacks() + + if err := index.checkSingleShardMigration(); err != nil { + return nil, errors.Wrap(err, "migrating sharding state from previous version") + } + + if err := os.MkdirAll(index.path(), os.ModePerm); err != nil { + return nil, fmt.Errorf("init index %q: %w", index.ID(), err) + } + + if err := index.initAndStoreShards(ctx, class, promMetrics); err != nil { + return nil, err + } + + index.cycleCallbacks.compactionCycle.Start() + index.cycleCallbacks.compactionAuxCycle.Start() + index.cycleCallbacks.flushCycle.Start() + + return index, nil +} + +// since called in Index's constructor there is no risk same shard will be inited/created in 
// parallel, therefore shardCreateLocks are not used here
func (i *Index) initAndStoreShards(ctx context.Context, class *models.Class,
	promMetrics *monitoring.PrometheusMetrics,
) error {
	// shardInfo captures the subset of the sharding state needed per shard.
	type shardInfo struct {
		name           string
		activityStatus string
	}

	var localShards []shardInfo
	className := i.Config.ClassName.String()

	// Collect every shard of this class owned by the local node, together
	// with its activity status (e.g. HOT).
	err := i.schemaReader.Read(className, func(_ *models.Class, state *sharding.State) error {
		if state == nil {
			return fmt.Errorf("unable to retrieve sharding state for class %s", className)
		}

		for shardName, physical := range state.Physical {
			if state.IsLocalShard(shardName) {
				localShards = append(localShards, shardInfo{
					name:           shardName,
					activityStatus: physical.ActivityStatus(),
				})
			}
		}

		return nil
	})
	if err != nil {
		return fmt.Errorf("failed to read sharding state: %w", err)
	}

	if i.Config.DisableLazyLoadShards {
		// Eager mode: fully load every HOT shard now, bounded by _NUMCPU
		// workers and the shard load limiter.
		eg := enterrors.NewErrorGroupWrapper(i.logger)
		eg.SetLimit(_NUMCPU)

		for _, shard := range localShards {
			if shard.activityStatus != models.TenantActivityStatusHOT {
				continue
			}

			shardName := shard.name // per-iteration copy captured by the goroutine
			eg.Go(func() error {
				if err := i.shardLoadLimiter.Acquire(ctx); err != nil {
					return fmt.Errorf("acquiring permit to load shard: %w", err)
				}
				defer i.shardLoadLimiter.Release()

				newShard, err := NewShard(ctx, promMetrics, shardName, i, class, i.centralJobQueue, i.scheduler,
					i.indexCheckpoints, i.shardReindexer, false, i.bitmapBufPool)
				if err != nil {
					return fmt.Errorf("init shard %s of index %s: %w", shardName, i.ID(), err)
				}

				i.shards.Store(shardName, newShard)
				return nil
			}, shardName)
		}

		if err := eg.Wait(); err != nil {
			return err
		}

		i.allShardsReady.Store(true)
		return nil
	}

	// Lazy mode: register a LazyLoadShard wrapper for every HOT shard right
	// away, then trigger the actual loads in the background below.
	activeShardNames := make([]string, 0, len(localShards))

	for _, shard := range localShards {
		if shard.activityStatus != models.TenantActivityStatusHOT {
			continue
		}

		activeShardNames = append(activeShardNames, shard.name)

		lazyShard := NewLazyLoadShard(ctx, promMetrics, shard.name, i, class, i.centralJobQueue, i.indexCheckpoints,
			i.allocChecker, i.shardLoadLimiter, i.shardReindexer, true, i.bitmapBufPool)
		i.shards.Store(shard.name, lazyShard)
	}

	// NOTE(dyma):
	// 1. So "lazy-loaded" shards are actually loaded "half-eagerly"?
	// 2. If <-ctx.Done or we fail to load a shard, should allShardsReady still report true?
	initLazyShardsInBackground := func() {
		// allShardsReady is set even on early return — see NOTE(dyma) above.
		defer i.allShardsReady.Store(true)

		// Throttle background loads to at most one per second.
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()

		now := time.Now()

		for _, shardName := range activeShardNames {
			select {
			case <-i.closingCtx.Done():
				i.logger.
					WithField("action", "load_all_shards").
					Errorf("failed to load all shards: %v", i.closingCtx.Err())
				return
			case <-ticker.C:
				// Re-check shutdown so a closed index wins over a pending tick.
				select {
				case <-i.closingCtx.Done():
					i.logger.
						WithField("action", "load_all_shards").
						Errorf("failed to load all shards: %v", i.closingCtx.Err())
					return
				default:
					err := i.loadLocalShardIfActive(shardName)
					if err != nil {
						i.logger.
							WithField("action", "load_shard").
							WithField("shard_name", shardName).
							Errorf("failed to load shard: %v", err)
						return
					}
				}
			}
		}

		i.logger.
			WithField("action", "load_all_shards").
			WithField("took", time.Since(now).String()).
			Debug("finished loading all shards")
	}

	enterrors.GoWrapper(initLazyShardsInBackground, i.logger)

	return nil
}

// loadLocalShardIfActive forces the load of a lazy shard, unless the shard
// was removed by a concurrent call in the meantime.
func (i *Index) loadLocalShardIfActive(shardName string) error {
	i.shardCreateLocks.Lock(shardName)
	defer i.shardCreateLocks.Unlock(shardName)

	// check if set to inactive in the meantime by concurrent call
	shard := i.shards.Load(shardName)
	if shard == nil {
		return nil
	}

	lazyShard, ok := shard.(*LazyLoadShard)
	if ok {
		return lazyShard.Load(context.Background())
	}

	// already a fully loaded shard, nothing to do
	return nil
}

// used to init/create shard in different moments of index's lifecycle, therefore it needs to be called
// within shardCreateLocks to prevent parallel create/init of the same shard
func (i *Index) initShard(ctx context.Context, shardName string, class *models.Class,
	promMetrics *monitoring.PrometheusMetrics, disableLazyLoad bool, implicitShardLoading bool,
) (ShardLike, error) {
	if disableLazyLoad {
		// Eager init: check memory pressure before paying for a full load.
		if err := i.allocChecker.CheckMappingAndReserve(3, int(lsmkv.FlushAfterDirtyDefault.Seconds())); err != nil {
			return nil, errors.Wrap(err, "memory pressure: cannot init shard")
		}

		if err := i.shardLoadLimiter.Acquire(ctx); err != nil {
			return nil, fmt.Errorf("acquiring permit to load shard: %w", err)
		}
		defer i.shardLoadLimiter.Release()

		shard, err := NewShard(ctx, promMetrics, shardName, i, class, i.centralJobQueue, i.scheduler,
			i.indexCheckpoints, i.shardReindexer, false, i.bitmapBufPool)
		if err != nil {
			return nil, fmt.Errorf("init shard %s of index %s: %w", shardName, i.ID(), err)
		}

		return shard, nil
	}

	shard := NewLazyLoadShard(ctx, promMetrics, shardName, i, class, i.centralJobQueue, i.indexCheckpoints,
		i.allocChecker, i.shardLoadLimiter, i.shardReindexer, implicitShardLoading, i.bitmapBufPool)
	return shard, nil
}

// Iterate over all objects in the index, applying the callback function to
// each one. Adding or removing objects during iteration is not supported.
func (i *Index) IterateObjects(ctx context.Context, cb func(index *Index, shard ShardLike, object *storobj.Object) error) (err error) {
	return i.ForEachShard(func(_ string, shard ShardLike) error {
		// adapt the object-level callback to the bucket iterator's signature
		wrapper := func(object *storobj.Object) error {
			return cb(i, shard, object)
		}
		bucket := shard.Store().Bucket(helpers.ObjectsBucketLSM)
		return bucket.IterateObjects(ctx, wrapper)
	})
}

// ForEachShard applies func f on each shard in the index.
//
// WARNING: only use this if you expect all LazyLoadShards to be loaded!
// Calling this method may lead to shards being force-loaded, causing
// unexpected CPU spikes. If you only want to apply f on loaded shards,
// call ForEachLoadedShard instead.
// Note: except Dropping and Shutting Down
func (i *Index) ForEachShard(f func(name string, shard ShardLike) error) error {
	// Check if the index is being dropped or shut down to avoid panics when the index is being deleted
	if i.closingCtx.Err() != nil {
		i.logger.WithField("action", "for_each_shard").Debug("index is being dropped or shut down")
		return nil
	}

	return i.shards.Range(f)
}

// ForEachLoadedShard applies f to every shard that is already loaded,
// skipping lazy shards whose data has not been pulled in yet.
func (i *Index) ForEachLoadedShard(f func(name string, shard ShardLike) error) error {
	return i.shards.Range(func(name string, shard ShardLike) error {
		// Skip lazy loaded shard which are not loaded
		if asLazyLoadShard, ok := shard.(*LazyLoadShard); ok {
			if !asLazyLoadShard.isLoaded() {
				return nil
			}
		}
		return f(name, shard)
	})
}

// ForEachShardConcurrently is the parallel variant of ForEachShard; the same
// force-loading caveat applies.
func (i *Index) ForEachShardConcurrently(f func(name string, shard ShardLike) error) error {
	// Check if the index is being dropped or shut down to avoid panics when the index is being deleted
	if i.closingCtx.Err() != nil {
		i.logger.WithField("action", "for_each_shard_concurrently").Debug("index is being dropped or shut down")
		return nil
	}
	return i.shards.RangeConcurrently(i.logger, f)
}

// Iterate over all objects in the shard, applying the callback function to
// each one. Adding or removing objects during iteration is not supported.
func (i *Index) IterateShards(ctx context.Context, cb func(index *Index, shard ShardLike) error) (err error) {
	return i.ForEachShard(func(key string, shard ShardLike) error {
		return cb(i, shard)
	})
}

// addProperty initializes the inverted-index buckets for the given properties
// on every shard, bounded by _NUMCPU workers.
func (i *Index) addProperty(ctx context.Context, props ...*models.Property) error {
	eg := enterrors.NewErrorGroupWrapper(i.logger)
	eg.SetLimit(_NUMCPU)

	// NOTE(review): ForEachShard's return value is ignored here; the closure
	// always returns nil, so only eg.Wait() below can surface errors — confirm
	// this is intentional.
	i.ForEachShard(func(key string, shard ShardLike) error {
		shard.initPropertyBuckets(ctx, eg, false, props...)
		return nil
	})

	if err := eg.Wait(); err != nil {
		return errors.Wrapf(err, "extend idx '%s' with properties '%v", i.ID(), props)
	}
	return nil
}

func (i *Index) updateVectorIndexConfig(ctx context.Context,
	updated schemaConfig.VectorIndexConfig,
) error {
	// an updated is not specific to one shard, but rather all
	err := i.ForEachShard(func(name string, shard ShardLike) error {
		// At the moment, we don't do anything in an update that could fail, but
		// technically this should be part of some sort of a two-phase commit or
		// have another way to rollback if we have updates that could potentially
		// fail in the future. For now that's not a realistic risk.
		if err := shard.UpdateVectorIndexConfig(ctx, updated); err != nil {
			return errors.Wrapf(err, "shard %s", name)
		}
		return nil
	})
	if err != nil {
		return err
	}
	i.vectorIndexUserConfigLock.Lock()
	defer i.vectorIndexUserConfigLock.Unlock()

	i.vectorIndexUserConfig = updated

	return nil
}

// updateVectorIndexConfigs is the multi-target-vector variant of
// updateVectorIndexConfig: it pushes the updated configs to every shard, then
// records them under the index lock.
func (i *Index) updateVectorIndexConfigs(ctx context.Context,
	updated map[string]schemaConfig.VectorIndexConfig,
) error {
	err := i.ForEachShard(func(name string, shard ShardLike) error {
		if err := shard.UpdateVectorIndexConfigs(ctx, updated); err != nil {
			return fmt.Errorf("shard %q: %w", name, err)
		}
		return nil
	})
	if err != nil {
		return err
	}

	i.vectorIndexUserConfigLock.Lock()
	defer i.vectorIndexUserConfigLock.Unlock()

	for targetName, targetCfg := range updated {
		i.vectorIndexUserConfigs[targetName] = targetCfg
	}

	return nil
}

// GetInvertedIndexConfig returns the current inverted-index config under lock.
func (i *Index) GetInvertedIndexConfig() schema.InvertedIndexConfig {
	i.invertedIndexConfigLock.Lock()
	defer i.invertedIndexConfigLock.Unlock()

	return i.invertedIndexConfig
}

// updateInvertedIndexConfig swaps the inverted-index config under lock.
// The ctx parameter is currently unused.
func (i *Index) updateInvertedIndexConfig(ctx context.Context,
	updated schema.InvertedIndexConfig,
) error {
	i.invertedIndexConfigLock.Lock()
	defer i.invertedIndexConfigLock.Unlock()

	i.invertedIndexConfig = updated

	return nil
}

// asyncReplicationGloballyDisabled reports whether async replication has been
// switched off node-wide via the global (runtime) replication config.
func (i *Index) asyncReplicationGloballyDisabled() bool {
	return i.globalreplicationConfig.AsyncReplicationDisabled.Get()
}

// updateReplicationConfig applies a new replication factor / deletion
// strategy and propagates the resulting async-replication setting to all
// currently loaded shards.
func (i *Index) updateReplicationConfig(ctx context.Context, cfg *models.ReplicationConfig) error {
	i.replicationConfigLock.Lock()
	defer i.replicationConfigLock.Unlock()

	i.Config.ReplicationFactor = cfg.Factor
	i.Config.DeletionStrategy = cfg.DeletionStrategy
	// async replication only makes sense with >1 replicas and when not
	// globally disabled
	i.Config.AsyncReplicationEnabled = cfg.AsyncEnabled && i.Config.ReplicationFactor > 1 && !i.asyncReplicationGloballyDisabled()

	err := i.ForEachLoadedShard(func(name string, shard ShardLike) error {
		if err := shard.SetAsyncReplicationEnabled(ctx, i.Config.AsyncReplicationEnabled); err != nil {
			return fmt.Errorf("updating async replication on shard %q: %w", name, err)
		}
		return nil
	})
	if err != nil {
		return err
	}

	return nil
}

func (i *Index) ReplicationFactor() int64 {
	i.replicationConfigLock.RLock()
	defer i.replicationConfigLock.RUnlock()

	return i.Config.ReplicationFactor
}

func (i *Index) DeletionStrategy() string {
	i.replicationConfigLock.RLock()
	defer i.replicationConfigLock.RUnlock()

	return i.Config.DeletionStrategy
}

// IndexConfig bundles all static (per-class) configuration of an Index.
type IndexConfig struct {
	RootPath                       string
	ClassName                      schema.ClassName
	QueryMaximumResults            int64
	QueryHybridMaximumResults      int64
	QueryNestedRefLimit            int64
	ResourceUsage                  config.ResourceUsage
	LazySegmentsDisabled           bool
	SegmentInfoIntoFileNameEnabled bool
	WriteMetadataFilesEnabled      bool
	MemtablesFlushDirtyAfter       int
	MemtablesInitialSizeMB         int
	MemtablesMaxSizeMB             int
	MemtablesMinActiveSeconds      int
	MemtablesMaxActiveSeconds      int
	MinMMapSize                    int64
	MaxReuseWalSize                int64
	SegmentsCleanupIntervalSeconds int
	SeparateObjectsCompactions     bool
	CycleManagerRoutinesFactor     int
	IndexRangeableInMemory         bool
	MaxSegmentSize                 int64
	ReplicationFactor              int64
	DeletionStrategy               string
	AsyncReplicationEnabled        bool
	AvoidMMap                      bool
	DisableLazyLoadShards          bool
	ForceFullReplicasSearch        bool
	TransferInactivityTimeout      time.Duration

	LSMEnableSegmentsChecksumValidation bool
	TrackVectorDimensions               bool
	TrackVectorDimensionsInterval       time.Duration
	UsageEnabled                        bool
	ShardLoadLimiter                    ShardLoadLimiter

	// HNSW vector-index tuning
	HNSWMaxLogSize                               int64
	HNSWDisableSnapshots                         bool
	HNSWSnapshotIntervalSeconds                  int
	HNSWSnapshotOnStartup                        bool
	HNSWSnapshotMinDeltaCommitlogsNumber         int
	HNSWSnapshotMinDeltaCommitlogsSizePercentage int
	HNSWWaitForCachePrefill                      bool
	HNSWFlatSearchConcurrency                    int
	HNSWAcornFilterRatio                         float64
	VisitedListPoolMaxSize                       int

	// runtime-tunable values
	QuerySlowLogEnabled    *configRuntime.DynamicValue[bool]
	QuerySlowLogThreshold  *configRuntime.DynamicValue[time.Duration]
	InvertedSorterDisabled *configRuntime.DynamicValue[bool]
	MaintenanceModeEnabled func() bool
}

// indexID derives the (lowercased) index identifier from the class name.
func indexID(class schema.ClassName) string {
	return strings.ToLower(string(class))
}

func (i *Index) determineObjectShard(ctx context.Context, id strfmt.UUID, tenant string) (string, error) {
	return i.determineObjectShardByStatus(ctx, id, tenant, nil)
}

// determineObjectShardByStatus resolves which shard an object belongs to.
// Without a tenant the shard is derived from the object UUID; with a tenant
// the tenant name is the shard name, provided the tenant exists and is HOT.
// shardsStatus may be passed in to avoid repeated TenantsShards lookups.
func (i *Index) determineObjectShardByStatus(ctx context.Context, id strfmt.UUID, tenant string, shardsStatus map[string]string) (string, error) {
	if tenant == "" {
		uuid, err := uuid.Parse(id.String())
		if err != nil {
			return "", fmt.Errorf("parse uuid: %q", id.String())
		}

		uuidBytes, err := uuid.MarshalBinary() // cannot error
		if err != nil {
			return "", fmt.Errorf("marshal uuid: %q", id.String())
		}
		return i.getSchema.ShardFromUUID(i.Config.ClassName.String(), uuidBytes), nil
	}

	var err error
	if len(shardsStatus) == 0 {
		shardsStatus, err = i.getSchema.TenantsShards(ctx, i.Config.ClassName.String(), tenant)
		if err != nil {
			return "", err
		}
	}

	if status := shardsStatus[tenant]; status != "" {
		if status == models.TenantActivityStatusHOT {
			return tenant, nil
		}
		return "", objects.NewErrMultiTenancy(fmt.Errorf("%w: '%s'", enterrors.ErrTenantNotActive, tenant))
	}
	class := i.getSchema.ReadOnlyClass(i.Config.ClassName.String())
	if class == nil {
		return "", fmt.Errorf("class %q not found in schema", i.Config.ClassName)
	}
	return "", objects.NewErrMultiTenancy(
		fmt.Errorf("%w: %q", enterrors.ErrTenantNotFound, tenant))
}

// putObject stores one object, either via the replicator (multiple replicas),
// via the remote client (shard owned elsewhere), or directly on the local
// shard.
func (i *Index) putObject(ctx context.Context, object *storobj.Object,
	replProps *additional.ReplicationProperties, tenantName string, schemaVersion uint64,
) error {
	if err := i.validateMultiTenancy(object.Object.Tenant); err != nil {
		return err
	}

	if i.Config.ClassName != object.Class() {
		return fmt.Errorf("cannot import object of class %s into index of class %s",
			object.Class(), i.Config.ClassName)
	}

	shardName, err := i.determineObjectShard(ctx, object.ID(), object.Object.Tenant)
	if err != nil {
		switch {
		case errors.As(err, &objects.ErrMultiTenancy{}):
			return objects.NewErrMultiTenancy(fmt.Errorf("determine shard: %w", err))
		case errors.As(err, &authzerrors.Forbidden{}):
			return fmt.Errorf("determine shard: %w", err)
		default:
			return objects.NewErrInvalidUserInput("determine shard: %v", err)
		}
	}
	if replProps == nil {
		replProps = defaultConsistency()
	}
	if i.shardHasMultipleReplicasWrite(tenantName, shardName) {
		cl := routerTypes.ConsistencyLevel(replProps.ConsistencyLevel)
		if err := i.replicator.PutObject(ctx, shardName, object, cl, schemaVersion); err != nil {
			return fmt.Errorf("replicate insertion: shard=%q: %w", shardName, err)
		}
		return nil
	}

	shard, release, err := i.getShardForDirectLocalOperation(ctx, object.Object.Tenant, shardName, localShardOperationWrite)
	defer release()
	if err != nil {
		return err
	}

	// no replication, remote shard (or local not yet inited)
	if shard == nil {
		if err := i.remote.PutObject(ctx, shardName, object, schemaVersion); err != nil {
			return fmt.Errorf("put remote object: shard=%q: %w", shardName, err)
		}
		return nil
	}

	// no replication, local shard
	i.shardTransferMutex.RLock()
	defer i.shardTransferMutex.RUnlock()

	err = shard.PutObject(ctx, object)
	if err != nil {
		return fmt.Errorf("put local object: shard=%q: %w", shardName, err)
	}

	return nil
}

// IncomingPutObject handles a single-object write forwarded by another node.
func (i *Index) IncomingPutObject(ctx context.Context, shardName string,
	object *storobj.Object, schemaVersion uint64,
) error {
	i.shardTransferMutex.RLock()
	defer i.shardTransferMutex.RUnlock()

	// This is a bit hacky, the problem here is that storobj.Parse() currently
	// misses date fields as it has no way of knowing that a date-formatted
	// string was actually a date type. However, adding this functionality to
	// Parse() would break a lot of code, because it currently
	// schema-independent. To find out if a field is a date or date[], we need to
	// involve the schema, thus why we are doing it here. This was discovered as
	// part of https://github.com/weaviate/weaviate/issues/1775
	if err := i.parseDateFieldsInProps(object.Object.Properties); err != nil {
		return err
	}

	shard, release, err := i.getOrInitShard(ctx, shardName)
	if err != nil {
		return err
	}
	defer release()

	return shard.PutObject(ctx, object)
}

func (i *Index) replicationEnabled() bool {
	i.replicationConfigLock.RLock()
	defer i.replicationConfigLock.RUnlock()

	return i.Config.ReplicationFactor > 1
}

// shardHasMultipleReplicasWrite reports whether a write to this shard must go
// through the replicator rather than a direct local/remote call.
func (i *Index) shardHasMultipleReplicasWrite(tenantName, shardName string) bool {
	// if replication is enabled, we always have multiple replicas
	if i.replicationEnabled() {
		return true
	}
	// if the router is nil, preserve previous behavior by returning false
	if i.router == nil {
		return false
	}
	ws, err := i.router.GetWriteReplicasLocation(i.Config.ClassName.String(), tenantName, shardName)
	if err != nil {
		return false
	}
	// we're including additional replicas here to make sure we at least try to push the write
	// to them if they exist
	allReplicas := append(ws.NodeNames(), ws.AdditionalNodeNames()...)
	return len(allReplicas) > 1
}

// shardHasMultipleReplicasRead is the read-path analogue of
// shardHasMultipleReplicasWrite (no additional replicas considered).
func (i *Index) shardHasMultipleReplicasRead(tenantName, shardName string) bool {
	// if replication is enabled, we always have multiple replicas
	if i.replicationEnabled() {
		return true
	}
	// if the router is nil, preserve previous behavior by returning false
	if i.router == nil {
		return false
	}
	replicas, err := i.router.GetReadReplicasLocation(i.Config.ClassName.String(), tenantName, shardName)
	if err != nil {
		return false
	}
	return len(replicas.NodeNames()) > 1
}

// anyShardHasMultipleReplicasRead returns true if any of the shards has multiple replicas
func (i *Index) anyShardHasMultipleReplicasRead(tenantName string, shardNames []string) bool {
	if i.replicationEnabled() {
		return true
	}
	for _, shardName := range shardNames {
		if i.shardHasMultipleReplicasRead(tenantName, shardName) {
			return true
		}
	}
	return false
}

type localShardOperation string

const (
	localShardOperationWrite localShardOperation = "write"
	localShardOperationRead  localShardOperation = "read"
)

// getShardForDirectLocalOperation is used to try to get a shard for a local read/write operation.
// It will return the shard if it is found, and a release function to release the shard.
// The shard will be nil if the shard is not found, or if the local shard should not be used.
// The caller should always call the release function.
func (i *Index) getShardForDirectLocalOperation(ctx context.Context, tenantName string, shardName string, operation localShardOperation) (ShardLike, func(), error) {
	shard, release, err := i.GetShard(ctx, shardName)
	// NOTE release should always be ok to call, even if there is an error or the shard is nil,
	// see Index.getOptInitLocalShard for more details.
	if err != nil {
		return nil, release, err
	}

	// if the router is nil, just use the default behavior
	if i.router == nil {
		return shard, release, nil
	}

	// get the replicas for the shard
	var rs routerTypes.ReadReplicaSet
	var ws routerTypes.WriteReplicaSet
	switch operation {
	case localShardOperationWrite:
		ws, err = i.router.GetWriteReplicasLocation(i.Config.ClassName.String(), tenantName, shardName)
		if err != nil {
			return shard, release, nil
		}
		// if the local node is not in the list of replicas, don't return the shard (but still allow the caller to release)
		// we should not read/write from the local shard if the local node is not in the list of replicas (eg we should use the remote)
		if !slices.Contains(ws.NodeNames(), i.replicator.LocalNodeName()) {
			return nil, release, nil
		}
	case localShardOperationRead:
		rs, err = i.router.GetReadReplicasLocation(i.Config.ClassName.String(), tenantName, shardName)
		if err != nil {
			return shard, release, nil
		}
		// if the local node is not in the list of replicas, don't return the shard (but still allow the caller to release)
		// we should not read/write from the local shard if the local node is not in the list of replicas (eg we should use the remote)
		if !slices.Contains(rs.NodeNames(), i.replicator.LocalNodeName()) {
			return nil, release, nil
		}
	default:
		return nil, func() {}, fmt.Errorf("invalid local shard operation: %s", operation)
	}

	return shard, release, nil
}

func (i *Index) asyncReplicationEnabled() bool {
	i.replicationConfigLock.RLock()
	defer i.replicationConfigLock.RUnlock()

	return i.Config.ReplicationFactor > 1 && i.Config.AsyncReplicationEnabled && !i.asyncReplicationGloballyDisabled()
}

// parseDateFieldsInProps checks the schema for the current class for which
// fields are date fields, then - if they are set - parses them accordingly.
// Works for both date and date[].
+func (i *Index) parseDateFieldsInProps(props interface{}) error { + if props == nil { + return nil + } + + propMap, ok := props.(map[string]interface{}) + if !ok { + // don't know what to do with this + return nil + } + + c := i.getSchema.ReadOnlyClass(i.Config.ClassName.String()) + if c == nil { + return fmt.Errorf("class %s not found in schema", i.Config.ClassName) + } + + for _, prop := range c.Properties { + if prop.DataType[0] == string(schema.DataTypeDate) { + raw, ok := propMap[prop.Name] + if !ok { + // prop is not set, nothing to do + continue + } + + parsed, err := parseAsStringToTime(raw) + if err != nil { + return errors.Wrapf(err, "time prop %q", prop.Name) + } + + propMap[prop.Name] = parsed + } + + if prop.DataType[0] == string(schema.DataTypeDateArray) { + raw, ok := propMap[prop.Name] + if !ok { + // prop is not set, nothing to do + continue + } + + asSlice, ok := raw.([]string) + if !ok { + return errors.Errorf("parse as time array, expected []interface{} got %T", + raw) + } + parsedSlice := make([]interface{}, len(asSlice)) + for j := range asSlice { + parsed, err := parseAsStringToTime(interface{}(asSlice[j])) + if err != nil { + return errors.Wrapf(err, "time array prop %q at pos %d", prop.Name, j) + } + + parsedSlice[j] = parsed + } + propMap[prop.Name] = parsedSlice + + } + } + + return nil +} + +func parseAsStringToTime(in interface{}) (time.Time, error) { + var parsed time.Time + var err error + + asString, ok := in.(string) + if !ok { + return parsed, errors.Errorf("parse as time, expected string got %T", in) + } + + parsed, err = time.Parse(time.RFC3339, asString) + if err != nil { + return parsed, err + } + + return parsed, nil +} + +// return value []error gives the error for the index with the positions +// matching the inputs +func (i *Index) putObjectBatch(ctx context.Context, objects []*storobj.Object, + replProps *additional.ReplicationProperties, schemaVersion uint64, +) []error { + type objsAndPos struct { + objects 
[]*storobj.Object + pos []int + } + out := make([]error, len(objects)) + if replProps == nil { + replProps = defaultConsistency() + } + + byShard := map[string]objsAndPos{} + // get all tenants shards + tenants := make([]string, len(objects)) + tenantsStatus := map[string]string{} + var err error + for _, obj := range objects { + if obj.Object.Tenant == "" { + continue + } + tenants = append(tenants, obj.Object.Tenant) + } + + if len(tenants) > 0 { + tenantsStatus, err = i.getSchema.TenantsShards(ctx, i.Config.ClassName.String(), tenants...) + if err != nil { + return []error{err} + } + } + + for pos, obj := range objects { + if err := i.validateMultiTenancy(obj.Object.Tenant); err != nil { + out[pos] = err + continue + } + shardName, err := i.determineObjectShardByStatus(ctx, obj.ID(), obj.Object.Tenant, tenantsStatus) + if err != nil { + out[pos] = err + continue + } + + group := byShard[shardName] + group.objects = append(group.objects, obj) + group.pos = append(group.pos, pos) + byShard[shardName] = group + } + + wg := &sync.WaitGroup{} + for shardName, group := range byShard { + shardName := shardName + group := group + wg.Add(1) + f := func() { + defer wg.Done() + + defer func() { + err := recover() + if err != nil { + for pos := range group.pos { + out[pos] = fmt.Errorf("an unexpected error occurred: %s", err) + } + fmt.Fprintf(os.Stderr, "panic: %s\n", err) + entsentry.Recover(err) + debug.PrintStack() + } + }() + // All objects in the same shard group have the same tenant since in multi-tenant + // systems all objects belonging to a tenant end up in the same shard. + // For non-multi-tenant collections, Object.Tenant is empty for all objects. + // Therefore, we can safely use the tenant from any object in the group. 
+ tenantName := group.objects[0].Object.Tenant + var errs []error + if i.shardHasMultipleReplicasWrite(tenantName, shardName) { + errs = i.replicator.PutObjects(ctx, shardName, group.objects, + routerTypes.ConsistencyLevel(replProps.ConsistencyLevel), schemaVersion) + } else { + shard, release, err := i.getShardForDirectLocalOperation(ctx, tenantName, shardName, localShardOperationWrite) + defer release() + if err != nil { + errs = []error{err} + } else if shard != nil { + i.shardTransferMutex.RLockGuard(func() error { + errs = shard.PutObjectBatch(ctx, group.objects) + return nil + }) + } else { + errs = i.remote.BatchPutObjects(ctx, shardName, group.objects, schemaVersion) + } + } + + for i, err := range errs { + desiredPos := group.pos[i] + out[desiredPos] = err + } + } + enterrors.GoWrapper(f, i.logger) + } + + wg.Wait() + + return out +} + +func duplicateErr(in error, count int) []error { + out := make([]error, count) + for i := range out { + out[i] = in + } + + return out +} + +func (i *Index) IncomingBatchPutObjects(ctx context.Context, shardName string, + objects []*storobj.Object, schemaVersion uint64, +) []error { + i.shardTransferMutex.RLock() + defer i.shardTransferMutex.RUnlock() + + // This is a bit hacky, the problem here is that storobj.Parse() currently + // misses date fields as it has no way of knowing that a date-formatted + // string was actually a date type. However, adding this functionality to + // Parse() would break a lot of code, because it currently + // schema-independent. To find out if a field is a date or date[], we need to + // involve the schema, thus why we are doing it here. 
	// This was discovered as part of
	// https://github.com/weaviate/weaviate/issues/1775
	for j := range objects {
		if err := i.parseDateFieldsInProps(objects[j].Object.Properties); err != nil {
			return duplicateErr(err, len(objects))
		}
	}

	shard, release, err := i.getOrInitShard(ctx, shardName)
	if err != nil {
		return duplicateErr(err, len(objects))
	}
	defer release()

	return shard.PutObjectBatch(ctx, objects)
}

// return value []error gives the error for the index as it received it
func (i *Index) AddReferencesBatch(ctx context.Context, refs objects.BatchReferences,
	replProps *additional.ReplicationProperties, schemaVersion uint64,
) []error {
	type refsAndPos struct {
		refs objects.BatchReferences
		pos  []int
	}
	if replProps == nil {
		replProps = defaultConsistency()
	}

	byShard := map[string]refsAndPos{}
	out := make([]error, len(refs))

	// group references by target shard, remembering original positions
	for pos, ref := range refs {
		if err := i.validateMultiTenancy(ref.Tenant); err != nil {
			out[pos] = err
			continue
		}
		shardName, err := i.determineObjectShard(ctx, ref.From.TargetID, ref.Tenant)
		if err != nil {
			out[pos] = err
			continue
		}

		group := byShard[shardName]
		group.refs = append(group.refs, ref)
		group.pos = append(group.pos, pos)
		byShard[shardName] = group
	}

	for shardName, group := range byShard {
		// All references in the same shard group have the same tenant since in multi-tenant
		// systems all objects belonging to a tenant end up in the same shard.
		// For non-multi-tenant collections, ref.Tenant is empty for all references.
		// Therefore, we can safely use the tenant from any reference in the group.
		tenantName := group.refs[0].Tenant
		var errs []error
		if i.shardHasMultipleReplicasWrite(tenantName, shardName) {
			errs = i.replicator.AddReferences(ctx, shardName, group.refs, routerTypes.ConsistencyLevel(replProps.ConsistencyLevel), schemaVersion)
		} else {
			// anonymous function to ensure that the shard is released after each loop iteration
			func() {
				shard, release, err := i.getShardForDirectLocalOperation(ctx, tenantName, shardName, localShardOperationWrite)
				defer release()
				if err != nil {
					errs = duplicateErr(err, len(group.refs))
				} else if shard != nil {
					i.shardTransferMutex.RLockGuard(func() error {
						errs = shard.AddReferencesBatch(ctx, group.refs)
						return nil
					})
				} else {
					errs = i.remote.BatchAddReferences(ctx, shardName, group.refs, schemaVersion)
				}
			}()
		}

		// map group-local error positions back to input positions
		for i, err := range errs {
			desiredPos := group.pos[i]
			out[desiredPos] = err
		}
	}

	return out
}

// IncomingBatchAddReferences handles a reference batch forwarded by another node.
func (i *Index) IncomingBatchAddReferences(ctx context.Context, shardName string,
	refs objects.BatchReferences, schemaVersion uint64,
) []error {
	i.shardTransferMutex.RLock()
	defer i.shardTransferMutex.RUnlock()

	shard, release, err := i.getOrInitShard(ctx, shardName)
	if err != nil {
		return duplicateErr(err, len(refs))
	}
	defer release()

	return shard.AddReferencesBatch(ctx, refs)
}

// objectByID fetches a single object, going through the replicator when the
// shard has multiple replicas, otherwise hitting the local or remote shard
// directly.
func (i *Index) objectByID(ctx context.Context, id strfmt.UUID,
	props search.SelectProperties, addl additional.Properties,
	replProps *additional.ReplicationProperties, tenant string,
) (*storobj.Object, error) {
	if err := i.validateMultiTenancy(tenant); err != nil {
		return nil, err
	}

	shardName, err := i.determineObjectShard(ctx, id, tenant)
	if err != nil {
		switch {
		case errors.As(err, &objects.ErrMultiTenancy{}):
			return nil, objects.NewErrMultiTenancy(fmt.Errorf("determine shard: %w", err))
		case errors.As(err, &authzerrors.Forbidden{}):
			return nil, fmt.Errorf("determine shard: %w", err)
		default:
			return nil,
objects.NewErrInvalidUserInput("determine shard: %v", err) + } + } + + var obj *storobj.Object + + if i.shardHasMultipleReplicasRead(tenant, shardName) { + if replProps == nil { + replProps = defaultConsistency() + } + if replProps.NodeName != "" { + obj, err = i.replicator.NodeObject(ctx, replProps.NodeName, shardName, id, props, addl) + } else { + obj, err = i.replicator.GetOne(ctx, routerTypes.ConsistencyLevel(replProps.ConsistencyLevel), shardName, id, props, addl) + } + return obj, err + } + + shard, release, err := i.getShardForDirectLocalOperation(ctx, tenant, shardName, localShardOperationRead) + defer release() + if err != nil { + return obj, err + } + + if shard != nil { + if obj, err = shard.ObjectByID(ctx, id, props, addl); err != nil { + return obj, fmt.Errorf("get local object: shard=%s: %w", shardName, err) + } + } else { + if obj, err = i.remote.GetObject(ctx, shardName, id, props, addl); err != nil { + return obj, fmt.Errorf("get remote object: shard=%s: %w", shardName, err) + } + } + + return obj, nil +} + +func (i *Index) IncomingGetObject(ctx context.Context, shardName string, + id strfmt.UUID, props search.SelectProperties, + additional additional.Properties, +) (*storobj.Object, error) { + shard, release, err := i.getOrInitShard(ctx, shardName) + if err != nil { + return nil, err + } + defer release() + + if shard.GetStatus() == storagestate.StatusLoading { + return nil, enterrors.NewErrUnprocessable(fmt.Errorf("local %s shard is not ready", shardName)) + } + + return shard.ObjectByID(ctx, id, props, additional) +} + +func (i *Index) IncomingMultiGetObjects(ctx context.Context, shardName string, + ids []strfmt.UUID, +) ([]*storobj.Object, error) { + shard, release, err := i.getOrInitShard(ctx, shardName) + if err != nil { + return nil, err + } + defer release() + + if shard.GetStatus() == storagestate.StatusLoading { + return nil, enterrors.NewErrUnprocessable(fmt.Errorf("local %s shard is not ready", shardName)) + } + + return 
shard.MultiObjectByID(ctx, wrapIDsInMulti(ids)) +} + +func (i *Index) multiObjectByID(ctx context.Context, + query []multi.Identifier, tenant string, +) ([]*storobj.Object, error) { + if err := i.validateMultiTenancy(tenant); err != nil { + return nil, err + } + + type idsAndPos struct { + ids []multi.Identifier + pos []int + } + + byShard := map[string]idsAndPos{} + for pos, id := range query { + shardName, err := i.determineObjectShard(ctx, strfmt.UUID(id.ID), tenant) + if err != nil { + switch { + case errors.As(err, &objects.ErrMultiTenancy{}): + return nil, objects.NewErrMultiTenancy(fmt.Errorf("determine shard: %w", err)) + case errors.As(err, &authzerrors.Forbidden{}): + return nil, fmt.Errorf("determine shard: %w", err) + default: + return nil, objects.NewErrInvalidUserInput("determine shard: %v", err) + } + } + + group := byShard[shardName] + group.ids = append(group.ids, id) + group.pos = append(group.pos, pos) + byShard[shardName] = group + } + + out := make([]*storobj.Object, len(query)) + + for shardName, group := range byShard { + var objects []*storobj.Object + var err error + + shard, release, err := i.getShardForDirectLocalOperation(ctx, tenant, shardName, localShardOperationRead) + defer release() + if err != nil { + return nil, err + } else if shard != nil { + objects, err = shard.MultiObjectByID(ctx, group.ids) + if err != nil { + return nil, errors.Wrapf(err, "local shard %s", shardId(i.ID(), shardName)) + } + } else { + objects, err = i.remote.MultiGetObjects(ctx, shardName, extractIDsFromMulti(group.ids)) + if err != nil { + return nil, errors.Wrapf(err, "remote shard %s", shardName) + } + } + + for i, obj := range objects { + desiredPos := group.pos[i] + out[desiredPos] = obj + } + } + + return out, nil +} + +func extractIDsFromMulti(in []multi.Identifier) []strfmt.UUID { + out := make([]strfmt.UUID, len(in)) + + for i, id := range in { + out[i] = strfmt.UUID(id.ID) + } + + return out +} + +func wrapIDsInMulti(in []strfmt.UUID) 
[]multi.Identifier { + out := make([]multi.Identifier, len(in)) + + for i, id := range in { + out[i] = multi.Identifier{ID: string(id)} + } + + return out +} + +func (i *Index) exists(ctx context.Context, id strfmt.UUID, + replProps *additional.ReplicationProperties, tenant string, +) (bool, error) { + if err := i.validateMultiTenancy(tenant); err != nil { + return false, err + } + + shardName, err := i.determineObjectShard(ctx, id, tenant) + if err != nil { + switch { + case errors.As(err, &objects.ErrMultiTenancy{}): + return false, objects.NewErrMultiTenancy(fmt.Errorf("determine shard: %w", err)) + case errors.As(err, &authzerrors.Forbidden{}): + return false, fmt.Errorf("determine shard: %w", err) + default: + return false, objects.NewErrInvalidUserInput("determine shard: %v", err) + } + } + + var exists bool + if i.shardHasMultipleReplicasRead(tenant, shardName) { + if replProps == nil { + replProps = defaultConsistency() + } + cl := routerTypes.ConsistencyLevel(replProps.ConsistencyLevel) + return i.replicator.Exists(ctx, cl, shardName, id) + } + + shard, release, err := i.getShardForDirectLocalOperation(ctx, tenant, shardName, localShardOperationRead) + defer release() + if err != nil { + return exists, err + } + + if shard != nil { + exists, err = shard.Exists(ctx, id) + if err != nil { + err = fmt.Errorf("exists locally: shard=%q: %w", shardName, err) + } + } else { + exists, err = i.remote.Exists(ctx, shardName, id) + if err != nil { + owner, _ := i.getSchema.ShardOwner(i.Config.ClassName.String(), shardName) + err = fmt.Errorf("exists remotely: shard=%q owner=%q: %w", shardName, owner, err) + } + } + + return exists, err +} + +func (i *Index) IncomingExists(ctx context.Context, shardName string, + id strfmt.UUID, +) (bool, error) { + shard, release, err := i.getOrInitShard(ctx, shardName) + if err != nil { + return false, err + } + defer release() + + if shard.GetStatus() == storagestate.StatusLoading { + return false, 
enterrors.NewErrUnprocessable(fmt.Errorf("local %s shard is not ready", shardName)) + } + + return shard.Exists(ctx, id) +} + +func (i *Index) objectSearch(ctx context.Context, limit int, filters *filters.LocalFilter, + keywordRanking *searchparams.KeywordRanking, sort []filters.Sort, cursor *filters.Cursor, + addlProps additional.Properties, replProps *additional.ReplicationProperties, tenant string, autoCut int, + properties []string, +) ([]*storobj.Object, []float32, error) { + cl := i.consistencyLevel(replProps) + readPlan, err := i.buildReadRoutingPlan(cl, tenant) + if err != nil { + return nil, nil, err + } + + // If the request is a BM25F with no properties selected, use all possible properties + if keywordRanking != nil && keywordRanking.Type == "bm25" && len(keywordRanking.Properties) == 0 { + + cl := i.getSchema.ReadOnlyClass(i.Config.ClassName.String()) + if cl == nil { + return nil, nil, fmt.Errorf("class %s not found in schema", i.Config.ClassName) + } + + propHash := cl.Properties + // Get keys of hash + for _, v := range propHash { + if inverted.PropertyHasSearchableIndex(i.getSchema.ReadOnlyClass(i.Config.ClassName.String()), v.Name) { + keywordRanking.Properties = append(keywordRanking.Properties, v.Name) + } + } + + // WEAVIATE-471 - error if we can't find a property to search + if len(keywordRanking.Properties) == 0 { + return nil, []float32{}, errors.New( + "No properties provided, and no indexed properties found in class") + } + } + + outObjects, outScores, err := i.objectSearchByShard(ctx, limit, filters, keywordRanking, sort, cursor, addlProps, tenant, readPlan, properties) + if err != nil { + return nil, nil, err + } + + if len(outObjects) == len(outScores) { + if keywordRanking != nil && keywordRanking.Type == "bm25" { + for ii := range outObjects { + oo := outObjects[ii] + + if oo.AdditionalProperties() == nil { + oo.Object.Additional = make(map[string]interface{}) + } + + // Additional score is filled in by the top level function + + // 
Collect all keys starting with "BM25F" and add them to the Additional + if keywordRanking.AdditionalExplanations { + explainScore := "" + for k, v := range oo.Object.Additional { + if strings.HasPrefix(k, "BM25F") { + + explainScore = fmt.Sprintf("%v, %v:%v", explainScore, k, v) + delete(oo.Object.Additional, k) + } + } + oo.Object.Additional["explainScore"] = explainScore + } + } + } + } + + if len(sort) > 0 { + if len(readPlan.Shards()) > 1 { + var err error + outObjects, outScores, err = i.sort(outObjects, outScores, sort, limit) + if err != nil { + return nil, nil, errors.Wrap(err, "sort") + } + } + } else if keywordRanking != nil { + outObjects, outScores = i.sortKeywordRanking(outObjects, outScores) + } else if len(readPlan.Shards()) > 1 && !addlProps.ReferenceQuery { + // sort only for multiple shards (already sorted for single) + // and for not reference nested query (sort is applied for root query) + outObjects, outScores = i.sortByID(outObjects, outScores) + } + + if autoCut > 0 { + cutOff := autocut.Autocut(outScores, autoCut) + outObjects = outObjects[:cutOff] + outScores = outScores[:cutOff] + } + + // if this search was caused by a reference property + // search, we should not limit the number of results. + // for example, if the query contains a where filter + // whose operator is `And`, and one of the operands + // contains a path to a reference prop, the Search + // caused by such a ref prop being limited can cause + // the `And` to return no results where results would + // be expected. we won't know that unless we search + // and return all referenced object properties. + if !addlProps.ReferenceQuery && len(outObjects) > limit { + if len(outObjects) == len(outScores) { + outScores = outScores[:limit] + } + outObjects = outObjects[:limit] + } + + if i.anyShardHasMultipleReplicasRead(tenant, readPlan.Shards()) { + err = i.replicator.CheckConsistency(ctx, cl, outObjects) + if err != nil { + i.logger.WithField("action", "object_search"). 
+ Errorf("failed to check consistency of search results: %v", err) + } + } + + return outObjects, outScores, nil +} + +func (i *Index) objectSearchByShard(ctx context.Context, limit int, filters *filters.LocalFilter, + keywordRanking *searchparams.KeywordRanking, sort []filters.Sort, cursor *filters.Cursor, + addlProps additional.Properties, tenant string, readPlan routerTypes.ReadRoutingPlan, properties []string, +) ([]*storobj.Object, []float32, error) { + resultObjects, resultScores := objectSearchPreallocate(limit, readPlan.Shards()) + + eg := enterrors.NewErrorGroupWrapper(i.logger, "filters:", filters) + eg.SetLimit(_NUMCPU * 2) + shardResultLock := sync.Mutex{} + + remoteSearch := func(shardName string) error { + objs, scores, nodeName, err := i.remote.SearchShard(ctx, shardName, nil, nil, 0, limit, filters, keywordRanking, sort, cursor, nil, addlProps, nil, properties) + if err != nil { + return fmt.Errorf( + "remote shard object search %s: %w", shardName, err) + } + + if i.shardHasMultipleReplicasRead(tenant, shardName) { + storobj.AddOwnership(objs, nodeName, shardName) + } + + shardResultLock.Lock() + resultObjects = append(resultObjects, objs...) + resultScores = append(resultScores, scores...) + shardResultLock.Unlock() + + return nil + } + localSeach := func(shardName string) error { + // We need to getOrInit here because the shard might not yet be loaded due to eventual consistency on the schema update + // triggering the shard loading in the database + shard, release, err := i.getOrInitShard(ctx, shardName) + defer release() + if err != nil { + return fmt.Errorf("error getting local shard %s: %w", shardName, err) + } + if shard == nil { + // This will make the code hit other remote replicas, and usually resolve any kind of eventual consistency issues just thanks to delaying + // the search to the other replica. + // This is not ideal, but it works for now. 
+ return remoteSearch(shardName) + } + + localCtx := helpers.InitSlowQueryDetails(ctx) + helpers.AnnotateSlowQueryLog(localCtx, "is_coordinator", true) + objs, scores, err := shard.ObjectSearch(localCtx, limit, filters, keywordRanking, sort, cursor, addlProps, properties) + if err != nil { + return fmt.Errorf( + "local shard object search %s: %w", shard.ID(), err) + } + nodeName := i.getSchema.NodeName() + + if i.shardHasMultipleReplicasRead(tenant, shardName) { + storobj.AddOwnership(objs, nodeName, shardName) + } + + shardResultLock.Lock() + resultObjects = append(resultObjects, objs...) + resultScores = append(resultScores, scores...) + shardResultLock.Unlock() + + return nil + } + err := executor.ExecuteForEachShard(readPlan, + // Local Shard Search + func(replica routerTypes.Replica) error { + shardName := replica.ShardName + eg.Go(func() error { + return localSeach(shardName) + }, shardName) + return nil + }, + func(replica routerTypes.Replica) error { + shardName := replica.ShardName + eg.Go(func() error { + return remoteSearch(shardName) + }, shardName) + return nil + }, + ) + if err != nil { + return nil, nil, fmt.Errorf("error executing search for each shard: %w", err) + } + if err := eg.Wait(); err != nil { + return nil, nil, err + } + + if len(resultObjects) == len(resultScores) { + // Force a stable sort order by UUID + type resultSortable struct { + object *storobj.Object + score float32 + } + objs := resultObjects + scores := resultScores + results := make([]resultSortable, len(objs)) + for i := range objs { + results[i] = resultSortable{ + object: objs[i], + score: scores[i], + } + } + + golangSort.Slice(results, func(i, j int) bool { + if results[i].score == results[j].score { + return results[i].object.Object.ID > results[j].object.Object.ID + } + + return results[i].score > results[j].score + }) + + finalObjs := make([]*storobj.Object, len(results)) + finalScores := make([]float32, len(results)) + for i, result := range results { + finalObjs[i] = 
result.object + finalScores[i] = result.score + } + + return finalObjs, finalScores, nil + } + + return resultObjects, resultScores, nil +} + +func (i *Index) sortByID(objects []*storobj.Object, scores []float32, +) ([]*storobj.Object, []float32) { + return newIDSorter().sort(objects, scores) +} + +func (i *Index) sortKeywordRanking(objects []*storobj.Object, + scores []float32, +) ([]*storobj.Object, []float32) { + return newScoresSorter().sort(objects, scores) +} + +func (i *Index) sort(objects []*storobj.Object, scores []float32, + sort []filters.Sort, limit int, +) ([]*storobj.Object, []float32, error) { + return sorter.NewObjectsSorter(i.getSchema.ReadOnlyClass). + Sort(objects, scores, limit, sort) +} + +func (i *Index) mergeGroups(objects []*storobj.Object, dists []float32, + groupBy *searchparams.GroupBy, limit, shardCount int, +) ([]*storobj.Object, []float32, error) { + return newGroupMerger(objects, dists, groupBy).Do() +} + +func (i *Index) singleLocalShardObjectVectorSearch(ctx context.Context, searchVectors []models.Vector, + targetVectors []string, dist float32, limit int, filters *filters.LocalFilter, + sort []filters.Sort, groupBy *searchparams.GroupBy, additional additional.Properties, + shard ShardLike, targetCombination *dto.TargetCombination, properties []string, +) ([]*storobj.Object, []float32, error) { + ctx = helpers.InitSlowQueryDetails(ctx) + helpers.AnnotateSlowQueryLog(ctx, "is_coordinator", true) + if shard.GetStatus() == storagestate.StatusLoading { + return nil, nil, enterrors.NewErrUnprocessable(fmt.Errorf("local %s shard is not ready", shard.Name())) + } + res, resDists, err := shard.ObjectVectorSearch( + ctx, searchVectors, targetVectors, dist, limit, filters, sort, groupBy, additional, targetCombination, properties) + if err != nil { + return nil, nil, errors.Wrapf(err, "shard %s", shard.ID()) + } + return res, resDists, nil +} + +func (i *Index) localShardSearch(ctx context.Context, searchVectors []models.Vector, + targetVectors 
[]string, dist float32, limit int, localFilters *filters.LocalFilter, + sort []filters.Sort, groupBy *searchparams.GroupBy, additionalProps additional.Properties, + targetCombination *dto.TargetCombination, properties []string, tenantName string, shardName string, +) ([]*storobj.Object, []float32, error) { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return nil, nil, err + } + if shard != nil { + defer release() + } + + localCtx := helpers.InitSlowQueryDetails(ctx) + helpers.AnnotateSlowQueryLog(localCtx, "is_coordinator", true) + localShardResult, localShardScores, err := shard.ObjectVectorSearch( + localCtx, searchVectors, targetVectors, dist, limit, localFilters, sort, groupBy, additionalProps, targetCombination, properties) + if err != nil { + return nil, nil, errors.Wrapf(err, "shard %s", shard.ID()) + } + // Append result to out + if i.shardHasMultipleReplicasRead(tenantName, shardName) { + storobj.AddOwnership(localShardResult, i.getSchema.NodeName(), shardName) + } + return localShardResult, localShardScores, nil +} + +func (i *Index) remoteShardSearch(ctx context.Context, searchVectors []models.Vector, + targetVectors []string, distance float32, limit int, localFilters *filters.LocalFilter, + sort []filters.Sort, groupBy *searchparams.GroupBy, additional additional.Properties, + targetCombination *dto.TargetCombination, properties []string, tenantName string, shardName string, +) ([]*storobj.Object, []float32, error) { + var outObjects []*storobj.Object + var outScores []float32 + + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return nil, nil, err + } + if shard != nil { + defer release() + } + + if i.Config.ForceFullReplicasSearch { + // Force a search on all the replicas for the shard + remoteSearchResults, err := i.remote.SearchAllReplicas(ctx, + i.logger, shardName, searchVectors, targetVectors, distance, limit, localFilters, + nil, sort, nil, groupBy, additional, i.getSchema.NodeName(), 
targetCombination, properties) + // Only return an error if we failed to query remote shards AND we had no local shard to query + if err != nil && shard == nil { + return nil, nil, errors.Wrapf(err, "remote shard %s", shardName) + } + // Append the result of the search to the outgoing result + for _, remoteShardResult := range remoteSearchResults { + if i.shardHasMultipleReplicasRead(tenantName, shardName) { + storobj.AddOwnership(remoteShardResult.Objects, remoteShardResult.Node, shardName) + } + outObjects = append(outObjects, remoteShardResult.Objects...) + outScores = append(outScores, remoteShardResult.Scores...) + } + } else { + // Search only what is necessary + remoteResult, remoteDists, nodeName, err := i.remote.SearchShard(ctx, + shardName, searchVectors, targetVectors, distance, limit, localFilters, + nil, sort, nil, groupBy, additional, targetCombination, properties) + if err != nil { + return nil, nil, errors.Wrapf(err, "remote shard %s", shardName) + } + + if i.shardHasMultipleReplicasRead(tenantName, shardName) { + storobj.AddOwnership(remoteResult, nodeName, shardName) + } + outObjects = remoteResult + outScores = remoteDists + } + return outObjects, outScores, nil +} + +func (i *Index) objectVectorSearch(ctx context.Context, searchVectors []models.Vector, + targetVectors []string, dist float32, limit int, localFilters *filters.LocalFilter, sort []filters.Sort, + groupBy *searchparams.GroupBy, additionalProps additional.Properties, + replProps *additional.ReplicationProperties, tenant string, targetCombination *dto.TargetCombination, properties []string, +) ([]*storobj.Object, []float32, error) { + cl := i.consistencyLevel(replProps) + readPlan, err := i.buildReadRoutingPlan(cl, tenant) + if err != nil { + return nil, nil, err + } + + if len(readPlan.Shards()) == 1 && !i.Config.ForceFullReplicasSearch { + shard, release, err := i.getShardForDirectLocalOperation(ctx, tenant, readPlan.Shards()[0], localShardOperationRead) + defer release() + if err != 
nil { + return nil, nil, err + } + if shard != nil { + return i.singleLocalShardObjectVectorSearch(ctx, searchVectors, targetVectors, dist, limit, localFilters, + sort, groupBy, additionalProps, shard, targetCombination, properties) + } + } + + // a limit of -1 is used to signal a search by distance. if that is + // the case we have to adjust how we calculate the output capacity + var shardCap int + if limit < 0 { + shardCap = len(readPlan.Shards()) * hnsw.DefaultSearchByDistInitialLimit + } else { + shardCap = len(readPlan.Shards()) * limit + } + + eg := enterrors.NewErrorGroupWrapper(i.logger, "tenant:", tenant) + eg.SetLimit(_NUMCPU * 2) + m := &sync.Mutex{} + + out := make([]*storobj.Object, 0, shardCap) + dists := make([]float32, 0, shardCap) + var localSearches int64 + var localResponses atomic.Int64 + var remoteSearches int64 + var remoteResponses atomic.Int64 + + for _, sn := range readPlan.Shards() { + shardName := sn + shard, release, err := i.getShardForDirectLocalOperation(ctx, tenant, shardName, localShardOperationRead) + defer release() + if err != nil { + return nil, nil, err + } + + if shard != nil { + localSearches++ + eg.Go(func() error { + localShardResult, localShardScores, err1 := i.localShardSearch(ctx, searchVectors, targetVectors, dist, limit, localFilters, sort, groupBy, additionalProps, targetCombination, properties, tenant, shardName) + if err1 != nil { + return fmt.Errorf( + "local shard object search %s: %w", shard.ID(), err1) + } + + m.Lock() + localResponses.Add(1) + out = append(out, localShardResult...) + dists = append(dists, localShardScores...) 
+ m.Unlock() + return nil + }) + } + + if shard == nil || i.Config.ForceFullReplicasSearch { + remoteSearches++ + eg.Go(func() error { + // If we have no local shard or if we force the query to reach all replicas + remoteShardObject, remoteShardScores, err2 := i.remoteShardSearch(ctx, searchVectors, targetVectors, dist, limit, localFilters, sort, groupBy, additionalProps, targetCombination, properties, tenant, shardName) + if err2 != nil { + return fmt.Errorf( + "remote shard object search %s: %w", shardName, err2) + } + m.Lock() + remoteResponses.Add(1) + out = append(out, remoteShardObject...) + dists = append(dists, remoteShardScores...) + m.Unlock() + return nil + }) + } + } + + if err := eg.Wait(); err != nil { + return nil, nil, err + } + + // If we are force querying all replicas, we need to run deduplication on the result. + if i.Config.ForceFullReplicasSearch { + if localSearches != localResponses.Load() { + i.logger.Warnf("(in full replica search) local search count does not match local response count: searches=%d responses=%d", localSearches, localResponses.Load()) + } + if remoteSearches != remoteResponses.Load() { + i.logger.Warnf("(in full replica search) remote search count does not match remote response count: searches=%d responses=%d", remoteSearches, remoteResponses.Load()) + } + out, dists, err = searchResultDedup(out, dists) + if err != nil { + return nil, nil, fmt.Errorf("could not deduplicate result after full replicas search: %w", err) + } + } + + if len(readPlan.Shards()) == 1 { + return out, dists, nil + } + + if len(readPlan.Shards()) > 1 && groupBy != nil { + return i.mergeGroups(out, dists, groupBy, limit, len(readPlan.Shards())) + } + + if len(readPlan.Shards()) > 1 && len(sort) > 0 { + return i.sort(out, dists, sort, limit) + } + + out, dists = newDistancesSorter().sort(out, dists) + if limit > 0 && len(out) > limit { + out = out[:limit] + dists = dists[:limit] + } + + if i.anyShardHasMultipleReplicasRead(tenant, readPlan.Shards()) { + 
err = i.replicator.CheckConsistency(ctx, cl, out)
+		if err != nil {
+			i.logger.WithField("action", "object_vector_search").
+				Errorf("failed to check consistency of search results: %v", err)
+		}
+	}
+
+	return out, dists, nil
+}
+
+// IncomingSearch serves a search request for a single local shard on behalf
+// of a remote coordinator. With no search vectors it performs an object
+// (filter/BM25) search, otherwise a vector search.
+func (i *Index) IncomingSearch(ctx context.Context, shardName string,
+	searchVectors []models.Vector, targetVectors []string, distance float32, limit int,
+	filters *filters.LocalFilter, keywordRanking *searchparams.KeywordRanking,
+	sort []filters.Sort, cursor *filters.Cursor, groupBy *searchparams.GroupBy,
+	additional additional.Properties, targetCombination *dto.TargetCombination, properties []string,
+) ([]*storobj.Object, []float32, error) {
+	shard, release, err := i.getOrInitShard(ctx, shardName)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer release()
+
+	ctx = helpers.InitSlowQueryDetails(ctx)
+	helpers.AnnotateSlowQueryLog(ctx, "is_coordinator", false)
+
+	// Hacky fix here
+	// shard.GetStatus() forces a lazy shard to load, and some use cases rely
+	// on a search forcing that load. A shard still in StatusLoading cannot
+	// answer regardless of replication, so reject early — with replication
+	// enabled another replica can serve the request instead.
+	// NOTE(review): a previous version duplicated this identical check in an
+	// if/else on i.replicationEnabled() and its comment referred to a
+	// GetStatusNoLoad call that was never made; both branches returned the
+	// same error, so they are collapsed into one check here.
+	if shard.GetStatus() == storagestate.StatusLoading {
+		return nil, nil, enterrors.NewErrUnprocessable(fmt.Errorf("local %s shard is not ready", shardName))
+	}
+
+	if len(searchVectors) == 0 {
+		res, scores, err := shard.ObjectSearch(ctx, limit, filters,
+			keywordRanking, sort, cursor, additional, properties)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		return res, scores, nil
+	}
+
+	res, resDists, err := shard.ObjectVectorSearch(
+		ctx, searchVectors, targetVectors, distance, limit, filters, sort, groupBy, additional, targetCombination, properties)
+	if err != nil {
+		return nil, nil, errors.Wrapf(err, "shard %s", shard.ID())
+	}
+
+	return res, resDists, nil
+}
+
+// deleteObject removes a single object by ID, routing the deletion through
+// the replicator, a remote shard, or the local shard as appropriate.
+func (i *Index) deleteObject(ctx context.Context, id strfmt.UUID,
+	deletionTime time.Time, replProps *additional.ReplicationProperties, tenant string, schemaVersion uint64,
+) error {
+	if err := i.validateMultiTenancy(tenant); err != nil {
+		return err
+	}
+
+	shardName, err := i.determineObjectShard(ctx, id, tenant)
+	if err != nil {
+		switch {
+		case errors.As(err, &objects.ErrMultiTenancy{}):
+			return objects.NewErrMultiTenancy(fmt.Errorf("determine shard: %w", err))
+		case errors.As(err, &authzerrors.Forbidden{}):
+			return fmt.Errorf("determine shard: %w", err)
+		default:
+			return objects.NewErrInvalidUserInput("determine shard: %v", err)
+		}
+	}
+
+	if i.shardHasMultipleReplicasWrite(tenant, shardName) {
+		if replProps == nil {
+			replProps = defaultConsistency()
+		}
+		cl := routerTypes.ConsistencyLevel(replProps.ConsistencyLevel)
+		if err := i.replicator.DeleteObject(ctx, shardName, id, deletionTime, cl, schemaVersion); err != nil {
+			return fmt.Errorf("replicate deletion: shard=%q %w", shardName, err)
+		}
+		return nil
+	}
+
+	shard, release, err := i.getShardForDirectLocalOperation(ctx, tenant, shardName, localShardOperationWrite)
+	defer release()
+	if err != nil {
+		return err
+	}
+
+	// no replication, remote shard (or local not yet inited)
+	if shard == nil {
+		if err := i.remote.DeleteObject(ctx, shardName, id, deletionTime, schemaVersion); err != nil {
+			return fmt.Errorf("delete remote object: shard=%q: %w", shardName, err)
+		}
+		return nil
+	}
+
+	// no replication, local shard
+	i.shardTransferMutex.RLock()
+	defer 
i.shardTransferMutex.RUnlock() + if err = shard.DeleteObject(ctx, id, deletionTime); err != nil { + return fmt.Errorf("delete local object: shard=%q: %w", shardName, err) + } + return nil +} + +func (i *Index) IncomingDeleteObject(ctx context.Context, shardName string, + id strfmt.UUID, deletionTime time.Time, schemaVersion uint64, +) error { + i.shardTransferMutex.RLock() + defer i.shardTransferMutex.RUnlock() + + shard, release, err := i.getOrInitShard(ctx, shardName) + if err != nil { + return err + } + defer release() + + return shard.DeleteObject(ctx, id, deletionTime) +} + +func (i *Index) getClass() *models.Class { + className := i.Config.ClassName.String() + return i.getSchema.ReadOnlyClass(className) +} + +// Intended to run on "receiver" nodes, where local shard +// is expected to exist and be active +// Method first tries to get shard from Index::shards map, +// or inits shard and adds it to the map if shard was not found +func (i *Index) initLocalShard(ctx context.Context, shardName string) error { + return i.initLocalShardWithForcedLoading(ctx, i.getClass(), shardName, false, false) +} + +func (i *Index) LoadLocalShard(ctx context.Context, shardName string, implicitShardLoading bool) error { + mustLoad := !implicitShardLoading + return i.initLocalShardWithForcedLoading(ctx, i.getClass(), shardName, mustLoad, implicitShardLoading) +} + +func (i *Index) initLocalShardWithForcedLoading(ctx context.Context, class *models.Class, shardName string, mustLoad bool, implicitShardLoading bool) error { + i.closeLock.RLock() + defer i.closeLock.RUnlock() + + if i.closed { + return errAlreadyShutdown + } + + // make sure same shard is not inited in parallel + i.shardCreateLocks.Lock(shardName) + defer i.shardCreateLocks.Unlock(shardName) + + // check if created in the meantime by concurrent call + if shard := i.shards.Load(shardName); shard != nil { + if mustLoad { + lazyShard, ok := shard.(*LazyLoadShard) + if ok { + return lazyShard.Load(ctx) + } + } + + return 
nil + } + + disableLazyLoad := mustLoad || i.Config.DisableLazyLoadShards + + shard, err := i.initShard(ctx, shardName, class, i.metrics.baseMetrics, disableLazyLoad, implicitShardLoading) + if err != nil { + return err + } + + i.shards.Store(shardName, shard) + + return nil +} + +func (i *Index) UnloadLocalShard(ctx context.Context, shardName string) error { + i.closeLock.RLock() + defer i.closeLock.RUnlock() + + if i.closed { + return errAlreadyShutdown + } + + i.shardCreateLocks.Lock(shardName) + defer i.shardCreateLocks.Unlock(shardName) + + shardLike, ok := i.shards.LoadAndDelete(shardName) + if !ok { + return nil // shard was not found, nothing to unload + } + + if err := shardLike.Shutdown(ctx); err != nil { + if !errors.Is(err, errAlreadyShutdown) { + return errors.Wrapf(err, "shutdown shard %q", shardName) + } + return errors.Wrapf(errAlreadyShutdown, "shutdown shard %q", shardName) + } + + return nil +} + +func (i *Index) GetShard(ctx context.Context, shardName string) ( + shard ShardLike, release func(), err error, +) { + return i.getOptInitLocalShard(ctx, shardName, false) +} + +func (i *Index) getOrInitShard(ctx context.Context, shardName string) ( + shard ShardLike, release func(), err error, +) { + return i.getOptInitLocalShard(ctx, shardName, true) +} + +// getOptInitLocalShard returns the local shard with the given name. +// It is ensured that the returned instance is a fully loaded shard if ensureInit is set to true. +// The returned shard may be a lazy shard instance or nil if the shard hasn't yet been initialized. +// The returned shard cannot be closed until release is called. 
+func (i *Index) getOptInitLocalShard(ctx context.Context, shardName string, ensureInit bool) ( + shard ShardLike, release func(), err error, +) { + i.closeLock.RLock() + defer i.closeLock.RUnlock() + + if i.closed { + return nil, func() {}, errAlreadyShutdown + } + + // make sure same shard is not inited in parallel + i.shardCreateLocks.Lock(shardName) + defer i.shardCreateLocks.Unlock(shardName) + + // check if created in the meantime by concurrent call + shard = i.shards.Load(shardName) + if shard == nil { + if !ensureInit { + return nil, func() {}, nil + } + + className := i.Config.ClassName.String() + class := i.getSchema.ReadOnlyClass(className) + if class == nil { + return nil, func() {}, fmt.Errorf("class %s not found in schema", className) + } + + shard, err = i.initShard(ctx, shardName, class, i.metrics.baseMetrics, true, false) + if err != nil { + return nil, func() {}, err + } + + i.shards.Store(shardName, shard) + } + + release, err = shard.preventShutdown() + if err != nil { + return nil, func() {}, fmt.Errorf("get/init local shard %q, no shutdown: %w", shardName, err) + } + + return shard, release, nil +} + +func (i *Index) mergeObject(ctx context.Context, merge objects.MergeDocument, + replProps *additional.ReplicationProperties, tenant string, schemaVersion uint64, +) error { + if err := i.validateMultiTenancy(tenant); err != nil { + return err + } + + shardName, err := i.determineObjectShard(ctx, merge.ID, tenant) + if err != nil { + switch { + case errors.As(err, &objects.ErrMultiTenancy{}): + return objects.NewErrMultiTenancy(fmt.Errorf("determine shard: %w", err)) + case errors.As(err, &authzerrors.Forbidden{}): + return fmt.Errorf("determine shard: %w", err) + default: + return objects.NewErrInvalidUserInput("determine shard: %v", err) + } + } + + if i.shardHasMultipleReplicasWrite(tenant, shardName) { + if replProps == nil { + replProps = defaultConsistency() + } + cl := routerTypes.ConsistencyLevel(replProps.ConsistencyLevel) + if err := 
i.replicator.MergeObject(ctx, shardName, &merge, cl, schemaVersion); err != nil { + return fmt.Errorf("replicate single update: %w", err) + } + return nil + } + + shard, release, err := i.getShardForDirectLocalOperation(ctx, tenant, shardName, localShardOperationWrite) + defer release() + if err != nil { + return err + } + + // no replication, remote shard (or local not yet inited) + if shard == nil { + if err := i.remote.MergeObject(ctx, shardName, merge, schemaVersion); err != nil { + return fmt.Errorf("update remote object: shard=%q: %w", shardName, err) + } + return nil + } + + // no replication, local shard + i.shardTransferMutex.RLock() + defer i.shardTransferMutex.RUnlock() + if err = shard.MergeObject(ctx, merge); err != nil { + return fmt.Errorf("update local object: shard=%q: %w", shardName, err) + } + + return nil +} + +func (i *Index) IncomingMergeObject(ctx context.Context, shardName string, + mergeDoc objects.MergeDocument, schemaVersion uint64, +) error { + i.shardTransferMutex.RLock() + defer i.shardTransferMutex.RUnlock() + + shard, release, err := i.getOrInitShard(ctx, shardName) + if err != nil { + return err + } + defer release() + + return shard.MergeObject(ctx, mergeDoc) +} + +func (i *Index) aggregate(ctx context.Context, replProps *additional.ReplicationProperties, + params aggregation.Params, modules *modules.Provider, tenant string, +) (*aggregation.Result, error) { + cl := i.consistencyLevel(replProps) + readPlan, err := i.buildReadRoutingPlan(cl, tenant) + if err != nil { + return nil, err + } + + results := make([]*aggregation.Result, len(readPlan.Shards())) + for j, shardName := range readPlan.Shards() { + var err error + var res *aggregation.Result + + var shard ShardLike + var release func() + // anonymous func is here to ensure release is executed after each loop iteration + func() { + shard, release, err = i.getShardForDirectLocalOperation(ctx, tenant, shardName, localShardOperationRead) + defer release() + if err == nil { + if 
shard != nil { + res, err = shard.Aggregate(ctx, params, modules) + } else { + res, err = i.remote.Aggregate(ctx, shardName, params) + } + } + }() + + if err != nil { + return nil, errors.Wrapf(err, "shard %s", shardName) + } + + results[j] = res + } + + return aggregator.NewShardCombiner().Do(results), nil +} + +func (i *Index) IncomingAggregate(ctx context.Context, shardName string, + params aggregation.Params, mods interface{}, +) (*aggregation.Result, error) { + shard, release, err := i.getOrInitShard(ctx, shardName) + if err != nil { + return nil, err + } + defer release() + + if shard.GetStatus() == storagestate.StatusLoading { + return nil, enterrors.NewErrUnprocessable(fmt.Errorf("local %s shard is not ready", shardName)) + } + + return shard.Aggregate(ctx, params, mods.(*modules.Provider)) +} + +func (i *Index) drop() error { + i.shardTransferMutex.RLock() + defer i.shardTransferMutex.RUnlock() + + i.closeLock.Lock() + defer i.closeLock.Unlock() + + if i.closed { + return errAlreadyShutdown + } + + i.closed = true + + i.closingCancel() + + eg := enterrors.NewErrorGroupWrapper(i.logger) + eg.SetLimit(_NUMCPU * 2) + fields := logrus.Fields{"action": "drop_shard", "class": i.Config.ClassName} + dropShard := func(shardName string, _ ShardLike) error { + eg.Go(func() error { + i.shardCreateLocks.Lock(shardName) + defer i.shardCreateLocks.Unlock(shardName) + + shard, ok := i.shards.LoadAndDelete(shardName) + if !ok { + return nil // shard already does not exist + } + if err := shard.drop(); err != nil { + logrus.WithFields(fields).WithField("id", shard.ID()).Error(err) + } + + return nil + }) + return nil + } + + i.shards.Range(dropShard) + if err := eg.Wait(); err != nil { + return err + } + + // Dropping the shards only unregisters the shards callbacks, but we still + // need to stop the cycle managers that those shards used to register with. 
+ ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + i.logger.WithFields(logrus.Fields{ + "action": "drop_index", + "duration": 60 * time.Second, + }).Debug("context.WithTimeout") + + if err := i.stopCycleManagers(ctx, "drop"); err != nil { + return err + } + + return os.RemoveAll(i.path()) +} + +func (i *Index) DropShard(name string) error { + return i.dropShards([]string{name}) +} + +func (i *Index) dropShards(names []string) error { + i.shardTransferMutex.RLock() + defer i.shardTransferMutex.RUnlock() + + i.closeLock.RLock() + defer i.closeLock.RUnlock() + + if i.closed { + return errAlreadyShutdown + } + + ec := errorcompounder.New() + eg := enterrors.NewErrorGroupWrapper(i.logger) + eg.SetLimit(_NUMCPU * 2) + + for _, name := range names { + name := name + eg.Go(func() error { + i.shardCreateLocks.Lock(name) + defer i.shardCreateLocks.Unlock(name) + + shard, ok := i.shards.LoadAndDelete(name) + if !ok { + // Ensure that if the shard is not loaded we delete any reference on disk for any data. 
+ // This ensures that we also delete inactive shards/tenants + if err := os.RemoveAll(shardPath(i.path(), name)); err != nil { + ec.Add(err) + i.logger.WithField("action", "drop_shard").WithField("shard", shard.ID()).Error(err) + } + } else { + // If shard is loaded use the native primitive to drop it + if err := shard.drop(); err != nil { + ec.Add(err) + i.logger.WithField("action", "drop_shard").WithField("shard", shard.ID()).Error(err) + } + } + + return nil + }) + } + + eg.Wait() + return ec.ToError() +} + +func (i *Index) dropCloudShards(ctx context.Context, cloud modulecapabilities.OffloadCloud, names []string, nodeId string) error { + i.shardTransferMutex.RLock() + defer i.shardTransferMutex.RUnlock() + + i.closeLock.RLock() + defer i.closeLock.RUnlock() + + if i.closed { + return errAlreadyShutdown + } + + ec := &errorcompounder.ErrorCompounder{} + eg := enterrors.NewErrorGroupWrapper(i.logger) + eg.SetLimit(_NUMCPU * 2) + + for _, name := range names { + name := name + eg.Go(func() error { + i.shardCreateLocks.Lock(name) + defer i.shardCreateLocks.Unlock(name) + + if err := cloud.Delete(ctx, i.ID(), name, nodeId); err != nil { + ec.Add(err) + i.logger.WithField("action", "cloud_drop_shard"). 
+ WithField("shard", name).Error(err) + } + return nil + }) + } + + eg.Wait() + return ec.ToError() +} + +func (i *Index) Shutdown(ctx context.Context) error { + i.shardTransferMutex.RLock() + defer i.shardTransferMutex.RUnlock() + + i.closeLock.Lock() + defer i.closeLock.Unlock() + + if i.closed { + return errAlreadyShutdown + } + + i.closed = true + + i.closingCancel() + + // TODO allow every resource cleanup to run, before returning early with error + if err := i.shards.RangeConcurrently(i.logger, func(name string, shard ShardLike) error { + if err := shard.Shutdown(ctx); err != nil { + if !errors.Is(err, errAlreadyShutdown) { + return errors.Wrapf(err, "shutdown shard %q", name) + } + i.logger.WithField("shard", shard.Name()).Debug("was already shut or dropped") + } + return nil + }); err != nil { + return err + } + if err := i.stopCycleManagers(ctx, "shutdown"); err != nil { + return err + } + + return nil +} + +func (i *Index) stopCycleManagers(ctx context.Context, usecase string) error { + if err := i.cycleCallbacks.compactionCycle.StopAndWait(ctx); err != nil { + return fmt.Errorf("%s: stop objects compaction cycle: %w", usecase, err) + } + if err := i.cycleCallbacks.compactionAuxCycle.StopAndWait(ctx); err != nil { + return fmt.Errorf("%s: stop non objects compaction cycle: %w", usecase, err) + } + if err := i.cycleCallbacks.flushCycle.StopAndWait(ctx); err != nil { + return fmt.Errorf("%s: stop flush cycle: %w", usecase, err) + } + if err := i.cycleCallbacks.vectorCommitLoggerCycle.StopAndWait(ctx); err != nil { + return fmt.Errorf("%s: stop vector commit logger cycle: %w", usecase, err) + } + if err := i.cycleCallbacks.vectorTombstoneCleanupCycle.StopAndWait(ctx); err != nil { + return fmt.Errorf("%s: stop vector tombstone cleanup cycle: %w", usecase, err) + } + if err := i.cycleCallbacks.geoPropsCommitLoggerCycle.StopAndWait(ctx); err != nil { + return fmt.Errorf("%s: stop geo props commit logger cycle: %w", usecase, err) + } + if err := 
i.cycleCallbacks.geoPropsTombstoneCleanupCycle.StopAndWait(ctx); err != nil { + return fmt.Errorf("%s: stop geo props tombstone cleanup cycle: %w", usecase, err) + } + return nil +} + +func (i *Index) getShardsQueueSize(ctx context.Context, tenant string) (map[string]int64, error) { + className := i.Config.ClassName.String() + shardNames, err := i.schemaReader.Shards(className) + if err != nil { + return nil, err + } + + shardsQueueSize := make(map[string]int64) + for _, shardName := range shardNames { + if tenant != "" && shardName != tenant { + continue + } + var err error + var size int64 + var shard ShardLike + var release func() + + // anonymous func is here to ensure release is executed after each loop iteration + func() { + shard, release, err = i.getShardForDirectLocalOperation(ctx, tenant, shardName, localShardOperationRead) + defer release() + if err == nil { + if shard != nil { + _ = shard.ForEachVectorQueue(func(_ string, queue *VectorIndexQueue) error { + size += queue.Size() + return nil + }) + } else { + size, err = i.remote.GetShardQueueSize(ctx, shardName) + } + } + }() + + if err != nil { + return nil, errors.Wrapf(err, "shard %s", shardName) + } + + shardsQueueSize[shardName] = size + } + + return shardsQueueSize, nil +} + +func (i *Index) IncomingGetShardQueueSize(ctx context.Context, shardName string) (int64, error) { + shard, release, err := i.getOrInitShard(ctx, shardName) + if err != nil { + return 0, err + } + defer release() + + if shard.GetStatus() == storagestate.StatusLoading { + return 0, enterrors.NewErrUnprocessable(fmt.Errorf("local %s shard is not ready", shardName)) + } + var size int64 + _ = shard.ForEachVectorQueue(func(_ string, queue *VectorIndexQueue) error { + size += queue.Size() + return nil + }) + return size, nil +} + +func (i *Index) getShardsStatus(ctx context.Context, tenant string) (map[string]string, error) { + className := i.Config.ClassName.String() + shardNames, err := i.schemaReader.Shards(className) + if err != 
nil { + return nil, err + } + + shardsStatus := make(map[string]string) + + for _, shardName := range shardNames { + if tenant != "" && shardName != tenant { + continue + } + var err error + var status string + var shard ShardLike + var release func() + + // anonymous func is here to ensure release is executed after each loop iteration + func() { + shard, release, err = i.getShardForDirectLocalOperation(ctx, tenant, shardName, localShardOperationRead) + defer release() + if err == nil { + if shard != nil { + status = shard.GetStatus().String() + } else { + status, err = i.remote.GetShardStatus(ctx, shardName) + } + } + }() + + if err != nil { + return nil, errors.Wrapf(err, "shard %s", shardName) + } + + shardsStatus[shardName] = status + } + + return shardsStatus, nil +} + +func (i *Index) IncomingGetShardStatus(ctx context.Context, shardName string) (string, error) { + shard, release, err := i.getOrInitShard(ctx, shardName) + if err != nil { + return "", err + } + defer release() + + if shard.GetStatus() == storagestate.StatusLoading { + return "", enterrors.NewErrUnprocessable(fmt.Errorf("local %s shard is not ready", shardName)) + } + return shard.GetStatus().String(), nil +} + +func (i *Index) updateShardStatus(ctx context.Context, tenantName, shardName, targetStatus string, schemaVersion uint64) error { + shard, release, err := i.getShardForDirectLocalOperation(ctx, tenantName, shardName, localShardOperationWrite) + if err != nil { + return err + } + if shard == nil { + return i.remote.UpdateShardStatus(ctx, shardName, targetStatus, schemaVersion) + } + defer release() + return shard.UpdateStatus(targetStatus, "manually set by user") +} + +func (i *Index) IncomingUpdateShardStatus(ctx context.Context, shardName, targetStatus string, schemaVersion uint64) error { + shard, release, err := i.getOrInitShard(ctx, shardName) + if err != nil { + return err + } + defer release() + + return shard.UpdateStatus(targetStatus, "manually set by user") +} + +func (i *Index) 
findUUIDs(ctx context.Context, + filters *filters.LocalFilter, tenant string, repl *additional.ReplicationProperties, +) (map[string][]strfmt.UUID, error) { + before := time.Now() + defer i.metrics.BatchDelete(before, "filter_total") + cl := i.consistencyLevel(repl) + readPlan, err := i.buildReadRoutingPlan(cl, tenant) + if err != nil { + return nil, err + } + className := i.Config.ClassName.String() + + results := make(map[string][]strfmt.UUID) + for _, shardName := range readPlan.Shards() { + var shard ShardLike + var release func() + var err error + + if i.shardHasMultipleReplicasRead(tenant, shardName) { + results[shardName], err = i.replicator.FindUUIDs(ctx, className, shardName, filters, cl) + } else { + // anonymous func is here to ensure release is executed after each loop iteration + func() { + shard, release, err = i.getShardForDirectLocalOperation(ctx, tenant, shardName, localShardOperationRead) + defer release() + if err == nil { + if shard != nil { + results[shardName], err = shard.FindUUIDs(ctx, filters) + } else { + results[shardName], err = i.remote.FindUUIDs(ctx, shardName, filters) + } + } + }() + } + + if err != nil { + return nil, fmt.Errorf("find matching doc ids in shard %q: %w", shardName, err) + } + } + + return results, nil +} + +func (i *Index) consistencyLevel(repl *additional.ReplicationProperties) routerTypes.ConsistencyLevel { + if repl == nil { + repl = defaultConsistency() + } + return routerTypes.ConsistencyLevel(repl.ConsistencyLevel) +} + +func (i *Index) IncomingFindUUIDs(ctx context.Context, shardName string, + filters *filters.LocalFilter, +) ([]strfmt.UUID, error) { + shard, release, err := i.getOrInitShard(ctx, shardName) + if err != nil { + return nil, err + } + defer release() + + if shard.GetStatus() == storagestate.StatusLoading { + return nil, enterrors.NewErrUnprocessable(fmt.Errorf("local %s shard is not ready", shardName)) + } + + return shard.FindUUIDs(ctx, filters) +} + +func (i *Index) batchDeleteObjects(ctx 
context.Context, shardUUIDs map[string][]strfmt.UUID, + deletionTime time.Time, dryRun bool, replProps *additional.ReplicationProperties, schemaVersion uint64, + tenant string, +) (objects.BatchSimpleObjects, error) { + before := time.Now() + defer i.metrics.BatchDelete(before, "delete_from_shards_total") + + type result struct { + objs objects.BatchSimpleObjects + } + + if replProps == nil { + replProps = defaultConsistency() + } + + wg := &sync.WaitGroup{} + ch := make(chan result, len(shardUUIDs)) + for shardName, uuids := range shardUUIDs { + uuids := uuids + shardName := shardName + wg.Add(1) + f := func() { + defer wg.Done() + + var objs objects.BatchSimpleObjects + if i.shardHasMultipleReplicasWrite(tenant, shardName) { + objs = i.replicator.DeleteObjects(ctx, shardName, uuids, deletionTime, + dryRun, routerTypes.ConsistencyLevel(replProps.ConsistencyLevel), schemaVersion) + } else { + shard, release, err := i.getShardForDirectLocalOperation(ctx, tenant, shardName, localShardOperationWrite) + defer release() + if err != nil { + objs = objects.BatchSimpleObjects{ + objects.BatchSimpleObject{Err: err}, + } + } + if shard != nil { + i.shardTransferMutex.RLockGuard(func() error { + objs = shard.DeleteObjectBatch(ctx, uuids, deletionTime, dryRun) + return nil + }) + } else { + objs = i.remote.DeleteObjectBatch(ctx, shardName, uuids, deletionTime, dryRun, schemaVersion) + } + } + + ch <- result{objs} + } + enterrors.GoWrapper(f, i.logger) + } + + wg.Wait() + close(ch) + + var out objects.BatchSimpleObjects + for res := range ch { + out = append(out, res.objs...) 
+ } + + return out, nil +} + +func (i *Index) IncomingDeleteObjectBatch(ctx context.Context, shardName string, + uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64, +) objects.BatchSimpleObjects { + i.shardTransferMutex.RLock() + defer i.shardTransferMutex.RUnlock() + + shard, release, err := i.getOrInitShard(ctx, shardName) + if err != nil { + return objects.BatchSimpleObjects{ + objects.BatchSimpleObject{Err: err}, + } + } + defer release() + + return shard.DeleteObjectBatch(ctx, uuids, deletionTime, dryRun) +} + +func defaultConsistency(l ...routerTypes.ConsistencyLevel) *additional.ReplicationProperties { + rp := &additional.ReplicationProperties{} + if len(l) != 0 { + rp.ConsistencyLevel = string(l[0]) + } else { + rp.ConsistencyLevel = string(routerTypes.ConsistencyLevelQuorum) + } + return rp +} + +func objectSearchPreallocate(limit int, shards []string) ([]*storobj.Object, []float32) { + perShardLimit := config.DefaultQueryMaximumResults + if perShardLimit > int64(limit) { + perShardLimit = int64(limit) + } + capacity := perShardLimit * int64(len(shards)) + objects := make([]*storobj.Object, 0, capacity) + scores := make([]float32, 0, capacity) + + return objects, scores +} + +func (i *Index) validateMultiTenancy(tenant string) error { + if i.partitioningEnabled && tenant == "" { + return objects.NewErrMultiTenancy( + fmt.Errorf("class %s has multi-tenancy enabled, but request was without tenant", i.Config.ClassName), + ) + } else if !i.partitioningEnabled && tenant != "" { + return objects.NewErrMultiTenancy( + fmt.Errorf("class %s has multi-tenancy disabled, but request was with tenant", i.Config.ClassName), + ) + } + return nil +} + +// GetVectorIndexConfig returns a vector index configuration associated with targetVector. +// In case targetVector is empty string, legacy vector configuration is returned. +// Method expects that configuration associated with targetVector is present. 
+func (i *Index) GetVectorIndexConfig(targetVector string) schemaConfig.VectorIndexConfig { + i.vectorIndexUserConfigLock.Lock() + defer i.vectorIndexUserConfigLock.Unlock() + + if targetVector == "" { + return i.vectorIndexUserConfig + } + + return i.vectorIndexUserConfigs[targetVector] +} + +// GetVectorIndexConfigs returns a map of vector index configurations. +// If present, legacy vector is return under the key of empty string. +func (i *Index) GetVectorIndexConfigs() map[string]schemaConfig.VectorIndexConfig { + i.vectorIndexUserConfigLock.Lock() + defer i.vectorIndexUserConfigLock.Unlock() + + configs := make(map[string]schemaConfig.VectorIndexConfig, len(i.vectorIndexUserConfigs)+1) + for k, v := range i.vectorIndexUserConfigs { + configs[k] = v + } + + if i.vectorIndexUserConfig != nil { + configs[""] = i.vectorIndexUserConfig + } + + return configs +} + +func convertToVectorIndexConfig(config interface{}) schemaConfig.VectorIndexConfig { + if config == nil { + return nil + } + // in case legacy vector config was set as an empty map/object instead of nil + if empty, ok := config.(map[string]interface{}); ok && len(empty) == 0 { + return nil + } + // Safe type assertion + if vectorIndexConfig, ok := config.(schemaConfig.VectorIndexConfig); ok { + return vectorIndexConfig + } + return nil +} + +func convertToVectorIndexConfigs(configs map[string]models.VectorConfig) map[string]schemaConfig.VectorIndexConfig { + if len(configs) > 0 { + vectorIndexConfigs := make(map[string]schemaConfig.VectorIndexConfig) + for targetVector, vectorConfig := range configs { + if vectorIndexConfig, ok := vectorConfig.VectorIndexConfig.(schemaConfig.VectorIndexConfig); ok { + vectorIndexConfigs[targetVector] = vectorIndexConfig + } + } + return vectorIndexConfigs + } + return nil +} + +// IMPORTANT: +// DebugResetVectorIndex is intended to be used for debugging purposes only. +// It drops the selected vector index, creates a new one, then reindexes it in the background. 
+// This function assumes the node is not receiving any traffic besides the +// debug endpoints and that async indexing is enabled. +func (i *Index) DebugResetVectorIndex(ctx context.Context, shardName, targetVector string) error { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return err + } + if shard == nil { + return errors.New("shard not found") + } + defer release() + + // Get the vector index + vidx, ok := shard.GetVectorIndex(targetVector) + if !ok { + return errors.New("vector index not found") + } + + if !hnsw.IsHNSWIndex(vidx) { + return errors.New("vector index is not hnsw") + } + + // Reset the vector index + err = shard.DebugResetVectorIndex(ctx, targetVector) + if err != nil { + return errors.Wrap(err, "failed to reset vector index") + } + + // Reindex in the background + enterrors.GoWrapper(func() { + err = shard.FillQueue(targetVector, 0) + if err != nil { + i.logger.WithField("shard", shardName).WithError(err).Error("failed to reindex vector index") + return + } + }, i.logger) + + return nil +} + +func (i *Index) DebugRepairIndex(ctx context.Context, shardName, targetVector string) error { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return err + } + if shard == nil { + return errors.New("shard not found") + } + defer release() + + // Repair in the background + enterrors.GoWrapper(func() { + err := shard.RepairIndex(context.Background(), targetVector) + if err != nil { + i.logger.WithField("shard", shardName).WithError(err).Error("failed to repair vector index") + return + } + }, i.logger) + + return nil +} + +// calcTargetVectorDimensionsFromStore calculates dimensions and object count for a target vector from an LSMKV store +func calcTargetVectorDimensionsFromStore(ctx context.Context, store *lsmkv.Store, targetVector string, calcEntry func(dimLen int, v []lsmkv.MapPair) (int, int)) usagetypes.Dimensionality { + b := store.Bucket(helpers.DimensionsBucketLSM) + if b == nil { + return 
usagetypes.Dimensionality{} + } + return calcTargetVectorDimensionsFromBucket(ctx, b, targetVector, calcEntry) +} + +// calcTargetVectorDimensionsFromBucket calculates dimensions and object count for a target vector from an LSMKV bucket +func calcTargetVectorDimensionsFromBucket(ctx context.Context, b *lsmkv.Bucket, targetVector string, calcEntry func(dimLen int, v []lsmkv.MapPair) (int, int)) usagetypes.Dimensionality { + c := b.MapCursor() + defer c.Close() + + var ( + nameLen = len(targetVector) + expectedKeyLen = 4 + nameLen + dimensionality = usagetypes.Dimensionality{} + ) + + for k, v := c.First(ctx); k != nil; k, v = c.Next(ctx) { + // for named vectors we have to additionally check if the key is prefixed with the vector name + keyMatches := len(k) == expectedKeyLen && (nameLen == 4 || strings.HasPrefix(string(k), targetVector)) + if !keyMatches { + continue + } + + dimLength := int(binary.LittleEndian.Uint32(k[nameLen:])) + size, dim := calcEntry(dimLength, v) + if dimensionality.Dimensions == 0 && dim > 0 { + dimensionality.Dimensions = dim + } + dimensionality.Count += size + } + + return dimensionality +} + +// CalculateUnloadedObjectsMetrics calculates both object count and storage size for a cold tenant without loading it into memory +func (i *Index) CalculateUnloadedObjectsMetrics(ctx context.Context, tenantName string) (usagetypes.ObjectUsage, error) { + // Obtain a lock that prevents tenant activation + i.shardCreateLocks.Lock(tenantName) + defer i.shardCreateLocks.Unlock(tenantName) + + // check if created in the meantime by concurrent call + if shard := i.shards.Loaded(tenantName); shard != nil { + size, err := shard.ObjectStorageSize(ctx) + if err != nil { + return usagetypes.ObjectUsage{}, err + } + + count, err := shard.ObjectCountAsync(ctx) + if err != nil { + return usagetypes.ObjectUsage{}, err + } + + return usagetypes.ObjectUsage{ + Count: count, + StorageBytes: size, + }, nil + } + + // Parse all .cna files in the object store and sum 
them up + totalObjectCount := int64(0) + totalDiskSize := int64(0) + + // Use a single walk to avoid multiple filepath.Walk calls and reduce file descriptors + if err := filepath.Walk(shardPathObjectsLSM(i.path(), tenantName), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Only count files, not directories + if !info.IsDir() { + totalDiskSize += info.Size() + + // Look for .cna files (net count additions) + if strings.HasSuffix(info.Name(), lsmkv.CountNetAdditionsFileSuffix) { + count, err := lsmkv.ReadCountNetAdditionsFile(path) + if err != nil { + i.logger.WithField("path", path).WithError(err).Warn("failed to read .cna file") + return err + } + totalObjectCount += count + } + + // Look for .metadata files (bloom filters + count net additions) + if strings.HasSuffix(info.Name(), lsmkv.MetadataFileSuffix) { + count, err := lsmkv.ReadObjectCountFromMetadataFile(path) + if err != nil { + i.logger.WithField("path", path).WithError(err).Warn("failed to read .metadata file") + return err + } + totalObjectCount += count + } + } + + return nil + }); err != nil { + return usagetypes.ObjectUsage{}, err + } + + // If we can't determine object count, return the disk size as fallback + return usagetypes.ObjectUsage{ + Count: totalObjectCount, + StorageBytes: totalDiskSize, + }, nil +} + +// CalculateUnloadedDimensionsUsage calculates dimensions and object count for an unloaded shard without loading it into memory +func (i *Index) CalculateUnloadedDimensionsUsage(ctx context.Context, tenantName, targetVector string) (usagetypes.Dimensionality, error) { + // Obtain a lock that prevents tenant activation + i.shardCreateLocks.Lock(tenantName) + defer i.shardCreateLocks.Unlock(tenantName) + + // check if created in the meantime by concurrent call + if shard := i.shards.Loaded(tenantName); shard != nil { + return shard.DimensionsUsage(ctx, targetVector) + } + + bucket, err := lsmkv.NewBucketCreator().NewBucket(ctx, + 
shardPathDimensionsLSM(i.path(), tenantName), + i.path(), + i.logger, + nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + ) + if err != nil { + return usagetypes.Dimensionality{}, err + } + defer bucket.Shutdown(ctx) + + dimensionality := calcTargetVectorDimensionsFromBucket(ctx, bucket, targetVector, func(dimLen int, v []lsmkv.MapPair) (int, int) { + return len(v), dimLen + }) + + return dimensionality, nil +} + +// CalculateUnloadedVectorsMetrics calculates vector storage size for a cold tenant without loading it into memory +func (i *Index) CalculateUnloadedVectorsMetrics(ctx context.Context, tenantName string) (int64, error) { + // Obtain a lock that prevents tenant activation + i.shardCreateLocks.Lock(tenantName) + defer i.shardCreateLocks.Unlock(tenantName) + + // check if created in the meantime by concurrent call + if shard := i.shards.Loaded(tenantName); shard != nil { + return shard.VectorStorageSize(ctx) + } + + totalSize := int64(0) + + // For each target vector, calculate storage size using dimensions bucket and config-based compression + for targetVector, config := range i.GetVectorIndexConfigs() { + // Get dimensions and object count from the dimensions bucket + bucket, err := lsmkv.NewBucketCreator().NewBucket(ctx, + shardPathDimensionsLSM(i.path(), tenantName), + i.path(), + i.logger, + nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + ) + if err != nil { + return 0, err + } + + dimensionality := calcTargetVectorDimensionsFromBucket(ctx, bucket, targetVector, func(dimLen int, v []lsmkv.MapPair) (int, int) { + return len(v), dimLen + }) + bucket.Shutdown(ctx) + + if dimensionality.Count == 0 || dimensionality.Dimensions == 0 { + continue + } + + // Calculate uncompressed size (float32 = 4 bytes per dimension) + uncompressedSize := int64(dimensionality.Count) * int64(dimensionality.Dimensions) * 4 + + // For inactive tenants, use vector index config for dimension tracking + // This 
is similar to the original shard dimension tracking approach + totalSize += int64(float64(uncompressedSize) * helpers.CompressionRatioFromConfig(config, dimensionality.Dimensions)) + } + + return totalSize, nil +} + +func (i *Index) buildReadRoutingPlan(cl routerTypes.ConsistencyLevel, tenantName string) (routerTypes.ReadRoutingPlan, error) { + planOptions := routerTypes.RoutingPlanBuildOptions{ + Tenant: tenantName, + ConsistencyLevel: cl, + } + readPlan, err := i.router.BuildReadRoutingPlan(planOptions) + if err != nil { + return routerTypes.ReadRoutingPlan{}, err + } + + return readPlan, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_cyclecallbacks.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_cyclecallbacks.go new file mode 100644 index 0000000000000000000000000000000000000000..8967734c45f989a5c5783da213cc268db5e988c4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_cyclecallbacks.go @@ -0,0 +1,162 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "strings" + "time" + + "github.com/weaviate/weaviate/entities/concurrency" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +type indexCycleCallbacks struct { + compactionCallbacks cyclemanager.CycleCallbackGroup + compactionCycle cyclemanager.CycleManager + compactionAuxCallbacks cyclemanager.CycleCallbackGroup + compactionAuxCycle cyclemanager.CycleManager + + flushCallbacks cyclemanager.CycleCallbackGroup + flushCycle cyclemanager.CycleManager + + vectorCommitLoggerCallbacks cyclemanager.CycleCallbackGroup + vectorCommitLoggerCycle cyclemanager.CycleManager + vectorTombstoneCleanupCallbacks cyclemanager.CycleCallbackGroup + vectorTombstoneCleanupCycle cyclemanager.CycleManager + + geoPropsCommitLoggerCallbacks cyclemanager.CycleCallbackGroup + geoPropsCommitLoggerCycle cyclemanager.CycleManager + geoPropsTombstoneCleanupCallbacks cyclemanager.CycleCallbackGroup + geoPropsTombstoneCleanupCycle cyclemanager.CycleManager +} + +func (index *Index) initCycleCallbacks() { + routinesN := concurrency.TimesNUMCPU(index.Config.CycleManagerRoutinesFactor) + + vectorTombstoneCleanupIntervalSeconds := hnsw.DefaultCleanupIntervalSeconds + if hnswUserConfig, ok := index.GetVectorIndexConfig("").(hnsw.UserConfig); ok { + vectorTombstoneCleanupIntervalSeconds = hnswUserConfig.CleanupIntervalSeconds + } + + id := func(elems ...string) string { + elems = append([]string{"index", index.ID()}, elems...) 
+ return strings.Join(elems, "/") + } + + var compactionCycle cyclemanager.CycleManager + var compactionCallbacks cyclemanager.CycleCallbackGroup + var compactionAuxCycle cyclemanager.CycleManager + var compactionAuxCallbacks cyclemanager.CycleCallbackGroup + + if !index.Config.SeparateObjectsCompactions { + compactionCallbacks = cyclemanager.NewCallbackGroup(id("compaction"), index.logger, routinesN) + compactionCycle = cyclemanager.NewManager( + cyclemanager.CompactionCycleTicker(), + compactionCallbacks.CycleCallback, index.logger) + compactionAuxCycle = cyclemanager.NewManagerNoop() + } else { + routinesNDiv2 := routinesN / 2 + if routinesNDiv2 < 1 { + routinesNDiv2 = 1 + } + compactionCallbacks = cyclemanager.NewCallbackGroup(id("compaction-non-objects"), index.logger, routinesNDiv2) + compactionCycle = cyclemanager.NewManager( + cyclemanager.CompactionCycleTicker(), + compactionCallbacks.CycleCallback, index.logger) + compactionAuxCallbacks = cyclemanager.NewCallbackGroup(id("compaction-objects"), index.logger, routinesNDiv2) + compactionAuxCycle = cyclemanager.NewManager( + cyclemanager.CompactionCycleTicker(), + compactionAuxCallbacks.CycleCallback, index.logger) + } + + flushCallbacks := cyclemanager.NewCallbackGroup(id("flush"), index.logger, routinesN) + flushCycle := cyclemanager.NewManager( + cyclemanager.MemtableFlushCycleTicker(), + flushCallbacks.CycleCallback, index.logger) + + vectorCommitLoggerCallbacks := cyclemanager.NewCallbackGroup(id("vector", "commit_logger"), index.logger, routinesN) + // Previously we had an interval of 10s in here, which was changed to + // 0.5s as part of gh-1867. There's really no way to wait so long in + // between checks: If you are running on a low-powered machine, the + // interval will simply find that there is no work and do nothing in + // each iteration. 
However, if you are running on a very powerful + // machine within 10s you could have potentially created two units of + // work, but we'll only be handling one every 10s. This means + // uncombined/uncondensed hnsw commit logs will keep piling up can only + // be processes long after the initial insert is complete. This also + // means that if there is a crash during importing a lot of work needs + // to be done at startup, since the commit logs still contain too many + // redundancies. So as of now it seems there are only advantages to + // running the cleanup checks and work much more often. + // + // update: switched to dynamic intervals with values between 500ms and 10s + // introduced to address https://github.com/weaviate/weaviate/issues/2783 + vectorCommitLoggerCycle := cyclemanager.NewManager( + cyclemanager.HnswCommitLoggerCycleTicker(), + vectorCommitLoggerCallbacks.CycleCallback, index.logger) + + vectorTombstoneCleanupCallbacks := cyclemanager.NewCallbackGroup(id("vector", "tombstone_cleanup"), index.logger, routinesN) + vectorTombstoneCleanupCycle := cyclemanager.NewManager( + cyclemanager.NewFixedTicker(time.Duration(vectorTombstoneCleanupIntervalSeconds)*time.Second), + vectorTombstoneCleanupCallbacks.CycleCallback, index.logger) + + geoPropsCommitLoggerCallbacks := cyclemanager.NewCallbackGroup(id("geo_props", "commit_logger"), index.logger, routinesN) + geoPropsCommitLoggerCycle := cyclemanager.NewManager( + cyclemanager.GeoCommitLoggerCycleTicker(), + geoPropsCommitLoggerCallbacks.CycleCallback, index.logger) + + geoPropsTombstoneCleanupCallbacks := cyclemanager.NewCallbackGroup(id("geo_props", "tombstone_cleanup"), index.logger, routinesN) + geoPropsTombstoneCleanupCycle := cyclemanager.NewManager( + cyclemanager.NewFixedTicker(hnsw.DefaultCleanupIntervalSeconds*time.Second), + geoPropsTombstoneCleanupCallbacks.CycleCallback, index.logger) + + index.cycleCallbacks = &indexCycleCallbacks{ + compactionCallbacks: compactionCallbacks, + 
compactionCycle: compactionCycle, + compactionAuxCallbacks: compactionAuxCallbacks, + compactionAuxCycle: compactionAuxCycle, + flushCallbacks: flushCallbacks, + flushCycle: flushCycle, + + vectorCommitLoggerCallbacks: vectorCommitLoggerCallbacks, + vectorCommitLoggerCycle: vectorCommitLoggerCycle, + vectorTombstoneCleanupCallbacks: vectorTombstoneCleanupCallbacks, + vectorTombstoneCleanupCycle: vectorTombstoneCleanupCycle, + + geoPropsCommitLoggerCallbacks: geoPropsCommitLoggerCallbacks, + geoPropsCommitLoggerCycle: geoPropsCommitLoggerCycle, + geoPropsTombstoneCleanupCallbacks: geoPropsTombstoneCleanupCallbacks, + geoPropsTombstoneCleanupCycle: geoPropsTombstoneCleanupCycle, + } +} + +func (index *Index) initCycleCallbacksNoop() { + index.cycleCallbacks = &indexCycleCallbacks{ + compactionCallbacks: cyclemanager.NewCallbackGroupNoop(), + compactionCycle: cyclemanager.NewManagerNoop(), + compactionAuxCallbacks: cyclemanager.NewCallbackGroupNoop(), + compactionAuxCycle: cyclemanager.NewManagerNoop(), + flushCallbacks: cyclemanager.NewCallbackGroupNoop(), + flushCycle: cyclemanager.NewManagerNoop(), + + vectorCommitLoggerCallbacks: cyclemanager.NewCallbackGroupNoop(), + vectorCommitLoggerCycle: cyclemanager.NewManagerNoop(), + vectorTombstoneCleanupCallbacks: cyclemanager.NewCallbackGroupNoop(), + vectorTombstoneCleanupCycle: cyclemanager.NewManagerNoop(), + + geoPropsCommitLoggerCallbacks: cyclemanager.NewCallbackGroupNoop(), + geoPropsCommitLoggerCycle: cyclemanager.NewManagerNoop(), + geoPropsTombstoneCleanupCallbacks: cyclemanager.NewCallbackGroupNoop(), + geoPropsTombstoneCleanupCycle: cyclemanager.NewManagerNoop(), + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3574a5577514e30eff3513c44cff0d8ec712fd23 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_integration_test.go @@ -0,0 +1,1111 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package db + +import ( + "context" + "fmt" + "math/rand" + "os" + "path" + "path/filepath" + "testing" + "time" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + routerTypes "github.com/weaviate/weaviate/cluster/router/types" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/indexcheckpoint" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/queue" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/replication" + "github.com/weaviate/weaviate/entities/schema" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + "github.com/weaviate/weaviate/entities/storagestate" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/entities/vectorindex/flat" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +func TestIndex_DropIndex(t *testing.T) { + dirName := t.TempDir() + shardState := singleShardState() + class := &models.Class{Class: "deletetest", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: 
shardState.PartitioningEnabled}} + index := emptyIdx(t, dirName, class, shardState) + + indexFilesBeforeDelete, err := getIndexFilenames(dirName, class.Class) + require.Nil(t, err) + + err = index.drop() + require.Nil(t, err) + + indexFilesAfterDelete, err := getIndexFilenames(dirName, class.Class) + require.Nil(t, err) + + assert.Equal(t, 6, len(indexFilesBeforeDelete)) + assert.Equal(t, 0, len(indexFilesAfterDelete)) +} + +func TestIndex_DropEmptyAndRecreateEmptyIndex(t *testing.T) { + dirName := t.TempDir() + shardState := singleShardState() + class := &models.Class{Class: "deletetest", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: shardState.PartitioningEnabled}} + index := emptyIdx(t, dirName, class, shardState) + + indexFilesBeforeDelete, err := getIndexFilenames(dirName, class.Class) + require.Nil(t, err) + + // drop the index + err = index.drop() + require.Nil(t, err) + + indexFilesAfterDelete, err := getIndexFilenames(dirName, class.Class) + require.Nil(t, err) + + index = emptyIdx(t, dirName, class, shardState) + + indexFilesAfterRecreate, err := getIndexFilenames(dirName, class.Class) + require.Nil(t, err) + + assert.Equal(t, 6, len(indexFilesBeforeDelete)) + assert.Equal(t, 0, len(indexFilesAfterDelete)) + assert.Equal(t, 6, len(indexFilesAfterRecreate)) + + err = index.drop() + require.Nil(t, err) +} + +func TestIndex_DropWithDataAndRecreateWithDataIndex(t *testing.T) { + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + shardState := singleShardState() + class := &models.Class{ + Class: "deletetest", + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + InvertedIndexConfig: &models.InvertedIndexConfig{ + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: shardState.PartitioningEnabled, + }, + } + fakeSchema := schema.Schema{ + Objects: &models.Schema{ + 
Classes: []*models.Class{ + class, + }, + }, + } + // create index with data + scheduler := queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + Workers: 1, + }) + shardName := "" + nodeName := "" + for _, physical := range shardState.Physical { + shardName = physical.Name + nodeName = physical.BelongsToNodes[0] + break + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(class.Class, mock.Anything). + RunAndReturn(func(className string, readerFunc func(*models.Class, *sharding.State) error) error { + return readerFunc(class, shardState) + }).Maybe() + router := routerTypes.NewMockRouter(t) + router.EXPECT().GetWriteReplicasLocation(class.Class, mock.Anything, shardName).Return( + routerTypes.WriteReplicaSet{ + Replicas: []routerTypes.Replica{{NodeName: nodeName, ShardName: shardName, HostAddr: "10.12.135.43"}}, + AdditionalReplicas: nil, + }, nil).Maybe() + router.EXPECT().GetReadReplicasLocation(class.Class, mock.Anything, shardName).Return( + routerTypes.ReadReplicaSet{ + Replicas: []routerTypes.Replica{{NodeName: nodeName, ShardName: shardName, HostAddr: "10.12.135.43"}}, + }, nil).Maybe() + + index, err := NewIndex(testCtx(), IndexConfig{ + RootPath: dirName, + ClassName: schema.ClassName(class.Class), + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + hnsw.NewDefaultUserConfig(), nil, router, &fakeSchemaGetter{ + schema: fakeSchema, shardState: shardState, + }, mockSchemaReader, nil, logger, nil, nil, nil, &replication.GlobalConfig{}, nil, class, nil, scheduler, nil, nil, + NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.Nil(t, err) + + productsIds := []strfmt.UUID{ + "1295c052-263d-4aae-99dd-920c5a370d06", + "1295c052-263d-4aae-99dd-920c5a370d07", + } + + products := []map[string]interface{}{ + {"name": "one"}, + {"name": "two"}, + } + + err = index.addProperty(context.TODO(), 
&models.Property{ + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }) + require.Nil(t, err) + + for i, p := range products { + product := models.Object{ + Class: class.Class, + ID: productsIds[i], + Properties: p, + } + + err := index.putObject(context.TODO(), storobj.FromObject( + &product, []float32{0.1, 0.2, 0.01, 0.2}, nil, nil), nil, product.Tenant, 0) + require.Nil(t, err) + } + + indexFilesBeforeDelete, err := getIndexFilenames(dirName, class.Class) + require.Nil(t, err) + + beforeDeleteObj1, err := index.objectByID(context.TODO(), + productsIds[0], nil, additional.Properties{}, nil, "") + require.Nil(t, err) + + beforeDeleteObj2, err := index.objectByID(context.TODO(), + productsIds[1], nil, additional.Properties{}, nil, "") + require.Nil(t, err) + + // drop the index + err = index.drop() + require.Nil(t, err) + + indexFilesAfterDelete, err := getIndexFilenames(dirName, class.Class) + require.Nil(t, err) + // recreate the index + index, err = NewIndex(testCtx(), IndexConfig{ + RootPath: dirName, + ClassName: schema.ClassName(class.Class), + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + hnsw.NewDefaultUserConfig(), nil, router, &fakeSchemaGetter{ + schema: fakeSchema, + shardState: shardState, + }, mockSchemaReader, nil, logger, nil, nil, nil, &replication.GlobalConfig{}, nil, class, nil, scheduler, nil, nil, + NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.Nil(t, err) + + err = index.addProperty(context.TODO(), &models.Property{ + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }) + require.Nil(t, err) + + indexFilesAfterRecreate, err := getIndexFilenames(dirName, class.Class) + require.Nil(t, err) + + afterRecreateObj1, err := index.objectByID(context.TODO(), + productsIds[0], nil, 
additional.Properties{}, nil, "") + require.Nil(t, err) + + afterRecreateObj2, err := index.objectByID(context.TODO(), + productsIds[1], nil, additional.Properties{}, nil, "") + require.Nil(t, err) + + // insert some data in the recreated index + for i, p := range products { + thing := models.Object{ + Class: class.Class, + ID: productsIds[i], + Properties: p, + } + + err := index.putObject(context.TODO(), storobj.FromObject( + &thing, []float32{0.1, 0.2, 0.01, 0.2}, nil, nil), nil, thing.Tenant, 0) + require.Nil(t, err) + } + + afterRecreateAndInsertObj1, err := index.objectByID(context.TODO(), + productsIds[0], nil, additional.Properties{}, nil, "") + require.Nil(t, err) + + afterRecreateAndInsertObj2, err := index.objectByID(context.TODO(), + productsIds[1], nil, additional.Properties{}, nil, "") + require.Nil(t, err) + + // update the index vectorIndexUserConfig + beforeVectorConfig, ok := index.GetVectorIndexConfig("").(hnsw.UserConfig) + require.Equal(t, -1, beforeVectorConfig.EF) + require.True(t, ok) + beforeVectorConfig.EF = 99 + err = index.updateVectorIndexConfig(context.TODO(), beforeVectorConfig) + require.Nil(t, err) + afterVectorConfig, ok := index.GetVectorIndexConfig("").(hnsw.UserConfig) + require.True(t, ok) + require.Equal(t, 99, afterVectorConfig.EF) + + assert.Equal(t, 6, len(indexFilesBeforeDelete)) + assert.Equal(t, 0, len(indexFilesAfterDelete)) + assert.Equal(t, 6, len(indexFilesAfterRecreate)) + assert.Equal(t, indexFilesBeforeDelete, indexFilesAfterRecreate) + assert.NotNil(t, beforeDeleteObj1) + assert.NotNil(t, beforeDeleteObj2) + assert.Empty(t, afterRecreateObj1) + assert.Empty(t, afterRecreateObj2) + assert.NotNil(t, afterRecreateAndInsertObj1) + assert.NotNil(t, afterRecreateAndInsertObj2) +} + +func TestIndex_AddNewVectorIndex(t *testing.T) { + var ( + ctx = testCtx() + initialClass = &models.Class{Class: "ClassName"} + shard, index = testShard(t, ctx, initialClass.Class) + ) + + _, ok := shard.GetVectorIndex("new_index") + 
require.False(t, ok) + + require.NoError(t, index.updateVectorIndexConfigs(ctx, map[string]schemaConfig.VectorIndexConfig{ + "new_index": hnsw.UserConfig{ + Distance: "cosine", + }, + })) + + vectorIndex, ok := shard.GetVectorIndex("new_index") + require.True(t, ok) + require.NotNil(t, vectorIndex) +} + +func TestIndex_DropReadOnlyEmptyIndex(t *testing.T) { + ctx := testCtx() + class := &models.Class{Class: "deletetest"} + shard, index := testShard(t, ctx, class.Class) + + tenantName := "" + if index.partitioningEnabled { + tenantName = shard.Name() + } + + err := index.updateShardStatus(ctx, tenantName, shard.Name(), storagestate.StatusReadOnly.String(), 0) + require.Nil(t, err) + + err = index.drop() + require.Nil(t, err) +} + +func TestIndex_DropReadOnlyEmptyIndex_MultiTenant(t *testing.T) { + ctx := testCtx() + class := &models.Class{Class: "deletetest"} + shard, index := testShardMultiTenant(t, ctx, class.Class) + + tenantName := "" + if index.partitioningEnabled { + tenantName = shard.Name() + } + + err := index.updateShardStatus(ctx, tenantName, shard.Name(), storagestate.StatusReadOnly.String(), 0) + require.Nil(t, err) + + err = index.drop() + require.Nil(t, err) +} + +func TestIndex_DropReadOnlyIndexWithData(t *testing.T) { + ctx := testCtx() + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + class := &models.Class{ + Class: "deletetest", + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + InvertedIndexConfig: &models.InvertedIndexConfig{ + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + } + fakeSchema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + class, + }, + }, + } + + shardState := singleShardState() + scheduler := queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + Workers: 1, + }) + shardName := "" + nodeName := "" + for _, physical := range shardState.Physical { + shardName 
= physical.Name + nodeName = physical.BelongsToNodes[0] + break + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(class.Class, mock.Anything). + RunAndReturn(func(className string, readerFunc func(*models.Class, *sharding.State) error) error { + return readerFunc(class, shardState) + }).Maybe() + router := routerTypes.NewMockRouter(t) + router.EXPECT().GetWriteReplicasLocation(class.Class, mock.Anything, shardName).Return( + routerTypes.WriteReplicaSet{ + Replicas: []routerTypes.Replica{{NodeName: nodeName, ShardName: shardName, HostAddr: "10.12.135.43"}}, + AdditionalReplicas: nil, + }, nil).Maybe() + router.EXPECT().GetReadReplicasLocation(class.Class, mock.Anything, shardName).Return( + routerTypes.ReadReplicaSet{ + Replicas: []routerTypes.Replica{{NodeName: nodeName, ShardName: shardName, HostAddr: "10.12.135.43"}}, + }, nil).Maybe() + index, err := NewIndex(ctx, IndexConfig{ + RootPath: dirName, + ClassName: schema.ClassName(class.Class), + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + hnsw.NewDefaultUserConfig(), nil, router, &fakeSchemaGetter{ + schema: fakeSchema, shardState: shardState, + }, mockSchemaReader, nil, logger, nil, nil, nil, &replication.GlobalConfig{}, nil, class, nil, scheduler, nil, nil, + NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.Nil(t, err) + + productsIds := []strfmt.UUID{ + "1295c052-263d-4aae-99dd-920c5a370d06", + "1295c052-263d-4aae-99dd-920c5a370d07", + } + + products := []map[string]interface{}{ + {"name": "one"}, + {"name": "two"}, + } + + err = index.addProperty(ctx, &models.Property{ + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }) + require.Nil(t, err) + + for i, p := range products { + product := models.Object{ + Class: class.Class, + ID: productsIds[i], + Properties: p, + } + + err 
:= index.putObject(ctx, storobj.FromObject( + &product, []float32{0.1, 0.2, 0.01, 0.2}, nil, nil), nil, product.Tenant, 0) + require.Nil(t, err) + } + + // set all shards to readonly + index.ForEachShard(func(name string, shard ShardLike) error { + err = shard.UpdateStatus(storagestate.StatusReadOnly.String(), "test readonly") + require.Nil(t, err) + return nil + }) + + err = index.drop() + require.Nil(t, err) +} + +func TestIndex_DropUnloadedShard(t *testing.T) { + t.Setenv("ASYNC_INDEXING", "true") + + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + shardState := singleShardState() + class := &models.Class{ + Class: "deletetest", + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + InvertedIndexConfig: &models.InvertedIndexConfig{ + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: shardState.PartitioningEnabled, + }, + } + fakeSchema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + class, + }, + }, + } + + // create a checkpoint file + cpFile, err := indexcheckpoint.New(dirName, logger) + require.Nil(t, err) + defer cpFile.Close() + + // create index + scheduler := queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + Workers: 1, + }) + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(class.Class, mock.Anything). 
+ RunAndReturn(func(className string, readerFunc func(*models.Class, *sharding.State) error) error { + return readerFunc(class, shardState) + }).Maybe() + router := routerTypes.NewMockRouter(t) + index, err := NewIndex(testCtx(), IndexConfig{ + RootPath: dirName, + ClassName: schema.ClassName(class.Class), + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + hnsw.NewDefaultUserConfig(), nil, router, &fakeSchemaGetter{ + schema: fakeSchema, shardState: shardState, + }, mockSchemaReader, nil, logger, nil, nil, nil, &replication.GlobalConfig{}, nil, class, nil, scheduler, cpFile, nil, + NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.Nil(t, err) + + // at this point the shard is not loaded yet. + // update the checkpoint file to simulate a previously loaded shard + var shardName string + for name := range shardState.Physical { + shardName = name + break + } + require.NotEmpty(t, shardName) + shardID := fmt.Sprintf("%s_%s", index.ID(), shardName) + err = cpFile.Update(shardID, "", 10) + require.Nil(t, err) + + // drop the index before loading the shard + err = index.drop() + require.Nil(t, err) + + // ensure the checkpoint file is not deleted + _, err = os.Stat(filepath.Join(dirName, "index.db")) + require.Nil(t, err) + + // ensure the shard checkpoint is deleted + v, ok, err := cpFile.Get(shardID, "") + require.Nil(t, err) + require.False(t, ok) + require.Zero(t, v) +} + +func TestIndex_DropLoadedShard(t *testing.T) { + t.Setenv("ASYNC_INDEXING", "true") + + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + shardState := singleShardState() + class := &models.Class{ + Class: "deletetest", + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + InvertedIndexConfig: &models.InvertedIndexConfig{ + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: 
shardState.PartitioningEnabled, + }, + } + fakeSchema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + class, + }, + }, + } + + cpFile, err := indexcheckpoint.New(dirName, logger) + require.Nil(t, err) + defer cpFile.Close() + + // create index + scheduler := queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + Workers: 1, + }) + shardName := "" + nodeName := "" + for _, physical := range shardState.Physical { + shardName = physical.Name + nodeName = physical.BelongsToNodes[0] + break + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(class.Class, mock.Anything). + RunAndReturn(func(className string, readerFunc func(*models.Class, *sharding.State) error) error { + return readerFunc(class, shardState) + }).Maybe() + router := routerTypes.NewMockRouter(t) + router.EXPECT().GetWriteReplicasLocation(class.Class, mock.Anything, shardName).Return( + routerTypes.WriteReplicaSet{ + Replicas: []routerTypes.Replica{{NodeName: nodeName, ShardName: shardName, HostAddr: "10.12.135.43"}}, + AdditionalReplicas: nil, + }, nil).Maybe() + router.EXPECT().GetReadReplicasLocation(class.Class, mock.Anything, shardName).Return( + routerTypes.ReadReplicaSet{ + Replicas: []routerTypes.Replica{{NodeName: nodeName, ShardName: shardName, HostAddr: "10.12.135.43"}}, + }, nil).Maybe() + index, err := NewIndex(testCtx(), IndexConfig{ + RootPath: dirName, + ClassName: schema.ClassName(class.Class), + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + hnsw.NewDefaultUserConfig(), nil, router, &fakeSchemaGetter{ + schema: fakeSchema, shardState: shardState, + }, mockSchemaReader, nil, logger, nil, nil, nil, &replication.GlobalConfig{}, nil, class, nil, scheduler, cpFile, nil, + NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.Nil(t, err) + + // force the index to load the shard + productsIds := 
[]strfmt.UUID{ + "1295c052-263d-4aae-99dd-920c5a370d06", + "1295c052-263d-4aae-99dd-920c5a370d07", + } + + products := []map[string]interface{}{ + {"name": "one"}, + {"name": "two"}, + } + + err = index.addProperty(context.TODO(), &models.Property{ + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }) + require.Nil(t, err) + + for i, p := range products { + product := models.Object{ + Class: class.Class, + ID: productsIds[i], + Properties: p, + } + + err := index.putObject(context.TODO(), storobj.FromObject( + &product, []float32{0.1, 0.2, 0.01, 0.2}, nil, nil), nil, product.Tenant, 0) + require.Nil(t, err) + } + + // drop the index + err = index.drop() + require.Nil(t, err) + + // ensure the checkpoint file is not deleted + _, err = os.Stat(filepath.Join(dirName, "index.db")) + require.Nil(t, err) +} + +func emptyIdx(t *testing.T, rootDir string, class *models.Class, shardState *sharding.State) *Index { + logger, _ := test.NewNullLogger() + scheduler := queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + Workers: 1, + }) + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(class.Class, mock.Anything). 
+ RunAndReturn(func(className string, readerFunc func(*models.Class, *sharding.State) error) error { + return readerFunc(class, shardState) + }).Maybe() + router := routerTypes.NewMockRouter(t) + idx, err := NewIndex(testCtx(), IndexConfig{ + RootPath: rootDir, + ClassName: schema.ClassName(class.Class), + DisableLazyLoadShards: true, + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + }, inverted.ConfigFromModel(invertedConfig()), + hnsw.NewDefaultUserConfig(), nil, router, &fakeSchemaGetter{ + shardState: shardState, + }, mockSchemaReader, nil, logger, nil, nil, nil, &replication.GlobalConfig{}, nil, class, nil, scheduler, nil, nil, + NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.Nil(t, err) + return idx +} + +func getIndexFilenames(rootDir, indexName string) ([]string, error) { + var filenames []string + indexRoot, err := os.ReadDir(path.Join(rootDir, indexName)) + if err != nil { + if os.IsNotExist(err) { + // index was dropped, or never existed + return filenames, nil + } + return nil, err + } + if len(indexRoot) == 0 { + return nil, fmt.Errorf("index root length is 0") + } + shardFiles, err := os.ReadDir(path.Join(rootDir, indexName, indexRoot[0].Name())) + if err != nil { + return filenames, err + } + for _, f := range shardFiles { + filenames = append(filenames, f.Name()) + } + return filenames, nil +} + +func TestIndex_DebugResetVectorIndex(t *testing.T) { + t.Setenv("ASYNC_INDEXING", "true") + t.Setenv("ASYNC_INDEXING_STALE_TIMEOUT", "100ms") + + ctx := context.Background() + class := &models.Class{Class: "reindextest"} + shard, index := testShardWithSettings(t, ctx, &models.Class{Class: class.Class}, hnsw.UserConfig{}, false, true /* withCheckpoints */) + + // unknown shard + err := index.DebugResetVectorIndex(ctx, "unknown", "") + require.Error(t, err) + + // unknown target vector + err = index.DebugResetVectorIndex(ctx, shard.Name(), "unknown") + require.Error(t, err) + + amount := 
1000 + + var objs []*storobj.Object + for i := 0; i < amount; i++ { + obj := testObject("reindextest") + obj.Vector = randVector(3) + objs = append(objs, obj) + } + + errs := shard.PutObjectBatch(ctx, objs) + for _, err := range errs { + require.Nil(t, err) + } + + vidx, q := getVectorIndexAndQueue(t, shard, "") + + // wait until the queue is empty + for i := 0; i < 10; i++ { + time.Sleep(500 * time.Millisecond) + if q.Size() == 0 { + break + } + } + + // wait for the in-flight indexing to finish + q.Wait() + + // make sure the new index contains all the objects + for _, obj := range objs { + if !vidx.ContainsDoc(obj.DocID) { + t.Fatalf("node %d should be in the vector index", obj.DocID) + } + } + + err = index.DebugResetVectorIndex(ctx, shard.Name(), "") + require.Nil(t, err) + + // wait until the queue is empty + for i := 0; i < 10; i++ { + time.Sleep(500 * time.Millisecond) + if q.Size() == 0 { + break + } + } + + // wait for the in-flight indexing to finish + q.Wait() + + // make sure the new index contains all the objects + for _, obj := range objs { + if !vidx.ContainsDoc(obj.DocID) { + t.Fatalf("node %d should be in the vector index", obj.DocID) + } + } + + err = index.drop() + require.Nil(t, err) +} + +func TestIndex_DebugResetVectorIndexTargetVector(t *testing.T) { + t.Setenv("ASYNC_INDEXING", "true") + t.Setenv("ASYNC_INDEXING_STALE_TIMEOUT", "100ms") + + ctx := context.Background() + class := &models.Class{Class: "reindextest"} + shard, index := testShardWithSettings( + t, + ctx, + &models.Class{Class: class.Class}, + nil, + false, + true, + func(i *Index) { + i.vectorIndexUserConfigs = make(map[string]schemaConfig.VectorIndexConfig) + i.vectorIndexUserConfigs["foo"] = hnsw.UserConfig{} + }, + ) + + // unknown shard + err := index.DebugResetVectorIndex(ctx, "unknown", "") + require.Error(t, err) + + // unknown target vector + err = index.DebugResetVectorIndex(ctx, shard.Name(), "unknown") + require.Error(t, err) + + // non-existing main vector + err = 
index.DebugResetVectorIndex(ctx, shard.Name(), "") + require.Error(t, err) + + amount := 1000 + + var objs []*storobj.Object + for i := 0; i < amount; i++ { + obj := testObject("reindextest") + obj.Vectors = map[string][]float32{ + "foo": {1, 2, 3}, + } + objs = append(objs, obj) + } + + errs := shard.PutObjectBatch(ctx, objs) + for _, err := range errs { + require.Nil(t, err) + } + + vidx, q := getVectorIndexAndQueue(t, shard, "foo") + + // wait until the queue is empty + for i := 0; i < 10; i++ { + time.Sleep(500 * time.Millisecond) + if q.Size() == 0 { + break + } + } + + // wait for the in-flight indexing to finish + q.Wait() + + // make sure the new index contains all the objects + for _, obj := range objs { + if !vidx.ContainsDoc(obj.DocID) { + t.Fatalf("node %d should be in the vector index", obj.DocID) + } + } + + err = index.DebugResetVectorIndex(ctx, shard.Name(), "foo") + require.Nil(t, err) + + // wait until the queue is empty + for i := 0; i < 10; i++ { + time.Sleep(500 * time.Millisecond) + if q.Size() == 0 { + break + } + } + + // wait for the in-flight indexing to finish + q.Wait() + + // make sure the new index contains all the objects + for _, obj := range objs { + if !vidx.ContainsDoc(obj.DocID) { + t.Fatalf("node %d should be in the vector index", obj.DocID) + } + } + + err = index.drop() + require.Nil(t, err) +} + +func TestIndex_DebugResetVectorIndexPQ(t *testing.T) { + t.Setenv("ASYNC_INDEXING", "true") + t.Setenv("ASYNC_INDEXING_STALE_TIMEOUT", "100ms") + + ctx := context.Background() + var cfg hnsw.UserConfig + cfg.SetDefaults() + cfg.MaxConnections = 16 + cfg.PQ.Enabled = true + cfg.PQ.Centroids = 6 + cfg.PQ.Segments = 4 + cfg.PQ.TrainingLimit = 32 + + shard, index := testShardWithSettings( + t, + ctx, + &models.Class{Class: "reindextest"}, + cfg, + false, + true, + ) + + // unknown shard + err := index.DebugResetVectorIndex(ctx, "unknown", "") + require.Error(t, err) + + // unknown target vector + err = index.DebugResetVectorIndex(ctx, 
shard.Name(), "unknown") + require.Error(t, err) + + amount := 1000 + + var objs []*storobj.Object + for i := 0; i < amount; i++ { + obj := testObject("reindextest") + obj.Vector = randVector(16) + objs = append(objs, obj) + } + + errs := shard.PutObjectBatch(ctx, objs) + for _, err := range errs { + require.Nil(t, err) + } + + vidx, q := getVectorIndexAndQueue(t, shard, "") + + // wait until the queue is empty + for i := 0; i < 10; i++ { + time.Sleep(500 * time.Millisecond) + if q.Size() == 0 { + break + } + } + + q.Wait() + + // wait until the index is compressed + for i := 0; i < 10; i++ { + time.Sleep(500 * time.Millisecond) + if vidx.Compressed() { + break + } + } + + err = index.DebugResetVectorIndex(ctx, shard.Name(), "") + require.Nil(t, err) + + // wait until the queue is empty + for i := 0; i < 10; i++ { + time.Sleep(500 * time.Millisecond) + if q.Size() == 0 { + break + } + } + + // wait for the in-flight indexing to finish + q.Wait() + + // wait until the index is compressed + for i := 0; i < 10; i++ { + time.Sleep(500 * time.Millisecond) + if vidx.Compressed() { + break + } + } + + // make sure the new index contains all the objects + for _, obj := range objs { + if !vidx.ContainsDoc(obj.DocID) { + t.Fatalf("node %d should be in the vector index", obj.DocID) + } + } + + err = index.drop() + require.Nil(t, err) +} + +func TestIndex_DebugResetVectorIndexFlat(t *testing.T) { + t.Setenv("ASYNC_INDEXING", "true") + t.Setenv("ASYNC_INDEX_INTERVAL", "100ms") + + ctx := context.Background() + class := &models.Class{Class: "reindextest"} + shard, index := testShardWithSettings( + t, + ctx, + &models.Class{Class: class.Class, VectorIndexType: "flat"}, + flat.UserConfig{}, + false, + true, + ) + + err := index.DebugResetVectorIndex(ctx, shard.Name(), "") + require.Error(t, err) + + err = index.drop() + require.Nil(t, err) +} + +func randVector(dim int) []float32 { + vec := make([]float32, dim) + for i := range vec { + vec[i] = rand.Float32() + } + + return vec +} 
+ +func TestIndex_ConvertQueue(t *testing.T) { + t.Setenv("ASYNC_INDEXING", "true") + + ctx := context.Background() + class := &models.Class{Class: "preloadtest"} + shard, index := testShardWithSettings( + t, + ctx, + &models.Class{Class: class.Class}, + hnsw.UserConfig{}, + false, + true, + ) + amount := 1000 + + var objs []*storobj.Object + for i := 0; i < amount; i++ { + obj := testObject("preloadtest") + obj.Vector = randVector(16) + objs = append(objs, obj) + } + + errs := shard.PutObjectBatch(ctx, objs) + for _, err := range errs { + require.Nil(t, err) + } + + // reset the queue + vidx, q := getVectorIndexAndQueue(t, shard, "") + q.ResetWith(vidx) + q.Resume() + + err := shard.ConvertQueue("") + require.Nil(t, err) + + // wait until the queue is empty + for i := 0; i < 200; i++ { + time.Sleep(500 * time.Millisecond) + if q.Size() == 0 { + break + } + } + + // wait for the in-flight indexing to finish + q.Wait() + + // make sure the index contains all the objects + for _, obj := range objs { + if !vidx.ContainsDoc(obj.DocID) { + t.Fatalf("node %d should be in the vector index", obj.DocID) + } + } + + err = index.drop() + require.Nil(t, err) +} + +func TestIndex_ConvertQueueTargetVector(t *testing.T) { + t.Setenv("ASYNC_INDEXING", "true") + + ctx := context.Background() + class := &models.Class{Class: "preloadtest"} + shard, index := testShardWithSettings( + t, + ctx, + &models.Class{Class: class.Class}, + hnsw.UserConfig{}, + false, + true, + func(i *Index) { + i.vectorIndexUserConfigs = make(map[string]schemaConfig.VectorIndexConfig) + i.vectorIndexUserConfigs["foo"] = hnsw.UserConfig{} + }, + ) + amount := 1000 + + var objs []*storobj.Object + for i := 0; i < amount; i++ { + obj := testObject("preloadtest") + obj.Vectors = map[string][]float32{ + "foo": {1, 2, 3}, + } + objs = append(objs, obj) + } + + errs := shard.PutObjectBatch(ctx, objs) + for _, err := range errs { + require.Nil(t, err) + } + + vectorIndex, q := getVectorIndexAndQueue(t, shard, "foo") 
+ + // reset the queue + q.Pause() + q.ResetWith(vectorIndex) + q.Resume() + + err := shard.ConvertQueue("foo") + require.Nil(t, err) + + // wait until the queue is empty + for i := 0; i < 200; i++ { + time.Sleep(500 * time.Millisecond) + if q.Size() == 0 { + break + } + } + + // wait for the in-flight indexing to finish + q.Wait() + + // make sure the index contains all the objects + for _, obj := range objs { + if !vectorIndex.ContainsDoc(obj.DocID) { + t.Fatalf("node %d should be in the vector index", obj.DocID) + } + } + + err = index.drop() + require.Nil(t, err) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_object_storage_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_object_storage_test.go new file mode 100644 index 0000000000000000000000000000000000000000..bbec4bf79e925d9821c46b01b718ffdc4edea188 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_object_storage_test.go @@ -0,0 +1,441 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/weaviate/weaviate/cluster/router/types" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/queue" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/replication" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storobj" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/monitoring" + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" +) + +func TestIndex_ObjectStorageSize_Comprehensive(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + tests := []struct { + name string + className string + shardName string + objectCount int + objectSize int // approximate size in bytes per object + expectedObjectCount int + expectedStorageSizeMin int64 // minimum expected storage size + expectedStorageSizeMax int64 // maximum expected storage size (allowing for overhead) + setupData bool + description string + }{ + { + name: "empty shard", + className: "TestClass", + shardName: "test-shard-empty", + setupData: false, + description: "Empty shard should have zero storage size", + }, + { + name: "shard with small objects", + className: "TestClass", + shardName: "test-shard-small", + objectCount: 10, + objectSize: 100, // ~100 bytes per object + expectedObjectCount: 10, + expectedStorageSizeMin: int64(10 * 100), // minimum: just the data 
+ expectedStorageSizeMax: int64(10 * 100 * 5), // maximum: data + overhead (increased to 5x) + setupData: true, + description: "Shard with small objects should have proportional storage size", + }, + { + name: "shard with medium objects", + className: "TestClass", + shardName: "test-shard-medium", + objectCount: 50, + objectSize: 500, // ~500 bytes per object + expectedObjectCount: 50, + expectedStorageSizeMin: int64(50 * 500), // minimum: just the data + expectedStorageSizeMax: int64(50 * 500 * 3), // maximum: data + overhead + setupData: true, + description: "Shard with medium objects should have proportional storage size", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create sharding state + shardState := &sharding.State{ + Physical: map[string]sharding.Physical{ + tt.shardName: { + Name: tt.shardName, + BelongsToNodes: []string{"test-node"}, + Status: models.TenantActivityStatusHOT, + }, + }, + } + shardState.SetLocalName("test-node") + // Create test class + class := &models.Class{ + Class: tt.className, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "description", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "count", + DataType: schema.DataTypeInt.PropString(), + }, + }, + InvertedIndexConfig: &models.InvertedIndexConfig{}, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: shardState.PartitioningEnabled, + }, + } + + // Create fake schema + fakeSchema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + // Create scheduler + scheduler := queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + Workers: 1, + }) + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readerFunc 
func(*models.Class, *sharding.State) error) error { + return readerFunc(class, shardState) + }).Maybe() + + // Create mock schema getter + mockSchema := schemaUC.NewMockSchemaGetter(t) + mockSchema.EXPECT().GetSchemaSkipAuth().Maybe().Return(fakeSchema) + mockSchema.EXPECT().ReadOnlyClass(tt.className).Maybe().Return(class) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, shardState) + }).Maybe() + mockSchema.EXPECT().NodeName().Maybe().Return("test-node") + mockSchema.EXPECT().ShardFromUUID("TestClass", mock.Anything).Return(tt.shardName).Maybe() + mockSchema.EXPECT().ShardOwner(tt.className, tt.shardName).Maybe().Return("test-node", nil) + + mockRouter := types.NewMockRouter(t) + mockRouter.EXPECT().GetWriteReplicasLocation(tt.className, mock.Anything, tt.shardName). + Return(types.WriteReplicaSet{Replicas: []types.Replica{{NodeName: "test-node", ShardName: tt.shardName, HostAddr: "110.12.15.23"}}}, nil).Maybe() + // Create index + index, err := NewIndex(ctx, IndexConfig{ + RootPath: dirName, + ClassName: schema.ClassName(tt.className), + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + TrackVectorDimensions: true, + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + enthnsw.UserConfig{ + VectorCacheMaxObjects: 1000, + }, nil, mockRouter, mockSchema, mockSchemaReader, nil, logger, nil, nil, nil, &replication.GlobalConfig{}, nil, class, nil, scheduler, nil, nil, NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.NoError(t, err) + defer index.Shutdown(ctx) + + // Add properties + for _, prop := range class.Properties { + err = index.addProperty(ctx, prop) + require.NoError(t, err) + } + + if tt.setupData { + // Create objects with varying sizes + for i := 0; i < tt.objectCount; i++ { + // Create object with properties that approximate the desired size + obj 
:= &models.Object{ + Class: tt.className, + ID: strfmt.UUID(fmt.Sprintf("00000000-0000-0000-0000-%012d", i)), + Properties: map[string]interface{}{ + "name": fmt.Sprintf("test-object-%d", i), + "description": generateStringOfSize(tt.objectSize - 50), // Leave room for other properties + "count": i, + }, + } + storageObj := storobj.FromObject(obj, nil, nil, nil) + err := index.putObject(ctx, storageObj, nil, obj.Tenant, 0) + require.NoError(t, err) + } + + // Wait for indexing to complete + time.Sleep(2 * time.Second) + + // Test object storage size + shard, release, err := index.GetShard(ctx, tt.shardName) + require.NoError(t, err) + require.NotNil(t, shard) + defer release() + + objectStorageSize, err := shard.ObjectStorageSize(ctx) + require.NoError(t, err) + objectCount := shard.ObjectCount() + + // Verify object count + assert.Equal(t, tt.expectedObjectCount, objectCount, "Object count should match expected") + + // Verify storage size is within expected range + assert.GreaterOrEqual(t, objectStorageSize, tt.expectedStorageSizeMin, + "Storage size should be at least the minimum expected size") + assert.LessOrEqual(t, objectStorageSize, tt.expectedStorageSizeMax, + "Storage size should not exceed the maximum expected size") + + } else { + // Test empty shard + shard, release, err := index.GetShard(ctx, tt.shardName) + require.NoError(t, err) + require.NotNil(t, shard) + defer release() + + objectStorageSize, err := shard.ObjectStorageSize(ctx) + require.NoError(t, err) + objectCount := shard.ObjectCount() + + assert.Equal(t, tt.expectedObjectCount, objectCount, "Empty shard should have 0 objects") + assert.Equal(t, tt.expectedStorageSizeMin, objectStorageSize, "Empty shard should have 0 storage size") + } + mockSchema.AssertExpectations(t) + }) + } +} + +func TestIndex_CalculateUnloadedObjectsMetrics_ActiveVsUnloaded(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + className := "TestClass" + 
tenantName := "test-tenant" + objectCount := 50 + objectSize := 500 // ~500 bytes per object + + // Create sharding state with multi-tenancy enabled + shardState := &sharding.State{ + Physical: map[string]sharding.Physical{ + tenantName: { + Name: tenantName, + BelongsToNodes: []string{"test-node"}, + Status: models.TenantActivityStatusHOT, + }, + }, + PartitioningEnabled: true, + } + shardState.SetLocalName("test-node") + + // Create test class with multi-tenancy enabled + class := &models.Class{ + Class: className, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "description", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + InvertedIndexConfig: &models.InvertedIndexConfig{}, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: shardState.PartitioningEnabled, + }, + } + + // Create fake schema + fakeSchema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + // Create scheduler + scheduler := queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + Workers: 1, + }) + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readerFunc func(*models.Class, *sharding.State) error) error { + return readerFunc(class, shardState) + }).Maybe() + + // Create mock schema getter + mockSchema := schemaUC.NewMockSchemaGetter(t) + mockSchema.EXPECT().GetSchemaSkipAuth().Maybe().Return(fakeSchema) + mockSchema.EXPECT().ReadOnlyClass(className).Maybe().Return(class) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, shardState) + }).Maybe() + mockSchema.EXPECT().NodeName().Maybe().Return("test-node") + 
mockSchema.EXPECT().ShardFromUUID("TestClass", mock.Anything).Return(tenantName).Maybe() + mockSchema.EXPECT().ShardOwner(className, tenantName).Maybe().Return("test-node", nil) + mockSchema.EXPECT().TenantsShards(ctx, className, tenantName).Maybe().Return(map[string]string{tenantName: models.TenantActivityStatusHOT}, nil) + + mockRouter := types.NewMockRouter(t) + mockRouter.EXPECT().GetWriteReplicasLocation(className, mock.Anything, tenantName). + Return(types.WriteReplicaSet{Replicas: []types.Replica{{NodeName: "test-node", ShardName: tenantName, HostAddr: "110.12.15.23"}}}, nil).Maybe() + + // Create index with lazy loading disabled to test active calculation methods + index, err := NewIndex(ctx, IndexConfig{ + RootPath: dirName, + ClassName: schema.ClassName(className), + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + TrackVectorDimensions: true, + DisableLazyLoadShards: true, // we have to make sure lazyload shard disabled to load directly + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + enthnsw.UserConfig{ + VectorCacheMaxObjects: 1000, + }, nil, nil, mockSchema, mockSchemaReader, nil, logger, nil, nil, nil, &replication.GlobalConfig{}, nil, class, nil, scheduler, nil, nil, NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.NoError(t, err) + + // Add properties + for _, prop := range class.Properties { + err = index.addProperty(ctx, prop) + require.NoError(t, err) + } + + // Add test objects + for i := 0; i < objectCount; i++ { + obj := &models.Object{ + Class: className, + ID: strfmt.UUID(fmt.Sprintf("00000000-0000-0000-0000-%012d", i)), + Tenant: tenantName, + Properties: map[string]interface{}{ + "name": fmt.Sprintf("test-object-%d", i), + "description": generateStringOfSize(objectSize - 50), // Leave room for other properties + }, + } + storageObj := storobj.FromObject(obj, nil, nil, nil) + err := index.putObject(ctx, storageObj, nil, obj.Tenant, 0) + require.NoError(t, err) 
+ } + + // Wait for indexing to complete + time.Sleep(1 * time.Second) + + // Test active shard object storage size + activeShard, release, err := index.GetShard(ctx, tenantName) + require.NoError(t, err) + require.NotNil(t, activeShard) + + // Force flush to ensure .cna files are created + objectsBucket := activeShard.Store().Bucket(helpers.ObjectsBucketLSM) + require.NotNil(t, objectsBucket) + require.NoError(t, objectsBucket.FlushMemtable()) + + activeObjectStorageSize, err := activeShard.ObjectStorageSize(ctx) + require.NoError(t, err) + activeObjectCount := activeShard.ObjectCount() + assert.Greater(t, activeObjectStorageSize, int64(0), "Active shard calculation should have object storage size > 0") + + // Test that active calculations are correct + assert.Equal(t, objectCount, activeObjectCount, "Active shard object count should match") + assert.Greater(t, activeObjectStorageSize, int64(objectCount*objectSize/2), "Active object storage size should be reasonable") + + // Release the shard (this will flush all data to disk) + release() + + // Explicitly shutdown all shards to ensure data is flushed to disk + err = index.ForEachShard(func(name string, shard ShardLike) error { + return shard.Shutdown(ctx) + }) + require.NoError(t, err) + + // Wait a bit for all shards to complete shutdown and data to be flushed + time.Sleep(1 * time.Second) + + // Unload the shard from memory to test inactive calculation methods + index.shards.LoadAndDelete(tenantName) + + // Shut down the entire index to ensure all store metadata is persisted + require.NoError(t, index.Shutdown(ctx)) + + // Create a new index instance to test inactive calculation methods + // This ensures we're testing the inactive methods on a fresh index that reads from disk + newIndex, err := NewIndex(ctx, IndexConfig{ + RootPath: dirName, + ClassName: schema.ClassName(className), + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + TrackVectorDimensions: true, + 
DisableLazyLoadShards: false, // we have to make sure lazyload enabled + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + enthnsw.UserConfig{ + VectorCacheMaxObjects: 1000, + }, index.GetVectorIndexConfigs(), nil, mockSchema, mockSchemaReader, nil, logger, nil, nil, nil, &replication.GlobalConfig{}, nil, class, nil, scheduler, nil, nil, NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.NoError(t, err) + defer newIndex.Shutdown(ctx) + + // Explicitly shutdown all shards to ensure data is flushed to disk + require.NoError(t, newIndex.ForEachShard(func(name string, shard ShardLike) error { + return shard.Shutdown(ctx) + })) + newIndex.shards.LoadAndDelete(tenantName) + + inactiveObjectUsage, err := newIndex.CalculateUnloadedObjectsMetrics(ctx, tenantName) + require.NoError(t, err) + // Compare active and inactive metrics + assert.Equal(t, int64(activeObjectCount), inactiveObjectUsage.Count, "Active and inactive object count should match") + assert.InDelta(t, activeObjectStorageSize, inactiveObjectUsage.StorageBytes, 1024, "Active and inactive object storage size should be close") + + // Verify all mock expectations were met + mockSchema.AssertExpectations(t) +} + +// Helper function to generate a string of approximately the given size +func generateStringOfSize(size int) string { + if size <= 0 { + return "" + } + + // Use a repeating pattern to create a string of approximately the desired size + pattern := "abcdefghijklmnopqrstuvwxyz0123456789" + result := strings.Repeat(pattern, size/len(pattern)) + if remainder := size % len(pattern); remainder > 0 { + result += pattern[:remainder] + } + return result +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_sharding_backward_compatibility.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_sharding_backward_compatibility.go new file mode 100644 index 0000000000000000000000000000000000000000..ccab8c26081d830b930028734b12e64154c34b80 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_sharding_backward_compatibility.go
@@ -0,0 +1,75 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+//  CONTACT: hello@weaviate.io
+//
+
+package db
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// checkSingleShardMigration renames files from the legacy single-shard layout ("<indexID>_single*") to the current per-shard naming ("<indexID>_<shardName>*").
+func (i *Index) checkSingleShardMigration() error {
+	dirEntries, err := os.ReadDir(i.Config.RootPath)
+	if err != nil {
+		return err
+	}
+
+	singleIndexId := i.ID() + "_single"
+	if !needsSingleShardMigration(dirEntries, i.ID()) { // pass the bare ID: the helper appends "_single" itself
+		return nil
+	}
+
+	var singleShardName string
+	className := i.Config.ClassName.String()
+
+	shards, err := i.schemaReader.Shards(className)
+	if err != nil {
+		return err
+	}
+	if len(shards) < 1 {
+		return fmt.Errorf("no shards found for class %s", className)
+	}
+	singleShardName = shards[0]
+	for _, entry := range dirEntries {
+		if !strings.HasPrefix(entry.Name(), singleIndexId) {
+			continue
+		}
+
+		newName := i.ID() + "_" + singleShardName + strings.TrimPrefix(entry.Name(), singleIndexId)
+		oldPath := filepath.Join(i.Config.RootPath, entry.Name())
+		newPath := filepath.Join(i.Config.RootPath, newName)
+
+		if err := os.Rename(oldPath, newPath); err != nil {
+			return errors.Wrapf(err, "migrate shard %q to %q", oldPath, newPath)
+		}
+
+		i.logger.WithField("action", "index_startup_migrate_shards_successful").
+			WithField("old_shard", oldPath).
+			WithField("new_shard", newPath).
+			Infof("successfully migrated shard file %q to %q", oldPath, newPath)
+	}
+
+	return nil
+}
+
+// needsSingleShardMigration reports whether any directory entry still carries the legacy "<indexID>_single" prefix.
+func needsSingleShardMigration(dirEntries []os.DirEntry, indexID string) bool {
+	singleShardPrefix := indexID + "_single"
+	for _, dirEntry := range dirEntries {
+		if strings.HasPrefix(dirEntry.Name(), singleShardPrefix) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_vector_storage_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_vector_storage_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..ba1ab0a473b63349ec593b3db6bb6e2c83fc5913
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/index_vector_storage_test.go
@@ -0,0 +1,810 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/weaviate/weaviate/cluster/router/types" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/queue" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/replication" + "github.com/weaviate/weaviate/entities/schema" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + "github.com/weaviate/weaviate/entities/storobj" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/monitoring" + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" +) + +const ( + defaultVectorDimensions = 1536 + namedVectorDimensions = 768 +) + +func TestIndex_CalculateUnloadedVectorsMetrics(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + tests := []struct { + name string + className string + shardName string + vectorConfigs map[string]schemaConfig.VectorIndexConfig + objectCount int + vectorDimensions int + expectedVectorStorageSize int64 + setupData bool + }{ + { + name: "empty shard with standard compression", + className: "TestClass", + shardName: "test-shard", + vectorConfigs: map[string]schemaConfig.VectorIndexConfig{ + "": enthnsw.UserConfig{ + VectorCacheMaxObjects: 1000, + }, + }, + objectCount: 0, + vectorDimensions: defaultVectorDimensions, + expectedVectorStorageSize: 0, + setupData: false, + }, + { + name: "shard with data and standard compression", + className: "TestClass", + shardName: "test-shard", + vectorConfigs: 
map[string]schemaConfig.VectorIndexConfig{ + "": enthnsw.UserConfig{ + VectorCacheMaxObjects: 1000, + }, + }, + objectCount: 100, + vectorDimensions: defaultVectorDimensions, + expectedVectorStorageSize: int64(100 * defaultVectorDimensions * 4), // 100 objects * defaultVectorDimensions dimensions * 4 bytes per float32 + setupData: true, + }, + { + name: "shard with named vectors", + className: "TestClass", + shardName: "test-shard-named", + vectorConfigs: map[string]schemaConfig.VectorIndexConfig{ + "text": enthnsw.UserConfig{ + VectorCacheMaxObjects: 1000, + }, + }, + objectCount: 50, + vectorDimensions: namedVectorDimensions, + expectedVectorStorageSize: int64(50 * namedVectorDimensions * 4), // 50 objects * namedVectorDimensions dimensions * 4 bytes per float32 + setupData: true, + }, + { + name: "shard with PQ compression", + className: "TestClass", + shardName: "test-shard-pq", + vectorConfigs: map[string]schemaConfig.VectorIndexConfig{ + "": enthnsw.UserConfig{ + VectorCacheMaxObjects: 1000, + PQ: enthnsw.PQConfig{ + Enabled: true, + Segments: 96, + Centroids: 256, + }, + }, + }, + objectCount: 1000, + vectorDimensions: defaultVectorDimensions, + expectedVectorStorageSize: 0, // Will be calculated based on actual compression ratio + setupData: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create sharding state + shardState := &sharding.State{ + Physical: map[string]sharding.Physical{ + tt.shardName: { + Name: tt.shardName, + BelongsToNodes: []string{"test-node"}, + Status: models.TenantActivityStatusHOT, + }, + }, + } + shardState.SetLocalName("test-node") + // Create test class + class := &models.Class{ + Class: tt.className, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + InvertedIndexConfig: &models.InvertedIndexConfig{}, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: 
shardState.PartitioningEnabled, + }, + } + + // Create fake schema + fakeSchema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + // Create scheduler + scheduler := queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + Workers: 1, + }) + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readerFunc func(*models.Class, *sharding.State) error) error { + return readerFunc(class, shardState) + }).Maybe() + + // Create mock schema getter + mockSchema := schemaUC.NewMockSchemaGetter(t) + mockSchema.EXPECT().GetSchemaSkipAuth().Maybe().Return(fakeSchema) + mockSchema.EXPECT().ReadOnlyClass(tt.className).Maybe().Return(class) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{class}}).Maybe() + mockSchema.EXPECT().NodeName().Maybe().Return("test-node") + mockSchema.EXPECT().ShardFromUUID("TestClass", mock.Anything).Return(tt.shardName).Maybe() + // Add ShardOwner expectation for all test cases + mockSchema.EXPECT().ShardOwner(tt.className, tt.shardName).Maybe().Return("test-node", nil) + + // Create index + var defaultVectorConfig schemaConfig.VectorIndexConfig + var vectorConfigs map[string]schemaConfig.VectorIndexConfig + + if len(tt.vectorConfigs) > 0 && tt.vectorConfigs[""] != nil { + // For legacy vector tests, only use the default config, not both + defaultVectorConfig = tt.vectorConfigs[""] + // Don't pass the empty string config in vectorConfigs to avoid duplication + vectorConfigs = make(map[string]schemaConfig.VectorIndexConfig) + for k, v := range tt.vectorConfigs { + if k != "" { + vectorConfigs[k] = v + } + } + } else { + // Use a default config for legacy vectors 
+ defaultVectorConfig = enthnsw.UserConfig{ + VectorCacheMaxObjects: 1000, + } + vectorConfigs = tt.vectorConfigs + } + + mockRouter := types.NewMockRouter(t) + mockRouter.EXPECT().GetWriteReplicasLocation(tt.className, mock.Anything, tt.shardName). + Return(types.WriteReplicaSet{ + Replicas: []types.Replica{{NodeName: "test-node", ShardName: tt.shardName, HostAddr: "10.14.57.56"}}, + AdditionalReplicas: nil, + }, nil).Maybe() + index, err := NewIndex(ctx, IndexConfig{ + RootPath: dirName, + ClassName: schema.ClassName(tt.className), + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + TrackVectorDimensions: true, + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + defaultVectorConfig, vectorConfigs, mockRouter, mockSchema, mockSchemaReader, nil, logger, nil, nil, nil, &replication.GlobalConfig{}, nil, class, nil, scheduler, nil, nil, NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.NoError(t, err) + defer index.Shutdown(ctx) + + // Add properties + err = index.addProperty(ctx, &models.Property{ + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }) + require.NoError(t, err) + + if tt.setupData { + if len(tt.vectorConfigs) > 0 && tt.vectorConfigs["text"] != nil { + // Named vector + for i := 0; i < tt.objectCount; i++ { + obj := &models.Object{ + Class: tt.className, + ID: strfmt.UUID(fmt.Sprintf("00000000-0000-0000-0000-%012d", i)), + Properties: map[string]interface{}{ + "name": fmt.Sprintf("test-object-%d", i), + }, + } + vectors := map[string][]float32{ + "text": make([]float32, tt.vectorDimensions), + } + for j := range vectors["text"] { + vectors["text"][j] = float32(i+j) / 1000.0 + } + storageObj := storobj.FromObject(obj, nil, vectors, nil) + err := index.putObject(ctx, storageObj, nil, obj.Tenant, 0) + require.NoError(t, err) + } + } else if len(tt.vectorConfigs) > 0 && tt.vectorConfigs[""] != nil { + // Legacy 
vector + for i := 0; i < tt.objectCount; i++ { + obj := &models.Object{ + Class: tt.className, + ID: strfmt.UUID(fmt.Sprintf("00000000-0000-0000-0000-%012d", i)), + Properties: map[string]interface{}{ + "name": fmt.Sprintf("test-object-%d", i), + }, + } + vector := make([]float32, tt.vectorDimensions) + for j := range vector { + vector[j] = float32(i+j) / 1000.0 + } + storageObj := storobj.FromObject(obj, vector, nil, nil) + err := index.putObject(ctx, storageObj, nil, obj.Tenant, 0) + require.NoError(t, err) + } + } + + // Wait for vector indexing to complete + time.Sleep(1 * time.Second) + + // Vector dimensions are always aggregated from nodeWideMetricsObserver, + // but we don't need DB for this test. Gimicky, but it does the job. + db := createTestDatabaseWithClass(t, monitoring.GetMetrics(), class) + publishVectorMetricsFromDB(t, db) + + // Test active shard vector storage size + shard, release, err := index.GetShard(ctx, tt.shardName) + require.NoError(t, err) + require.NotNil(t, shard) + + // Get active metrics BEFORE releasing the shard + vectorStorageSize, err := shard.VectorStorageSize(ctx) + require.NoError(t, err) + dimensions, err := shard.Dimensions(ctx, "") + require.NoError(t, err) + if len(tt.vectorConfigs) > 0 && tt.vectorConfigs["text"] != nil { + // Named vector + dimensions, err = shard.Dimensions(ctx, "text") + require.NoError(t, err) + } + objectCount := shard.ObjectCount() + + // For PQ compression, we need to account for the actual compression ratio + if len(tt.vectorConfigs) == 1 { + if pqConfig, ok := tt.vectorConfigs[""].(enthnsw.UserConfig); ok && pqConfig.PQ.Enabled { + // In test, PQ compression is not simulated, so expect uncompressed size + expectedSize := int64(tt.objectCount * tt.vectorDimensions * 4) + assert.Equal(t, expectedSize, vectorStorageSize) + } else { + assert.Equal(t, tt.expectedVectorStorageSize, vectorStorageSize) + } + } else { + assert.Equal(t, tt.expectedVectorStorageSize, vectorStorageSize) + } + + // Test 
dimensions tracking + expectedDimensions := tt.vectorDimensions * tt.objectCount // Dimensions returns total across all objects + assert.Equal(t, expectedDimensions, dimensions, "Dimensions should match expected") + + // Test object count + assert.Equal(t, tt.objectCount, objectCount, "Object count should match expected") + + // Release the shard (this will flush all data to disk) + release() + + // Explicitly shutdown all shards to ensure data is flushed to disk + err = index.ForEachShard(func(name string, shard ShardLike) error { + return shard.Shutdown(ctx) + }) + require.NoError(t, err) + + // Wait a bit for all shards to complete shutdown and data to be flushed + time.Sleep(1 * time.Second) + + // Unload the shard from memory to test inactive calculation methods + index.shards.LoadAndDelete(tt.shardName) + } else { + // Test empty shard + shard, release, err := index.GetShard(ctx, tt.shardName) + require.NoError(t, err) + require.NotNil(t, shard) + + // Get active metrics BEFORE releasing the shard + vectorStorageSize, err := shard.VectorStorageSize(ctx) + require.NoError(t, err) + dimensions, err := shard.Dimensions(ctx, "") + require.NoError(t, err) + objectCount := shard.ObjectCount() + + assert.Equal(t, tt.expectedVectorStorageSize, vectorStorageSize) + assert.Equal(t, 0, dimensions, "Empty shard should have 0 dimensions") + assert.Equal(t, 0, objectCount, "Empty shard should have 0 objects") + + // Release the shard (this will flush all data to disk) + release() + + // Explicitly shutdown all shards to ensure data is flushed to disk + err = index.ForEachShard(func(name string, shard ShardLike) error { + return shard.Shutdown(ctx) + }) + require.NoError(t, err) + + // Wait a bit for all shards to complete shutdown and data to be flushed + time.Sleep(1 * time.Second) + + // Unload the shard from memory to test inactive calculation methods + index.shards.LoadAndDelete(tt.shardName) + } + + // Verify all mock expectations were met + 
mockSchema.AssertExpectations(t) + }) + } +} + +func TestIndex_CalculateUnloadedDimensionsUsage(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + tests := []struct { + name string + className string + shardName string + targetVector string + objectCount int + vectorDimensions int + expectedCount int + expectedDims int + setupData bool + }{ + { + name: "empty shard", + className: "TestClass", + shardName: "test-shard", + targetVector: "", + objectCount: 0, + vectorDimensions: defaultVectorDimensions, + expectedCount: 0, + expectedDims: 0, + setupData: false, + }, + { + name: "shard with legacy vector", + className: "TestClass", + shardName: "test-shard", + targetVector: "", + objectCount: 100, + vectorDimensions: defaultVectorDimensions, + expectedCount: 100, + expectedDims: defaultVectorDimensions, + setupData: true, + }, + { + name: "shard with named vector", + className: "TestClass", + shardName: "test-shard", + targetVector: "text", + objectCount: 50, + vectorDimensions: namedVectorDimensions, + expectedCount: 50, + expectedDims: namedVectorDimensions, + setupData: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create sharding state + shardState := &sharding.State{ + Physical: map[string]sharding.Physical{ + tt.shardName: { + Name: tt.shardName, + BelongsToNodes: []string{"test-node"}, + Status: models.TenantActivityStatusHOT, + }, + }, + } + shardState.SetLocalName("test-node") + // Create test class + class := &models.Class{ + Class: tt.className, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + InvertedIndexConfig: &models.InvertedIndexConfig{}, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: shardState.PartitioningEnabled, + }, + } + + // Create fake schema + fakeSchema := schema.Schema{ + Objects: &models.Schema{ + Classes: 
[]*models.Class{class}, + }, + } + + // Create scheduler + scheduler := queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + Workers: 1, + }) + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readerFunc func(*models.Class, *sharding.State) error) error { + return readerFunc(class, shardState) + }).Maybe() + + // Create mock schema getter + mockSchema := schemaUC.NewMockSchemaGetter(t) + mockSchema.EXPECT().GetSchemaSkipAuth().Maybe().Return(fakeSchema) + mockSchema.EXPECT().ReadOnlyClass(tt.className).Maybe().Return(class) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{class}}).Maybe() + mockSchema.EXPECT().NodeName().Maybe().Return("test-node") + mockSchema.EXPECT().ShardFromUUID("TestClass", mock.Anything).Return("test-shard").Maybe() + + // Create index with named vector config + vectorConfigs := map[string]schemaConfig.VectorIndexConfig{ + "text": enthnsw.UserConfig{ + VectorCacheMaxObjects: 1000, + }, + } + mockRouter := types.NewMockRouter(t) + mockRouter.EXPECT().GetWriteReplicasLocation(tt.className, mock.Anything, tt.shardName). 
+ Return(types.WriteReplicaSet{ + Replicas: []types.Replica{{NodeName: "test-node", ShardName: tt.shardName, HostAddr: "10.14.57.56"}}, + AdditionalReplicas: nil, + }, nil).Maybe() + index, err := NewIndex(ctx, IndexConfig{ + RootPath: dirName, + ClassName: schema.ClassName(tt.className), + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + TrackVectorDimensions: true, + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + enthnsw.UserConfig{ + VectorCacheMaxObjects: 1000, + }, vectorConfigs, mockRouter, mockSchema, mockSchemaReader, nil, logger, nil, nil, nil, &replication.GlobalConfig{}, nil, class, nil, scheduler, nil, nil, NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.NoError(t, err) + defer index.Shutdown(ctx) + + // Add properties + err = index.addProperty(ctx, &models.Property{ + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }) + require.NoError(t, err) + + if tt.setupData { + // Add test objects with vectors + for i := 0; i < tt.objectCount; i++ { + obj := &models.Object{ + Class: tt.className, + ID: strfmt.UUID(fmt.Sprintf("00000000-0000-0000-0000-%012d", i)), + Properties: map[string]interface{}{ + "name": fmt.Sprintf("test-object-%d", i), + }, + } + + // Create storage object with vectors + var storageObj *storobj.Object + if tt.targetVector != "" { + // Named vector + vectors := map[string][]float32{ + tt.targetVector: make([]float32, tt.vectorDimensions), + } + for j := range vectors[tt.targetVector] { + vectors[tt.targetVector][j] = float32(i+j) / 1000.0 + } + storageObj = storobj.FromObject(obj, nil, vectors, nil) + } else { + // Legacy vector + vector := make([]float32, tt.vectorDimensions) + for j := range vector { + vector[j] = float32(i+j) / 1000.0 + } + storageObj = storobj.FromObject(obj, vector, nil, nil) + } + + err := index.putObject(ctx, storageObj, nil, obj.Tenant, 0) + require.NoError(t, err) + 
} + + // Wait for vector indexing to complete + time.Sleep(1 * time.Second) + + // Vector dimensions are always aggregated from nodeWideMetricsObserver, + // but we don't need DB for this test. Gimicky, but it does the job. + db := createTestDatabaseWithClass(t, monitoring.GetMetrics(), class) + publishVectorMetricsFromDB(t, db) + + // Test active shard dimensions usage + shard, release, err := index.GetShard(ctx, tt.shardName) + require.NoError(t, err) + require.NotNil(t, shard) + + // Get active metrics BEFORE releasing the shard + dimensionality, err := shard.DimensionsUsage(ctx, tt.targetVector) + require.NoError(t, err) + + assert.Equal(t, tt.expectedCount, dimensionality.Count) + assert.Equal(t, tt.expectedDims, dimensionality.Dimensions) + + // Release the shard (this will flush all data to disk) + release() + + // Explicitly shutdown all shards to ensure data is flushed to disk + err = index.ForEachShard(func(name string, shard ShardLike) error { + return shard.Shutdown(ctx) + }) + require.NoError(t, err) + + // Wait a bit for all shards to complete shutdown and data to be flushed + time.Sleep(1 * time.Second) + + // Unload the shard from memory to test inactive calculation methods + index.shards.LoadAndDelete(tt.shardName) + } else { + // Test empty shard + shard, release, err := index.GetShard(ctx, tt.shardName) + require.NoError(t, err) + require.NotNil(t, shard) + + // Get active metrics BEFORE releasing the shard + dimensionality, err := shard.DimensionsUsage(ctx, tt.targetVector) + require.NoError(t, err) + + assert.Equal(t, tt.expectedCount, dimensionality.Count) + assert.Equal(t, tt.expectedDims, dimensionality.Dimensions) + + // Release the shard (this will flush all data to disk) + release() + + // Explicitly shutdown all shards to ensure data is flushed to disk + err = index.ForEachShard(func(name string, shard ShardLike) error { + return shard.Shutdown(ctx) + }) + require.NoError(t, err) + + // Wait a bit for all shards to complete shutdown and 
data to be flushed + time.Sleep(1 * time.Second) + + // Unload the shard from memory to test inactive calculation methods + index.shards.LoadAndDelete(tt.shardName) + } + + // Verify all mock expectations were met + mockSchema.AssertExpectations(t) + }) + } +} + +func TestIndex_VectorStorageSize_ActiveVsUnloaded(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + className := "TestClass" + shardName := "test-shard" + objectCount := 50 + vectorDimensions := defaultVectorDimensions + + // Create sharding state + shardState := &sharding.State{ + Physical: map[string]sharding.Physical{ + shardName: { + Name: shardName, + BelongsToNodes: []string{"test-node"}, + Status: models.TenantActivityStatusHOT, + }, + }, + } + shardState.SetLocalName("test-node") + + // Create test class + class := &models.Class{ + Class: className, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + InvertedIndexConfig: &models.InvertedIndexConfig{}, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: shardState.PartitioningEnabled, + }, + } + + // Create fake schema + fakeSchema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + // Create scheduler + scheduler := queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + Workers: 1, + }) + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readerFunc func(*models.Class, *sharding.State) error) error { + return readerFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{class}}).Maybe() + + // Create mock schema getter + mockSchema := schemaUC.NewMockSchemaGetter(t) + mockSchema.EXPECT().GetSchemaSkipAuth().Maybe().Return(fakeSchema) + 
mockSchema.EXPECT().ReadOnlyClass(className).Maybe().Return(class) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, shardState) + }).Maybe() + mockSchema.EXPECT().NodeName().Maybe().Return("test-node") + mockSchema.EXPECT().ShardFromUUID("TestClass", mock.Anything).Return("test-shard").Maybe() + + mockRouter := types.NewMockRouter(t) + mockRouter.EXPECT().GetWriteReplicasLocation(className, mock.Anything, shardName). + Return(types.WriteReplicaSet{ + Replicas: []types.Replica{{NodeName: "test-node", ShardName: shardName, HostAddr: "10.14.57.56"}}, + AdditionalReplicas: nil, + }, nil).Maybe() + // Create index with lazy loading disabled to test active calculation methods + index, err := NewIndex(ctx, IndexConfig{ + RootPath: dirName, + ClassName: schema.ClassName(className), + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + TrackVectorDimensions: true, + DisableLazyLoadShards: true, // we have to make sure lazyload shard disabled to load directly + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + enthnsw.UserConfig{ + VectorCacheMaxObjects: 1000, + }, nil, mockRouter, mockSchema, mockSchemaReader, nil, logger, nil, nil, nil, &replication.GlobalConfig{}, nil, class, nil, scheduler, nil, nil, NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.NoError(t, err) + + // Add properties + err = index.addProperty(ctx, &models.Property{ + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }) + require.NoError(t, err) + + // Add test objects + for i := 0; i < objectCount; i++ { + obj := &models.Object{ + Class: className, + ID: strfmt.UUID(fmt.Sprintf("00000000-0000-0000-0000-%012d", i)), + Properties: map[string]interface{}{ + "name": fmt.Sprintf("test-object-%d", i), + }, + } + + vector := 
make([]float32, vectorDimensions) + for j := range vector { + vector[j] = float32(i+j) / 1000.0 + } + storageObj := storobj.FromObject(obj, vector, nil, nil) + + err := index.putObject(ctx, storageObj, nil, obj.Tenant, 0) + require.NoError(t, err) + } + + // Wait for indexing to complete + time.Sleep(1 * time.Second) + + // Vector dimensions are always aggregated from nodeWideMetricsObserver, + // but we don't need DB for this test. Gimicky, but it does the job. + db := createTestDatabaseWithClass(t, monitoring.GetMetrics(), class) + publishVectorMetricsFromDB(t, db) + + // Test active shard vector storage size + activeShard, release, err := index.GetShard(ctx, shardName) + require.NoError(t, err) + require.NotNil(t, activeShard) + + activeVectorStorageSize, err := activeShard.VectorStorageSize(ctx) + require.NoError(t, err) + dimensionality, err := activeShard.DimensionsUsage(ctx, "") + require.NoError(t, err) + activeObjectCount := activeShard.ObjectCount() + assert.Greater(t, activeVectorStorageSize, int64(0), "Active shard calculation should have vector storage size > 0") + + // Test that active calculations are correct + expectedSize := int64(objectCount * vectorDimensions * 4) + assert.Equal(t, expectedSize, activeVectorStorageSize, "Active vector storage size should be close to expected") + assert.Equal(t, objectCount, dimensionality.Count, "Active shard object count should match") + assert.Equal(t, vectorDimensions, dimensionality.Dimensions, "Active shard dimensions should match") + assert.Equal(t, objectCount, activeObjectCount, "Active object count should match") + + // Release the shard (this will flush all data to disk) + release() + + // Explicitly shutdown all shards to ensure data is flushed to disk + err = index.ForEachShard(func(name string, shard ShardLike) error { + return shard.Shutdown(ctx) + }) + require.NoError(t, err) + + // Wait a bit for all shards to complete shutdown and data to be flushed + time.Sleep(1 * time.Second) + + // Unload the 
shard from memory to test inactive calculation methods + index.shards.LoadAndDelete(shardName) + + // Shut down the entire index to ensure all store metadata is persisted + require.NoError(t, index.Shutdown(ctx)) + + // Create a new index instance to test inactive calculation methods + // This ensures we're testing the inactive methods on a fresh index that reads from disk + newIndex, err := NewIndex(ctx, IndexConfig{ + RootPath: dirName, + ClassName: schema.ClassName(className), + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + TrackVectorDimensions: true, + DisableLazyLoadShards: false, // we have to make sure lazyload enabled + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + enthnsw.UserConfig{ + VectorCacheMaxObjects: 1000, + }, index.GetVectorIndexConfigs(), mockRouter, mockSchema, mockSchemaReader, nil, logger, nil, nil, nil, &replication.GlobalConfig{}, nil, class, nil, scheduler, nil, nil, NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.NoError(t, err) + defer newIndex.Shutdown(ctx) + + // Explicitly shutdown all shards to ensure data is flushed to disk + require.NoError(t, newIndex.ForEachShard(func(name string, shard ShardLike) error { + return shard.Shutdown(ctx) + })) + newIndex.shards.LoadAndDelete(shardName) + + inactiveVectorStorageSize, err := newIndex.CalculateUnloadedVectorsMetrics(ctx, shardName) + require.NoError(t, err) + dimensionality, err = newIndex.CalculateUnloadedDimensionsUsage(ctx, shardName, "") + require.NoError(t, err) + + // Compare active and inactive metrics + assert.Equal(t, activeVectorStorageSize, inactiveVectorStorageSize, "Active and inactive vector storage size should be very similar") + assert.Equal(t, objectCount, dimensionality.Count, "Active and inactive object count should match") + assert.Equal(t, vectorDimensions, dimensionality.Dimensions, "Active and inactive dimensions should match") + // Verify all mock expectations were met + 
mockSchema.AssertExpectations(t) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/indexcheckpoint/checkpoint.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/indexcheckpoint/checkpoint.go new file mode 100644 index 0000000000000000000000000000000000000000..6a7efa19d7e75acf23b8daa56fc7466a307b985d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/indexcheckpoint/checkpoint.go @@ -0,0 +1,195 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package indexcheckpoint + +import ( + "bytes" + "encoding/binary" + "fmt" + "os" + "path/filepath" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + bolt "go.etcd.io/bbolt" +) + +var checkpointBucket = []byte("checkpoint") + +// Checkpoints keeps track of the last indexed vector id for each shard. +// It stores the ids in a BoltDB file. 
+type Checkpoints struct { + db *bolt.DB + path string +} + +func New(dir string, logger logrus.FieldLogger) (*Checkpoints, error) { + path := filepath.Join(dir, "index.db") + + db, err := bolt.Open(path, 0o600, nil) + if err != nil { + return nil, errors.Wrapf(err, "open %q", path) + } + + ic := Checkpoints{ + db: db, + path: path, + } + + err = ic.initDB() + if err != nil { + return nil, err + } + + return &ic, nil +} + +func (c *Checkpoints) initDB() error { + err := c.db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(checkpointBucket) + return err + }) + + return errors.Wrap(err, "init db") +} + +// Close the underlying DB +func (c *Checkpoints) Close() { + c.db.Close() +} + +func (c *Checkpoints) getID(shardID, targetVector string) string { + if targetVector != "" { + return fmt.Sprintf("%s_%s", shardID, targetVector) + } + return shardID +} + +func (c *Checkpoints) Get(shardID, targetVector string) (count uint64, exists bool, err error) { + err = c.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(checkpointBucket) + v := b.Get([]byte(c.getID(shardID, targetVector))) + if v == nil { + return nil + } + + count = binary.LittleEndian.Uint64(v) + exists = true + return nil + }) + if err != nil { + return 0, false, errors.Wrap(err, "get checkpoint") + } + + return +} + +func (c *Checkpoints) Update(shardID, targetVector string, id uint64) error { + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, id) + + err := c.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(checkpointBucket) + key := []byte(c.getID(shardID, targetVector)) + return b.Put(key, buf) + }) + if err != nil { + return errors.Wrap(err, "update checkpoint") + } + + return nil +} + +func (c *Checkpoints) UpdateIfNewer(shardID, targetVector string, id uint64) error { + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, id) + + err := c.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(checkpointBucket) + key := []byte(c.getID(shardID, targetVector)) + 
+ // do not update if the current checkpoint is newer + old := b.Get(key) + if old != nil { + oldID := binary.LittleEndian.Uint64(old) + if oldID > id { + return errors.Errorf("current checkpoint %d is newer than %d", oldID, id) + } + } + + return b.Put(key, buf) + }) + if err != nil { + return errors.Wrap(err, "update checkpoint") + } + + return nil +} + +func (c *Checkpoints) Delete(shardID, targetVector string) error { + err := c.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(checkpointBucket) + return b.Delete([]byte(c.getID(shardID, targetVector))) + }) + if err != nil { + return errors.Wrap(err, "delete checkpoint") + } + + return nil +} + +// DeleteShard removes all checkpoints for a shard. +// It works for both single and multi vector shards. +func (c *Checkpoints) DeleteShard(shardID string) error { + err := c.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(checkpointBucket) + + c := b.Cursor() + sID := []byte(shardID) + var toDelete [][]byte + + for k, _ := c.Seek(sID); k != nil; k, _ = c.Next() { + if !bytes.HasPrefix(k, sID) { + break + } + + // ensure the key is either the shardID or shardID_vector + if !bytes.Equal(k, sID) && k[len(sID)] != '_' { + continue + } + + toDelete = append(toDelete, k) + } + + for _, k := range toDelete { + if err := b.Delete(k); err != nil { + return err + } + } + + return nil + }) + if err != nil { + return errors.Wrap(err, "delete shard checkpoints") + } + + return nil +} + +func (c *Checkpoints) Drop() error { + c.db.Close() + return os.Remove(c.Filename()) +} + +func (c *Checkpoints) Filename() string { + return c.path +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/indexcheckpoint/checkpoint_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/indexcheckpoint/checkpoint_test.go new file mode 100644 index 0000000000000000000000000000000000000000..988c8819527499cf00dff66d8eb3addb571331e3 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/indexcheckpoint/checkpoint_test.go @@ -0,0 +1,147 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package indexcheckpoint + +import ( + "io" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestCheckpoint(t *testing.T) { + l := logrus.New() + l.SetOutput(io.Discard) + + c, err := New(t.TempDir(), l) + require.NoError(t, err) + defer c.Close() + + t.Run("get non-existing", func(t *testing.T) { + v, ok, err := c.Get("shard1", "a") + require.NoError(t, err) + require.False(t, ok) + require.Zero(t, v) + }) + + t.Run("set and get", func(t *testing.T) { + err := c.UpdateIfNewer("shard1", "a", 123) + require.NoError(t, err) + + v, ok, err := c.Get("shard1", "a") + require.NoError(t, err) + require.True(t, ok) + require.EqualValues(t, 123, v) + }) + + t.Run("set and get: no target", func(t *testing.T) { + err := c.UpdateIfNewer("shard1", "", 123) + require.NoError(t, err) + + v, ok, err := c.Get("shard1", "") + require.NoError(t, err) + require.True(t, ok) + require.EqualValues(t, 123, v) + }) + + t.Run("overwrite", func(t *testing.T) { + err := c.UpdateIfNewer("shard1", "a", 456) + require.NoError(t, err) + + v, ok, err := c.Get("shard1", "a") + require.NoError(t, err) + require.True(t, ok) + require.EqualValues(t, 456, v) + }) + + t.Run("delete", func(t *testing.T) { + err := c.Delete("shard1", "a") + require.NoError(t, err) + + v, ok, err := c.Get("shard1", "a") + require.NoError(t, err) + require.False(t, ok) + require.Zero(t, v) + }) + + t.Run("deleteShard: single vector", func(t *testing.T) { + err = c.Update("shard1", "", 123) + require.NoError(t, err) + + err := c.DeleteShard("shard1") + require.NoError(t, err) + + v, ok, 
err := c.Get("shard1", "") + require.NoError(t, err) + require.False(t, ok) + require.Zero(t, v) + }) + + t.Run("deleteShard: named vectors", func(t *testing.T) { + err = c.Update("vector_wKFB6FDP7hdS", "a", 1) + require.NoError(t, err) + + err = c.Update("vector_wKFB6FDP7hdS", "b", 2) + require.NoError(t, err) + + // ensure it doesn't delete other shards + err = c.Update("vector_wKFB6FDP7hdS2", "", 3) + require.NoError(t, err) + err = c.Update("vector_wKFB6FDP7hd_", "a", 4) + require.NoError(t, err) + + err := c.DeleteShard("vector_wKFB6FDP7hdS") + require.NoError(t, err) + + v, ok, err := c.Get("vector_wKFB6FDP7hdS", "a") + require.NoError(t, err) + require.False(t, ok) + require.Zero(t, v) + + v, ok, err = c.Get("vector_wKFB6FDP7hdS", "b") + require.NoError(t, err) + require.False(t, ok) + require.Zero(t, v) + + v, ok, err = c.Get("vector_wKFB6FDP7hdS2", "") + require.NoError(t, err) + require.True(t, ok) + require.EqualValues(t, 3, v) + + v, ok, err = c.Get("vector_wKFB6FDP7hd_", "a") + require.NoError(t, err) + require.True(t, ok) + require.EqualValues(t, 4, v) + }) + + t.Run("drop", func(t *testing.T) { + c, err := New(t.TempDir(), l) + require.NoError(t, err) + defer c.Close() + + err = c.Drop() + require.NoError(t, err) + + _, _, err = c.Get("shard1", "a") + require.Error(t, err) + + err = c.UpdateIfNewer("shard1", "a", 123) + require.Error(t, err) + + err = c.Delete("shard1", "a") + require.Error(t, err) + + err = c.Drop() + require.Error(t, err) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/indexcounter/counter.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/indexcounter/counter.go new file mode 100644 index 0000000000000000000000000000000000000000..f4439c90e86b2858fd82fd35dc4198b86eef579a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/indexcounter/counter.go @@ -0,0 +1,112 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| 
|\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package indexcounter + +import ( + "encoding/binary" + "fmt" + "os" + "sync" + + "github.com/pkg/errors" +) + +type Counter struct { + count uint64 + sync.Mutex + f *os.File +} + +func New(shardPath string) (cr *Counter, rerr error) { + fileName := fmt.Sprintf("%s/indexcount", shardPath) + f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666) + if err != nil { + return nil, err + } + + // The lifetime of the `f` exceeds this constructor as we store the open file for later use in Counter. + // invariant: We close `f` **only** if any error happened after successfully opening the file. To avoid leaking open file descriptor. + // NOTE: This `defer` works even with `err` being shadowed in the whole function because defer checks for named `rerr` return value. + defer func() { + if rerr != nil { + f.Close() + } + }() + + stat, err := f.Stat() + if err != nil { + return nil, err + } + + var initialCount uint64 = 0 + if stat.Size() > 0 { + // the file has existed before, we need to initialize with its content + err := binary.Read(f, binary.LittleEndian, &initialCount) + if err != nil { + return nil, errors.Wrap(err, "read initial count from file") + } + + } + + return &Counter{ + count: initialCount, + f: f, + }, nil +} + +func (c *Counter) Get() uint64 { + c.Lock() + defer c.Unlock() + return c.count +} + +func (c *Counter) GetAndInc() (uint64, error) { + c.Lock() + defer c.Unlock() + before := c.count + c.count++ + c.f.Seek(0, 0) + err := binary.Write(c.f, binary.LittleEndian, &c.count) + if err != nil { + return 0, errors.Wrap(err, "increase counter on disk") + } + c.f.Seek(0, 0) + return before, nil +} + +// PreviewNext can be used to check if there is data present in the index, if +// it returns 0, you can be certain that no data exists +func (c *Counter) PreviewNext() uint64 { + 
c.Lock() + defer c.Unlock() + + return c.count +} + +func (c *Counter) Drop() error { + c.Lock() + defer c.Unlock() + if c.f == nil { + return nil + } + filename := c.FileName() + c.f.Close() + err := os.Remove(filename) + if err != nil { + return errors.Wrap(err, "drop counter file") + } + return nil +} + +func (c *Counter) FileName() string { + return c.f.Name() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/init.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/init.go new file mode 100644 index 0000000000000000000000000000000000000000..c77e8b038797989b892ae5894d1420eed30d8547 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/init.go @@ -0,0 +1,207 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "os" + "path" + "time" + + "github.com/weaviate/weaviate/usecases/multitenancy" + + "github.com/pkg/errors" + + "github.com/weaviate/weaviate/adapters/repos/db/indexcheckpoint" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/cluster/router" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/tenantactivity" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/replica" + migratefs "github.com/weaviate/weaviate/usecases/schema/migrate/fs" +) + +// init gets the current schema and creates one index object per class. 
+// The indices will in turn create shards, which will either read an +// existing db file from disk, or create a new one if none exists +func (db *DB) init(ctx context.Context) error { + if err := os.MkdirAll(db.config.RootPath, 0o777); err != nil { + return fmt.Errorf("create root path directory at %s: %w", db.config.RootPath, err) + } + + // As of v1.22, db files are stored in a hierarchical structure + // rather than a flat one. If weaviate is started with files + // that are still in the flat structure, we will migrate them + // over. + if err := db.migrateFileStructureIfNecessary(); err != nil { + return err + } + + if asyncEnabled() { + // init the index checkpoint file + var err error + db.indexCheckpoints, err = indexcheckpoint.New(db.config.RootPath, db.logger) + if err != nil { + return errors.Wrap(err, "init index checkpoint") + } + } + + objects := db.schemaGetter.GetSchemaSkipAuth().Objects + if objects != nil { + for _, class := range objects.Classes { + invertedConfig := class.InvertedIndexConfig + if invertedConfig == nil { + // for backward compatibility, this field was introduced in v1.0.4, + // prior schemas will not yet have the field. Init with the defaults + // which were previously hard-coded. 
+ // In this method we are essentially reading the schema from disk, so + // it could have been created before v1.0.4 + invertedConfig = &models.InvertedIndexConfig{ + CleanupIntervalSeconds: config.DefaultCleanupIntervalSeconds, + Bm25: &models.BM25Config{ + K1: config.DefaultBM25k1, + B: config.DefaultBM25b, + }, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + } + } + if err := replica.ValidateConfig(class, db.config.Replication); err != nil { + return fmt.Errorf("replication config: %w", err) + } + + collection := schema.ClassName(class.Class).String() + indexRouter := router.NewBuilder( + collection, + multitenancy.IsMultiTenant(class.MultiTenancyConfig), + db.nodeSelector, + db.schemaGetter, + db.schemaReader, + db.replicationFSM, + ).Build() + idx, err := NewIndex(ctx, IndexConfig{ + ClassName: schema.ClassName(class.Class), + RootPath: db.config.RootPath, + ResourceUsage: db.config.ResourceUsage, + QueryMaximumResults: db.config.QueryMaximumResults, + QueryHybridMaximumResults: db.config.QueryHybridMaximumResults, + QueryNestedRefLimit: db.config.QueryNestedRefLimit, + MemtablesFlushDirtyAfter: db.config.MemtablesFlushDirtyAfter, + MemtablesInitialSizeMB: db.config.MemtablesInitialSizeMB, + MemtablesMaxSizeMB: db.config.MemtablesMaxSizeMB, + MemtablesMinActiveSeconds: db.config.MemtablesMinActiveSeconds, + MemtablesMaxActiveSeconds: db.config.MemtablesMaxActiveSeconds, + MinMMapSize: db.config.MinMMapSize, + LazySegmentsDisabled: db.config.LazySegmentsDisabled, + SegmentInfoIntoFileNameEnabled: db.config.SegmentInfoIntoFileNameEnabled, + WriteMetadataFilesEnabled: db.config.WriteMetadataFilesEnabled, + MaxReuseWalSize: db.config.MaxReuseWalSize, + SegmentsCleanupIntervalSeconds: db.config.SegmentsCleanupIntervalSeconds, + SeparateObjectsCompactions: db.config.SeparateObjectsCompactions, + CycleManagerRoutinesFactor: db.config.CycleManagerRoutinesFactor, + IndexRangeableInMemory: db.config.IndexRangeableInMemory, + MaxSegmentSize: 
db.config.MaxSegmentSize, + TrackVectorDimensions: db.config.TrackVectorDimensions, + TrackVectorDimensionsInterval: db.config.TrackVectorDimensionsInterval, + UsageEnabled: db.config.UsageEnabled, + AvoidMMap: db.config.AvoidMMap, + DisableLazyLoadShards: db.config.DisableLazyLoadShards, + ForceFullReplicasSearch: db.config.ForceFullReplicasSearch, + TransferInactivityTimeout: db.config.TransferInactivityTimeout, + LSMEnableSegmentsChecksumValidation: db.config.LSMEnableSegmentsChecksumValidation, + ReplicationFactor: class.ReplicationConfig.Factor, + AsyncReplicationEnabled: class.ReplicationConfig.AsyncEnabled, + DeletionStrategy: class.ReplicationConfig.DeletionStrategy, + ShardLoadLimiter: db.shardLoadLimiter, + HNSWMaxLogSize: db.config.HNSWMaxLogSize, + HNSWDisableSnapshots: db.config.HNSWDisableSnapshots, + HNSWSnapshotIntervalSeconds: db.config.HNSWSnapshotIntervalSeconds, + HNSWSnapshotOnStartup: db.config.HNSWSnapshotOnStartup, + HNSWSnapshotMinDeltaCommitlogsNumber: db.config.HNSWSnapshotMinDeltaCommitlogsNumber, + HNSWSnapshotMinDeltaCommitlogsSizePercentage: db.config.HNSWSnapshotMinDeltaCommitlogsSizePercentage, + HNSWWaitForCachePrefill: db.config.HNSWWaitForCachePrefill, + HNSWFlatSearchConcurrency: db.config.HNSWFlatSearchConcurrency, + HNSWAcornFilterRatio: db.config.HNSWAcornFilterRatio, + VisitedListPoolMaxSize: db.config.VisitedListPoolMaxSize, + QuerySlowLogEnabled: db.config.QuerySlowLogEnabled, + QuerySlowLogThreshold: db.config.QuerySlowLogThreshold, + InvertedSorterDisabled: db.config.InvertedSorterDisabled, + MaintenanceModeEnabled: db.config.MaintenanceModeEnabled, + }, + inverted.ConfigFromModel(invertedConfig), + convertToVectorIndexConfig(class.VectorIndexConfig), + convertToVectorIndexConfigs(class.VectorConfig), + indexRouter, db.schemaGetter, db.schemaReader, db, db.logger, db.nodeResolver, db.remoteIndex, + db.replicaClient, &db.config.Replication, db.promMetrics, class, db.jobQueueCh, db.scheduler, db.indexCheckpoints, + 
db.memMonitor, db.reindexer, db.bitmapBufPool) + if err != nil { + return errors.Wrap(err, "create index") + } + + db.indexLock.Lock() + db.indices[idx.ID()] = idx + db.indexLock.Unlock() + } + } + + // Collecting metrics that _can_ be aggregated on a node level, + // i.e. replacing className and shardName labels with "n/a", + // should be delegated to nodeWideMetricsObserver to centralize + // control over how these metrics are aggregated. + // + // See also https://github.com/weaviate/weaviate/issues/4396 + // + // NB: nodeWideMetricsObserver only tracks object_count if + // node-level aggregation is enabled -- a decision made during + // its original implementation. + if db.promMetrics != nil { + db.metricsObserver = newNodeWideMetricsObserver(db) + db.metricsObserver.Start() + } + + return nil +} + +func (db *DB) LocalTenantActivity(filter tenantactivity.UsageFilter) tenantactivity.ByCollection { + return db.metricsObserver.Usage(filter) +} + +func (db *DB) migrateFileStructureIfNecessary() error { + fsMigrationPath := path.Join(db.config.RootPath, "migration1.22.fs.hierarchy") + exists, err := diskio.FileExists(fsMigrationPath) + if err != nil { + return err + } + if !exists { + if err = db.migrateToHierarchicalFS(); err != nil { + return fmt.Errorf("migrate to hierarchical fs: %w", err) + } + if _, err = os.Create(fsMigrationPath); err != nil { + return fmt.Errorf("create hierarchical fs indicator: %w", err) + } + } + return nil +} + +func (db *DB) migrateToHierarchicalFS() error { + before := time.Now() + + if err := migratefs.MigrateToHierarchicalFS(db.config.RootPath, db.schemaReader); err != nil { + return err + } + db.logger.WithField("action", "hierarchical_fs_migration"). 
+ Debugf("fs migration took %s\n", time.Since(before)) + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/analyzer.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/analyzer.go new file mode 100644 index 0000000000000000000000000000000000000000..5aa86a46b8a3b9e54ff18f0685f92ca6445a4733 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/analyzer.go @@ -0,0 +1,248 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "bytes" + "encoding/binary" + + "github.com/google/uuid" + ent "github.com/weaviate/weaviate/entities/inverted" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/tokenizer" +) + +type IsFallbackToSearchable func() bool + +type Countable struct { + Data []byte + TermFrequency float32 +} + +type Property struct { + Name string + Items []Countable + Length int + HasFilterableIndex bool // roaring set index + HasSearchableIndex bool // map index (with frequencies) + HasRangeableIndex bool // roaring set index for ranged queries +} + +type NilProperty struct { + Name string + AddToPropertyLength bool +} + +func DedupItems(props []Property) []Property { + for i := range props { + seen := map[string]struct{}{} + items := props[i].Items + + var key string + // reverse order to keep latest elements + for j := len(items) - 1; j >= 0; j-- { + key = string(items[j].Data) + if _, ok := seen[key]; ok { + // remove element already seen + items = append(items[:j], items[j+1:]...) 
+ } + seen[key] = struct{}{} + } + props[i].Items = items + } + return props +} + +type Analyzer struct { + isFallbackToSearchable IsFallbackToSearchable +} + +// Text tokenizes given input according to selected tokenization, +// then aggregates duplicates +func (a *Analyzer) Text(tokenization, in string) []Countable { + return a.TextArray(tokenization, []string{in}) +} + +// TextArray tokenizes given input according to selected tokenization, +// then aggregates duplicates +func (a *Analyzer) TextArray(tokenization string, inArr []string) []Countable { + var terms []string + for _, in := range inArr { + terms = append(terms, tokenizer.Tokenize(tokenization, in)...) + } + + counts := map[string]uint64{} + for _, term := range terms { + counts[term]++ + } + + countable := make([]Countable, len(counts)) + i := 0 + for term, count := range counts { + countable[i] = Countable{ + Data: []byte(term), + TermFrequency: float32(count), + } + i++ + } + return countable +} + +// Int requires no analysis, so it's actually just a simple conversion to a +// string-formatted byte slice of the int +func (a *Analyzer) Int(in int64) ([]Countable, error) { + data, err := ent.LexicographicallySortableInt64(in) + if err != nil { + return nil, err + } + + return []Countable{ + { + Data: data, + }, + }, nil +} + +// UUID requires no analysis, so it's just dumping the raw binary representation +func (a *Analyzer) UUID(in uuid.UUID) ([]Countable, error) { + return []Countable{ + { + Data: in[:], + }, + }, nil +} + +// UUID array requires no analysis, so it's just dumping the raw binary +// representation of each contained element +func (a *Analyzer) UUIDArray(in []uuid.UUID) ([]Countable, error) { + out := make([]Countable, len(in)) + for i := range in { + out[i] = Countable{ + Data: in[i][:], + } + } + + return out, nil +} + +// Int array requires no analysis, so it's actually just a simple conversion to a +// string-formatted byte slice of the int +func (a *Analyzer) IntArray(in []int64) 
([]Countable, error) { + out := make([]Countable, len(in)) + for i := range in { + data, err := ent.LexicographicallySortableInt64(in[i]) + if err != nil { + return nil, err + } + out[i] = Countable{Data: data} + } + + return out, nil +} + +// Float requires no analysis, so it's actually just a simple conversion to a +// lexicographically sortable byte slice. +func (a *Analyzer) Float(in float64) ([]Countable, error) { + data, err := ent.LexicographicallySortableFloat64(in) + if err != nil { + return nil, err + } + + return []Countable{ + { + Data: data, + }, + }, nil +} + +// Float array requires no analysis, so it's actually just a simple conversion to a +// lexicographically sortable byte slice. +func (a *Analyzer) FloatArray(in []float64) ([]Countable, error) { + out := make([]Countable, len(in)) + for i := range in { + data, err := ent.LexicographicallySortableFloat64(in[i]) + if err != nil { + return nil, err + } + out[i] = Countable{Data: data} + } + + return out, nil +} + +// BoolArray requires no analysis, so it's actually just a simple conversion to a +// little-endian ordered byte slice +func (a *Analyzer) BoolArray(in []bool) ([]Countable, error) { + out := make([]Countable, len(in)) + for i := range in { + b := bytes.NewBuffer(nil) + err := binary.Write(b, binary.LittleEndian, &in[i]) + if err != nil { + return nil, err + } + out[i] = Countable{Data: b.Bytes()} + } + + return out, nil +} + +// Bool requires no analysis, so it's actually just a simple conversion to a +// little-endian ordered byte slice +func (a *Analyzer) Bool(in bool) ([]Countable, error) { + b := bytes.NewBuffer(nil) + err := binary.Write(b, binary.LittleEndian, &in) + if err != nil { + return nil, err + } + + return []Countable{ + { + Data: b.Bytes(), + }, + }, nil +} + +// RefCount does not index the content of the refs, but only the count with 0 +// being an explicitly allowed value as well. 
+func (a *Analyzer) RefCount(in models.MultipleRef) ([]Countable, error) { + length := uint64(len(in)) + data, err := ent.LexicographicallySortableUint64(length) + if err != nil { + return nil, err + } + + return []Countable{ + { + Data: data, + }, + }, nil +} + +// Ref indexes references as beacon-strings +func (a *Analyzer) Ref(in models.MultipleRef) ([]Countable, error) { + out := make([]Countable, len(in)) + + for i, ref := range in { + out[i] = Countable{ + Data: []byte(ref.Beacon), + } + } + + return out, nil +} + +func NewAnalyzer(isFallbackToSearchable IsFallbackToSearchable) *Analyzer { + if isFallbackToSearchable == nil { + isFallbackToSearchable = func() bool { return false } + } + return &Analyzer{isFallbackToSearchable: isFallbackToSearchable} +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/analyzer_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/analyzer_test.go new file mode 100644 index 0000000000000000000000000000000000000000..06b633646bba1d99572e55330cd8ff534e8475e6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/analyzer_test.go @@ -0,0 +1,598 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "bytes" + "math" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" +) + +func TestAnalyzer(t *testing.T) { + a := NewAnalyzer(nil) + + countable := func(data []string, freq []int) []Countable { + countable := make([]Countable, len(data)) + for i := range data { + countable[i] = Countable{ + Data: []byte(data[i]), + TermFrequency: float32(freq[i]), + } + } + return countable + } + + t.Run("with text", func(t *testing.T) { + type testCase struct { + name string + input string + tokenization string + expectedCountable []Countable + } + + testCases := []testCase{ + { + name: "tokenization word, unique words", + input: "Hello, my name is John Doe", + tokenization: models.PropertyTokenizationWord, + expectedCountable: countable( + []string{"hello", "my", "name", "is", "john", "doe"}, + []int{1, 1, 1, 1, 1, 1}, + ), + }, + { + name: "tokenization word, duplicated words", + input: "Du. Du hast. Du hast. Du hast mich gefragt.", + tokenization: models.PropertyTokenizationWord, + expectedCountable: countable( + []string{"du", "hast", "mich", "gefragt"}, + []int{4, 3, 1, 1}, + ), + }, + { + name: "tokenization lowercase, unique words", + input: "My email is john-thats-jay.ohh.age.n+alloneword@doe.com", + tokenization: models.PropertyTokenizationLowercase, + expectedCountable: countable( + []string{"my", "email", "is", "john-thats-jay.ohh.age.n+alloneword@doe.com"}, + []int{1, 1, 1, 1}, + ), + }, + { + name: "tokenization lowercase, duplicated words", + input: "Du. Du hast. Du hast. 
Du hast mich gefragt.", + tokenization: models.PropertyTokenizationLowercase, + expectedCountable: countable( + []string{"du.", "du", "hast.", "hast", "mich", "gefragt."}, + []int{1, 3, 2, 1, 1, 1}, + ), + }, + { + name: "tokenization whitespace, unique words", + input: "My email is john-thats-jay.ohh.age.n+alloneword@doe.com", + tokenization: models.PropertyTokenizationWhitespace, + expectedCountable: countable( + []string{"My", "email", "is", "john-thats-jay.ohh.age.n+alloneword@doe.com"}, + []int{1, 1, 1, 1}, + ), + }, + { + name: "tokenization whitespace, duplicated words", + input: "Du. Du hast. Du hast. Du hast mich gefragt.", + tokenization: models.PropertyTokenizationWhitespace, + expectedCountable: countable( + []string{"Du.", "Du", "hast.", "hast", "mich", "gefragt."}, + []int{1, 3, 2, 1, 1, 1}, + ), + }, + { + name: "tokenization field", + input: "\n Du. Du hast. Du hast. Du hast mich gefragt.\t ", + tokenization: models.PropertyTokenizationField, + expectedCountable: countable( + []string{"Du. Du hast. Du hast. Du hast mich gefragt."}, + []int{1}, + ), + }, + { + name: "non existing tokenization", + input: "Du. Du hast. Du hast. Du hast mich gefragt.", + tokenization: "non_existing", + expectedCountable: []Countable{}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + countable := a.Text(tc.tokenization, tc.input) + assert.ElementsMatch(t, tc.expectedCountable, countable) + }) + } + }) + + t.Run("with text array", func(t *testing.T) { + type testCase struct { + name string + input []string + tokenization string + expectedCountable []Countable + } + + testCases := []testCase{ + { + name: "tokenization word, unique words", + input: []string{"Hello,", "my name is John Doe"}, + tokenization: models.PropertyTokenizationWord, + expectedCountable: countable( + []string{"hello", "my", "name", "is", "john", "doe"}, + []int{1, 1, 1, 1, 1, 1}, + ), + }, + { + name: "tokenization word, duplicated words", + input: []string{"Du. 
Du hast. Du hast.", "Du hast mich gefragt."}, + tokenization: models.PropertyTokenizationWord, + expectedCountable: countable( + []string{"du", "hast", "mich", "gefragt"}, + []int{4, 3, 1, 1}, + ), + }, + { + name: "tokenization lowercase, unique words", + input: []string{"My email", "is john-thats-jay.ohh.age.n+alloneword@doe.com"}, + tokenization: models.PropertyTokenizationLowercase, + expectedCountable: countable( + []string{"my", "email", "is", "john-thats-jay.ohh.age.n+alloneword@doe.com"}, + []int{1, 1, 1, 1}, + ), + }, + { + name: "tokenization lowercase, duplicated words", + input: []string{"Du. Du hast. Du hast.", "Du hast mich gefragt."}, + tokenization: models.PropertyTokenizationLowercase, + expectedCountable: countable( + []string{"du.", "du", "hast.", "hast", "mich", "gefragt."}, + []int{1, 3, 2, 1, 1, 1}, + ), + }, + { + name: "tokenization whitespace, unique words", + input: []string{"My email", "is john-thats-jay.ohh.age.n+alloneword@doe.com"}, + tokenization: models.PropertyTokenizationWhitespace, + expectedCountable: countable( + []string{"My", "email", "is", "john-thats-jay.ohh.age.n+alloneword@doe.com"}, + []int{1, 1, 1, 1}, + ), + }, + { + name: "tokenization whitespace, duplicated words", + input: []string{"Du. Du hast. Du hast.", "Du hast mich gefragt."}, + tokenization: models.PropertyTokenizationWhitespace, + expectedCountable: countable( + []string{"Du.", "Du", "hast.", "hast", "mich", "gefragt."}, + []int{1, 3, 2, 1, 1, 1}, + ), + }, + { + name: "tokenization field", + input: []string{"\n Du. Du hast. Du hast.", "Du hast mich gefragt.\t "}, + tokenization: models.PropertyTokenizationField, + expectedCountable: countable( + []string{"Du. Du hast. Du hast.", "Du hast mich gefragt."}, + []int{1, 1}, + ), + }, + { + name: "non existing tokenization", + input: []string{"Du. Du hast. 
Du hast.", "Du hast mich gefragt."}, + tokenization: "non_existing", + expectedCountable: []Countable{}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + countable := a.TextArray(tc.tokenization, tc.input) + assert.ElementsMatch(t, tc.expectedCountable, countable) + }) + } + }) + + t.Run("with int it stays sortable", func(t *testing.T) { + getData := func(in []Countable, err error) []byte { + require.Nil(t, err) + return in[0].Data + } + + results := [][]byte{ + getData(a.Float(math.MinInt64)), + getData(a.Int(-1000000)), + getData(a.Int(-400000)), + getData(a.Int(-20000)), + getData(a.Int(-9000)), + getData(a.Int(-301)), + getData(a.Int(-300)), + getData(a.Int(-299)), + getData(a.Int(-1)), + getData(a.Int(0)), + getData(a.Int(1)), + getData(a.Int(299)), + getData(a.Int(300)), + getData(a.Int(301)), + getData(a.Int(9000)), + getData(a.Int(20000)), + getData(a.Int(400000)), + getData(a.Int(1000000)), + getData(a.Float(math.MaxInt64)), + } + + afterSort := make([][]byte, len(results)) + copy(afterSort, results) + sort.Slice(afterSort, func(a, b int) bool { return bytes.Compare(afterSort[a], afterSort[b]) == -1 }) + assert.Equal(t, results, afterSort) + }) + + t.Run("with float it stays sortable", func(t *testing.T) { + getData := func(in []Countable, err error) []byte { + require.Nil(t, err) + return in[0].Data + } + + results := [][]byte{ + getData(a.Float(-math.MaxFloat64)), + getData(a.Float(-1000000)), + getData(a.Float(-400000)), + getData(a.Float(-20000)), + getData(a.Float(-9000.9)), + getData(a.Float(-9000.8999)), + getData(a.Float(-9000.8998)), + getData(a.Float(-9000.79999)), + getData(a.Float(-301)), + getData(a.Float(-300)), + getData(a.Float(-299)), + getData(a.Float(-1)), + getData(a.Float(-0.09)), + getData(a.Float(-0.01)), + getData(a.Float(-0.009)), + getData(a.Float(0)), + getData(a.Float(math.SmallestNonzeroFloat64)), + getData(a.Float(0.009)), + getData(a.Float(0.01)), + getData(a.Float(0.09)), + 
getData(a.Float(0.1)), + getData(a.Float(0.9)), + getData(a.Float(1)), + getData(a.Float(299)), + getData(a.Float(300)), + getData(a.Float(301)), + getData(a.Float(9000)), + getData(a.Float(20000)), + getData(a.Float(400000)), + getData(a.Float(1000000)), + getData(a.Float(math.MaxFloat64)), + } + + afterSort := make([][]byte, len(results)) + copy(afterSort, results) + sort.Slice(afterSort, func(a, b int) bool { return bytes.Compare(afterSort[a], afterSort[b]) == -1 }) + assert.Equal(t, results, afterSort) + }) + + t.Run("with refCount it stays sortable", func(t *testing.T) { + getData := func(in []Countable, err error) []byte { + require.Nil(t, err) + return in[0].Data + } + + results := [][]byte{ + getData(a.RefCount(make(models.MultipleRef, 0))), + getData(a.RefCount(make(models.MultipleRef, 1))), + getData(a.RefCount(make(models.MultipleRef, 2))), + getData(a.RefCount(make(models.MultipleRef, 99))), + getData(a.RefCount(make(models.MultipleRef, 100))), + getData(a.RefCount(make(models.MultipleRef, 101))), + getData(a.RefCount(make(models.MultipleRef, 256))), + getData(a.RefCount(make(models.MultipleRef, 300))), + getData(a.RefCount(make(models.MultipleRef, 456))), + } + + afterSort := make([][]byte, len(results)) + copy(afterSort, results) + sort.Slice(afterSort, func(a, b int) bool { return bytes.Compare(afterSort[a], afterSort[b]) == -1 }) + assert.Equal(t, results, afterSort) + }) + + byteTrue := []byte{0x1} + byteFalse := []byte{0x0} + + t.Run("analyze bool", func(t *testing.T) { + t.Run("true", func(t *testing.T) { + countable, err := a.Bool(true) + require.Nil(t, err) + require.Len(t, countable, 1) + + c := countable[0] + assert.Equal(t, byteTrue, c.Data) + assert.Equal(t, float32(0), c.TermFrequency) + }) + + t.Run("false", func(t *testing.T) { + countable, err := a.Bool(false) + require.Nil(t, err) + require.Len(t, countable, 1) + + c := countable[0] + assert.Equal(t, byteFalse, c.Data) + assert.Equal(t, float32(0), c.TermFrequency) + }) + }) + + 
t.Run("analyze bool array", func(t *testing.T) { + type testCase struct { + name string + values []bool + expected [][]byte + } + + testCases := []testCase{ + { + name: "[true]", + values: []bool{true}, + expected: [][]byte{byteTrue}, + }, + { + name: "[false]", + values: []bool{false}, + expected: [][]byte{byteFalse}, + }, + { + name: "[true, true, true]", + values: []bool{true, true, true}, + expected: [][]byte{byteTrue, byteTrue, byteTrue}, + }, + { + name: "[false, false, false]", + values: []bool{false, false, false}, + expected: [][]byte{byteFalse, byteFalse, byteFalse}, + }, + { + name: "[false, true, false, true]", + values: []bool{false, true, false, true}, + expected: [][]byte{byteFalse, byteTrue, byteFalse, byteTrue}, + }, + { + name: "[]", + values: []bool{}, + expected: [][]byte{}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + countable, err := a.BoolArray(tc.values) + require.Nil(t, err) + require.Len(t, countable, len(tc.expected)) + + for i := range countable { + assert.Equal(t, tc.expected[i], countable[i].Data) + assert.Equal(t, float32(0), countable[i].TermFrequency) + } + }) + } + }) +} + +func TestAnalyzer_DefaultEngPreset(t *testing.T) { + countable := func(data []string, freq []int) []Countable { + countable := make([]Countable, len(data)) + for i := range data { + countable[i] = Countable{ + Data: []byte(data[i]), + TermFrequency: float32(freq[i]), + } + } + return countable + } + + a := NewAnalyzer(nil) + input := "Hello you-beautiful_World" + + t.Run("with text", func(t *testing.T) { + type testCase struct { + name string + tokenization string + input string + expectedCountable []Countable + } + + testCases := []testCase{ + { + name: "tokenization word", + tokenization: models.PropertyTokenizationWord, + input: input, + expectedCountable: countable( + []string{"hello", "you", "beautiful", "world"}, + []int{1, 1, 1, 1}, + ), + }, + { + name: "tokenization lowercase", + tokenization: 
models.PropertyTokenizationLowercase, + input: input, + expectedCountable: countable( + []string{"hello", "you-beautiful_world"}, + []int{1, 1}, + ), + }, + { + name: "tokenization whitespace", + tokenization: models.PropertyTokenizationWhitespace, + input: input, + expectedCountable: countable( + []string{"Hello", "you-beautiful_World"}, + []int{1, 1}, + ), + }, + { + name: "tokenization field", + tokenization: models.PropertyTokenizationField, + input: input, + expectedCountable: countable( + []string{"Hello you-beautiful_World"}, + []int{1}, + ), + }, + { + name: "non existing tokenization", + tokenization: "non_existing", + input: input, + expectedCountable: []Countable{}, + }, + } + + for _, tc := range testCases { + countable := a.Text(tc.tokenization, tc.input) + assert.ElementsMatch(t, tc.expectedCountable, countable) + } + }) + + t.Run("with text array", func(t *testing.T) { + type testCase struct { + name string + tokenization string + input []string + expectedCountable []Countable + } + + testCases := []testCase{ + { + name: "tokenization word", + tokenization: models.PropertyTokenizationWord, + input: []string{input, input}, + expectedCountable: countable( + []string{"hello", "you", "beautiful", "world"}, + []int{2, 2, 2, 2}, + ), + }, + { + name: "tokenization lowercase", + tokenization: models.PropertyTokenizationLowercase, + input: []string{input, input}, + expectedCountable: countable( + []string{"hello", "you-beautiful_world"}, + []int{2, 2}, + ), + }, + { + name: "tokenization whitespace", + tokenization: models.PropertyTokenizationWhitespace, + input: []string{input, input}, + expectedCountable: countable( + []string{"Hello", "you-beautiful_World"}, + []int{2, 2}, + ), + }, + { + name: "tokenization field", + tokenization: models.PropertyTokenizationField, + input: []string{input, input}, + expectedCountable: countable( + []string{"Hello you-beautiful_World"}, + []int{2}, + ), + }, + { + name: "non existing tokenization", + tokenization: 
"non_existing", + input: []string{input, input}, + expectedCountable: []Countable{}, + }, + } + + for _, tc := range testCases { + countable := a.TextArray(tc.tokenization, tc.input) + assert.ElementsMatch(t, tc.expectedCountable, countable) + } + }) +} + +type fakeStopwordDetector struct{} + +func (fsd fakeStopwordDetector) IsStopword(word string) bool { + return false +} + +func TestDedupItems(t *testing.T) { + props := []Property{ + { + Name: "propNothingToDo", + Items: []Countable{ + {Data: []byte("fff"), TermFrequency: 3}, + {Data: []byte("eee"), TermFrequency: 2}, + {Data: []byte("ddd"), TermFrequency: 1}, + }, + }, + { + Name: "propToDedup1", + Items: []Countable{ + {Data: []byte("aaa"), TermFrequency: 1}, + {Data: []byte("bbb"), TermFrequency: 2}, + {Data: []byte("ccc"), TermFrequency: 3}, + {Data: []byte("aaa"), TermFrequency: 4}, + {Data: []byte("ccc"), TermFrequency: 0}, + }, + }, + { + Name: "propToDedup2", + Items: []Countable{ + {Data: []uint8{1}, TermFrequency: 5}, + {Data: []uint8{1}, TermFrequency: 4}, + {Data: []uint8{1}, TermFrequency: 3}, + {Data: []uint8{1}, TermFrequency: 2}, + {Data: []uint8{1}, TermFrequency: 1}, + }, + }, + } + + expectedProps := []Property{ + { + Name: "propNothingToDo", + Items: []Countable{ + {Data: []byte("fff"), TermFrequency: 3}, + {Data: []byte("eee"), TermFrequency: 2}, + {Data: []byte("ddd"), TermFrequency: 1}, + }, + }, + { + Name: "propToDedup1", + Items: []Countable{ + {Data: []byte("bbb"), TermFrequency: 2}, + {Data: []byte("aaa"), TermFrequency: 4}, + {Data: []byte("ccc"), TermFrequency: 0}, + }, + }, + { + Name: "propToDedup2", + Items: []Countable{ + {Data: []uint8{1}, TermFrequency: 1}, + }, + }, + } + + dedupProps := DedupItems(props) + assert.Equal(t, expectedProps, dedupProps) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/bm25_searcher.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/bm25_searcher.go new file mode 100644 index 
0000000000000000000000000000000000000000..21359511487082c81d033564ef2d3900a5ac4910 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/bm25_searcher.go @@ -0,0 +1,616 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "context" + "fmt" + "math" + "os" + "runtime/debug" + "strconv" + "strings" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/concurrency" + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/stopwords" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/terms" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/adapters/repos/db/propertyspecific" + "github.com/weaviate/weaviate/entities/inverted" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/entities/tokenizer" +) + +type BM25Searcher struct { + config schema.BM25Config + store *lsmkv.Store + getClass func(string) *models.Class + classSearcher ClassSearcher // to allow recursive searches on ref-props + propIndices propertyspecific.Indices + propLenTracker propLengthRetriever + logger logrus.FieldLogger + shardVersion uint16 +} + +type propLengthRetriever interface { + 
PropertyMean(prop string) (float32, error) +} + +type termListRequest struct { + term string + termId int + duplicateTextBoost int + propertyNames []string + propertyBoosts map[string]float32 +} + +func NewBM25Searcher(config schema.BM25Config, store *lsmkv.Store, + getClass func(string) *models.Class, propIndices propertyspecific.Indices, + classSearcher ClassSearcher, propLenTracker propLengthRetriever, + logger logrus.FieldLogger, shardVersion uint16, +) *BM25Searcher { + return &BM25Searcher{ + config: config, + store: store, + getClass: getClass, + propIndices: propIndices, + classSearcher: classSearcher, + propLenTracker: propLenTracker, + logger: logger.WithField("action", "bm25_search"), + shardVersion: shardVersion, + } +} + +func (b *BM25Searcher) BM25F(ctx context.Context, filterDocIds helpers.AllowList, + className schema.ClassName, limit int, keywordRanking searchparams.KeywordRanking, additional additional.Properties, +) ([]*storobj.Object, []float32, error) { + // WEAVIATE-471 - If a property is not searchable, return an error + for _, property := range keywordRanking.Properties { + if !PropertyHasSearchableIndex(b.getClass(className.String()), property) { + return nil, nil, inverted.NewMissingSearchableIndexError(property) + } + } + + class := b.getClass(className.String()) + if class == nil { + return nil, nil, fmt.Errorf("could not find class %s in schema", className) + } + + var objs []*storobj.Object + var scores []float32 + var err error + + // TODO: amourao - move this to the global config + if os.Getenv("USE_BLOCKMAX_WAND") == "false" { + objs, scores, err = b.wand(ctx, filterDocIds, class, keywordRanking, limit, additional) + } else { + objs, scores, err = b.wandBlock(ctx, filterDocIds, class, keywordRanking, limit, additional) + } + if err != nil { + return nil, nil, errors.Wrap(err, "wand") + } + + return objs, scores, nil +} + +func (b *BM25Searcher) GetPropertyLengthTracker() *JsonShardMetaData { + return 
b.propLenTracker.(*JsonShardMetaData) +} + +func (b *BM25Searcher) generateQueryTermsAndStats(class *models.Class, params searchparams.KeywordRanking) (bool, float64, map[string][]string, map[string][]string, map[string][]int, map[string]float32, float64, error) { + N := float64(b.store.Bucket(helpers.ObjectsBucketLSM).Count()) + + // This flag checks whether all buckets are of the inverted strategy, + // and thus, compatible with BlockMaxWAND, or if there are other strategies present, + // which would require the old WAND implementation. + allBucketsAreInverted := true + + var stopWordDetector *stopwords.Detector + if class.InvertedIndexConfig != nil && class.InvertedIndexConfig.Stopwords != nil { + var err error + stopWordDetector, err = stopwords.NewDetectorFromConfig(*(class.InvertedIndexConfig.Stopwords)) + if err != nil { + return false, 0, nil, nil, nil, nil, 0, err + } + } + + // There are currently cases, for different tokenization: + // word, lowercase, whitespace and field. + // Query is tokenized and respective properties are then searched for the search terms, + // results at the end are combined using WAND + queryTermsByTokenization := map[string][]string{} + duplicateBoostsByTokenization := map[string][]int{} + propNamesByTokenization := map[string][]string{} + propertyBoosts := make(map[string]float32, len(params.Properties)) + + for _, tokenization := range tokenizer.Tokenizations { + queryTerms, dupBoosts := tokenizer.TokenizeAndCountDuplicates(tokenization, params.Query) + queryTermsByTokenization[tokenization] = queryTerms + duplicateBoostsByTokenization[tokenization] = dupBoosts + + // stopword filtering for word tokenization + if tokenization == models.PropertyTokenizationWord { + queryTerms, dupBoosts = b.removeStopwordsFromQueryTerms(queryTermsByTokenization[tokenization], + duplicateBoostsByTokenization[tokenization], stopWordDetector) + queryTermsByTokenization[tokenization] = queryTerms + duplicateBoostsByTokenization[tokenization] = 
dupBoosts + } + + propNamesByTokenization[tokenization] = make([]string, 0) + } + + averagePropLength := 0. + averagePropLengthCount := 0 + for _, propertyWithBoost := range params.Properties { + property := propertyWithBoost + propBoost := 1 + if strings.Contains(propertyWithBoost, "^") { + property = strings.Split(propertyWithBoost, "^")[0] + boostStr := strings.Split(propertyWithBoost, "^")[1] + propBoost, _ = strconv.Atoi(boostStr) + } + propertyBoosts[property] = float32(propBoost) + + propMean, err := b.GetPropertyLengthTracker().PropertyMean(property) + if err != nil { + return false, 0, nil, nil, nil, nil, 0, err + } + + bucket := b.GetBucket(property) + if bucket == nil { + return false, 0, nil, nil, nil, nil, 0, fmt.Errorf("could not find bucket for property %v", property) + } + + if bucket.Strategy() != lsmkv.StrategyInverted { + allBucketsAreInverted = false + } + + // A NaN here is the results of a corrupted prop length tracker. + // This is a workaround to try and avoid 0 or NaN scores. + // There is an extra check below in case all prop lengths are NaN or 0. 
+ // Related issue https://github.com/weaviate/weaviate/issues/6247 + if !math.IsNaN(float64(propMean)) { + averagePropLength += float64(propMean) + averagePropLengthCount++ + } + + prop, err := schema.GetPropertyByName(class, property) + if err != nil { + return false, 0, nil, nil, nil, nil, 0, err + } + + switch dt, _ := schema.AsPrimitive(prop.DataType); dt { + case schema.DataTypeText, schema.DataTypeTextArray: + if _, exists := propNamesByTokenization[prop.Tokenization]; !exists { + return false, 0, nil, nil, nil, nil, 0, fmt.Errorf("cannot handle tokenization '%v' of property '%s'", + prop.Tokenization, prop.Name) + } + propNamesByTokenization[prop.Tokenization] = append(propNamesByTokenization[prop.Tokenization], property) + default: + return false, 0, nil, nil, nil, nil, 0, fmt.Errorf("cannot handle datatype '%v' of property '%s'", dt, prop.Name) + } + } + + averagePropLength = averagePropLength / float64(averagePropLengthCount) + + // If this value is zero or NaN, the prop length tracker is fully corrupted. + // This is a workaround to avoid 0 or NaN scores. 
+ // Related issue https://github.com/weaviate/weaviate/issues/6247 + // sane default, if all prop lengths are NaN or 0 + if math.IsNaN(averagePropLength) || averagePropLength == 0 { + averagePropLength = 40.0 + } + return allBucketsAreInverted, N, propNamesByTokenization, queryTermsByTokenization, duplicateBoostsByTokenization, propertyBoosts, averagePropLength, nil +} + +func (b *BM25Searcher) wand( + ctx context.Context, filterDocIds helpers.AllowList, class *models.Class, params searchparams.KeywordRanking, limit int, additional additional.Properties, +) ([]*storobj.Object, []float32, error) { + _, N, propNamesByTokenization, queryTermsByTokenization, duplicateBoostsByTokenization, propertyBoosts, averagePropLength, err := b.generateQueryTermsAndStats(class, params) + if err != nil { + return nil, nil, err + } + + allRequests := make([]termListRequest, 0, 1000) + allQueryTerms := make([]string, 0, 1000) + minimumOrTokensMatch := math.MaxInt64 + + for _, tokenization := range tokenizer.Tokenizations { + propNames := propNamesByTokenization[tokenization] + if len(propNames) > 0 { + queryTerms, duplicateBoosts := queryTermsByTokenization[tokenization], duplicateBoostsByTokenization[tokenization] + for queryTermIndex, queryTerm := range queryTerms { + allRequests = append(allRequests, termListRequest{ + term: queryTerm, + termId: len(allRequests), + duplicateTextBoost: duplicateBoosts[queryTermIndex], + propertyNames: propNames, + propertyBoosts: propertyBoosts, + }) + allQueryTerms = append(allQueryTerms, queryTerm) + } + minimumOrTokensMatchByTokenization := params.MinimumOrTokensMatch + if params.SearchOperator == common_filters.SearchOperatorAnd { + minimumOrTokensMatchByTokenization = len(queryTerms) + } + if minimumOrTokensMatchByTokenization < minimumOrTokensMatch { + minimumOrTokensMatch = minimumOrTokensMatchByTokenization + } + } + } + + results := make([]*terms.Term, len(allRequests)) + + eg := enterrors.NewErrorGroupWrapper(b.logger) + 
eg.SetLimit(_NUMCPU) + + for _, request := range allRequests { + term := request.term + termId := request.termId + propNames := request.propertyNames + duplicateBoost := request.duplicateTextBoost + + eg.Go(func() (err error) { + defer func() { + p := recover() + if p != nil { + b.logger. + WithField("query_term", term). + WithField("prop_names", propNames). + WithField("has_filter", filterDocIds != nil). + Errorf("panic: %v", p) + debug.PrintStack() + err = fmt.Errorf("an internal error occurred during BM25 search") + } + }() + + termResult, termErr := b.createTerm(N, filterDocIds, term, termId, propNames, propertyBoosts, duplicateBoost, ctx) + if termErr != nil { + err = termErr + return + } + results[termId] = termResult + return + }) + } + + if err := eg.Wait(); err != nil { + return nil, nil, err + } + // all results. Sum up the length of the results from all terms to get an upper bound of how many results there are + if limit == 0 { + for _, res := range results { + if res != nil { + limit += len(res.Data) + } + } + } + + resultsNonNil := make([]terms.TermInterface, 0, len(results)) + for _, res := range results { + if res != nil { + resultsNonNil = append(resultsNonNil, res) + } + } + + combinedTerms := &terms.Terms{ + T: resultsNonNil, + Count: len(allRequests), + } + + topKHeap := lsmkv.DoWand(limit, combinedTerms, averagePropLength, params.AdditionalExplanations, minimumOrTokensMatch) + + return b.getTopKObjects(topKHeap, params.AdditionalExplanations, allQueryTerms, additional) +} + +func (b *BM25Searcher) removeStopwordsFromQueryTerms(queryTerms []string, + duplicateBoost []int, detector *stopwords.Detector, +) ([]string, []int) { + if detector == nil || len(queryTerms) == 0 { + return queryTerms, duplicateBoost + } + + i := 0 +WordLoop: + for { + if i == len(queryTerms) { + return queryTerms, duplicateBoost + } + queryTerm := queryTerms[i] + if detector.IsStopword(queryTerm) { + queryTerms[i] = queryTerms[len(queryTerms)-1] + queryTerms = 
queryTerms[:len(queryTerms)-1] + duplicateBoost[i] = duplicateBoost[len(duplicateBoost)-1] + duplicateBoost = duplicateBoost[:len(duplicateBoost)-1] + + continue WordLoop + } + + i++ + } +} + +func (b *BM25Searcher) getTopKObjects(topKHeap *priorityqueue.Queue[[]*terms.DocPointerWithScore], additionalExplanations bool, + allRequests []string, additional additional.Properties, +) ([]*storobj.Object, []float32, error) { + objectsBucket := b.store.Bucket(helpers.ObjectsBucketLSM) + scores := make([]float32, 0, topKHeap.Len()) + ids := make([]uint64, 0, topKHeap.Len()) + explanations := make([][]*terms.DocPointerWithScore, 0, topKHeap.Len()) + for topKHeap.Len() > 0 { + res := topKHeap.Pop() + ids = append(ids, res.ID) + scores = append(scores, res.Dist) + explanations = append(explanations, res.Value) + } + + objs, err := storobj.ObjectsByDocID(objectsBucket, ids, additional, nil, b.logger) + if err != nil { + return objs, nil, errors.Errorf("objects loading") + } + + // handle case that an object was removed + if len(objs) != len(scores) { + idsTmp := make([]uint64, len(objs)) + j := 0 + for i := range scores { + if j >= len(objs) { + break + } + if objs[j].DocID != ids[i] { + continue + } + scores[j] = scores[i] + idsTmp[j] = ids[i] + j++ + } + scores = scores[:j] + } + + if additionalExplanations { + for k := range objs { + // add score explanation + if objs[k].AdditionalProperties() == nil { + objs[k].Object.Additional = make(map[string]interface{}) + } + for j, result := range explanations[k] { + if result == nil { + continue + } + queryTerm := allRequests[j] + objs[k].Object.Additional["BM25F_"+queryTerm+"_frequency"] = result.Frequency + objs[k].Object.Additional["BM25F_"+queryTerm+"_propLength"] = result.PropLength + } + } + } + + return objs, scores, nil +} + +func (b *BM25Searcher) getTopKIds(topKHeap *priorityqueue.Queue[[]*terms.DocPointerWithScore]) ([]uint64, []float32, [][]*terms.DocPointerWithScore, error) { + scores := make([]float32, 0, 
topKHeap.Len()) + ids := make([]uint64, 0, topKHeap.Len()) + explanations := make([][]*terms.DocPointerWithScore, 0, topKHeap.Len()) + for topKHeap.Len() > 0 { + res := topKHeap.Pop() + ids = append(ids, res.ID) + scores = append(scores, res.Dist) + if res.Value != nil { + explanations = append(explanations, res.Value) + } + } + return ids, scores, explanations, nil +} + +func (b *BM25Searcher) createTerm(N float64, filterDocIds helpers.AllowList, query string, queryTermIndex int, propertyNames []string, propertyBoosts map[string]float32, duplicateTextBoost int, ctx context.Context) (*terms.Term, error) { + termResult := terms.NewTerm(query, queryTermIndex, float32(1.0), b.config) + + var filteredDocIDs *sroar.Bitmap + var filteredDocIDsThread []*sroar.Bitmap + if filterDocIds != nil { + filteredDocIDs = sroar.NewBitmap() // to build the global n if there is a filter + filteredDocIDsThread = make([]*sroar.Bitmap, len(propertyNames)) + } + + eg := enterrors.NewErrorGroupWrapper(b.logger) + eg.SetLimit(_NUMCPU) + + allMsAndProps := make([][]terms.DocPointerWithScore, len(propertyNames)) + for i, propName := range propertyNames { + i := i + propName := propName + + eg.Go( + func() error { + bucket := b.store.Bucket(helpers.BucketSearchableFromPropNameLSM(propName)) + if bucket == nil { + return fmt.Errorf("could not find bucket for property %v", propName) + } + preM, err := bucket.DocPointerWithScoreList(ctx, []byte(query), propertyBoosts[propName]) + if err != nil { + return err + } + + var m []terms.DocPointerWithScore + if filterDocIds != nil { + if filteredDocIDsThread[i] == nil { + filteredDocIDsThread[i] = sroar.NewBitmap() + } + m = make([]terms.DocPointerWithScore, 0, len(preM)) + for _, val := range preM { + docID := val.Id + if filterDocIds.Contains(docID) { + m = append(m, val) + } else { + filteredDocIDsThread[i].Set(docID) + } + } + } else { + m = preM + } + + allMsAndProps[i] = m + return nil + }, + ) + } + if err := eg.Wait(); err != nil { + return 
termResult, err + } + + if filterDocIds != nil { + for _, docIDs := range filteredDocIDsThread { + if docIDs != nil { + filteredDocIDs.OrConc(docIDs, concurrency.SROAR_MERGE) + } + } + } + + largestN := 0 + // remove empty results from allMsAndProps + nonEmptyMsAndProps := make([][]terms.DocPointerWithScore, 0, len(allMsAndProps)) + for _, m := range allMsAndProps { + if len(m) > 0 { + nonEmptyMsAndProps = append(nonEmptyMsAndProps, m) + } + if len(m) > largestN { + largestN = len(m) + } + } + allMsAndProps = nonEmptyMsAndProps + + if len(nonEmptyMsAndProps) == 0 { + return nil, nil + } + + if len(nonEmptyMsAndProps) == 1 { + termResult.Data = allMsAndProps[0] + n := float64(len(termResult.Data)) + if filterDocIds != nil { + n += float64(filteredDocIDs.GetCardinality()) + } + termResult.SetIdf(math.Log(float64(1)+(N-float64(n)+0.5)/(float64(n)+0.5)) * float64(duplicateTextBoost)) + termResult.SetPosPointer(0) + termResult.SetIdPointer(termResult.Data[0].Id) + return termResult, nil + } + indices := make([]int, len(allMsAndProps)) + var docMapPairs []terms.DocPointerWithScore = nil + + // The indices are needed to combining the results of different properties + // They were previously used to keep track of additional explanations TF and prop len, + // but this is now done when adding terms to the heap in the getTopKHeap function + var docMapPairsIndices map[uint64]int = nil + for { + i := -1 + minId := uint64(0) + for ti, mAndProps := range allMsAndProps { + if indices[ti] >= len(mAndProps) { + continue + } + ki := mAndProps[indices[ti]].Id + if i == -1 || ki < minId { + i = ti + minId = ki + } + } + + if i == -1 { + break + } + + m := allMsAndProps[i] + k := indices[i] + val := m[indices[i]] + + indices[i]++ + + // only create maps/slices if we know how many entries there are + if docMapPairs == nil { + docMapPairs = make([]terms.DocPointerWithScore, 0, largestN) + docMapPairsIndices = make(map[uint64]int, largestN) + + docMapPairs = append(docMapPairs, val) + 
docMapPairsIndices[val.Id] = k + } else { + key := val.Id + ind, ok := docMapPairsIndices[key] + if ok { + if ind >= len(docMapPairs) { + // the index is not valid anymore, but the key is still in the map + b.logger.Warnf("Skipping pair in BM25: Index %d is out of range for key %d, length %d.", ind, key, len(docMapPairs)) + continue + } + if ind < len(docMapPairs) && docMapPairs[ind].Id != key { + b.logger.Warnf("Skipping pair in BM25: id at %d in doc map pairs, %d, differs from current key, %d", ind, docMapPairs[ind].Id, key) + continue + } + + docMapPairs[ind].PropLength += val.PropLength + docMapPairs[ind].Frequency += val.Frequency + } else { + docMapPairs = append(docMapPairs, val) + docMapPairsIndices[val.Id] = len(docMapPairs) - 1 // current last entry + } + + } + } + if docMapPairs == nil { + return nil, nil + } + termResult.Data = docMapPairs + + n := float64(len(docMapPairs)) + if filterDocIds != nil { + n += float64(filteredDocIDs.GetCardinality()) + } + termResult.SetIdf(math.Log(float64(1)+(N-n+0.5)/(n+0.5)) * float64(duplicateTextBoost)) + + // catch special case where there are no results and would panic termResult.data[0].id + // related to #4125 + if len(termResult.Data) == 0 { + return nil, nil + } + + termResult.SetPosPointer(0) + termResult.SetIdPointer(termResult.Data[0].Id) + return termResult, nil +} + +func PropertyHasSearchableIndex(class *models.Class, tentativePropertyName string) bool { + if class == nil { + return false + } + + propertyName := strings.Split(tentativePropertyName, "^")[0] + p, err := schema.GetPropertyByName(class, propertyName) + if err != nil { + return false + } + return HasSearchableIndex(p) +} + +func (b *BM25Searcher) GetBucket(propName string) *lsmkv.Bucket { + return b.store.Bucket(helpers.BucketSearchableFromPropNameLSM(propName)) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/bm25_searcher_block.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/bm25_searcher_block.go new file mode 100644 index 0000000000000000000000000000000000000000..755bd17f6fc024ccf11a61c190b5cece0652504a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/bm25_searcher_block.go @@ -0,0 +1,423 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "context" + "math" + "os" + "runtime/debug" + "slices" + "sort" + "strconv" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/handlers/graphql/local/common_filters" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/terms" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/entities/additional" + entcfg "github.com/weaviate/weaviate/entities/config" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/entities/tokenizer" +) + +// var metrics = lsmkv.BlockMetrics{} + +func (b *BM25Searcher) createBlockTerm(N float64, filterDocIds helpers.AllowList, query []string, propName string, propertyBoost float32, duplicateTextBoosts []int, config schema.BM25Config, ctx context.Context) ([][]*lsmkv.SegmentBlockMax, map[string]uint64, func(), error) { + bucket := b.store.Bucket(helpers.BucketSearchableFromPropNameLSM(propName)) + return bucket.CreateDiskTerm(N, filterDocIds, query, propName, propertyBoost, 
duplicateTextBoosts, config, ctx) +} + +func (b *BM25Searcher) wandBlock( + ctx context.Context, filterDocIds helpers.AllowList, class *models.Class, params searchparams.KeywordRanking, limit int, additional additional.Properties, +) ([]*storobj.Object, []float32, error) { + defer func() { + if !entcfg.Enabled(os.Getenv("DISABLE_RECOVERY_ON_PANIC")) { + if r := recover(); r != nil { + b.logger.Errorf("Recovered from panic in wandBlock: %v", r) + debug.PrintStack() + } + } + }() + + // if the filter is empty, we can skip the search + // as no documents will match it + if filterDocIds != nil && filterDocIds.IsEmpty() { + return []*storobj.Object{}, []float32{}, nil + } + + allBucketsAreInverted, N, propNamesByTokenization, queryTermsByTokenization, duplicateBoostsByTokenization, propertyBoosts, averagePropLength, err := b.generateQueryTermsAndStats(class, params) + if err != nil { + return nil, nil, err + } + + // fallback to the old search process if not all buckets are inverted + if !allBucketsAreInverted { + return b.wand(ctx, filterDocIds, class, params, limit, additional) + } + + allResults := make([][][]*lsmkv.SegmentBlockMax, 0, len(params.Properties)) + termCounts := make([][]string, 0, len(params.Properties)) + minimumOrTokensMatchByProperty := make([]int, 0, len(params.Properties)) + + // These locks are the segmentCompactions locks for the searched properties + // The old search process locked the compactions and read the full postings list into memory. + // We don't do that anymore, as the goal of BlockMaxWAND is to avoid reading the full postings list into memory. + // The locks are needed here instead of at DoBlockMaxWand only, as we separate term creation from the actual search. 
+ // TODO: We should consider if we can remove these locks and only lock at DoBlockMaxWand + releaseCallbacks := make(map[string]func(), len(params.Properties)) + + defer func() { + for _, release := range releaseCallbacks { + release() + } + }() + + for _, tokenization := range tokenizer.Tokenizations { + propNames := propNamesByTokenization[tokenization] + if len(propNames) > 0 { + lenAllResults := len(allResults) + queryTerms, duplicateBoosts := queryTermsByTokenization[tokenization], duplicateBoostsByTokenization[tokenization] + duplicateBoostsByTerm := make(map[string]int, len(duplicateBoosts)) + for i, term := range queryTerms { + duplicateBoostsByTerm[term] = duplicateBoosts[i] + } + globalIdfCounts := make(map[string]uint64, len(queryTerms)) + nonZeroTerms := make(map[string]uint64, len(queryTerms)) + for _, propName := range propNames { + results, idfCounts, release, err := b.createBlockTerm(N, filterDocIds, queryTerms, propName, propertyBoosts[propName], duplicateBoosts, b.config, ctx) + if err != nil { + return nil, nil, err + } + + if release != nil { + releaseCallbacks[propName] = release + } + + allResults = append(allResults, results) + termCounts = append(termCounts, queryTerms) + + minimumOrTokensMatch := params.MinimumOrTokensMatch + if params.SearchOperator == common_filters.SearchOperatorAnd { + minimumOrTokensMatch = len(queryTerms) + } + + minimumOrTokensMatchByProperty = append(minimumOrTokensMatchByProperty, minimumOrTokensMatch) + for _, term := range queryTerms { + globalIdfCounts[term] += idfCounts[term] + if idfCounts[term] > 0 { + nonZeroTerms[term]++ + } + } + } + globalIdfs := make(map[string]float64, len(queryTerms)) + for term := range globalIdfCounts { + if nonZeroTerms[term] == 0 { + continue + } + n := globalIdfCounts[term] / nonZeroTerms[term] + + globalIdfs[term] = math.Log(float64(1)+(N-float64(n)+0.5)/(float64(n)+0.5)) * float64(duplicateBoostsByTerm[term]) + } + for _, result := range allResults[lenAllResults:] { + if 
len(result) == 0 { + continue + } + for j := range result { + if len(result[j]) == 0 { + continue + } + for k := range result[j] { + if result[j][k] != nil { + result[j][k].SetIdf(globalIdfs[result[j][k].QueryTerm()]) + } + } + } + } + + } + } + + // all results. Sum up the length of the results from all terms to get an upper bound of how many results there are + internalLimit := limit + if limit == 0 { + for _, perProperty := range allResults { + for _, perSegment := range perProperty { + for _, perTerm := range perSegment { + if perTerm != nil { + limit += perTerm.Count() + } + } + } + } + internalLimit = limit + + } else if len(allResults) > 1 { + // we only need to increase the limit if there are multiple properties + // TODO: the limit is increased by 10 to make sure candidates that are on the edge of the limit are not missed for multi-property search + // the proper fix is to either make sure that the limit is always high enough, or force a rerank of the top results from all properties + defaultLimit := int(math.Max(float64(limit)*1.1, float64(limit+10))) + // allow overriding the defaultLimit with an env var + internalLimitString := os.Getenv("BLOCKMAX_WAND_PER_SEGMENT_LIMIT") + if internalLimitString != "" { + // if the env var is set, use it as the limit + internalLimit, _ = strconv.Atoi(internalLimitString) + } + + if internalLimit < defaultLimit { + // if the limit is smaller than the defaultLimit, use the defaultLimit + internalLimit = defaultLimit + } + } + + eg := enterrors.NewErrorGroupWrapper(b.logger) + eg.SetLimit(_NUMCPU) + + allIds := make([][][]uint64, len(allResults)) + allScores := make([][][]float32, len(allResults)) + allExplanation := make([][][][]*terms.DocPointerWithScore, len(allResults)) + for i, perProperty := range allResults { + allIds[i] = make([][]uint64, len(perProperty)) + allScores[i] = make([][]float32, len(perProperty)) + allExplanation[i] = make([][][]*terms.DocPointerWithScore, len(perProperty)) + + // per segment + for j 
:= range perProperty { + + i := i + j := j + + if len(allResults[i][j]) == 0 { + continue + } + + // return early if there aren't enough terms to match + if len(allResults[i][j]) < minimumOrTokensMatchByProperty[i] { + continue + } + + eg.Go(func() (err error) { + var topKHeap *priorityqueue.Queue[[]*terms.DocPointerWithScore] + if params.SearchOperator == common_filters.SearchOperatorAnd { + topKHeap = lsmkv.DoBlockMaxAnd(ctx, internalLimit, allResults[i][j], averagePropLength, params.AdditionalExplanations, len(termCounts[i]), minimumOrTokensMatchByProperty[i], b.logger) + } else { + topKHeap, _ = lsmkv.DoBlockMaxWand(ctx, internalLimit, allResults[i][j], averagePropLength, params.AdditionalExplanations, len(termCounts[i]), minimumOrTokensMatchByProperty[i], b.logger) + } + ids, scores, explanations, err := b.getTopKIds(topKHeap) + if err != nil { + return err + } + + allIds[i][j] = ids + allScores[i][j] = scores + if len(explanations) > 0 { + allExplanation[i][j] = explanations + } + + return nil + }) + } + } + + if err := eg.Wait(); err != nil { + return nil, nil, err + } + + objects, scores := b.combineResults(allIds, allScores, allExplanation, termCounts, additional, limit) + return objects, scores, nil +} + +func (b *BM25Searcher) combineResults(allIds [][][]uint64, allScores [][][]float32, allExplanation [][][][]*terms.DocPointerWithScore, queryTerms [][]string, additional additional.Properties, limit int) ([]*storobj.Object, []float32) { + // combine all results + combinedIds := make([]uint64, 0, limit*len(allIds)) + combinedScores := make([]float32, 0, limit*len(allIds)) + combinedExplanations := make([][]*terms.DocPointerWithScore, 0, limit*len(allIds)) + combinedTerms := make([]string, 0, limit*len(allIds)) + + // combine all results + for i := range allIds { + singlePropIds := slices.Concat(allIds[i]...) + singlePropScores := slices.Concat(allScores[i]...) + singlePropExplanation := slices.Concat(allExplanation[i]...) 
+ // Choose the highest score for each object if it appears in multiple segments + combinedIdsProp, combinedScoresProp, combinedExplanationProp := b.combineResultsForMultiProp(singlePropIds, singlePropScores, singlePropExplanation, func(a, b float32) float32 { return b }, true) + combinedIds = append(combinedIds, combinedIdsProp...) + combinedScores = append(combinedScores, combinedScoresProp...) + combinedExplanations = append(combinedExplanations, combinedExplanationProp...) + combinedTerms = append(combinedTerms, queryTerms[i]...) + } + + // Choose the sum of the scores for each object if it appears in multiple properties + combinedIds, combinedScores, combinedExplanations = b.combineResultsForMultiProp(combinedIds, combinedScores, combinedExplanations, func(a, b float32) float32 { return a + b }, false) + + combinedIds, combinedScores, combinedExplanations = b.sortResultsByScore(combinedIds, combinedScores, combinedExplanations) + + limit = int(math.Min(float64(limit), float64(len(combinedIds)))) + + combinedObjects, combinedScores, err := b.getObjectsAndScores(combinedIds, combinedScores, combinedExplanations, combinedTerms, additional, limit) + if err != nil { + return nil, nil + } + return combinedObjects, combinedScores +} + +type aggregate func(float32, float32) float32 + +func (b *BM25Searcher) combineResultsForMultiProp(allIds []uint64, allScores []float32, allExplanation [][]*terms.DocPointerWithScore, aggregateFn aggregate, singleProp bool) ([]uint64, []float32, [][]*terms.DocPointerWithScore) { + // if ids are the same, sum the scores + combinedScores := make(map[uint64]float32) + combinedExplanations := make(map[uint64][]*terms.DocPointerWithScore) + + for i, obj := range allIds { + id := obj + if _, ok := combinedScores[id]; !ok { + combinedScores[id] = allScores[i] + if len(allExplanation) > 0 { + combinedExplanations[id] = allExplanation[i] + } + } else { + combinedScores[id] = aggregateFn(combinedScores[id], allScores[i]) + if len(allExplanation) 
> 0 { + if singleProp { + combinedExplanations[id] = allExplanation[i] + } else { + combinedExplanations[id] = append(combinedExplanations[id], allExplanation[i]...) + } + } + + } + } + + ids := make([]uint64, 0, len(combinedScores)) + scores := make([]float32, 0, len(combinedScores)) + exp := make([][]*terms.DocPointerWithScore, 0, len(combinedScores)) + for id, score := range combinedScores { + ids = append(ids, id) + scores = append(scores, score) + if allExplanation != nil { + exp = append(exp, combinedExplanations[id]) + } + } + return ids, scores, exp +} + +func (b *BM25Searcher) sortResultsByScore(ids []uint64, scores []float32, explanations [][]*terms.DocPointerWithScore) ([]uint64, []float32, [][]*terms.DocPointerWithScore) { + sorter := &scoreSorter{ + ids: ids, + scores: scores, + explanations: explanations, + } + sort.Sort(sorter) + return sorter.ids, sorter.scores, sorter.explanations +} + +func (b *BM25Searcher) getObjectsAndScores(ids []uint64, scores []float32, explanations [][]*terms.DocPointerWithScore, queryTerms []string, additionalProps additional.Properties, limit int) ([]*storobj.Object, []float32, error) { + // reverse arrays to start with the highest score + slices.Reverse(ids) + slices.Reverse(scores) + if explanations != nil { + slices.Reverse(explanations) + } + + objs := make([]*storobj.Object, 0, limit) + scoresResult := make([]float32, 0, limit) + explanationsResults := make([][]*terms.DocPointerWithScore, 0, limit) + + objectsBucket := b.store.Bucket(helpers.ObjectsBucketLSM) + + startAt := 0 + endAt := limit + // try to get docs up to the limit + // if there are not enough docs, get limit more docs until we've exhausted the list of ids + for len(objs) < limit && startAt < len(ids) { + // storobj.ObjectsByDocID may return fewer than limit objects + // notFoundCount keeps track of the number of objects that were not found, + // so we can keep matching scores and explanations to the correct object + notFoundCount := 0 + objsBatch, err 
:= storobj.ObjectsByDocID(objectsBucket, ids[startAt:endAt], additionalProps, nil, b.logger) + if err != nil { + return objs, nil, errors.Errorf("objects loading") + } + for i, obj := range objsBatch { + if obj == nil { + continue + } + // move forward the notFoundCount until we find the next object + // if we enter the loop, it means that doc at ids[startAt+notFoundCount+i] + // was not found, so we need to skip it + for obj.DocID != ids[startAt+notFoundCount+i] { + notFoundCount++ + } + objs = append(objs, obj) + scoresResult = append(scoresResult, scores[startAt+notFoundCount+i]) + if explanations != nil { + explanationsResults = append(explanationsResults, explanations[startAt+notFoundCount+i]) + } + } + startAt = endAt + endAt = int(math.Min(float64(endAt+limit), float64(len(ids)))) + } + + if explanationsResults != nil && len(explanationsResults) == len(scoresResult) { + for k := range objs { + // add score explanation + if objs[k].AdditionalProperties() == nil { + objs[k].Object.Additional = make(map[string]interface{}) + } + for j, result := range explanationsResults[k] { + if result == nil { + continue + } + queryTerm := queryTerms[j] + objs[k].Object.Additional["BM25F_"+queryTerm+"_frequency"] = result.Frequency + objs[k].Object.Additional["BM25F_"+queryTerm+"_propLength"] = result.PropLength + } + } + } + + // reverse back the arrays to the expected order + slices.Reverse(objs) + slices.Reverse(scoresResult) + + return objs, scoresResult, nil +} + +type scoreSorter struct { + ids []uint64 + scores []float32 + explanations [][]*terms.DocPointerWithScore +} + +func (s *scoreSorter) Len() int { + return len(s.ids) +} + +func (s *scoreSorter) Less(i, j int) bool { + if s.scores[i] == s.scores[j] { + return s.ids[i] > s.ids[j] + } + return s.scores[i] < s.scores[j] +} + +func (s *scoreSorter) Swap(i, j int) { + s.ids[i], s.ids[j] = s.ids[j], s.ids[i] + s.scores[i], s.scores[j] = s.scores[j], s.scores[i] + if s.explanations != nil { + s.explanations[i], 
s.explanations[j] = s.explanations[j], s.explanations[i] + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/config.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/config.go new file mode 100644 index 0000000000000000000000000000000000000000..8bf63dbd5310212d8a0ea2e576c42ad4da065dbe --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/config.go @@ -0,0 +1,168 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "runtime" + "strings" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/stopwords" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/config" +) + +var _NUMCPU = runtime.NumCPU() + +func ValidateConfig(conf *models.InvertedIndexConfig) error { + if conf.CleanupIntervalSeconds < 0 { + return errors.Errorf("cleanup interval seconds must be > 0") + } + + err := validateBM25Config(conf.Bm25) + if err != nil { + return err + } + + err = validateStopwordConfig(conf.Stopwords) + if err != nil { + return err + } + + return nil +} + +func ConfigFromModel(iicm *models.InvertedIndexConfig) schema.InvertedIndexConfig { + var conf schema.InvertedIndexConfig + + conf.IndexTimestamps = iicm.IndexTimestamps + conf.IndexNullState = iicm.IndexNullState + conf.IndexPropertyLength = iicm.IndexPropertyLength + + if iicm.Bm25 == nil { + conf.BM25.K1 = float64(config.DefaultBM25k1) + conf.BM25.B = float64(config.DefaultBM25b) + } else { + conf.BM25.K1 = float64(iicm.Bm25.K1) + conf.BM25.B = float64(iicm.Bm25.B) + } + + if iicm.Stopwords == nil { + conf.Stopwords = models.StopwordConfig{ + Preset: 
stopwords.EnglishPreset, + } + } else { + conf.Stopwords.Preset = iicm.Stopwords.Preset + conf.Stopwords.Additions = iicm.Stopwords.Additions + conf.Stopwords.Removals = iicm.Stopwords.Removals + } + + conf.UsingBlockMaxWAND = iicm.UsingBlockMaxWAND + + return conf +} + +func validateBM25Config(conf *models.BM25Config) error { + if conf == nil { + return nil + } + + if conf.K1 < 0 { + return errors.Errorf("BM25.k1 must be >= 0") + } + if conf.B < 0 || conf.B > 1 { + return errors.Errorf("BM25.b must be <= 0 and <= 1") + } + + return nil +} + +func validateStopwordConfig(conf *models.StopwordConfig) error { + if conf == nil { + conf = &models.StopwordConfig{} + } + + if conf.Preset == "" { + conf.Preset = stopwords.EnglishPreset + } + + if _, ok := stopwords.Presets[conf.Preset]; !ok { + return errors.Errorf("stopwordPreset '%s' does not exist", conf.Preset) + } + + err := validateStopwordAdditionsRemovals(conf) + if err != nil { + return err + } + + return nil +} + +func validateStopwordAdditionsRemovals(conf *models.StopwordConfig) error { + // the same stopword cannot exist + // in both additions and removals + foundAdditions := make(map[string]int) + + for idx, add := range conf.Additions { + if strings.TrimSpace(add) == "" { + return errors.Errorf("cannot use whitespace in stopword.additions") + } + + // save the index of the addition since it + // is readily available here. 
we will need + // this below when trimming additions that + // already exist in the selected preset + foundAdditions[add] = idx + } + + for _, rem := range conf.Removals { + if strings.TrimSpace(rem) == "" { + return errors.Errorf("cannot use whitespace in stopword.removals") + } + + if _, ok := foundAdditions[rem]; ok { + return errors.Errorf( + "found '%s' in both stopwords.additions and stopwords.removals", rem) + } + } + + removeStopwordAdditionsIfInPreset(conf, foundAdditions) + return nil +} + +func removeStopwordAdditionsIfInPreset(conf *models.StopwordConfig, foundAdditions map[string]int) { + presets := stopwords.Presets[conf.Preset] + + // if any of the elements in stopwords.additions + // already exist in the preset, mark it as to + // be removed + indicesToRemove := make(map[int]bool) + for _, preset := range presets { + if idx, ok := foundAdditions[preset]; ok { + indicesToRemove[idx] = true + } + } + + if len(indicesToRemove) == 0 { + return + } + + // take remaining additions, build new list + var trimmedAdditions []string + for idx, add := range conf.Additions { + if _, ok := indicesToRemove[idx]; !ok { + trimmedAdditions = append(trimmedAdditions, add) + } + } + conf.Additions = trimmedAdditions +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/config_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/config_test.go new file mode 100644 index 0000000000000000000000000000000000000000..933b028b44e2036807f8ad4524b801d39625bacb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/config_test.go @@ -0,0 +1,234 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "math" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/config" +) + +const float64EqualityThreshold = 1e-6 + +func almostEqual(t *testing.T, a, b float64) bool { + closeEnough := math.Abs(a-b) <= float64EqualityThreshold + if !closeEnough { + t.Logf("%f and %f differ by more than a threshold of %f", + a, b, float64EqualityThreshold) + } + return closeEnough +} + +func TestValidateConfig(t *testing.T) { + t.Run("with invalid BM25.k1", func(t *testing.T) { + in := &models.InvertedIndexConfig{ + Bm25: &models.BM25Config{ + K1: -1, + B: 0.7, + }, + } + + err := ValidateConfig(in) + assert.EqualError(t, err, "BM25.k1 must be >= 0") + }) + + t.Run("with invalid BM25.b", func(t *testing.T) { + in := &models.InvertedIndexConfig{ + Bm25: &models.BM25Config{ + K1: 1, + B: 1.001, + }, + } + + err := ValidateConfig(in) + assert.EqualError(t, err, "BM25.b must be <= 0 and <= 1") + }) + + t.Run("with valid config", func(t *testing.T) { + in := &models.InvertedIndexConfig{ + Bm25: &models.BM25Config{ + K1: 1, + B: 0.1, + }, + } + + err := ValidateConfig(in) + assert.Nil(t, err) + }) + + t.Run("with nonexistent stopword preset", func(t *testing.T) { + in := &models.InvertedIndexConfig{ + Stopwords: &models.StopwordConfig{ + Preset: "DNE", + }, + } + + err := ValidateConfig(in) + assert.EqualError(t, err, "stopwordPreset 'DNE' does not exist") + }) + + t.Run("with whitespace stopword additions", func(t *testing.T) { + additions := [][]string{ + {"bats", " "}, + {""}, + {"something", " ", "skippable"}, + } + + for _, addList := range additions { + in := &models.InvertedIndexConfig{ + Stopwords: &models.StopwordConfig{ + Additions: addList, + }, + } + + err := ValidateConfig(in) + assert.EqualError(t, err, "cannot use whitespace in stopword.additions") + } + }) + + 
t.Run("with whitespace stopword removals", func(t *testing.T) { + removals := [][]string{ + {"bats", " "}, + {""}, + {"something", " ", "skippable"}, + } + + for _, remList := range removals { + in := &models.InvertedIndexConfig{ + Stopwords: &models.StopwordConfig{ + Removals: remList, + }, + } + + err := ValidateConfig(in) + assert.EqualError(t, err, "cannot use whitespace in stopword.removals") + } + }) + + t.Run("with shared additions/removals items", func(t *testing.T) { + in := &models.InvertedIndexConfig{ + Stopwords: &models.StopwordConfig{ + Additions: []string{"some", "words", "are", "different"}, + Removals: []string{"and", "some", "the", "same"}, + }, + } + + err := ValidateConfig(in) + assert.EqualError(t, err, + "found 'some' in both stopwords.additions and stopwords.removals") + }) + + t.Run("with additions that exist in preset", func(t *testing.T) { + tests := []struct { + additions []string + expectedLength int + }{ + { + additions: []string{"superfluous", "extravagant", "a"}, + expectedLength: 2, + }, + { + additions: []string{"a", "are", "the"}, + expectedLength: 0, + }, + { + additions: []string{"everyone", "sleeps", "eventually"}, + expectedLength: 3, + }, + } + + for _, test := range tests { + in := &models.InvertedIndexConfig{ + Stopwords: &models.StopwordConfig{ + Preset: "en", + Additions: test.additions, + }, + } + + err := ValidateConfig(in) + assert.Nil(t, err) + assert.Equal(t, test.expectedLength, len(in.Stopwords.Additions)) + } + }) +} + +func TestConfigFromModel(t *testing.T) { + t.Run("with all fields set", func(t *testing.T) { + k1 := 1.12 + b := 0.7 + + in := &models.InvertedIndexConfig{ + Bm25: &models.BM25Config{ + K1: float32(k1), + B: float32(b), + }, + Stopwords: &models.StopwordConfig{ + Preset: "en", + }, + } + + expected := schema.InvertedIndexConfig{ + BM25: schema.BM25Config{ + K1: k1, + B: b, + }, + Stopwords: models.StopwordConfig{ + Preset: "en", + }, + } + + conf := ConfigFromModel(in) + assert.True(t, 
almostEqual(t, conf.BM25.K1, expected.BM25.K1)) + assert.True(t, almostEqual(t, conf.BM25.B, expected.BM25.B)) + assert.Equal(t, expected.Stopwords, conf.Stopwords) + }) + + t.Run("with no BM25 params set", func(t *testing.T) { + interval := int64(1) + + in := &models.InvertedIndexConfig{ + CleanupIntervalSeconds: interval, + } + + expected := schema.InvertedIndexConfig{ + BM25: schema.BM25Config{ + K1: float64(config.DefaultBM25k1), + B: float64(config.DefaultBM25b), + }, + } + + conf := ConfigFromModel(in) + assert.True(t, almostEqual(t, conf.BM25.K1, expected.BM25.K1)) + assert.True(t, almostEqual(t, conf.BM25.B, expected.BM25.B)) + }) + + t.Run("with no Stopword config set", func(t *testing.T) { + interval := int64(1) + + in := &models.InvertedIndexConfig{ + CleanupIntervalSeconds: interval, + } + + expected := schema.InvertedIndexConfig{ + Stopwords: models.StopwordConfig{ + Preset: "en", + }, + } + + conf := ConfigFromModel(in) + assert.Equal(t, expected.Stopwords, conf.Stopwords) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/config_update.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/config_update.go new file mode 100644 index 0000000000000000000000000000000000000000..483aa360244a5859372d5321360763aea0435278 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/config_update.go @@ -0,0 +1,87 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/models" +) + +func ValidateUserConfigUpdate(initial, updated *models.InvertedIndexConfig) error { + if updated.CleanupIntervalSeconds < 0 { + return errors.Errorf("cleanup interval seconds must be > 0") + } + + err := validateBM25ConfigUpdate(initial, updated) + if err != nil { + return err + } + + err = validateInvertedIndexConfigUpdate(initial, updated) + if err != nil { + return err + } + + err = validateStopwordsConfigUpdate(initial, updated) + if err != nil { + return err + } + + return nil +} + +func validateBM25ConfigUpdate(initial, updated *models.InvertedIndexConfig) error { + if updated.Bm25 == nil { + updated.Bm25 = &models.BM25Config{ + K1: initial.Bm25.K1, + B: initial.Bm25.B, + } + return nil + } + + err := validateBM25Config(updated.Bm25) + if err != nil { + return err + } + + return nil +} + +func validateInvertedIndexConfigUpdate(initial, updated *models.InvertedIndexConfig) error { + if updated.IndexPropertyLength != initial.IndexPropertyLength { + return errors.New("IndexPropertyLength cannot be changed when updating a schema") + } + + if updated.IndexNullState != initial.IndexNullState { + return errors.New("IndexNullState cannot be changed when updating a schema") + } + + return nil +} + +func validateStopwordsConfigUpdate(initial, updated *models.InvertedIndexConfig) error { + if updated.Stopwords == nil { + updated.Stopwords = &models.StopwordConfig{ + Preset: initial.Stopwords.Preset, + Additions: initial.Stopwords.Additions, + Removals: initial.Stopwords.Removals, + } + return nil + } + + err := validateStopwordConfig(updated.Stopwords) + if err != nil { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/config_update_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/config_update_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..42cec970e1d4f70cc782b5ce2a2b12735bbbca98 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/config_update_test.go @@ -0,0 +1,141 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/stopwords" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/config" +) + +func TestValidateUserConfigUpdate(t *testing.T) { + validInitial := &models.InvertedIndexConfig{ + CleanupIntervalSeconds: 1, + Bm25: &models.BM25Config{ + K1: config.DefaultBM25k1, + B: config.DefaultBM25b, + }, + Stopwords: &models.StopwordConfig{ + Preset: stopwords.EnglishPreset, + }, + } + + t.Run("with valid updated config all fields", func(t *testing.T) { + updated := &models.InvertedIndexConfig{ + CleanupIntervalSeconds: 2, + Bm25: &models.BM25Config{ + K1: 1.3, + B: 0.778, + }, + Stopwords: &models.StopwordConfig{ + Preset: "en", + Additions: []string{"star", "nebula"}, + Removals: []string{"the", "a"}, + }, + } + + err := ValidateUserConfigUpdate(validInitial, updated) + require.Nil(t, err) + }) + + t.Run("with valid updated config missing BM25", func(t *testing.T) { + updated := &models.InvertedIndexConfig{ + CleanupIntervalSeconds: 2, + } + + err := ValidateUserConfigUpdate(validInitial, updated) + require.Nil(t, err) + assert.Equal(t, validInitial.Bm25.K1, updated.Bm25.K1) + assert.Equal(t, validInitial.Bm25.B, updated.Bm25.B) + }) + + t.Run("with valid updated config missing Stopwords", func(t *testing.T) { + updated := &models.InvertedIndexConfig{ + CleanupIntervalSeconds: 
2, + } + + err := ValidateUserConfigUpdate(validInitial, updated) + require.Nil(t, err) + assert.Equal(t, validInitial.Stopwords.Preset, updated.Stopwords.Preset) + assert.Equal(t, validInitial.Stopwords.Additions, updated.Stopwords.Additions) + assert.Equal(t, validInitial.Stopwords.Removals, updated.Stopwords.Removals) + }) + + t.Run("with invalid cleanup interval", func(t *testing.T) { + updated := &models.InvertedIndexConfig{ + CleanupIntervalSeconds: -1, + } + + err := ValidateUserConfigUpdate(validInitial, updated) + require.EqualError(t, err, "cleanup interval seconds must be > 0") + }) + + t.Run("with invalid updated Bm25 config", func(t *testing.T) { + updated := &models.InvertedIndexConfig{ + CleanupIntervalSeconds: 1, + Bm25: &models.BM25Config{ + K1: 1.2, + B: 1.2, + }, + } + + err := ValidateUserConfigUpdate(validInitial, updated) + require.EqualError(t, err, "BM25.b must be <= 0 and <= 1") + }) + + t.Run("with invalid updated Stopwords preset config", func(t *testing.T) { + updated := &models.InvertedIndexConfig{ + CleanupIntervalSeconds: 1, + Stopwords: &models.StopwordConfig{ + Preset: "mongolian", + }, + } + + err := ValidateUserConfigUpdate(validInitial, updated) + require.EqualError(t, err, "stopwordPreset 'mongolian' does not exist") + }) + + t.Run("with invalid updated Stopwords addition/removal config", func(t *testing.T) { + updated := &models.InvertedIndexConfig{ + CleanupIntervalSeconds: 1, + Stopwords: &models.StopwordConfig{ + Additions: []string{"duplicate"}, + Removals: []string{"duplicate"}, + }, + } + + err := ValidateUserConfigUpdate(validInitial, updated) + require.EqualError(t, err, "found 'duplicate' in both stopwords.additions and stopwords.removals") + }) + + t.Run("with invalid updated inverted index null state change", func(t *testing.T) { + updated := &models.InvertedIndexConfig{ + IndexNullState: true, + } + + err := ValidateUserConfigUpdate(validInitial, updated) + require.EqualError(t, err, "IndexNullState cannot be 
changed when updating a schema") + }) + + t.Run("with invalid updated inverted index property length change", func(t *testing.T) { + updated := &models.InvertedIndexConfig{ + IndexPropertyLength: true, + } + + err := ValidateUserConfigUpdate(validInitial, updated) + require.EqualError(t, err, "IndexPropertyLength cannot be changed when updating a schema") + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/delta_analyzer.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/delta_analyzer.go new file mode 100644 index 0000000000000000000000000000000000000000..8d3641082389dd22af3fcfbd16f2d36d68ece953 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/delta_analyzer.go @@ -0,0 +1,282 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import "bytes" + +type DeltaResults struct { + ToDelete []Property + ToAdd []Property +} + +func Delta(previous, next []Property) DeltaResults { + return DeltaSkipSearchable(previous, next, nil) +} + +// skipDeltaSearchableProps - names of properties having searchable index of StrategyInverted +// (StrategyInverted requires complete set of items to be added or deleted (not delta), +// therefore for such properties calculating delta should be skipped. If same properties +// have other indexes (filterable / rangeable) property will be duplicated to contain +// complete items sets for searchable index and delta for remaining ones. 
+func DeltaSkipSearchable(previous, next []Property, skipDeltaSearchableProps []string) DeltaResults { + out := DeltaResults{} + previous = DedupItems(previous) + next = DedupItems(next) + + if previous == nil { + out.ToAdd = next + return out + } + + skipDeltaPropsNames := map[string]struct{}{} + for i := range skipDeltaSearchableProps { + skipDeltaPropsNames[skipDeltaSearchableProps[i]] = struct{}{} + } + + previousByProp := map[string]Property{} + for _, prevProp := range previous { + previousByProp[prevProp.Name] = prevProp + } + + for _, nextProp := range next { + prevProp, ok := previousByProp[nextProp.Name] + if !ok { + if len(nextProp.Items) == 0 { + // effectively nothing is added + continue + } + + // this prop didn't exist before so we can add all of it + out.ToAdd = append(out.ToAdd, nextProp) + if nextProp.Length != -1 { + // if length supported, remove prev length + out.ToDelete = append(out.ToDelete, Property{ + Name: nextProp.Name, + Items: []Countable{}, + Length: 0, + HasFilterableIndex: nextProp.HasFilterableIndex, + HasSearchableIndex: nextProp.HasSearchableIndex, + HasRangeableIndex: nextProp.HasRangeableIndex, + }) + } + continue + } + delete(previousByProp, nextProp.Name) + + // there is a chance they're identical, such a check is pretty cheap and + // it could prevent us from running an expensive merge, so let's try our + // luck + if listsIdentical(prevProp.Items, nextProp.Items) { + // then we don't need to do anything about this prop + continue + } + + if lenPrev, lenNext := len(prevProp.Items), len(nextProp.Items); lenPrev == 0 || lenNext == 0 { + out.ToAdd = append(out.ToAdd, nextProp) + out.ToDelete = append(out.ToDelete, prevProp) + } else { + _, skipDeltaSearchable := skipDeltaPropsNames[nextProp.Name] + + if skipDeltaSearchable && nextProp.HasSearchableIndex { + // property with searchable index of StrategyInverted + if !nextProp.HasFilterableIndex && !nextProp.HasRangeableIndex { + // no other indexes, skip calculating delta + 
out.ToAdd = append(out.ToAdd, nextProp) + out.ToDelete = append(out.ToDelete, prevProp) + } else { + // other indexes present + toAdd, toDel, cleaned := countableDelta(prevProp.Items, nextProp.Items) + + // if delta same as inputs + if !cleaned { + out.ToAdd = append(out.ToAdd, nextProp) + out.ToDelete = append(out.ToDelete, prevProp) + } else { + // separate entries for !searchable indexes with calculated delta + out.ToAdd = append(out.ToAdd, Property{ + Name: nextProp.Name, + Items: toAdd, + Length: nextProp.Length, + HasFilterableIndex: nextProp.HasFilterableIndex, + HasSearchableIndex: false, + HasRangeableIndex: nextProp.HasRangeableIndex, + }) + out.ToDelete = append(out.ToDelete, Property{ + Name: prevProp.Name, + Items: toDel, + Length: prevProp.Length, + HasFilterableIndex: prevProp.HasFilterableIndex, + HasSearchableIndex: false, + HasRangeableIndex: prevProp.HasRangeableIndex, + }) + + // separate entries for searchable index of StrategyInverted with complete item sets + // length/nil indexes will be handled by delta entries, therefore -1 not to be processed twice + out.ToAdd = append(out.ToAdd, Property{ + Name: nextProp.Name, + Items: nextProp.Items, + Length: -1, + HasFilterableIndex: false, + HasSearchableIndex: true, + HasRangeableIndex: false, + }) + out.ToDelete = append(out.ToDelete, Property{ + Name: prevProp.Name, + Items: prevProp.Items, + Length: -1, + HasFilterableIndex: false, + HasSearchableIndex: true, + HasRangeableIndex: false, + }) + } + } + } else { + // property of other indexes, calculate delta + toAdd, toDel, _ := countableDelta(prevProp.Items, nextProp.Items) + out.ToAdd = append(out.ToAdd, Property{ + Name: nextProp.Name, + Items: toAdd, + Length: nextProp.Length, + HasFilterableIndex: nextProp.HasFilterableIndex, + HasSearchableIndex: nextProp.HasSearchableIndex, + HasRangeableIndex: nextProp.HasRangeableIndex, + }) + out.ToDelete = append(out.ToDelete, Property{ + Name: prevProp.Name, + Items: toDel, + Length: prevProp.Length, 
+ HasFilterableIndex: prevProp.HasFilterableIndex, + HasSearchableIndex: prevProp.HasSearchableIndex, + HasRangeableIndex: prevProp.HasRangeableIndex, + }) + } + } + } + + // extend ToDelete with props from previous missing in next + for _, prevProp := range previous { + if _, ok := previousByProp[prevProp.Name]; ok { + if len(prevProp.Items) == 0 { + // effectively nothing is removed + continue + } + + if prevProp.Length != -1 { + // if length supported, add next length + out.ToAdd = append(out.ToAdd, Property{ + Name: prevProp.Name, + Items: []Countable{}, + Length: 0, + HasFilterableIndex: prevProp.HasFilterableIndex, + HasSearchableIndex: prevProp.HasSearchableIndex, + HasRangeableIndex: prevProp.HasRangeableIndex, + }) + } + out.ToDelete = append(out.ToDelete, prevProp) + } + } + + return out +} + +func countableDelta(prev, next []Countable) ([]Countable, []Countable, bool) { + add := []Countable{} + del := []Countable{} + + seenInPrev := map[string]Countable{} + cleaned := false + + for _, prevItem := range prev { + seenInPrev[string(prevItem.Data)] = prevItem + } + + for _, nextItem := range next { + prev, ok := seenInPrev[string(nextItem.Data)] + if ok && prev.TermFrequency == nextItem.TermFrequency { + cleaned = true + // we have an identical overlap, delete from old list + delete(seenInPrev, string(nextItem.Data)) + // don't add to new list + continue + } + + add = append(add, nextItem) + } + + // anything that's now left on the seenInPrev map must be deleted because + // it either + // - is no longer present + // - is still present, but with updated values + for _, prevItem := range prev { + if _, ok := seenInPrev[string(prevItem.Data)]; ok { + del = append(del, prevItem) + } + } + + return add, del, cleaned +} + +func listsIdentical(a []Countable, b []Countable) bool { + if len(a) != len(b) { + // can't possibly be identical if they have different lengths, exit early + return false + } + + for i := range a { + if !bytes.Equal(a[i].Data, b[i].Data) || + 
a[i].TermFrequency != b[i].TermFrequency { + // return as soon as an item didn't match + return false + } + } + + // we have proven in O(n) time that both lists are identical + // while O(n) is the worst case for this check it prevents us from running a + // considerably more expensive merge + return true +} + +type DeltaNilResults struct { + ToDelete []NilProperty + ToAdd []NilProperty +} + +func DeltaNil(previous, next []NilProperty) DeltaNilResults { + out := DeltaNilResults{} + + if previous == nil { + out.ToAdd = next + return out + } + + previousByProp := map[string]NilProperty{} + for _, prevProp := range previous { + previousByProp[prevProp.Name] = prevProp + } + + for _, nextProp := range next { + if _, ok := previousByProp[nextProp.Name]; !ok { + out.ToAdd = append(out.ToAdd, nextProp) + continue + } + delete(previousByProp, nextProp.Name) + } + + // extend ToDelete with props from previous missing in next + for _, prevProp := range previous { + if _, ok := previousByProp[prevProp.Name]; ok { + out.ToDelete = append(out.ToDelete, prevProp) + } + } + + return out +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/delta_analyzer_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/delta_analyzer_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4898547ccb46fe1acabcc76e275121f49edb0221 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/delta_analyzer_test.go @@ -0,0 +1,2640 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "testing" + + "github.com/stretchr/testify/assert" + ent "github.com/weaviate/weaviate/entities/inverted" +) + +func TestDeltaAnalyzer(t *testing.T) { + t.Run("without previous indexing", func(t *testing.T) { + previous := []Property(nil) + next := []Property{ + { + Name: "prop1", + Items: []Countable{ + { + Data: []byte("value1"), + TermFrequency: 7, + }, + { + Data: []byte("value2"), + TermFrequency: 3, + }, + }, + Length: 2, + }, + { + Name: "prop2", + Items: []Countable{ + { + Data: []byte("value3"), + TermFrequency: 7, + }, + { + Data: []byte("value4"), + TermFrequency: 3, + }, + }, + Length: 2, + }, + } + + res := Delta(previous, next) + + assert.ElementsMatch(t, next, res.ToAdd) + assert.Len(t, res.ToDelete, 0) + }) + + t.Run("with previous indexing and no changes", func(t *testing.T) { + previous := []Property{ + { + Name: "prop1", + Items: []Countable{ + { + Data: []byte("value1"), + TermFrequency: 7, + }, + { + Data: []byte("value2"), + TermFrequency: 3, + }, + }, + }, + { + Name: "prop2", + Items: []Countable{ + { + Data: []byte("value3"), + TermFrequency: 7, + }, + { + Data: []byte("value4"), + TermFrequency: 3, + }, + }, + }, + } + next := []Property{ + { + Name: "prop1", + Items: []Countable{ + { + Data: []byte("value1"), + TermFrequency: 7, + }, + { + Data: []byte("value2"), + TermFrequency: 3, + }, + }, + }, + { + Name: "prop2", + Items: []Countable{ + { + Data: []byte("value3"), + TermFrequency: 7, + }, + { + Data: []byte("value4"), + TermFrequency: 3, + }, + }, + }, + } + + res := Delta(previous, next) + + assert.Len(t, res.ToDelete, 0) + assert.Len(t, res.ToAdd, 0) + }) + + t.Run("with previous indexing - only additions", func(t *testing.T) { + previous := []Property{ + { + Name: "prop1", + Items: []Countable{ + { + Data: []byte("value2"), + TermFrequency: 3, + }, + }, + Length: 1, + }, + { + Name: "prop2", + Items: []Countable{ + { + Data: []byte("value4"), + TermFrequency: 
3, + }, + }, + Length: 1, + }, + } + next := []Property{ + { + Name: "prop1", + Items: []Countable{ + { + Data: []byte("value1"), + TermFrequency: 7, + }, + { + Data: []byte("value2"), + TermFrequency: 3, + }, + }, + Length: 2, + }, + { + Name: "prop2", + Items: []Countable{ + { + Data: []byte("value3"), + TermFrequency: 7, + }, + { + Data: []byte("value4"), + TermFrequency: 3, + }, + }, + Length: 2, + }, + } + + expectedAdd := []Property{ + { + Name: "prop1", + Items: []Countable{ + { + Data: []byte("value1"), + TermFrequency: 7, + }, + }, + Length: 2, + }, + { + Name: "prop2", + Items: []Countable{ + { + Data: []byte("value3"), + TermFrequency: 7, + }, + }, + Length: 2, + }, + } + expectedDelete := []Property{ + { + Name: "prop1", + Items: []Countable{}, + Length: 1, + }, + { + Name: "prop2", + Items: []Countable{}, + Length: 1, + }, + } + + res := Delta(previous, next) + + assert.ElementsMatch(t, expectedAdd, res.ToAdd) + assert.ElementsMatch(t, expectedDelete, res.ToDelete) + }) + + t.Run("with previous indexing - both additions and deletions", func(t *testing.T) { + previous := []Property{ + { + Name: "prop1", + Items: []Countable{ + { + Data: []byte("value2"), + TermFrequency: 3, + }, + }, + Length: 1, + }, + { + Name: "prop2", + Items: []Countable{ + { + Data: []byte("value4"), + TermFrequency: 3, + }, + }, + Length: 1, + }, + { + Name: "prop3", + Items: []Countable{ + { + Data: []byte("value6"), + TermFrequency: 3, + }, + }, + Length: 1, + }, + { + Name: "prop4", + Items: []Countable{ + { + Data: []byte("value6"), + TermFrequency: 3, + }, + }, + Length: 1, + }, + } + next := []Property{ + { + Name: "prop1", + Items: []Countable{ + { + Data: []byte("value1"), + TermFrequency: 7, + }, + }, + Length: 1, + }, + { + Name: "prop2", + Items: []Countable{ + { + Data: []byte("value3"), + TermFrequency: 7, + }, + { + Data: []byte("value4"), + TermFrequency: 3, + }, + }, + Length: 2, + }, + { + Name: "prop3", + Items: []Countable{ + { + Data: []byte("value6"), + 
TermFrequency: 3, + }, + }, + Length: 1, + }, + { + Name: "prop5", + Items: []Countable{ + { + Data: []byte("value10"), + TermFrequency: 10, + }, + }, + Length: 1, + }, + } + + expectedAdd := []Property{ + { + Name: "prop1", + Items: []Countable{ + { + Data: []byte("value1"), + TermFrequency: 7, + }, + }, + Length: 1, + }, + { + Name: "prop2", + Items: []Countable{ + { + Data: []byte("value3"), + TermFrequency: 7, + }, + }, + Length: 2, + }, + { + Name: "prop4", + Items: []Countable{}, + Length: 0, + }, + { + Name: "prop5", + Items: []Countable{ + { + Data: []byte("value10"), + TermFrequency: 10, + }, + }, + Length: 1, + }, + } + + expectedDelete := []Property{ + { + Name: "prop1", + Items: []Countable{ + { + Data: []byte("value2"), + TermFrequency: 3, + }, + }, + Length: 1, + }, + { + Name: "prop2", + Items: []Countable{}, + Length: 1, + }, + { + Name: "prop4", + Items: []Countable{ + { + Data: []byte("value6"), + TermFrequency: 3, + }, + }, + Length: 1, + }, + { + Name: "prop5", + Items: []Countable{}, + Length: 0, + }, + } + + res := Delta(previous, next) + + assert.ElementsMatch(t, expectedAdd, res.ToAdd) + assert.ElementsMatch(t, expectedDelete, res.ToDelete) + }) +} + +func TestDeltaAnalyzer_SkipSearchable(t *testing.T) { + t.Run("without previous indexing", func(t *testing.T) { + var allNext []Property + var allExpectedToAdd []Property + var allSkipSearchable []string + + prev := []Property(nil) + + t.Run("searchableInv/filterable", func(t *testing.T) { + prop1_new_searchableInv_filterable_next := Property{ + Name: "new_sif", + Items: []Countable{{ + Data: []byte("new_01"), + TermFrequency: 7, + }, { + Data: []byte("new_02"), + TermFrequency: 3, + }}, + Length: 13, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + delta := DeltaSkipSearchable(prev, + []Property{prop1_new_searchableInv_filterable_next}, + []string{}) + + assert.ElementsMatch(t, []Property{prop1_new_searchableInv_filterable_next}, delta.ToAdd) + assert.Empty(t, delta.ToDelete) + 
+ allNext = append(allNext, prop1_new_searchableInv_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, prop1_new_searchableInv_filterable_next) + allSkipSearchable = append(allSkipSearchable, prop1_new_searchableInv_filterable_next.Name) + }) + + t.Run("searchableInv", func(t *testing.T) { + prop2_new_searchableInv_next := Property{ + Name: "new_si", + Items: []Countable{{ + Data: []byte("new_01"), + TermFrequency: 7, + }, { + Data: []byte("new_02"), + TermFrequency: 3, + }}, + Length: 13, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + + delta := DeltaSkipSearchable(prev, + []Property{prop2_new_searchableInv_next}, + []string{}) + + assert.ElementsMatch(t, []Property{prop2_new_searchableInv_next}, delta.ToAdd) + assert.Empty(t, delta.ToDelete) + + allNext = append(allNext, prop2_new_searchableInv_next) + allExpectedToAdd = append(allExpectedToAdd, prop2_new_searchableInv_next) + allSkipSearchable = append(allSkipSearchable, prop2_new_searchableInv_next.Name) + }) + + t.Run("searchableMap/filterable", func(t *testing.T) { + prop3_new_searchableMap_filterable_next := Property{ + Name: "new_smf", + Items: []Countable{{ + Data: []byte("new_01"), + TermFrequency: 7, + }, { + Data: []byte("new_02"), + TermFrequency: 3, + }}, + Length: 13, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + delta := DeltaSkipSearchable(prev, + []Property{prop3_new_searchableMap_filterable_next}, + []string{}) + + assert.ElementsMatch(t, []Property{prop3_new_searchableMap_filterable_next}, delta.ToAdd) + assert.Empty(t, delta.ToDelete) + + allNext = append(allNext, prop3_new_searchableMap_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, prop3_new_searchableMap_filterable_next) + }) + + t.Run("filterable", func(t *testing.T) { + prop4_new_filterable_next := Property{ + Name: "new_f", + Items: []Countable{{ + Data: []byte("new_01"), + TermFrequency: 7, + }, { + Data: []byte("new_02"), + TermFrequency: 3, + }}, + Length: 13, + 
HasFilterableIndex: true, + HasSearchableIndex: false, + } + + delta := DeltaSkipSearchable(prev, + []Property{prop4_new_filterable_next}, + []string{}) + + assert.ElementsMatch(t, []Property{prop4_new_filterable_next}, delta.ToAdd) + assert.Empty(t, delta.ToDelete) + + allNext = append(allNext, prop4_new_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, prop4_new_filterable_next) + }) + + t.Run("sanity check - all properties at once", func(t *testing.T) { + delta := DeltaSkipSearchable(prev, allNext, allSkipSearchable) + + assert.ElementsMatch(t, allExpectedToAdd, delta.ToAdd) + assert.Empty(t, delta.ToDelete) + }) + }) + + t.Run("with previous indexing", func(t *testing.T) { + var allPrev []Property + var allNext []Property + var allExpectedToAdd []Property + var allExpectedToDel []Property + var allSkipSearchable []string + + t.Run("adding 2 values to 0 values", func(t *testing.T) { + t.Run("searchableInv/filterable", func(t *testing.T) { + prop01_adding2values_searchableInv_filterable_prev := Property{ + Name: "adding2values_sif", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop01_adding2values_searchableInv_filterable_next := Property{ + Name: "adding2values_sif", + Items: []Countable{{ + Data: []byte("added_01"), + TermFrequency: 7, + }, { + Data: []byte("added_02"), + TermFrequency: 3, + }}, + Length: 17, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop01_adding2values_searchableInv_filterable_toAdd := []Property{ + prop01_adding2values_searchableInv_filterable_next, + } + expected_prop01_adding2values_searchableInv_filterable_toDel := []Property{ + prop01_adding2values_searchableInv_filterable_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop01_adding2values_searchableInv_filterable_prev}, + []Property{prop01_adding2values_searchableInv_filterable_next}, + []string{prop01_adding2values_searchableInv_filterable_prev.Name}, + ) + + 
assert.ElementsMatch(t, expected_prop01_adding2values_searchableInv_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop01_adding2values_searchableInv_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop01_adding2values_searchableInv_filterable_prev) + allNext = append(allNext, prop01_adding2values_searchableInv_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop01_adding2values_searchableInv_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop01_adding2values_searchableInv_filterable_toDel...) + allSkipSearchable = append(allSkipSearchable, prop01_adding2values_searchableInv_filterable_prev.Name) + }) + + t.Run("searchableInv", func(t *testing.T) { + prop11_adding2values_searchableInv_prev := Property{ + Name: "adding2values_si", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + prop11_adding2values_searchableInv_next := Property{ + Name: "adding2values_si", + Items: []Countable{{ + Data: []byte("added_01"), + TermFrequency: 7, + }, { + Data: []byte("added_02"), + TermFrequency: 3, + }}, + Length: 17, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + + expected_prop11_adding2values_searchableInv_toAdd := []Property{ + prop11_adding2values_searchableInv_next, + } + expected_prop11_adding2values_searchableInv_toDel := []Property{ + prop11_adding2values_searchableInv_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop11_adding2values_searchableInv_prev}, + []Property{prop11_adding2values_searchableInv_next}, + []string{prop11_adding2values_searchableInv_prev.Name}, + ) + + assert.ElementsMatch(t, expected_prop11_adding2values_searchableInv_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop11_adding2values_searchableInv_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop11_adding2values_searchableInv_prev) + allNext = append(allNext, prop11_adding2values_searchableInv_next) + allExpectedToAdd = 
append(allExpectedToAdd, expected_prop11_adding2values_searchableInv_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop11_adding2values_searchableInv_toDel...) + allSkipSearchable = append(allSkipSearchable, prop11_adding2values_searchableInv_prev.Name) + }) + + t.Run("searchableMap/filterable", func(t *testing.T) { + prop21_adding2values_searchableMap_filterable_prev := Property{ + Name: "adding2values_smf", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop21_adding2values_searchableMap_filterable_next := Property{ + Name: "adding2values_smf", + Items: []Countable{{ + Data: []byte("added_01"), + TermFrequency: 7, + }, { + Data: []byte("added_02"), + TermFrequency: 3, + }}, + Length: 17, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop21_adding2values_searchableMap_filterable_toAdd := []Property{ + prop21_adding2values_searchableMap_filterable_next, + } + expected_prop21_adding2values_searchableMap_filterable_toDel := []Property{ + prop21_adding2values_searchableMap_filterable_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop21_adding2values_searchableMap_filterable_prev}, + []Property{prop21_adding2values_searchableMap_filterable_next}, + nil, + ) + + assert.ElementsMatch(t, expected_prop21_adding2values_searchableMap_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop21_adding2values_searchableMap_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop21_adding2values_searchableMap_filterable_prev) + allNext = append(allNext, prop21_adding2values_searchableMap_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop21_adding2values_searchableMap_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop21_adding2values_searchableMap_filterable_toDel...) 
+ }) + + t.Run("filterable", func(t *testing.T) { + prop31_adding2values_filterable_prev := Property{ + Name: "adding2values_f", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: false, + } + prop31_adding2values_filterable_next := Property{ + Name: "adding2values_f", + Items: []Countable{{ + Data: []byte("added_01"), + TermFrequency: 7, + }, { + Data: []byte("added_02"), + TermFrequency: 3, + }}, + Length: 17, + HasFilterableIndex: true, + HasSearchableIndex: false, + } + + expected_prop31_adding2values_filterable_toAdd := []Property{ + prop31_adding2values_filterable_next, + } + expected_prop31_adding2values_filterable_toDel := []Property{ + prop31_adding2values_filterable_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop31_adding2values_filterable_prev}, + []Property{prop31_adding2values_filterable_next}, + nil, + ) + + assert.ElementsMatch(t, expected_prop31_adding2values_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop31_adding2values_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop31_adding2values_filterable_prev) + allNext = append(allNext, prop31_adding2values_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop31_adding2values_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop31_adding2values_filterable_toDel...) 
+ }) + }) + + t.Run("adding 1 value to 1 value", func(t *testing.T) { + t.Run("searchableInv/filterable", func(t *testing.T) { + prop02_adding1value_searchableInv_filterable_prev := Property{ + Name: "adding1value_sif", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }}, + Length: 12, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop02_adding1value_searchableInv_filterable_next := Property{ + Name: "adding1value_sif", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("added_02"), + TermFrequency: 3, + }}, + Length: 21, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop02_adding1value_searchableInv_filterable_toAdd := []Property{ + { + Name: "adding1value_sif", + Items: []Countable{{ + Data: []byte("added_02"), + TermFrequency: 3, + }}, + Length: 21, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "adding1value_sif", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("added_02"), + TermFrequency: 3, + }}, + Length: -1, + HasFilterableIndex: false, + HasSearchableIndex: true, + }, + } + expected_prop02_adding1value_searchableInv_filterable_toDel := []Property{ + { + Name: "adding1value_sif", + Items: []Countable{}, + Length: 12, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "adding1value_sif", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }}, + Length: -1, + HasFilterableIndex: false, + HasSearchableIndex: true, + }, + } + + delta := DeltaSkipSearchable( + []Property{prop02_adding1value_searchableInv_filterable_prev}, + []Property{prop02_adding1value_searchableInv_filterable_next}, + []string{prop02_adding1value_searchableInv_filterable_prev.Name}, + ) + + assert.ElementsMatch(t, expected_prop02_adding1value_searchableInv_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, 
expected_prop02_adding1value_searchableInv_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop02_adding1value_searchableInv_filterable_prev) + allNext = append(allNext, prop02_adding1value_searchableInv_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop02_adding1value_searchableInv_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop02_adding1value_searchableInv_filterable_toDel...) + allSkipSearchable = append(allSkipSearchable, prop02_adding1value_searchableInv_filterable_prev.Name) + }) + + t.Run("searchableInv", func(t *testing.T) { + prop12_adding1value_searchableInv_prev := Property{ + Name: "adding1value_si", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }}, + Length: 12, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + prop12_adding1value_searchableInv_next := Property{ + Name: "adding1value_si", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("added_02"), + TermFrequency: 3, + }}, + Length: 21, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + + expected_prop12_adding1value_searchableInv_toAdd := []Property{ + prop12_adding1value_searchableInv_next, + } + expected_prop12_adding1value_searchableInv_toDel := []Property{ + prop12_adding1value_searchableInv_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop12_adding1value_searchableInv_prev}, + []Property{prop12_adding1value_searchableInv_next}, + []string{prop12_adding1value_searchableInv_prev.Name}, + ) + + assert.ElementsMatch(t, expected_prop12_adding1value_searchableInv_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop12_adding1value_searchableInv_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop12_adding1value_searchableInv_prev) + allNext = append(allNext, prop12_adding1value_searchableInv_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop12_adding1value_searchableInv_toAdd...) 
+ allExpectedToDel = append(allExpectedToDel, expected_prop12_adding1value_searchableInv_toDel...) + allSkipSearchable = append(allSkipSearchable, prop12_adding1value_searchableInv_prev.Name) + }) + + t.Run("searchableMap/filterable", func(t *testing.T) { + prop22_adding1value_searchableMap_filterable_prev := Property{ + Name: "adding1value_smf", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }}, + Length: 12, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop22_adding1value_searchableMap_filterable_next := Property{ + Name: "adding1value_smf", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("added_02"), + TermFrequency: 3, + }}, + Length: 21, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop22_adding1value_searchableMap_filterable_toAdd := []Property{ + { + Name: "adding1value_smf", + Items: []Countable{{ + Data: []byte("added_02"), + TermFrequency: 3, + }}, + Length: 21, + HasFilterableIndex: true, + HasSearchableIndex: true, + }, + } + expected_prop22_adding1value_searchableMap_filterable_toDel := []Property{ + { + Name: "adding1value_smf", + Items: []Countable{}, + Length: 12, + HasFilterableIndex: true, + HasSearchableIndex: true, + }, + } + + delta := DeltaSkipSearchable( + []Property{prop22_adding1value_searchableMap_filterable_prev}, + []Property{prop22_adding1value_searchableMap_filterable_next}, + nil, + ) + + assert.ElementsMatch(t, expected_prop22_adding1value_searchableMap_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop22_adding1value_searchableMap_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop22_adding1value_searchableMap_filterable_prev) + allNext = append(allNext, prop22_adding1value_searchableMap_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop22_adding1value_searchableMap_filterable_toAdd...) 
+ allExpectedToDel = append(allExpectedToDel, expected_prop22_adding1value_searchableMap_filterable_toDel...) + }) + + t.Run("filterable", func(t *testing.T) { + prop32_adding1value_filterable_prev := Property{ + Name: "adding1value_f", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }}, + Length: 12, + HasFilterableIndex: true, + HasSearchableIndex: false, + } + prop32_adding1value_filterable_next := Property{ + Name: "adding1value_f", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("added_02"), + TermFrequency: 3, + }}, + Length: 21, + HasFilterableIndex: true, + HasSearchableIndex: false, + } + + expected_prop32_adding1value_filterable_toAdd := []Property{ + { + Name: "adding1value_f", + Items: []Countable{{ + Data: []byte("added_02"), + TermFrequency: 3, + }}, + Length: 21, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + } + expected_prop32_adding1value_filterable_toDel := []Property{ + { + Name: "adding1value_f", + Items: []Countable{}, + Length: 12, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + } + + delta := DeltaSkipSearchable( + []Property{prop32_adding1value_filterable_prev}, + []Property{prop32_adding1value_filterable_next}, + nil, + ) + + assert.ElementsMatch(t, expected_prop32_adding1value_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop32_adding1value_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop32_adding1value_filterable_prev) + allNext = append(allNext, prop32_adding1value_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop32_adding1value_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop32_adding1value_filterable_toDel...) 
+ }) + }) + + t.Run("deleting 2 values from 2 values", func(t *testing.T) { + t.Run("searchableInv/filterable", func(t *testing.T) { + prop03_deleting2values_searchableInv_filterable_prev := Property{ + Name: "deleting2values_sif", + Items: []Countable{{ + Data: []byte("toBeDeleted_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeDeleted_02"), + TermFrequency: 3, + }}, + Length: 29, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop03_deleting2values_searchableInv_filterable_next := Property{ + Name: "deleting2values_sif", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop03_deleting2values_searchableInv_filterable_toAdd := []Property{ + prop03_deleting2values_searchableInv_filterable_next, + } + expected_prop03_deleting2values_searchableInv_filterable_toDel := []Property{ + prop03_deleting2values_searchableInv_filterable_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop03_deleting2values_searchableInv_filterable_prev}, + []Property{prop03_deleting2values_searchableInv_filterable_next}, + []string{prop03_deleting2values_searchableInv_filterable_prev.Name}, + ) + + assert.ElementsMatch(t, expected_prop03_deleting2values_searchableInv_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop03_deleting2values_searchableInv_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop03_deleting2values_searchableInv_filterable_prev) + allNext = append(allNext, prop03_deleting2values_searchableInv_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop03_deleting2values_searchableInv_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop03_deleting2values_searchableInv_filterable_toDel...) 
+ allSkipSearchable = append(allSkipSearchable, prop03_deleting2values_searchableInv_filterable_prev.Name) + }) + + t.Run("searchableInv", func(t *testing.T) { + prop13_deleting2values_searchableInv_prev := Property{ + Name: "deleting2values_si", + Items: []Countable{{ + Data: []byte("toBeDeleted_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeDeleted_02"), + TermFrequency: 3, + }}, + Length: 29, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + prop13_deleting2values_searchableInv_next := Property{ + Name: "deleting2values_si", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + + expected_prop13_deleting2values_searchableInv_toAdd := []Property{ + prop13_deleting2values_searchableInv_next, + } + expected_prop13_deleting2values_searchableInv_toDel := []Property{ + prop13_deleting2values_searchableInv_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop13_deleting2values_searchableInv_prev}, + []Property{prop13_deleting2values_searchableInv_next}, + []string{prop13_deleting2values_searchableInv_prev.Name}, + ) + + assert.ElementsMatch(t, expected_prop13_deleting2values_searchableInv_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop13_deleting2values_searchableInv_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop13_deleting2values_searchableInv_prev) + allNext = append(allNext, prop13_deleting2values_searchableInv_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop13_deleting2values_searchableInv_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop13_deleting2values_searchableInv_toDel...) 
+ allSkipSearchable = append(allSkipSearchable, prop13_deleting2values_searchableInv_prev.Name) + }) + + t.Run("searchableMap/filterable", func(t *testing.T) { + prop23_deleting2values_searchableMap_filterable_prev := Property{ + Name: "deleting2values_smf", + Items: []Countable{{ + Data: []byte("toBeDeleted_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeDeleted_02"), + TermFrequency: 3, + }}, + Length: 29, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop23_deleting2values_searchableMap_filterable_next := Property{ + Name: "deleting2values_smf", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop23_deleting2values_searchableMap_filterable_toAdd := []Property{ + prop23_deleting2values_searchableMap_filterable_next, + } + expected_prop23_deleting2values_searchableMap_filterable_toDel := []Property{ + prop23_deleting2values_searchableMap_filterable_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop23_deleting2values_searchableMap_filterable_prev}, + []Property{prop23_deleting2values_searchableMap_filterable_next}, + nil, + ) + + assert.ElementsMatch(t, expected_prop23_deleting2values_searchableMap_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop23_deleting2values_searchableMap_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop23_deleting2values_searchableMap_filterable_prev) + allNext = append(allNext, prop23_deleting2values_searchableMap_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop23_deleting2values_searchableMap_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop23_deleting2values_searchableMap_filterable_toDel...) 
+ }) + + t.Run("filterable", func(t *testing.T) { + prop33_deleting2values_filterable_prev := Property{ + Name: "deleting2values_f", + Items: []Countable{{ + Data: []byte("toBeDeleted_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeDeleted_02"), + TermFrequency: 3, + }}, + Length: 29, + HasFilterableIndex: true, + HasSearchableIndex: false, + } + prop33_deleting2values_filterable_next := Property{ + Name: "deleting2values_f", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: false, + } + + expected_prop33_deleting2values_filterable_toAdd := []Property{ + prop33_deleting2values_filterable_next, + } + expected_prop33_deleting2values_filterable_toDel := []Property{ + prop33_deleting2values_filterable_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop33_deleting2values_filterable_prev}, + []Property{prop33_deleting2values_filterable_next}, + nil, + ) + + assert.ElementsMatch(t, expected_prop33_deleting2values_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop33_deleting2values_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop33_deleting2values_filterable_prev) + allNext = append(allNext, prop33_deleting2values_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop33_deleting2values_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop33_deleting2values_filterable_toDel...) 
+ }) + }) + + t.Run("deleting 1 value from 2 values", func(t *testing.T) { + t.Run("searchableInv/filterable", func(t *testing.T) { + prop04_deleting1value_searchableInv_filterable_prev := Property{ + Name: "deleting1value_sif", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeDeleted_02"), + TermFrequency: 3, + }}, + Length: 27, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop04_deleting1value_searchableInv_filterable_next := Property{ + Name: "deleting1value_sif", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }}, + Length: 12, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop04_deleting1value_searchableInv_filterable_toAdd := []Property{ + { + Name: "deleting1value_sif", + Items: []Countable{}, + Length: 12, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "deleting1value_sif", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }}, + Length: -1, + HasFilterableIndex: false, + HasSearchableIndex: true, + }, + } + expected_prop04_deleting1value_searchableInv_filterable_toDel := []Property{ + { + Name: "deleting1value_sif", + Items: []Countable{{ + Data: []byte("toBeDeleted_02"), + TermFrequency: 3, + }}, + Length: 27, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "deleting1value_sif", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeDeleted_02"), + TermFrequency: 3, + }}, + Length: -1, + HasFilterableIndex: false, + HasSearchableIndex: true, + }, + } + + delta := DeltaSkipSearchable( + []Property{prop04_deleting1value_searchableInv_filterable_prev}, + []Property{prop04_deleting1value_searchableInv_filterable_next}, + []string{prop04_deleting1value_searchableInv_filterable_prev.Name}, + ) + + assert.ElementsMatch(t, expected_prop04_deleting1value_searchableInv_filterable_toAdd, delta.ToAdd) + 
assert.ElementsMatch(t, expected_prop04_deleting1value_searchableInv_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop04_deleting1value_searchableInv_filterable_prev) + allNext = append(allNext, prop04_deleting1value_searchableInv_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop04_deleting1value_searchableInv_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop04_deleting1value_searchableInv_filterable_toDel...) + allSkipSearchable = append(allSkipSearchable, prop04_deleting1value_searchableInv_filterable_prev.Name) + }) + + t.Run("searchableInv", func(t *testing.T) { + prop14_deleting1value_searchableInv_prev := Property{ + Name: "deleting1value_si", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeDeleted_02"), + TermFrequency: 3, + }}, + Length: 27, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + prop14_deleting1value_searchableInv_next := Property{ + Name: "deleting1value_si", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }}, + Length: 12, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + + expected_prop14_deleting1value_searchableInv_toAdd := []Property{ + prop14_deleting1value_searchableInv_next, + } + expected_prop14_deleting1value_searchableInv_toDel := []Property{ + prop14_deleting1value_searchableInv_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop14_deleting1value_searchableInv_prev}, + []Property{prop14_deleting1value_searchableInv_next}, + []string{prop14_deleting1value_searchableInv_prev.Name}, + ) + + assert.ElementsMatch(t, expected_prop14_deleting1value_searchableInv_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop14_deleting1value_searchableInv_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop14_deleting1value_searchableInv_prev) + allNext = append(allNext, prop14_deleting1value_searchableInv_next) + allExpectedToAdd = 
append(allExpectedToAdd, expected_prop14_deleting1value_searchableInv_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop14_deleting1value_searchableInv_toDel...) + allSkipSearchable = append(allSkipSearchable, prop14_deleting1value_searchableInv_prev.Name) + }) + + t.Run("searchableMap/filterable", func(t *testing.T) { + prop24_deleting1value_searchableMap_filterable_prev := Property{ + Name: "deleting1value_smf", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeDeleted_02"), + TermFrequency: 3, + }}, + Length: 27, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop24_deleting1value_searchableMap_filterable_next := Property{ + Name: "deleting1value_smf", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }}, + Length: 12, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop24_deleting1value_searchableMap_filterable_toAdd := []Property{ + { + Name: "deleting1value_smf", + Items: []Countable{}, + Length: 12, + HasFilterableIndex: true, + HasSearchableIndex: true, + }, + } + expected_prop24_deleting1value_searchableMap_filterable_toDel := []Property{ + { + Name: "deleting1value_smf", + Items: []Countable{{ + Data: []byte("toBeDeleted_02"), + TermFrequency: 3, + }}, + Length: 27, + HasFilterableIndex: true, + HasSearchableIndex: true, + }, + } + + delta := DeltaSkipSearchable( + []Property{prop24_deleting1value_searchableMap_filterable_prev}, + []Property{prop24_deleting1value_searchableMap_filterable_next}, + nil, + ) + + assert.ElementsMatch(t, expected_prop24_deleting1value_searchableMap_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop24_deleting1value_searchableMap_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop24_deleting1value_searchableMap_filterable_prev) + allNext = append(allNext, prop24_deleting1value_searchableMap_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, 
expected_prop24_deleting1value_searchableMap_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop24_deleting1value_searchableMap_filterable_toDel...) + }) + + t.Run("filterable", func(t *testing.T) { + prop34_deleting1value_filterable_prev := Property{ + Name: "deleting1value_f", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeDeleted_02"), + TermFrequency: 3, + }}, + Length: 27, + HasFilterableIndex: true, + HasSearchableIndex: false, + } + prop34_deleting1value_filterable_next := Property{ + Name: "deleting1value_f", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }}, + Length: 12, + HasFilterableIndex: true, + HasSearchableIndex: false, + } + + expected_prop34_deleting1value_filterable_toAdd := []Property{ + { + Name: "deleting1value_f", + Items: []Countable{}, + Length: 12, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + } + expected_prop34_deleting1value_filterable_toDel := []Property{ + { + Name: "deleting1value_f", + Items: []Countable{{ + Data: []byte("toBeDeleted_02"), + TermFrequency: 3, + }}, + Length: 27, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + } + + delta := DeltaSkipSearchable( + []Property{prop34_deleting1value_filterable_prev}, + []Property{prop34_deleting1value_filterable_next}, + nil, + ) + + assert.ElementsMatch(t, expected_prop34_deleting1value_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop34_deleting1value_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop34_deleting1value_filterable_prev) + allNext = append(allNext, prop34_deleting1value_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop34_deleting1value_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop34_deleting1value_filterable_toDel...) 
+ }) + }) + + t.Run("replacing 2 values of 2 values", func(t *testing.T) { + t.Run("searchableInv/filterable", func(t *testing.T) { + prop05_replacing2values_searchableInv_filterable_prev := Property{ + Name: "replacing2values_sif", + Items: []Countable{{ + Data: []byte("toBeReplaced_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeReplaced_02"), + TermFrequency: 3, + }}, + Length: 31, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop05_replacing2values_searchableInv_filterable_next := Property{ + Name: "replacing2values_sif", + Items: []Countable{{ + Data: []byte("replaced_03"), + TermFrequency: 7, + }, { + Data: []byte("replaced_04"), + TermFrequency: 3, + }}, + Length: 23, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop05_replacing2values_searchableInv_filterable_toAdd := []Property{ + prop05_replacing2values_searchableInv_filterable_next, + } + expected_prop05_replacing2values_searchableInv_filterable_toDel := []Property{ + prop05_replacing2values_searchableInv_filterable_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop05_replacing2values_searchableInv_filterable_prev}, + []Property{prop05_replacing2values_searchableInv_filterable_next}, + []string{prop05_replacing2values_searchableInv_filterable_prev.Name}, + ) + + assert.ElementsMatch(t, expected_prop05_replacing2values_searchableInv_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop05_replacing2values_searchableInv_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop05_replacing2values_searchableInv_filterable_prev) + allNext = append(allNext, prop05_replacing2values_searchableInv_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop05_replacing2values_searchableInv_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop05_replacing2values_searchableInv_filterable_toDel...) 
+ allSkipSearchable = append(allSkipSearchable, prop05_replacing2values_searchableInv_filterable_prev.Name) + }) + + t.Run("searchableInv", func(t *testing.T) { + prop15_replacing2values_searchableInv_prev := Property{ + Name: "replacing2values_si", + Items: []Countable{{ + Data: []byte("toBeReplaced_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeReplaced_02"), + TermFrequency: 3, + }}, + Length: 31, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + prop15_replacing2values_searchableInv_next := Property{ + Name: "replacing2values_si", + Items: []Countable{{ + Data: []byte("replaced_03"), + TermFrequency: 7, + }, { + Data: []byte("replaced_04"), + TermFrequency: 3, + }}, + Length: 23, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + + expected_prop15_replacing2values_searchableInv_toAdd := []Property{ + prop15_replacing2values_searchableInv_next, + } + expected_prop15_replacing2values_searchableInv_toDel := []Property{ + prop15_replacing2values_searchableInv_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop15_replacing2values_searchableInv_prev}, + []Property{prop15_replacing2values_searchableInv_next}, + []string{prop15_replacing2values_searchableInv_prev.Name}, + ) + + assert.ElementsMatch(t, expected_prop15_replacing2values_searchableInv_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop15_replacing2values_searchableInv_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop15_replacing2values_searchableInv_prev) + allNext = append(allNext, prop15_replacing2values_searchableInv_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop15_replacing2values_searchableInv_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop15_replacing2values_searchableInv_toDel...) 
+ allSkipSearchable = append(allSkipSearchable, prop15_replacing2values_searchableInv_prev.Name) + }) + + t.Run("searchableMap/filterable", func(t *testing.T) { + prop25_replacing2values_searchableMap_filterable_prev := Property{ + Name: "replacing2values_smf", + Items: []Countable{{ + Data: []byte("toBeReplaced_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeReplaced_02"), + TermFrequency: 3, + }}, + Length: 31, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop25_replacing2values_searchableMap_filterable_next := Property{ + Name: "replacing2values_smf", + Items: []Countable{{ + Data: []byte("replaced_03"), + TermFrequency: 7, + }, { + Data: []byte("replaced_04"), + TermFrequency: 3, + }}, + Length: 23, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop25_replacing2values_searchableMap_filterable_toAdd := []Property{ + prop25_replacing2values_searchableMap_filterable_next, + } + expected_prop25_replacing2values_searchableMap_filterable_toDel := []Property{ + prop25_replacing2values_searchableMap_filterable_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop25_replacing2values_searchableMap_filterable_prev}, + []Property{prop25_replacing2values_searchableMap_filterable_next}, + nil, + ) + + assert.ElementsMatch(t, expected_prop25_replacing2values_searchableMap_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop25_replacing2values_searchableMap_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop25_replacing2values_searchableMap_filterable_prev) + allNext = append(allNext, prop25_replacing2values_searchableMap_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop25_replacing2values_searchableMap_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop25_replacing2values_searchableMap_filterable_toDel...) 
+ }) + + t.Run("filterable", func(t *testing.T) { + prop35_replacing2values_filterable_prev := Property{ + Name: "replacing2values_f", + Items: []Countable{{ + Data: []byte("toBeReplaced_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeReplaced_02"), + TermFrequency: 3, + }}, + Length: 31, + HasFilterableIndex: true, + HasSearchableIndex: false, + } + prop35_replacing2values_filterable_next := Property{ + Name: "replacing2values_f", + Items: []Countable{{ + Data: []byte("replaced_03"), + TermFrequency: 7, + }, { + Data: []byte("replaced_04"), + TermFrequency: 3, + }}, + Length: 23, + HasFilterableIndex: true, + HasSearchableIndex: false, + } + + expected_prop35_replacing2values_filterable_toAdd := []Property{ + prop35_replacing2values_filterable_next, + } + expected_prop35_replacing2values_filterable_toDel := []Property{ + prop35_replacing2values_filterable_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop35_replacing2values_filterable_prev}, + []Property{prop35_replacing2values_filterable_next}, + nil, + ) + + assert.ElementsMatch(t, expected_prop35_replacing2values_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop35_replacing2values_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop35_replacing2values_filterable_prev) + allNext = append(allNext, prop35_replacing2values_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop35_replacing2values_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop35_replacing2values_filterable_toDel...) 
+ }) + }) + + t.Run("replacing 1 value of 2 values", func(t *testing.T) { + t.Run("searchableInv/filterable", func(t *testing.T) { + prop06_replacing1value_searchableInv_filterable_prev := Property{ + Name: "replacing1value_sif", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeReplaced_02"), + TermFrequency: 3, + }}, + Length: 28, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop06_replacing1value_searchableInv_filterable_next := Property{ + Name: "replacing1value_sif", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("replaced_03"), + TermFrequency: 3, + }}, + Length: 24, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop06_replacing1value_searchableInv_filterable_toAdd := []Property{ + { + Name: "replacing1value_sif", + Items: []Countable{{ + Data: []byte("replaced_03"), + TermFrequency: 3, + }}, + Length: 24, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "replacing1value_sif", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("replaced_03"), + TermFrequency: 3, + }}, + Length: -1, + HasFilterableIndex: false, + HasSearchableIndex: true, + }, + } + expected_prop06_replacing1value_searchableInv_filterable_toDel := []Property{ + { + Name: "replacing1value_sif", + Items: []Countable{{ + Data: []byte("toBeReplaced_02"), + TermFrequency: 3, + }}, + Length: 28, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "replacing1value_sif", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeReplaced_02"), + TermFrequency: 3, + }}, + Length: -1, + HasFilterableIndex: false, + HasSearchableIndex: true, + }, + } + + delta := DeltaSkipSearchable( + []Property{prop06_replacing1value_searchableInv_filterable_prev}, + []Property{prop06_replacing1value_searchableInv_filterable_next}, + 
[]string{prop06_replacing1value_searchableInv_filterable_prev.Name}, + ) + + assert.ElementsMatch(t, expected_prop06_replacing1value_searchableInv_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop06_replacing1value_searchableInv_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop06_replacing1value_searchableInv_filterable_prev) + allNext = append(allNext, prop06_replacing1value_searchableInv_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop06_replacing1value_searchableInv_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop06_replacing1value_searchableInv_filterable_toDel...) + allSkipSearchable = append(allSkipSearchable, prop06_replacing1value_searchableInv_filterable_prev.Name) + }) + + t.Run("searchableInv", func(t *testing.T) { + prop16_replacing1value_searchableInv_prev := Property{ + Name: "replacing1value_si", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeReplaced_02"), + TermFrequency: 3, + }}, + Length: 28, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + prop16_replacing1value_searchableInv_next := Property{ + Name: "replacing1value_si", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("replaced_03"), + TermFrequency: 3, + }}, + Length: 24, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + + expected_prop16_replacing1value_searchableInv_toAdd := []Property{ + prop16_replacing1value_searchableInv_next, + } + expected_prop16_replacing1value_searchableInv_toDel := []Property{ + prop16_replacing1value_searchableInv_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop16_replacing1value_searchableInv_prev}, + []Property{prop16_replacing1value_searchableInv_next}, + []string{prop16_replacing1value_searchableInv_prev.Name}, + ) + + assert.ElementsMatch(t, expected_prop16_replacing1value_searchableInv_toAdd, delta.ToAdd) + 
assert.ElementsMatch(t, expected_prop16_replacing1value_searchableInv_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop16_replacing1value_searchableInv_prev) + allNext = append(allNext, prop16_replacing1value_searchableInv_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop16_replacing1value_searchableInv_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop16_replacing1value_searchableInv_toDel...) + allSkipSearchable = append(allSkipSearchable, prop16_replacing1value_searchableInv_prev.Name) + }) + + t.Run("searchableMap/filterable", func(t *testing.T) { + prop26_replacing1value_searchableMap_filterable_prev := Property{ + Name: "replacing1value_smf", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeReplaced_02"), + TermFrequency: 3, + }}, + Length: 28, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop26_replacing1value_searchableMap_filterable_next := Property{ + Name: "replacing1value_smf", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("replaced_03"), + TermFrequency: 3, + }}, + Length: 24, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop26_replacing1value_searchableMap_filterable_toAdd := []Property{ + { + Name: "replacing1value_smf", + Items: []Countable{{ + Data: []byte("replaced_03"), + TermFrequency: 3, + }}, + Length: 24, + HasFilterableIndex: true, + HasSearchableIndex: true, + }, + } + expected_prop26_replacing1value_searchableMap_filterable_toDel := []Property{ + { + Name: "replacing1value_smf", + Items: []Countable{{ + Data: []byte("toBeReplaced_02"), + TermFrequency: 3, + }}, + Length: 28, + HasFilterableIndex: true, + HasSearchableIndex: true, + }, + } + + delta := DeltaSkipSearchable( + []Property{prop26_replacing1value_searchableMap_filterable_prev}, + []Property{prop26_replacing1value_searchableMap_filterable_next}, + nil, + ) + + assert.ElementsMatch(t, 
expected_prop26_replacing1value_searchableMap_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop26_replacing1value_searchableMap_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop26_replacing1value_searchableMap_filterable_prev) + allNext = append(allNext, prop26_replacing1value_searchableMap_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop26_replacing1value_searchableMap_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop26_replacing1value_searchableMap_filterable_toDel...) + }) + + t.Run("filterable", func(t *testing.T) { + prop36_replacing1value_filterable_prev := Property{ + Name: "replacing1value_f", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeReplaced_02"), + TermFrequency: 3, + }}, + Length: 28, + HasFilterableIndex: true, + HasSearchableIndex: false, + } + prop36_replacing1value_filterable_next := Property{ + Name: "replacing1value_f", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("replaced_03"), + TermFrequency: 3, + }}, + Length: 24, + HasFilterableIndex: true, + HasSearchableIndex: false, + } + + expected_prop36_replacing1value_filterable_toAdd := []Property{ + { + Name: "replacing1value_f", + Items: []Countable{{ + Data: []byte("replaced_03"), + TermFrequency: 3, + }}, + Length: 24, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + } + expected_prop36_replacing1value_filterable_toDel := []Property{ + { + Name: "replacing1value_f", + Items: []Countable{{ + Data: []byte("toBeReplaced_02"), + TermFrequency: 3, + }}, + Length: 28, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + } + + delta := DeltaSkipSearchable( + []Property{prop36_replacing1value_filterable_prev}, + []Property{prop36_replacing1value_filterable_next}, + nil, + ) + + assert.ElementsMatch(t, expected_prop36_replacing1value_filterable_toAdd, delta.ToAdd) + 
assert.ElementsMatch(t, expected_prop36_replacing1value_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop36_replacing1value_filterable_prev) + allNext = append(allNext, prop36_replacing1value_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop36_replacing1value_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop36_replacing1value_filterable_toDel...) + }) + }) + + t.Run("creating 2 values", func(t *testing.T) { + t.Run("searchableInv/filterable", func(t *testing.T) { + prop07_creating2values_searchableInv_filterable_next := Property{ + Name: "creating2values_sif", + Items: []Countable{{ + Data: []byte("created_01"), + TermFrequency: 7, + }, { + Data: []byte("created_02"), + TermFrequency: 3, + }}, + Length: 21, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop07_creating2values_searchableInv_filterable_toAdd := []Property{ + prop07_creating2values_searchableInv_filterable_next, + } + expected_prop07_creating2values_searchableInv_filterable_toDel := []Property{ + { + Name: "creating2values_sif", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: true, + }, + } + + delta := DeltaSkipSearchable( + []Property{}, + []Property{prop07_creating2values_searchableInv_filterable_next}, + []string{prop07_creating2values_searchableInv_filterable_next.Name}, + ) + + assert.ElementsMatch(t, expected_prop07_creating2values_searchableInv_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop07_creating2values_searchableInv_filterable_toDel, delta.ToDelete) + + allNext = append(allNext, prop07_creating2values_searchableInv_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop07_creating2values_searchableInv_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop07_creating2values_searchableInv_filterable_toDel...) 
+ allSkipSearchable = append(allSkipSearchable, prop07_creating2values_searchableInv_filterable_next.Name) + }) + + t.Run("searchableInv", func(t *testing.T) { + prop17_creating2values_searchableInv_next := Property{ + Name: "creating2values_si", + Items: []Countable{{ + Data: []byte("created_01"), + TermFrequency: 7, + }, { + Data: []byte("created_02"), + TermFrequency: 3, + }}, + Length: 21, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + + expected_prop17_creating2values_searchableInv_toAdd := []Property{ + prop17_creating2values_searchableInv_next, + } + expected_prop17_creating2values_searchableInv_toDel := []Property{ + { + Name: "creating2values_si", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: false, + HasSearchableIndex: true, + }, + } + + delta := DeltaSkipSearchable( + []Property{}, + []Property{prop17_creating2values_searchableInv_next}, + []string{prop17_creating2values_searchableInv_next.Name}, + ) + + assert.ElementsMatch(t, expected_prop17_creating2values_searchableInv_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop17_creating2values_searchableInv_toDel, delta.ToDelete) + + allNext = append(allNext, prop17_creating2values_searchableInv_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop17_creating2values_searchableInv_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop17_creating2values_searchableInv_toDel...) 
+ allSkipSearchable = append(allSkipSearchable, prop17_creating2values_searchableInv_next.Name) + }) + + t.Run("searchableMap/filterable", func(t *testing.T) { + prop27_creating2values_searchableMap_filterable_next := Property{ + Name: "creating2values_smf", + Items: []Countable{{ + Data: []byte("created_01"), + TermFrequency: 7, + }, { + Data: []byte("created_02"), + TermFrequency: 3, + }}, + Length: 21, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop27_creating2values_searchableMap_filterable_toAdd := []Property{ + prop27_creating2values_searchableMap_filterable_next, + } + expected_prop27_creating2values_searchableMap_filterable_toDel := []Property{ + { + Name: "creating2values_smf", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: true, + }, + } + + delta := DeltaSkipSearchable( + []Property{}, + []Property{prop27_creating2values_searchableMap_filterable_next}, + nil, + ) + + assert.ElementsMatch(t, expected_prop27_creating2values_searchableMap_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop27_creating2values_searchableMap_filterable_toDel, delta.ToDelete) + + allNext = append(allNext, prop27_creating2values_searchableMap_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop27_creating2values_searchableMap_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop27_creating2values_searchableMap_filterable_toDel...) 
+ }) + + t.Run("filterable", func(t *testing.T) { + prop37_creating2values_filterable_next := Property{ + Name: "creating2values_f", + Items: []Countable{{ + Data: []byte("created_01"), + TermFrequency: 7, + }, { + Data: []byte("created_02"), + TermFrequency: 3, + }}, + Length: 21, + HasFilterableIndex: true, + HasSearchableIndex: false, + } + + expected_prop37_creating2values_filterable_toAdd := []Property{ + prop37_creating2values_filterable_next, + } + expected_prop37_creating2values_filterable_toDel := []Property{ + { + Name: "creating2values_f", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + } + + delta := DeltaSkipSearchable( + []Property{}, + []Property{prop37_creating2values_filterable_next}, + nil, + ) + + assert.ElementsMatch(t, expected_prop37_creating2values_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop37_creating2values_filterable_toDel, delta.ToDelete) + + allNext = append(allNext, prop37_creating2values_filterable_next) + allExpectedToAdd = append(allExpectedToAdd, expected_prop37_creating2values_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop37_creating2values_filterable_toDel...) 
+ }) + }) + + t.Run("dropping 2 values", func(t *testing.T) { + t.Run("searchableInv/filterable", func(t *testing.T) { + prop08_dropping2values_searchableInv_filterable_prev := Property{ + Name: "dropping2values_sif", + Items: []Countable{{ + Data: []byte("toBeDropped_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeDropped_02"), + TermFrequency: 3, + }}, + Length: 29, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop08_dropping2values_searchableInv_filterable_toAdd := []Property{ + { + Name: "dropping2values_sif", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: true, + }, + } + expected_prop08_dropping2values_searchableInv_filterable_toDel := []Property{ + prop08_dropping2values_searchableInv_filterable_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop08_dropping2values_searchableInv_filterable_prev}, + []Property{}, + []string{prop08_dropping2values_searchableInv_filterable_prev.Name}, + ) + + assert.ElementsMatch(t, expected_prop08_dropping2values_searchableInv_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop08_dropping2values_searchableInv_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop08_dropping2values_searchableInv_filterable_prev) + allExpectedToAdd = append(allExpectedToAdd, expected_prop08_dropping2values_searchableInv_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop08_dropping2values_searchableInv_filterable_toDel...) 
+ allSkipSearchable = append(allSkipSearchable, prop08_dropping2values_searchableInv_filterable_prev.Name) + }) + + t.Run("searchableInv", func(t *testing.T) { + prop18_dropping2values_searchableInv_prev := Property{ + Name: "dropping2values_si", + Items: []Countable{{ + Data: []byte("toBeDropped_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeDropped_02"), + TermFrequency: 3, + }}, + Length: 29, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + + expected_prop18_dropping2values_searchableInv_toAdd := []Property{ + { + Name: "dropping2values_si", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: false, + HasSearchableIndex: true, + }, + } + expected_prop18_dropping2values_searchableInv_toDel := []Property{ + prop18_dropping2values_searchableInv_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop18_dropping2values_searchableInv_prev}, + []Property{}, + []string{prop18_dropping2values_searchableInv_prev.Name}, + ) + + assert.ElementsMatch(t, expected_prop18_dropping2values_searchableInv_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop18_dropping2values_searchableInv_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop18_dropping2values_searchableInv_prev) + allExpectedToAdd = append(allExpectedToAdd, expected_prop18_dropping2values_searchableInv_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop18_dropping2values_searchableInv_toDel...) 
+ allSkipSearchable = append(allSkipSearchable, prop18_dropping2values_searchableInv_prev.Name) + }) + + t.Run("searchableMap/filterable", func(t *testing.T) { + prop28_dropping2values_searchableMap_filterable_prev := Property{ + Name: "dropping2values_smf", + Items: []Countable{{ + Data: []byte("toBeDropped_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeDropped_02"), + TermFrequency: 3, + }}, + Length: 29, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + + expected_prop28_dropping2values_searchableMap_filterable_toAdd := []Property{ + { + Name: "dropping2values_smf", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: true, + }, + } + expected_prop28_dropping2values_searchableMap_filterable_toDel := []Property{ + prop28_dropping2values_searchableMap_filterable_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop28_dropping2values_searchableMap_filterable_prev}, + []Property{}, + nil, + ) + + assert.ElementsMatch(t, expected_prop28_dropping2values_searchableMap_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop28_dropping2values_searchableMap_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop28_dropping2values_searchableMap_filterable_prev) + allExpectedToAdd = append(allExpectedToAdd, expected_prop28_dropping2values_searchableMap_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop28_dropping2values_searchableMap_filterable_toDel...) 
+ }) + + t.Run("filterable", func(t *testing.T) { + prop38_dropping2values_filterable_prev := Property{ + Name: "dropping2values_smf", + Items: []Countable{{ + Data: []byte("toBeDropped_01"), + TermFrequency: 7, + }, { + Data: []byte("toBeDropped_02"), + TermFrequency: 3, + }}, + Length: 29, + HasFilterableIndex: true, + HasSearchableIndex: false, + } + + expected_prop38_dropping2values_filterable_toAdd := []Property{ + { + Name: "dropping2values_smf", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + } + expected_prop38_dropping2values_filterable_toDel := []Property{ + prop38_dropping2values_filterable_prev, + } + + delta := DeltaSkipSearchable( + []Property{prop38_dropping2values_filterable_prev}, + []Property{}, + nil, + ) + + assert.ElementsMatch(t, expected_prop38_dropping2values_filterable_toAdd, delta.ToAdd) + assert.ElementsMatch(t, expected_prop38_dropping2values_filterable_toDel, delta.ToDelete) + + allPrev = append(allPrev, prop38_dropping2values_filterable_prev) + allExpectedToAdd = append(allExpectedToAdd, expected_prop38_dropping2values_filterable_toAdd...) + allExpectedToDel = append(allExpectedToDel, expected_prop38_dropping2values_filterable_toDel...) 
+ }) + }) + + t.Run("no changes to 2 values", func(t *testing.T) { + t.Run("searchableInv/filterable", func(t *testing.T) { + prop09_noChanges_searchableInv_filterable_prev := Property{ + Name: "noChanges_sif", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("immutable_02"), + TermFrequency: 3, + }}, + Length: 25, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop09_noChanges_searchableInv_filterable_next := prop09_noChanges_searchableInv_filterable_prev + + delta := DeltaSkipSearchable( + []Property{prop09_noChanges_searchableInv_filterable_prev}, + []Property{prop09_noChanges_searchableInv_filterable_next}, + []string{prop09_noChanges_searchableInv_filterable_prev.Name}, + ) + + assert.Empty(t, delta.ToAdd) + assert.Empty(t, delta.ToDelete) + + allPrev = append(allPrev, prop09_noChanges_searchableInv_filterable_prev) + allNext = append(allNext, prop09_noChanges_searchableInv_filterable_next) + allSkipSearchable = append(allSkipSearchable, prop09_noChanges_searchableInv_filterable_prev.Name) + }) + + t.Run("searchableInv", func(t *testing.T) { + prop19_noChanges_searchableInv_prev := Property{ + Name: "noChanges_si", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("immutable_02"), + TermFrequency: 3, + }}, + Length: 25, + HasFilterableIndex: false, + HasSearchableIndex: true, + } + prop19_noChanges_searchableInv_next := prop19_noChanges_searchableInv_prev + + delta := DeltaSkipSearchable( + []Property{prop19_noChanges_searchableInv_prev}, + []Property{prop19_noChanges_searchableInv_next}, + []string{prop19_noChanges_searchableInv_prev.Name}, + ) + + assert.Empty(t, delta.ToAdd) + assert.Empty(t, delta.ToDelete) + + allPrev = append(allPrev, prop19_noChanges_searchableInv_prev) + allNext = append(allNext, prop19_noChanges_searchableInv_next) + allSkipSearchable = append(allSkipSearchable, prop19_noChanges_searchableInv_prev.Name) + }) + + 
t.Run("searchableMap/filterable", func(t *testing.T) { + prop29_noChanges_searchableMap_filterable_prev := Property{ + Name: "noChanges_smf", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("immutable_02"), + TermFrequency: 3, + }}, + Length: 25, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop29_noChanges_searchableMap_filterable_next := prop29_noChanges_searchableMap_filterable_prev + + delta := DeltaSkipSearchable( + []Property{prop29_noChanges_searchableMap_filterable_prev}, + []Property{prop29_noChanges_searchableMap_filterable_next}, + nil, + ) + + assert.Empty(t, delta.ToAdd) + assert.Empty(t, delta.ToDelete) + + allPrev = append(allPrev, prop29_noChanges_searchableMap_filterable_prev) + allNext = append(allNext, prop29_noChanges_searchableMap_filterable_next) + }) + + t.Run("filterable", func(t *testing.T) { + prop39_noChanges_filterable_prev := Property{ + Name: "noChanges_f", + Items: []Countable{{ + Data: []byte("immutable_01"), + TermFrequency: 7, + }, { + Data: []byte("immutable_02"), + TermFrequency: 3, + }}, + Length: 25, + HasFilterableIndex: true, + HasSearchableIndex: true, + } + prop39_noChanges_filterable_next := prop39_noChanges_filterable_prev + + delta := DeltaSkipSearchable( + []Property{prop39_noChanges_filterable_prev}, + []Property{prop39_noChanges_filterable_next}, + nil, + ) + + assert.Empty(t, delta.ToAdd) + assert.Empty(t, delta.ToDelete) + + allPrev = append(allPrev, prop39_noChanges_filterable_prev) + allNext = append(allNext, prop39_noChanges_filterable_next) + }) + }) + + t.Run("sanity check - all properties at once", func(t *testing.T) { + delta := DeltaSkipSearchable(allPrev, allNext, allSkipSearchable) + + assert.ElementsMatch(t, allExpectedToAdd, delta.ToAdd) + assert.ElementsMatch(t, allExpectedToDel, delta.ToDelete) + }) + }) +} + +func TestDeltaAnalyzer_Arrays(t *testing.T) { + lexInt64 := func(val int64) []byte { + bytes, _ := 
ent.LexicographicallySortableInt64(val) + return bytes + } + lexBool := func(val bool) []byte { + if val { + return []uint8{1} + } + return []uint8{0} + } + + t.Run("with previous indexing - both additions and deletions", func(t *testing.T) { + previous := []Property{ + { + Name: "ints", + Items: []Countable{ + {Data: lexInt64(101)}, + {Data: lexInt64(101)}, + {Data: lexInt64(101)}, + {Data: lexInt64(101)}, + {Data: lexInt64(101)}, + {Data: lexInt64(101)}, + {Data: lexInt64(102)}, + {Data: lexInt64(103)}, + {Data: lexInt64(104)}, + }, + Length: 9, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "booleans", + Items: []Countable{ + {Data: lexBool(true)}, + {Data: lexBool(true)}, + {Data: lexBool(true)}, + {Data: lexBool(false)}, + }, + Length: 4, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "numbers", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "texts", + Items: []Countable{ + {Data: []byte("aaa")}, + {Data: []byte("bbb")}, + {Data: []byte("ccc")}, + }, + Length: 3, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "dates", + Items: []Countable{ + {Data: []byte("2021-06-01T22:18:59.640162Z")}, + {Data: []byte("2022-06-01T22:18:59.640162Z")}, + }, + Length: 2, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "_creationTimeUnix", + Items: []Countable{ + {Data: []byte("1703778000000")}, + }, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "_lastUpdateTimeUnix", + Items: []Countable{ + {Data: []byte("1703778000000")}, + }, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + } + next := []Property{ + { + Name: "ints", + Items: []Countable{ + {Data: lexInt64(101)}, + {Data: lexInt64(101)}, + {Data: lexInt64(101)}, + {Data: lexInt64(101)}, + {Data: lexInt64(103)}, + {Data: lexInt64(104)}, + {Data: lexInt64(105)}, + }, + Length: 7, + 
HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "booleans", + Items: []Countable{ + {Data: lexBool(true)}, + {Data: lexBool(true)}, + {Data: lexBool(true)}, + {Data: lexBool(false)}, + }, + Length: 4, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "texts", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "_creationTimeUnix", + Items: []Countable{ + {Data: []byte("1703778000000")}, + }, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "_lastUpdateTimeUnix", + Items: []Countable{ + {Data: []byte("1703778500000")}, + }, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + } + + expectedAdd := []Property{ + { + Name: "ints", + Items: []Countable{ + {Data: lexInt64(105)}, + }, + Length: 7, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "texts", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "dates", + Items: []Countable{}, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "_lastUpdateTimeUnix", + Items: []Countable{ + {Data: []byte("1703778500000")}, + }, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + } + expectedDelete := []Property{ + { + Name: "ints", + Items: []Countable{ + {Data: lexInt64(102)}, + }, + Length: 9, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "texts", + Items: []Countable{ + {Data: []byte("aaa")}, + {Data: []byte("bbb")}, + {Data: []byte("ccc")}, + }, + Length: 3, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "_lastUpdateTimeUnix", + Items: []Countable{ + {Data: []byte("1703778000000")}, + }, + Length: 0, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "dates", + Items: []Countable{ + {Data: []byte("2021-06-01T22:18:59.640162Z")}, + {Data: 
[]byte("2022-06-01T22:18:59.640162Z")}, + }, + Length: 2, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + } + + delta := Delta(previous, next) + + assert.ElementsMatch(t, expectedAdd, delta.ToAdd) + assert.ElementsMatch(t, expectedDelete, delta.ToDelete) + }) +} + +func TestDeltaNilAnalyzer(t *testing.T) { + previous := []NilProperty{ + { + Name: "ints", + AddToPropertyLength: false, + }, + { + Name: "booleans", + AddToPropertyLength: true, + }, + { + Name: "numbers", + AddToPropertyLength: true, + }, + } + next := []NilProperty{ + { + Name: "booleans", + AddToPropertyLength: true, + }, + { + Name: "texts", + AddToPropertyLength: true, + }, + { + Name: "dates", + AddToPropertyLength: false, + }, + } + + expectedAdd := []NilProperty{ + { + Name: "texts", + AddToPropertyLength: true, + }, + { + Name: "dates", + AddToPropertyLength: false, + }, + } + expectedDelete := []NilProperty{ + { + Name: "ints", + AddToPropertyLength: false, + }, + { + Name: "numbers", + AddToPropertyLength: true, + }, + } + + deltaNil := DeltaNil(previous, next) + assert.Equal(t, expectedAdd, deltaNil.ToAdd) + assert.Equal(t, expectedDelete, deltaNil.ToDelete) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/delta_merger.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/delta_merger.go new file mode 100644 index 0000000000000000000000000000000000000000..33aa354a0da796f4cac9b93b15eea4360a2e2ec1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/delta_merger.go @@ -0,0 +1,226 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +// DeltaMerger can be used to condense the number of single writes into one big +// one. 
Additionally it removes overlaps between additions and deletions. It is +// meant to be used in batch situation, where 5 ref objects in a row might each +// increase the doc count by one. Instead of writing 5 additions and 4 +// deletions, this can be condensed to write just one addition +type DeltaMerger struct { + additions propsByName + deletions propsByName +} + +func NewDeltaMerger() *DeltaMerger { + return &DeltaMerger{ + additions: propsByName{}, + deletions: propsByName{}, + } +} + +func (dm *DeltaMerger) AddAdditions(props []Property, docID uint64) { + for _, prop := range props { + storedProp := dm.additions.getOrCreate(prop.Name) + storedProp.hasFilterableIndex = prop.HasFilterableIndex + storedProp.hasSearchableIndex = prop.HasSearchableIndex + for _, item := range prop.Items { + storedItem := storedProp.getOrCreateItem(item.Data) + storedItem.addDocIDAndFrequency(docID, item.TermFrequency) + } + } +} + +func (dm *DeltaMerger) AddDeletions(props []Property, docID uint64) { + for _, prop := range props { + additionProp := dm.additions.getOrCreate(prop.Name) + for _, item := range prop.Items { + additionItem := additionProp.getOrCreateItem(item.Data) + ok := additionItem.deleteIfPresent(docID) + if ok { + // we are done with this prop, no need to register an explicit deletion + continue + } + + // this was not added by us, we need to remove it + deletionProp := dm.deletions.getOrCreate(prop.Name) + deletionProp.hasFilterableIndex = prop.HasFilterableIndex + deletionProp.hasSearchableIndex = prop.HasSearchableIndex + deletionItem := deletionProp.getOrCreateItem(item.Data) + deletionItem.addDocIDAndFrequency(docID, 0) // frequency does not matter on deletion + } + } +} + +func (dm *DeltaMerger) Merge() DeltaMergeResult { + return DeltaMergeResult{ + Additions: dm.additions.merge(), + Deletions: dm.deletions.merge(), + } +} + +type DeltaMergeResult struct { + Additions []MergeProperty + Deletions []MergeProperty +} + +type MergeProperty struct { + Name 
string + MergeItems []MergeItem + HasFilterableIndex bool + HasSearchableIndex bool +} + +type MergeItem struct { + Data []byte + DocIDs []MergeDocIDWithFrequency +} + +// IDs is meant for cases such as deletion, where the frequency is irrelevant, +// but the expected format is a []docID +func (mi MergeItem) IDs() []uint64 { + out := make([]uint64, len(mi.DocIDs)) + for i, tuple := range mi.DocIDs { + out[i] = tuple.DocID + } + + return out +} + +// Countable converts the merge item to a regular (non-merge) Countable. Note +// that this loses the IDs and Frequency information, so IDs have to be passed +// separately using .IDs() +func (mi MergeItem) Countable() Countable { + return Countable{ + Data: mi.Data, + } +} + +type MergeDocIDWithFrequency struct { + DocID uint64 + Frequency float32 +} + +type propsByName map[string]*propWithDocIDs + +func (pbn propsByName) getOrCreate(name string) *propWithDocIDs { + prop, ok := pbn[name] + if ok { + return prop + } + prop = &propWithDocIDs{name: name, items: map[string]*countableWithDocIDs{}} + pbn[name] = prop + return prop +} + +func (pbn propsByName) merge() []MergeProperty { + out := make([]MergeProperty, len(pbn)) + i := 0 + for _, prop := range pbn { + mergedProp := prop.merge() + if mergedProp == nil { + continue + } + out[i] = *mergedProp + i++ + } + + if i == 0 { + return nil + } + + return out[:i] +} + +type propWithDocIDs struct { + name string + items map[string]*countableWithDocIDs + hasFilterableIndex bool + hasSearchableIndex bool +} + +func (pwd *propWithDocIDs) getOrCreateItem(data []byte) *countableWithDocIDs { + name := string(data) + item, ok := pwd.items[name] + if ok { + return item + } + item = &countableWithDocIDs{ + value: data, + docIDs: map[uint64]float32{}, + } + pwd.items[name] = item + return item +} + +func (pwd *propWithDocIDs) merge() *MergeProperty { + items := make([]MergeItem, len(pwd.items)) + + i := 0 + for _, item := range pwd.items { + mergedItem := item.merge() + if mergedItem == 
nil { + continue + } + + items[i] = *mergedItem + i++ + } + + if i == 0 { + return nil + } + + return &MergeProperty{ + Name: pwd.name, + MergeItems: items[:i], + HasFilterableIndex: pwd.hasFilterableIndex, + HasSearchableIndex: pwd.hasSearchableIndex, + } +} + +type countableWithDocIDs struct { + value []byte + docIDs map[uint64]float32 // map[docid]frequency +} + +func (cwd *countableWithDocIDs) addDocIDAndFrequency(docID uint64, freq float32) { + cwd.docIDs[docID] = freq +} + +func (cwd *countableWithDocIDs) deleteIfPresent(docID uint64) bool { + _, ok := cwd.docIDs[docID] + if !ok { + return false + } + + delete(cwd.docIDs, docID) + return true +} + +func (cwd *countableWithDocIDs) merge() *MergeItem { + if len(cwd.docIDs) == 0 { + return nil + } + + ids := make([]MergeDocIDWithFrequency, len(cwd.docIDs)) + i := 0 + for docID, freq := range cwd.docIDs { + ids[i] = MergeDocIDWithFrequency{DocID: docID, Frequency: freq} + i++ + } + + return &MergeItem{ + Data: cwd.value, + DocIDs: ids, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/delta_merger_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/delta_merger_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2eda30083268e27af359f74cd463b600f5f67e4b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/delta_merger_test.go @@ -0,0 +1,62 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDeltaMerger(t *testing.T) { + dm := NewDeltaMerger() + + t.Run("a simple add and delete with one prop and one doc id", func(t *testing.T) { + dm.AddAdditions([]Property{{ + Name: "field1", Items: []Countable{ + {Data: []byte("a")}, + {Data: []byte("b")}, + }, + HasFilterableIndex: false, + HasSearchableIndex: true, + }}, 0) + + dm.AddDeletions([]Property{{ + Name: "field1", Items: []Countable{ + {Data: []byte("a")}, + }, + }}, 0) + + expected := DeltaMergeResult{ + Additions: []MergeProperty{ + { + Name: "field1", + HasFilterableIndex: false, + HasSearchableIndex: true, + MergeItems: []MergeItem{ + { + Data: []byte("b"), + DocIDs: []MergeDocIDWithFrequency{ + { + DocID: 0, + }, + }, + }, + }, + }, + }, + } + + actual := dm.Merge() + assert.Equal(t, expected, actual) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/filters_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/filters_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..bc22a7ca98af509661d4157ab61da4ae2a93df35 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/filters_integration_test.go @@ -0,0 +1,1317 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package inverted + +import ( + "context" + "encoding/binary" + "fmt" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/filters" + entinverted "github.com/weaviate/weaviate/entities/inverted" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/config" +) + +const ( + className = "TestClass" +) + +// TODO amourao, check if this is needed for SegmentInverted as well +func Test_Filters_String(t *testing.T) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + store, err := lsmkv.New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + + propName := "inverted-with-frequency" + bucketName := helpers.BucketSearchableFromPropNameLSM(propName) + require.Nil(t, store.CreateOrLoadBucket(context.Background(), + bucketName, lsmkv.WithStrategy(lsmkv.StrategyMapCollection))) + bWithFrequency := store.Bucket(bucketName) + + defer store.Shutdown(context.Background()) + + fakeInvertedIndex := map[string][]uint64{ + "modulo-2": {2, 4, 6, 8, 10, 12, 14, 16}, + "modulo-3": {3, 6, 9, 12, 15}, + "modulo-4": {4, 8, 12, 16}, + "modulo-5": {5, 10, 15}, + "modulo-6": {6, 12}, + "modulo-7": {7, 14}, + "modulo-8": {8, 16}, + "modulo-9": {9}, + "modulo-10": {10}, + "modulo-11": {11}, + "modulo-12": {12}, + "modulo-13": {13}, + "modulo-14": {14}, + "modulo-15": {15}, + 
"modulo-16": {16}, + } + + t.Run("import data", func(t *testing.T) { + for value, ids := range fakeInvertedIndex { + idsMapValues := idsToBinaryMapValues(ids) + for _, pair := range idsMapValues { + require.Nil(t, bWithFrequency.MapSet([]byte(value), pair)) + } + } + + require.Nil(t, bWithFrequency.FlushAndSwitch()) + }) + + maxDocID := uint64(25) + bitmapFactory := roaringset.NewBitmapFactory(roaringset.NewBitmapBufPoolNoop(), newFakeMaxIDGetter(maxDocID)) + + searcher := NewSearcher(logger, store, createSchema().GetClass, nil, nil, + fakeStopwordDetector{}, 2, func() bool { return false }, "", + config.DefaultQueryNestedCrossReferenceLimit, bitmapFactory) + + type test struct { + name string + filter *filters.LocalFilter + expectedListBeforeUpdate *sroar.Bitmap + expectedListAfterUpdate *sroar.Bitmap + } + + createNotTest := func(tt test) test { + return test{ + name: fmt.Sprintf("NOT %s", tt.name), + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorNot, + Operands: []filters.Clause{*tt.filter.Root}, + }, + }, + expectedListBeforeUpdate: sroar.Prefill(maxDocID).AndNot(tt.expectedListBeforeUpdate), + expectedListAfterUpdate: sroar.Prefill(maxDocID).AndNot(tt.expectedListAfterUpdate), + } + } + + tests := []test{ + { + name: "exact match - single level", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: "modulo-7", + Type: schema.DataTypeText, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(7, 14), + expectedListAfterUpdate: roaringset.NewBitmap(7, 14, 21), + }, + { + name: "like operator", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorLike, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: "modulo-1*", + Type: schema.DataTypeText, + }, + }, + }, + 
expectedListBeforeUpdate: roaringset.NewBitmap(10, 11, 12, 13, 14, 15, 16), + expectedListAfterUpdate: roaringset.NewBitmap(10, 11, 12, 13, 14, 15, 16, 17), + }, + { + name: "exact match - or filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: "modulo-7", + Type: schema.DataTypeText, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: "modulo-8", + Type: schema.DataTypeText, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(7, 8, 14, 16), + expectedListAfterUpdate: roaringset.NewBitmap(7, 8, 14, 16, 21), + }, + { + name: "exact match - and filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: "modulo-7", + Type: schema.DataTypeText, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: "modulo-14", + Type: schema.DataTypeText, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(14), + expectedListAfterUpdate: roaringset.NewBitmap(14), + }, + { + // This test prevents a regression on + // https://github.com/weaviate/weaviate/issues/1770 + name: "combined and/or filter, see gh-1770", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + // This part will produce results + { + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: 
filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: "modulo-7", + Type: schema.DataTypeText, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: "modulo-8", + Type: schema.DataTypeText, + }, + }, + }, + }, + + // This part will produce no results + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: "modulo-7000000", + Type: schema.DataTypeText, + }, + }, + }, + }, + }, + // prior to the fix of gh-1770 the second AND operand was ignored due to + // a missing hash in the merge and we would get results here, when we + // shouldn't + expectedListBeforeUpdate: roaringset.NewBitmap(), + expectedListAfterUpdate: roaringset.NewBitmap(), + }, + } + notTests := make([]test, len(tests)) + for i := range tests { + notTests[i] = createNotTest(tests[i]) + } + + for _, test := range append(tests, notTests...) 
{ + t.Run(test.name, func(t *testing.T) { + t.Run("before update", func(t *testing.T) { + res, err := searcher.DocIDs(context.Background(), test.filter, + additional.Properties{}, className) + assert.Nil(t, err) + assert.ElementsMatch(t, test.expectedListBeforeUpdate.ToArray(), res.Slice()) + res.Close() + }) + + t.Run("update", func(t *testing.T) { + value := []byte("modulo-7") + idsMapValues := idsToBinaryMapValues([]uint64{21}) + for _, pair := range idsMapValues { + require.Nil(t, bWithFrequency.MapSet([]byte(value), pair)) + } + + // for like filter + value = []byte("modulo-17") + idsMapValues = idsToBinaryMapValues([]uint64{17}) + for _, pair := range idsMapValues { + require.Nil(t, bWithFrequency.MapSet([]byte(value), pair)) + } + }) + + t.Run("after update", func(t *testing.T) { + res, err := searcher.DocIDs(context.Background(), test.filter, + additional.Properties{}, className) + assert.Nil(t, err) + assert.ElementsMatch(t, test.expectedListAfterUpdate.ToArray(), res.Slice()) + res.Close() + }) + + t.Run("restore inverted index, so test suite can be run again", func(t *testing.T) { + idsMapValues := idsToBinaryMapValues([]uint64{21}) + require.Nil(t, bWithFrequency.MapDeleteKey([]byte("modulo-7"), + idsMapValues[0].Key)) + + idsMapValues = idsToBinaryMapValues([]uint64{17}) + require.Nil(t, bWithFrequency.MapDeleteKey([]byte("modulo-17"), + idsMapValues[0].Key)) + }) + }) + } +} + +func Test_Filters_Int(t *testing.T) { + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + store, err := lsmkv.New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.NoError(t, err) + defer store.Shutdown(context.Background()) + + maxDocID := uint64(25) + fakeInvertedIndex := []struct { + val int64 + ids []uint64 + }{ + {val: 2, ids: []uint64{2, 4, 6, 8, 10, 12, 14, 16}}, + {val: 3, ids: []uint64{3, 6, 9, 12, 15}}, + {val: 4, ids: []uint64{4, 8, 12, 16}}, + 
{val: 5, ids: []uint64{5, 10, 15}}, + {val: 6, ids: []uint64{6, 12}}, + {val: 7, ids: []uint64{7, 14}}, + {val: 8, ids: []uint64{8, 16}}, + {val: 9, ids: []uint64{9}}, + {val: 10, ids: []uint64{10}}, + {val: 11, ids: []uint64{11}}, + {val: 12, ids: []uint64{12}}, + {val: 13, ids: []uint64{13}}, + {val: 14, ids: []uint64{14}}, + {val: 15, ids: []uint64{15}}, + {val: 16, ids: []uint64{16}}, + } + + bitmapFactory := roaringset.NewBitmapFactory(roaringset.NewBitmapBufPoolNoop(), newFakeMaxIDGetter(maxDocID)) + searcher := NewSearcher(logger, store, createSchema().GetClass, nil, nil, + fakeStopwordDetector{}, 2, func() bool { return false }, "", + config.DefaultQueryNestedCrossReferenceLimit, bitmapFactory) + + type test struct { + name string + filter *filters.LocalFilter + expectedListBeforeUpdate *sroar.Bitmap + expectedListAfterUpdate *sroar.Bitmap + } + createNotTest := func(tt test) test { + return test{ + name: fmt.Sprintf("NOT %s", tt.name), + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorNot, + Operands: []filters.Clause{*tt.filter.Root}, + }, + }, + expectedListBeforeUpdate: sroar.Prefill(maxDocID).AndNot(tt.expectedListBeforeUpdate), + expectedListAfterUpdate: sroar.Prefill(maxDocID).AndNot(tt.expectedListAfterUpdate), + } + } + + t.Run("strategy set", func(t *testing.T) { + propName := "inverted-without-frequency-set" + bucketName := helpers.BucketFromPropNameLSM(propName) + require.NoError(t, store.CreateOrLoadBucket(context.Background(), + bucketName, lsmkv.WithStrategy(lsmkv.StrategySetCollection))) + bucket := store.Bucket(bucketName) + + t.Run("import data", func(t *testing.T) { + for _, idx := range fakeInvertedIndex { + idValues := idsToBinaryList(idx.ids) + valueBytes, err := entinverted.LexicographicallySortableInt64(idx.val) + require.NoError(t, err) + require.NoError(t, bucket.SetAdd(valueBytes, idValues)) + } + + require.Nil(t, bucket.FlushAndSwitch()) + }) + + tests := []test{ + { + name: "exact match - 
single level", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(7, 14), + expectedListAfterUpdate: roaringset.NewBitmap(7, 14, 21), + }, + { + name: "not equal", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorNotEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 13, + Type: schema.DataTypeInt, + }, + }, + }, + // For NotEqual, all doc ids not matching will be returned, up to `maxDocID` + expectedListBeforeUpdate: sroar.Prefill(maxDocID).AndNot(roaringset.NewBitmap(13)), + expectedListAfterUpdate: sroar.Prefill(maxDocID).AndNot(roaringset.NewBitmap(13)), + }, + { + name: "exact match - or filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 8, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(7, 8, 14, 16), + expectedListAfterUpdate: roaringset.NewBitmap(7, 8, 14, 16, 21), + }, + { + name: "exact match - and filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + 
}, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 14, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(14), + expectedListAfterUpdate: roaringset.NewBitmap(14), + }, + { + name: "range match - or filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorLessThanEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + }, + { + Operator: filters.OperatorGreaterThan, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 14, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 16), + expectedListAfterUpdate: roaringset.NewBitmap(2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 16, 21), + }, + { + name: "range match - and filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorGreaterThanEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + }, + { + Operator: filters.OperatorLessThan, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 14, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(7, 8, 9, 10, 11, 12, 13, 14, 15, 16), + expectedListAfterUpdate: roaringset.NewBitmap(7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 21), + }, + } + notTests := make([]test, len(tests)) + for i := range tests { + notTests[i] = createNotTest(tests[i]) + } + + 
for _, test := range append(tests, notTests...) { + t.Run(test.name, func(t *testing.T) { + t.Run("before update", func(t *testing.T) { + res, err := searcher.DocIDs(context.Background(), test.filter, + additional.Properties{}, className) + assert.NoError(t, err) + assert.ElementsMatch(t, test.expectedListBeforeUpdate.ToArray(), res.Slice()) + res.Close() + }) + + t.Run("update", func(t *testing.T) { + valueBytes, _ := entinverted.LexicographicallySortableInt64(7) + idsBinary := idsToBinaryList([]uint64{21}) + require.Nil(t, bucket.SetAdd(valueBytes, idsBinary)) + }) + + t.Run("after update", func(t *testing.T) { + res, err := searcher.DocIDs(context.Background(), test.filter, + additional.Properties{}, className) + assert.NoError(t, err) + assert.ElementsMatch(t, test.expectedListAfterUpdate.ToArray(), res.Slice()) + res.Close() + }) + + t.Run("restore inverted index, so we can run test suite again", func(t *testing.T) { + idsList := idsToBinaryList([]uint64{21}) + valueBytes, _ := entinverted.LexicographicallySortableInt64(7) + require.NoError(t, bucket.SetDeleteSingle(valueBytes, idsList[0])) + }) + }) + } + }) + + t.Run("strategy roaringset", func(t *testing.T) { + propName := "inverted-without-frequency-roaringset" + bucketName := helpers.BucketFromPropNameLSM(propName) + require.NoError(t, store.CreateOrLoadBucket(context.Background(), bucketName, + lsmkv.WithStrategy(lsmkv.StrategyRoaringSet), + lsmkv.WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()), + )) + bucket := store.Bucket(bucketName) + + t.Run("import data", func(t *testing.T) { + for _, idx := range fakeInvertedIndex { + valueBytes, err := entinverted.LexicographicallySortableInt64(idx.val) + require.NoError(t, err) + require.NoError(t, bucket.RoaringSetAddList(valueBytes, idx.ids)) + } + + require.Nil(t, bucket.FlushAndSwitch()) + }) + + tests := []test{ + { + name: "exact match - single level", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: 
&filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(7, 14), + expectedListAfterUpdate: roaringset.NewBitmap(7, 14, 21), + }, + { + name: "not equal", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorNotEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 13, + Type: schema.DataTypeInt, + }, + }, + }, + // For NotEqual, all doc ids not matching will be returned, up to `maxDocID` + expectedListBeforeUpdate: sroar.Prefill(maxDocID).AndNot(roaringset.NewBitmap(13)), + expectedListAfterUpdate: sroar.Prefill(maxDocID).AndNot(roaringset.NewBitmap(13)), + }, + { + name: "exact match - or filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 8, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(7, 8, 14, 16), + expectedListAfterUpdate: roaringset.NewBitmap(7, 8, 14, 16, 21), + }, + { + name: "exact match - and filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: 
schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 14, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(14), + expectedListAfterUpdate: roaringset.NewBitmap(14), + }, + { + name: "range match - or filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorLessThanEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + }, + { + Operator: filters.OperatorGreaterThan, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 14, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 16), + expectedListAfterUpdate: roaringset.NewBitmap(2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 16, 21), + }, + { + name: "range match - and filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorGreaterThanEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + }, + { + Operator: filters.OperatorLessThan, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 14, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(7, 8, 9, 10, 11, 12, 13, 14, 15, 16), + expectedListAfterUpdate: roaringset.NewBitmap(7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 21), + }, + } + notTests := make([]test, len(tests)) + for i := range tests { + notTests[i] = createNotTest(tests[i]) + } + + for _, test := range append(tests, notTests...) 
{ + t.Run(test.name, func(t *testing.T) { + t.Run("before update", func(t *testing.T) { + res, err := searcher.DocIDs(context.Background(), test.filter, + additional.Properties{}, className) + assert.NoError(t, err) + assert.ElementsMatch(t, test.expectedListBeforeUpdate.ToArray(), res.Slice()) + res.Close() + }) + + t.Run("update", func(t *testing.T) { + valueBytes, _ := entinverted.LexicographicallySortableInt64(7) + require.Nil(t, bucket.RoaringSetAddOne(valueBytes, 21)) + }) + + t.Run("after update", func(t *testing.T) { + res, err := searcher.DocIDs(context.Background(), test.filter, + additional.Properties{}, className) + assert.NoError(t, err) + assert.ElementsMatch(t, test.expectedListAfterUpdate.ToArray(), res.Slice()) + res.Close() + }) + + t.Run("restore inverted index, so we can run test suite again", func(t *testing.T) { + valueBytes, _ := entinverted.LexicographicallySortableInt64(7) + require.NoError(t, bucket.RoaringSetRemoveOne(valueBytes, 21)) + }) + }) + } + }) + + t.Run("strategy roaringsetrange", func(t *testing.T) { + run := func(t *testing.T, propName string, bucketName string) { + bucket := store.Bucket(bucketName) + + t.Run("import data", func(t *testing.T) { + for _, idx := range fakeInvertedIndex { + valueBytes, err := entinverted.LexicographicallySortableInt64(idx.val) + require.NoError(t, err) + require.NoError(t, bucket.RoaringSetRangeAdd(binary.BigEndian.Uint64(valueBytes), idx.ids...)) + } + + require.Nil(t, bucket.FlushAndSwitch()) + }) + + tests := []test{ + { + name: "exact match - single level", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(7), + expectedListAfterUpdate: roaringset.NewBitmap(7, 21), + }, + { + name: "not equal", + filter: &filters.LocalFilter{ + Root: 
&filters.Clause{ + Operator: filters.OperatorNotEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 13, + Type: schema.DataTypeInt, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 16), + expectedListAfterUpdate: roaringset.NewBitmap(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 16, 21), + }, + { + name: "exact match - or filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 8, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(7, 8), + expectedListAfterUpdate: roaringset.NewBitmap(7, 8, 21), + }, + { + name: "exact match - and filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 14, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(), + expectedListAfterUpdate: roaringset.NewBitmap(), + }, + { + name: "range match - or filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: 
filters.OperatorLessThanEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + }, + { + Operator: filters.OperatorGreaterThan, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 14, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(2, 3, 4, 5, 6, 7, 15, 16), + expectedListAfterUpdate: roaringset.NewBitmap(2, 3, 4, 5, 6, 7, 15, 16, 21), + }, + { + name: "range match - and filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorGreaterThanEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 7, + Type: schema.DataTypeInt, + }, + }, + { + Operator: filters.OperatorLessThan, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: 14, + Type: schema.DataTypeInt, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: roaringset.NewBitmap(7, 8, 9, 10, 11, 12, 13), + expectedListAfterUpdate: roaringset.NewBitmap(7, 8, 9, 10, 11, 12, 13, 21), + }, + } + notTests := make([]test, len(tests)) + for i := range tests { + notTests[i] = createNotTest(tests[i]) + } + + for _, test := range append(tests, notTests...) 
{ + t.Run(test.name, func(t *testing.T) { + t.Run("before update", func(t *testing.T) { + res, err := searcher.DocIDs(context.Background(), test.filter, + additional.Properties{}, className) + assert.NoError(t, err) + assert.ElementsMatch(t, test.expectedListBeforeUpdate.ToArray(), res.Slice()) + res.Close() + }) + + t.Run("update", func(t *testing.T) { + valueBytes, _ := entinverted.LexicographicallySortableInt64(7) + require.Nil(t, bucket.RoaringSetRangeAdd(binary.BigEndian.Uint64(valueBytes), 21)) + }) + + t.Run("after update", func(t *testing.T) { + res, err := searcher.DocIDs(context.Background(), test.filter, + additional.Properties{}, className) + assert.NoError(t, err) + assert.ElementsMatch(t, test.expectedListAfterUpdate.ToArray(), res.Slice()) + res.Close() + }) + + t.Run("restore inverted index, so we can run test suite again", func(t *testing.T) { + valueBytes, _ := entinverted.LexicographicallySortableInt64(7) + require.NoError(t, bucket.RoaringSetRangeRemove(binary.BigEndian.Uint64(valueBytes), 21)) + }) + }) + } + } + + t.Run("segments on disk", func(t *testing.T) { + propName := "inverted-roaringsetrange-on-disk" + bucketName := helpers.BucketRangeableFromPropNameLSM(propName) + err := store.CreateOrLoadBucket(context.Background(), bucketName, + lsmkv.WithStrategy(lsmkv.StrategyRoaringSetRange)) + require.NoError(t, err) + + run(t, propName, bucketName) + }) + + t.Run("segment in memory", func(t *testing.T) { + propName := "inverted-roaringsetrange-in-memory" + bucketName := helpers.BucketRangeableFromPropNameLSM(propName) + err := store.CreateOrLoadBucket(context.Background(), bucketName, + lsmkv.WithStrategy(lsmkv.StrategyRoaringSetRange), + lsmkv.WithKeepSegmentsInMemory(true), + lsmkv.WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()), + ) + require.NoError(t, err) + + run(t, propName, bucketName) + }) + }) +} + +// This prevents a regression on +// https://github.com/weaviate/weaviate/issues/1772 +func 
Test_Filters_String_DuplicateEntriesInAnd(t *testing.T) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + store, err := lsmkv.New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + + propName := "inverted-with-frequency" + bucketName := helpers.BucketSearchableFromPropNameLSM(propName) + require.Nil(t, store.CreateOrLoadBucket(context.Background(), + bucketName, lsmkv.WithStrategy(lsmkv.StrategyMapCollection))) + bWithFrequency := store.Bucket(bucketName) + + defer store.Shutdown(context.Background()) + + fakeInvertedIndex := map[string][]uint64{ + "list_a": {0, 1}, + "list_b": {1, 1, 1, 1, 1}, + } + + t.Run("import data", func(t *testing.T) { + for value, ids := range fakeInvertedIndex { + idsMapValues := idsToBinaryMapValues(ids) + for _, pair := range idsMapValues { + require.Nil(t, bWithFrequency.MapSet([]byte(value), pair)) + } + } + require.Nil(t, bWithFrequency.FlushAndSwitch()) + }) + + bitmapFactory := roaringset.NewBitmapFactory(roaringset.NewBitmapBufPoolNoop(), newFakeMaxIDGetter(200)) + + searcher := NewSearcher(logger, store, createSchema().GetClass, nil, nil, + fakeStopwordDetector{}, 2, func() bool { return false }, "", + config.DefaultQueryNestedCrossReferenceLimit, bitmapFactory) + + type test struct { + name string + filter *filters.LocalFilter + expectedListBeforeUpdate helpers.AllowList + expectedListAfterUpdate helpers.AllowList + } + + tests := []test{ + { + name: "exact match - and filter", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: "list_a", + Type: schema.DataTypeText, + }, + }, + { + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "foo", + Property: 
schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: "list_b", + Type: schema.DataTypeText, + }, + }, + }, + }, + }, + expectedListBeforeUpdate: helpers.NewAllowList(1), + expectedListAfterUpdate: helpers.NewAllowList(1, 3), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Run("before update", func(t *testing.T) { + res, err := searcher.DocIDs(context.Background(), test.filter, + additional.Properties{}, className) + assert.Nil(t, err) + assert.Equal(t, test.expectedListBeforeUpdate.Slice(), res.Slice()) + res.Close() + }) + + t.Run("update", func(t *testing.T) { + value := []byte("list_a") + idsMapValues := idsToBinaryMapValues([]uint64{3}) + for _, pair := range idsMapValues { + require.Nil(t, bWithFrequency.MapSet([]byte(value), pair)) + } + + value = []byte("list_b") + idsMapValues = idsToBinaryMapValues([]uint64{3}) + for _, pair := range idsMapValues { + require.Nil(t, bWithFrequency.MapSet([]byte(value), pair)) + } + }) + + t.Run("after update", func(t *testing.T) { + res, err := searcher.DocIDs(context.Background(), test.filter, + additional.Properties{}, className) + assert.Nil(t, err) + assert.Equal(t, test.expectedListAfterUpdate.Slice(), res.Slice()) + res.Close() + }) + + t.Run("restore inverted index, so we can run test suite again", + func(t *testing.T) { + idsMapValues := idsToBinaryMapValues([]uint64{3}) + require.Nil(t, bWithFrequency.MapDeleteKey([]byte("list_a"), + idsMapValues[0].Key)) + require.Nil(t, bWithFrequency.MapDeleteKey([]byte("list_b"), + idsMapValues[0].Key)) + }) + }) + } +} + +func idsToBinaryList(ids []uint64) [][]byte { + out := make([][]byte, len(ids)) + for i, id := range ids { + out[i] = make([]byte, 8) + binary.LittleEndian.PutUint64(out[i], id) + } + + return out +} + +func idsToBinaryMapValues(ids []uint64) []lsmkv.MapPair { + out := make([]lsmkv.MapPair, len(ids)) + for i, id := range ids { + out[i] = lsmkv.MapPair{ + Key: make([]byte, 8), + Value: make([]byte, 8), + } 
+ binary.BigEndian.PutUint64(out[i].Key, id) + // leave frequency empty for now + } + + return out +} + +func createSchema() *schema.Schema { + vFalse := false + vTrue := true + + return &schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: className, + Properties: []*models.Property{ + { + Name: "inverted-with-frequency", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + IndexFilterable: &vFalse, + IndexSearchable: &vTrue, + IndexRangeFilters: &vFalse, + }, + { + Name: "inverted-without-frequency-set", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + }, + { + Name: "inverted-without-frequency-roaringset", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + }, + { + Name: "inverted-text-roaringset", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationField, + IndexFilterable: &vTrue, + IndexSearchable: &vFalse, + IndexRangeFilters: &vFalse, + }, + { + Name: "inverted-roaringsetrange-on-disk", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + IndexRangeFilters: &vTrue, + }, + { + Name: "inverted-roaringsetrange-in-memory", + DataType: schema.DataTypeInt.PropString(), + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + IndexRangeFilters: &vTrue, + }, + }, + }, + }, + }, + } +} + +func newFakeMaxIDGetter(maxID uint64) func() uint64 { + return func() uint64 { return maxID } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/like_regexp.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/like_regexp.go new file mode 100644 index 0000000000000000000000000000000000000000..376b93f07842354210099da3e6e23baa401f2339 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/like_regexp.go @@ -0,0 +1,62 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "bytes" + "regexp" + + "github.com/pkg/errors" +) + +type likeRegexp struct { + optimizable bool + min []byte + regexp *regexp.Regexp +} + +func parseLikeRegexp(in []byte) (*likeRegexp, error) { + r, err := regexp.Compile(transformLikeStringToRegexp(in)) + if err != nil { + return nil, errors.Wrap(err, "compile regex from 'like' string") + } + + min, ok := optimizable(in) + return &likeRegexp{ + regexp: r, + min: min, + optimizable: ok, + }, nil +} + +func transformLikeStringToRegexp(in []byte) string { + in = []byte(regexp.QuoteMeta(string(in))) + in = bytes.ReplaceAll(in, []byte("\\?"), []byte(".")) + in = bytes.ReplaceAll(in, []byte("\\*"), []byte(".*")) + return "^" + string(in) + "$" +} + +func optimizable(in []byte) ([]byte, bool) { + maxCharsWithoutWildcard := 0 + for _, char := range in { + if isWildcardCharacter(char) { + break + } + maxCharsWithoutWildcard++ + } + + return in[:maxCharsWithoutWildcard], maxCharsWithoutWildcard > 0 +} + +func isWildcardCharacter(in byte) bool { + return in == '?' 
|| in == '*' +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/like_regexp_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/like_regexp_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6014a2a0accf4d07b5f3eda3ad6299abed5834e1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/like_regexp_test.go @@ -0,0 +1,164 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLikeRegexp(t *testing.T) { + type test struct { + input []byte + subject []byte + shouldMatch bool + expectedError error + } + + run := func(t *testing.T, tests []test) { + for _, test := range tests { + t.Run(fmt.Sprintf("for input %q and subject %q", string(test.input), + string(test.subject)), func(t *testing.T) { + res, err := parseLikeRegexp(test.input) + if test.expectedError != nil { + assert.Equal(t, test.expectedError, err) + return + } + + require.Nil(t, err) + assert.Equal(t, test.shouldMatch, res.regexp.Match(test.subject)) + }) + } + } + + t.Run("without a wildcard", func(t *testing.T) { + input := []byte("car") + tests := []test{ + {input: input, subject: []byte("car"), shouldMatch: true}, + {input: input, subject: []byte("care"), shouldMatch: false}, + {input: input, subject: []byte("supercar"), shouldMatch: false}, + } + + run(t, tests) + }) + + t.Run("with a single-character wildcard", func(t *testing.T) { + input := []byte("car?") + tests := []test{ + {input: input, subject: []byte("car"), shouldMatch: false}, + {input: input, subject: []byte("cap"), shouldMatch: false}, + {input: input, subject: 
[]byte("care"), shouldMatch: true}, + {input: input, subject: []byte("supercar"), shouldMatch: false}, + {input: input, subject: []byte("carer"), shouldMatch: false}, + } + + run(t, tests) + }) + + t.Run("with a multi-character wildcard", func(t *testing.T) { + input := []byte("car*") + tests := []test{ + {input: input, subject: []byte("car"), shouldMatch: true}, + {input: input, subject: []byte("cap"), shouldMatch: false}, + {input: input, subject: []byte("care"), shouldMatch: true}, + {input: input, subject: []byte("supercar"), shouldMatch: false}, + {input: input, subject: []byte("carer"), shouldMatch: true}, + } + + run(t, tests) + }) + + t.Run("with several wildcards", func(t *testing.T) { + input := []byte("*c?r*") + tests := []test{ + {input: input, subject: []byte("car"), shouldMatch: true}, + {input: input, subject: []byte("cap"), shouldMatch: false}, + {input: input, subject: []byte("care"), shouldMatch: true}, + {input: input, subject: []byte("supercar"), shouldMatch: true}, + {input: input, subject: []byte("carer"), shouldMatch: true}, + } + + run(t, tests) + }) + + t.Run("with special characters", func(t *testing.T) { + input := []byte("car)") + tests := []test{ + {input: input, subject: []byte("car)"), shouldMatch: true}, + {input: input, subject: []byte("car))"), shouldMatch: false}, + {input: input, subject: []byte("care}}"), shouldMatch: false}, + {input: input, subject: []byte("/s/up{e)rca\\r"), shouldMatch: false}, + } + + run(t, tests) + }) + + t.Run("with complex special characters", func(t *testing.T) { + input := []byte("this-/is(my complex).text!") + tests := []test{ + {input: input, subject: []byte("this-/is(my complex).text!"), shouldMatch: true}, + {input: input, subject: []byte("this-/is(my complex).text!))"), shouldMatch: false}, + {input: input, subject: []byte("///this-/is(my complex).text!}}"), shouldMatch: false}, + } + + run(t, tests) + }) + + t.Run("with special characters and wildcard", func(t *testing.T) { + subject := []byte("I 
love this fast car) that is yellow") + tests := []test{ + {input: []byte("*car)*"), subject: subject, shouldMatch: true}, + {input: []byte("*car))*"), subject: subject, shouldMatch: false}, + {input: []byte("*care}}*"), subject: subject, shouldMatch: false}, + {input: []byte("*/s/up{e)rca\\r*"), subject: subject, shouldMatch: false}, + } + + run(t, tests) + }) +} + +func TestLikeRegexp_ForOptimizability(t *testing.T) { + type test struct { + input []byte + shouldBeOptimizable bool + expectedMin []byte + } + + run := func(t *testing.T, tests []test) { + for _, test := range tests { + t.Run(fmt.Sprintf("for input %q", string(test.input)), func(t *testing.T) { + res, err := parseLikeRegexp(test.input) + require.Nil(t, err) + assert.Equal(t, test.shouldBeOptimizable, res.optimizable) + assert.Equal(t, test.expectedMin, res.min) + }) + } + } + + tests := []test{ + {input: []byte("car"), shouldBeOptimizable: true, expectedMin: []byte("car")}, + {input: []byte("car*"), shouldBeOptimizable: true, expectedMin: []byte("car")}, + {input: []byte("car?"), shouldBeOptimizable: true, expectedMin: []byte("car")}, + {input: []byte("c?r"), shouldBeOptimizable: true, expectedMin: []byte("c")}, + {input: []byte("car*taker"), shouldBeOptimizable: true, expectedMin: []byte("car")}, + {input: []byte("car?tak*?*er"), shouldBeOptimizable: true, expectedMin: []byte("car")}, + {input: []byte("?car"), shouldBeOptimizable: false, expectedMin: []byte{}}, + {input: []byte("*car"), shouldBeOptimizable: false, expectedMin: []byte{}}, + {input: []byte("*ca}r"), shouldBeOptimizable: false, expectedMin: []byte{}}, + {input: []byte("*car)"), shouldBeOptimizable: false, expectedMin: []byte{}}, + } + + run(t, tests) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/merge_benchmarks_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/merge_benchmarks_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..9f79f8be2fe47981a6f05526debf4edbe12df1aa --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/merge_benchmarks_test.go @@ -0,0 +1,223 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "math" + "math/rand" + "sort" + "testing" +) + +// func BenchmarkAnd10k1m_Old(b *testing.B) { +// b.StopTimer() + +// list1 := propValuePair{ +// docIDs: docPointers{ +// docIDs: randomIDs(1e4), +// checksum: []byte{0x01}, +// }, +// operator: filters.OperatorEqual, +// } + +// list2 := propValuePair{ +// docIDs: docPointers{ +// docIDs: randomIDs(1e6), +// checksum: []byte{0x02}, +// }, +// operator: filters.OperatorEqual, +// } + +// b.StartTimer() +// for i := 0; i < b.N; i++ { +// mergeAnd([]*propValuePair{&list1, &list2}, false) +// } +// } + +// func BenchmarkAnd10k1m_Optimized(b *testing.B) { +// b.StopTimer() + +// list1 := propValuePair{ +// docIDs: docPointers{ +// docIDs: randomIDs(1e4), +// checksum: []byte{0x01}, +// }, +// operator: filters.OperatorEqual, +// } + +// list2 := propValuePair{ +// docIDs: docPointers{ +// docIDs: randomIDs(1e6), +// checksum: []byte{0x02}, +// }, +// operator: filters.OperatorEqual, +// } + +// b.StartTimer() +// for i := 0; i < b.N; i++ { +// mergeAndOptimized([]*propValuePair{&list1, &list2}, false) +// } +// } + +// func BenchmarkMultipleListsOf20k_Old(b *testing.B) { +// b.StopTimer() + +// lists := make([]*propValuePair, 10) +// for i := range lists { +// lists[i] = &propValuePair{ +// docIDs: docPointers{ +// docIDs: randomIDs(2e4), +// checksum: []byte{uint8(i)}, +// }, +// operator: filters.OperatorEqual, +// } +// } + +// b.StartTimer() +// for i := 0; i < b.N; i++ { +// mergeAnd(lists, 
false) +// } +// } + +// func BenchmarkMultipleListsOf20k_Optimized(b *testing.B) { +// b.StopTimer() + +// lists := make([]*propValuePair, 10) +// for i := range lists { +// lists[i] = &propValuePair{ +// docIDs: docPointers{ +// docIDs: randomIDs(2e4), +// checksum: []byte{uint8(i)}, +// }, +// operator: filters.OperatorEqual, +// } +// } + +// b.StartTimer() +// for i := 0; i < b.N; i++ { +// mergeAndOptimized(lists, false) +// } +// } + +func BenchmarkSort10k(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + list := randomIDs(1e4) + b.StartTimer() + + sort.Slice(list, func(a, b int) bool { + return list[a] < list[b] + }) + } +} + +func BenchmarkUnsortedLinearSearch(b *testing.B) { + searchTargets := randomIDs(1e5) + + for i := 0; i < b.N; i++ { + b.StopTimer() + list := randomIDs(1e5) + b.StartTimer() + + for i := range searchTargets { + linearSearchUnsorted(list, searchTargets[i]) + } + } +} + +func BenchmarkSortedBinarySearch(b *testing.B) { + searchTargets := randomIDs(1e6) + + for i := 0; i < b.N; i++ { + b.StopTimer() + list := randomIDs(1e4) + b.StartTimer() + + sort.Slice(list, func(a, b int) bool { + return list[a] < list[b] + }) + + for i := range searchTargets { + binarySearch(list, searchTargets[i]) + } + } +} + +func BenchmarkHashmap(b *testing.B) { + searchTargets := randomIDs(1e6) + + for i := 0; i < b.N; i++ { + b.StopTimer() + list := randomIDs(1e4) + b.StartTimer() + + lookup := make(map[uint64]struct{}, len(list)) + for i := range list { + lookup[list[i]] = struct{}{} + } + + for i := range searchTargets { + _, ok := lookup[searchTargets[i]] + _ = ok + } + } +} + +func randomIDs(count int) []uint64 { + out := make([]uint64, count) + for i := range out { + out[i] = rand.Uint64() + } + + return out +} + +func linearSearchUnsorted(in []uint64, needle uint64) bool { + for i := range in { + if in[i] == needle { + return true + } + } + + return false +} + +// function binary_search(A, n, T) is +// L := 0 +// R := n − 1 +// while L ≤ R 
do +// m := floor((L + R) / 2) +// if A[m] < T then +// L := m + 1 +// else if A[m] > T then +// R := m − 1 +// else: +// return m +// return unsuccessful + +func binarySearch(in []uint64, needle uint64) bool { + left := 0 + right := len(in) - 1 + + for left <= right { + m := int(math.Floor(float64((left + right)) / float64(2))) + if in[m] < needle { + left = m + 1 + } else if in[m] > needle { + right = m - 1 + } else { + return true + } + } + + return false +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/new_prop_length_tracker.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/new_prop_length_tracker.go new file mode 100644 index 0000000000000000000000000000000000000000..06c04d82e117da01c3e47d77e3bbd6c7686f7102 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/new_prop_length_tracker.go @@ -0,0 +1,422 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "encoding/json" + "fmt" + "math" + "os" + "sync" + + entsentry "github.com/weaviate/weaviate/entities/sentry" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var MAX_BUCKETS = 64 + +type ShardMetaData struct { + BucketedData map[string]map[int]int + SumData map[string]int + CountData map[string]int + ObjectCount int +} + +type JsonShardMetaData struct { + path string + data *ShardMetaData // Only this part is saved in the file + sync.Mutex + UnlimitedBuckets bool + logger logrus.FieldLogger + closed bool +} + +// This class replaces the old PropertyLengthTracker. It fixes a bug and provides a +// simpler, easier to maintain implementation. The format is future-proofed, new +// data can be added to the file without breaking old versions of Weaviate. 
+// +// * We need to know the mean length of all properties for BM25 calculations +// * The prop length tracker is an approximate tracker that uses buckets and simply counts the entries in the buckets +// * There is a precise global counter for the sum of all lengths and a precise global counter for the number of entries +// * It only exists for string/text (and their array forms) because these are the only prop types that can be used with BM25 +// * It should probably always exist when indexSearchable is set on a text prop going forward +// +// Property lengths are put into one of 64 buckets. The value of a bucket is given by the formula: +// +// float32(4 * math.Pow(1.25, float64(bucket)-3.5)) +// +// Which as implemented gives bucket values of 0,1,2,3,4,5,6,8,10,13,17,21,26,33,41,52,65,81,101,127,158,198,248,310,387,484,606,757,947,1183,1479,1849,2312,2890,3612,4515,5644,7055,8819,11024,13780,17226,21532,26915,33644,42055,52569,65712,82140,102675,128344,160430,200537,250671,313339,391674,489593,611991,764989,956237,1195296,1494120,1867651,2334564 +// +// These buckets are then recorded to disk. The original implementation was a binary format where all the data was tracked using manual pointer arithmetic. The new version tracks the statistics in a go map, and marshals that into JSON before writing it to disk. There is no measurable difference in speed between these two implementations while importing data, however it appears to slow the queries by about 15% (while improving recall by ~25%). +// +// The new tracker is exactly compatible with the old format to enable migration, which is why there is a -1 bucket. Altering the number of buckets or their values will break compatibility. +// +// Set UnlimitedBuckets to true for precise length tracking +// +// Note that some of the code in this file is forced by the need to be backwards-compatible with the old format. 
Once we are confident that all users have migrated to the new format, we can remove the old format code and simplify this file. + +// NewJsonShardMetaData creates a new tracker and loads the data from the given path. If the file is in the old format, it will be converted to the new format. +func NewJsonShardMetaData(path string, logger logrus.FieldLogger) (t *JsonShardMetaData, err error) { + // Recover and return empty tracker on panic + defer func() { + if r := recover(); r != nil { + entsentry.Recover(r) + t.logger.Warnf("Recovered from panic in NewJsonShardMetaData, original error: %v", r) + t = &JsonShardMetaData{ + data: &ShardMetaData{make(map[string]map[int]int), make(map[string]int), make(map[string]int), 0}, + path: path, + UnlimitedBuckets: false, + } + err = errors.Errorf("Recovered from panic in NewJsonShardMetaData, original error: %v", r) + } + }() + + t = &JsonShardMetaData{ + data: &ShardMetaData{make(map[string]map[int]int), make(map[string]int), make(map[string]int), 0}, + path: path, + UnlimitedBuckets: false, + logger: logger, + } + + // read the file into memory + bytes, err := os.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { // File doesn't exist, probably a new class(or a recount), return empty tracker + logger.Warnf("prop len tracker file %s does not exist, creating new tracker", path) + t.Flush() + return t, nil + } + return nil, errors.Wrap(err, "read property length tracker file:"+path) + } + + if len(bytes) == 0 { + return nil, errors.Errorf("failed sanity check, empty prop len tracker file %s has length 0. Delete file and set environment variable RECOUNT_PROPERTIES_AT_STARTUP to true", path) + } + + // We don't have data file versioning, so we try to parse it as json. If the parse fails, it is probably the old format file, so we call the old format loader and copy everything across. 
+ if err = json.Unmarshal(bytes, &t.data); err != nil { + // It's probably the old format file, load the old format and convert it to the new format + plt, err := NewPropertyLengthTracker(path) + if err != nil { + return nil, errors.Wrap(err, "convert old property length tracker") + } + + propertyNames := plt.PropertyNames() + data := &ShardMetaData{make(map[string]map[int]int), make(map[string]int), make(map[string]int), 0} + // Loop over every page and bucket in the old tracker and add it to the new tracker + for _, name := range propertyNames { + data.BucketedData[name] = make(map[int]int, MAX_BUCKETS) + data.CountData[name] = 0 + data.SumData[name] = 0 + for i := 0; i <= MAX_BUCKETS; i++ { + fromBucket := i + if i == MAX_BUCKETS { + fromBucket = -1 + } + count, err := plt.BucketCount(name, uint16(fromBucket)) + if err != nil { + return nil, errors.Wrap(err, "convert old property length tracker") + } + data.BucketedData[name][fromBucket] = int(count) + value := float32(0) + if fromBucket == -1 { + value = 0 + } else { + value = plt.valueFromBucket(uint16(fromBucket)) + } + + data.SumData[name] = data.SumData[name] + int(value)*int(count) + data.CountData[name] = data.CountData[name] + int(count) + } + } + t.data = data + plt.Close() + plt.Drop() + t.Flush() + } + t.path = path + + // Make really sure we aren't going to crash on a nil pointer + if t.data == nil { + return nil, errors.Errorf("failed sanity check, prop len tracker file %s has nil data. 
Delete file and set environment variable RECOUNT_PROPERTIES_AT_STARTUP to true", path) + } + t.Flush() + return t, nil +} + +func (t *JsonShardMetaData) Clear() { + if t == nil { + return + } + t.Lock() + defer t.Unlock() + if t.closed { + return + } + + t.data = &ShardMetaData{make(map[string]map[int]int), make(map[string]int), make(map[string]int), 0} + t.lockFreeFlush() +} + +// Path to the file on disk +func (t *JsonShardMetaData) FileName() string { + if t == nil { + return "" + } + + return t.path +} + +func (t *JsonShardMetaData) TrackObjects(delta int) error { + if t == nil { + return nil + } + + t.Lock() + defer t.Unlock() + if t.closed { + return fmt.Errorf("tracker is closed") + } + + t.data.ObjectCount = t.data.ObjectCount + delta + return nil +} + +// Adds a new value to the tracker +func (t *JsonShardMetaData) TrackProperty(propName string, value float32) error { + if t == nil { + return nil + } + + t.Lock() + defer t.Unlock() + if t.closed { + return fmt.Errorf("tracker is closed") + } + + // Remove this check once we are confident that all users have migrated to the new format + if t.data == nil { + t.logger.Print("WARNING: t.data is nil in TrackProperty, initializing to empty tracker") + t.data = &ShardMetaData{make(map[string]map[int]int), make(map[string]int), make(map[string]int), 0} + } + t.data.SumData[propName] = t.data.SumData[propName] + int(value) + t.data.CountData[propName] = t.data.CountData[propName] + 1 + + bucketId := t.bucketFromValue(value) + if _, ok := t.data.BucketedData[propName]; ok { + t.data.BucketedData[propName][int(bucketId)] = t.data.BucketedData[propName][int(bucketId)] + 1 + } else { + + t.data.BucketedData[propName] = make(map[int]int, 64+1) + t.data.BucketedData[propName][int(bucketId)] = 1 + } + + return nil +} + +// Removes a value from the tracker +func (t *JsonShardMetaData) UnTrackProperty(propName string, value float32) error { + if t == nil { + return nil + } + + t.Lock() + defer t.Unlock() + if t.closed { + 
return fmt.Errorf("tracker is closed") + } + + // Remove this check once we are confident that all users have migrated to the new format + if t.data == nil { + t.logger.Print("WARNING: t.data is nil in TrackProperty, initializing to empty tracker") + t.data = &ShardMetaData{make(map[string]map[int]int), make(map[string]int), make(map[string]int), 0} + } + t.data.SumData[propName] = t.data.SumData[propName] - int(value) + t.data.CountData[propName] = t.data.CountData[propName] - 1 + + bucketId := t.bucketFromValue(value) + if _, ok := t.data.BucketedData[propName]; ok { + t.data.BucketedData[propName][int(bucketId)] = t.data.BucketedData[propName][int(bucketId)] - 1 + } else { + return errors.New("property not found") + } + + return nil +} + +// Returns the bucket that the given value belongs to +func (t *JsonShardMetaData) bucketFromValue(value float32) int { + if t == nil { + return 0 + } + if t.UnlimitedBuckets { + return int(value) + } + if value <= 5.00 { + return int(value) - 1 + } + + bucket := int(math.Log(float64(value)/4.0)/math.Log(1.25) + 4) + if bucket > MAX_BUCKETS-1 { + return MAX_BUCKETS + } + return int(bucket) +} + +// Returns the average length of the given property +func (t *JsonShardMetaData) PropertyMean(propName string) (float32, error) { + if t == nil { + return 0, nil + } + + t.Lock() + defer t.Unlock() + if t.closed { + return 0, fmt.Errorf("tracker is closed") + } + + sum, ok := t.data.SumData[propName] + if !ok { + return 0, nil + } + count, ok := t.data.CountData[propName] + if !ok { + return 0, nil + } + + return float32(sum) / float32(count), nil +} + +// returns totalPropertyLength, totalCount, average propertyLength = sum / totalCount, total propertylength, totalCount, error +func (t *JsonShardMetaData) PropertyTally(propName string) (int, int, float64, error) { + if t == nil { + return 0, 0, 0, nil + } + + t.Lock() + defer t.Unlock() + if t.closed { + return 0, 0, 0, fmt.Errorf("tracker is closed") + } + + sum, ok := 
t.data.SumData[propName] + if !ok { + return 0, 0, 0, nil // Required to match the old prop tracker (for now) + } + count, ok := t.data.CountData[propName] + if !ok { + return 0, 0, 0, nil // Required to match the old prop tracker (for now) + } + return sum, count, float64(sum) / float64(count), nil +} + +// Returns the number of documents stored in the shard +func (t *JsonShardMetaData) ObjectTally() int { + if t == nil { + return 0 + } + + t.Lock() + defer t.Unlock() + + return t.data.ObjectCount +} + +func (t *JsonShardMetaData) Flush() error { + if t == nil { + return nil + } + + t.Lock() + defer t.Unlock() + + return t.lockFreeFlush() +} + +func (t *JsonShardMetaData) lockFreeFlush() error { + if t.closed { + return fmt.Errorf("cannot flush closed tracker") + } + + bytes, err := json.Marshal(t.data) + if err != nil { + return err + } + + filename := t.path + + // Do a write+rename to avoid corrupting the file if we crash while writing + tempfile := filename + ".tmp" + + err = os.WriteFile(tempfile, bytes, 0o666) + if err != nil { + return err + } + + err = os.Rename(tempfile, filename) + if err != nil { + return err + } + + return nil +} + +func (t *JsonShardMetaData) SetWantFlush(val bool) { + t.Lock() + defer t.Unlock() + if t.closed { + return + } +} + +// Closes the tracker and removes the backup file +func (t *JsonShardMetaData) Close() error { + if t == nil { + return nil + } + if err := t.Flush(); err != nil { + return errors.Wrap(err, "flush before closing") + } + + t.Lock() + defer t.Unlock() + + clear(t.data.BucketedData) + t.closed = true + + return nil +} + +// Drop removes the tracker from disk +func (t *JsonShardMetaData) Drop() error { + if t == nil { + return nil + } + t.Close() + + t.Lock() + defer t.Unlock() + + clear(t.data.BucketedData) + + os.Remove(t.path) + os.Remove(t.path + ".bak") + + return nil +} + +func (t *JsonShardMetaData) CycleFlush() bool { + err := t.Flush() + return err == nil +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/objects.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/objects.go new file mode 100644 index 0000000000000000000000000000000000000000..f7fe17ce987b0b7d65cfedc901486d7ef6c4d80c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/objects.go @@ -0,0 +1,646 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "encoding/json" + "fmt" + "time" + "unicode/utf8" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/objects/validation" +) + +func (a *Analyzer) Object(input map[string]any, props []*models.Property, + uuid strfmt.UUID, +) ([]Property, error) { + propsMap := map[string]*models.Property{} + for _, prop := range props { + propsMap[prop.Name] = prop + } + + properties, err := a.analyzeProps(propsMap, input) + if err != nil { + return nil, fmt.Errorf("analyze props: %w", err) + } + + idProp, err := a.analyzeIDProp(uuid) + if err != nil { + return nil, fmt.Errorf("analyze uuid prop: %w", err) + } + properties = append(properties, *idProp) + + tsProps, err := a.analyzeTimestampProps(input) + if err != nil { + return nil, fmt.Errorf("analyze timestamp props: %w", err) + } + // tsProps will be nil here if weaviate is + // not setup to index by timestamps + if tsProps != nil { + properties = append(properties, tsProps...) 
+ } + + return properties, nil +} + +func (a *Analyzer) analyzeProps(propsMap map[string]*models.Property, + input map[string]any, +) ([]Property, error) { + var out []Property + for key, prop := range propsMap { + if len(prop.DataType) < 1 { + return nil, fmt.Errorf("prop %q has no datatype", prop.Name) + } + + if !HasAnyInvertedIndex(prop) { + continue + } + + if schema.IsBlobDataType(prop.DataType) { + continue + } + + if schema.IsRefDataType(prop.DataType) { + if err := a.extendPropertiesWithReference(&out, prop, input, key); err != nil { + return nil, err + } + } else if schema.IsArrayDataType(prop.DataType) { + if err := a.extendPropertiesWithArrayType(&out, prop, input, key); err != nil { + return nil, err + } + } else { + if err := a.extendPropertiesWithPrimitive(&out, prop, input, key); err != nil { + return nil, err + } + } + + } + return out, nil +} + +func (a *Analyzer) analyzeIDProp(id strfmt.UUID) (*Property, error) { + value, err := id.MarshalText() + if err != nil { + return nil, fmt.Errorf("marshal id prop: %w", err) + } + return &Property{ + Name: filters.InternalPropID, + Items: []Countable{ + { + Data: value, + }, + }, + HasFilterableIndex: HasFilterableIndexIdProp, + HasSearchableIndex: HasSearchableIndexIdProp, + }, nil +} + +func (a *Analyzer) analyzeTimestampProps(input map[string]any) ([]Property, error) { + createTime, createTimeOK := input[filters.InternalPropCreationTimeUnix] + updateTime, updateTimeOK := input[filters.InternalPropLastUpdateTimeUnix] + + var props []Property + if createTimeOK { + b, err := json.Marshal(createTime) + if err != nil { + return nil, fmt.Errorf("analyze create timestamp prop: %w", err) + } + props = append(props, Property{ + Name: filters.InternalPropCreationTimeUnix, + Items: []Countable{{Data: b}}, + HasFilterableIndex: HasFilterableIndexTimestampProp, + HasSearchableIndex: HasSearchableIndexTimestampProp, + }) + } + + if updateTimeOK { + b, err := json.Marshal(updateTime) + if err != nil { + return nil, 
fmt.Errorf("analyze update timestamp prop: %w", err) + } + props = append(props, Property{ + Name: filters.InternalPropLastUpdateTimeUnix, + Items: []Countable{{Data: b}}, + HasFilterableIndex: HasFilterableIndexTimestampProp, + HasSearchableIndex: HasSearchableIndexTimestampProp, + }) + } + + return props, nil +} + +func (a *Analyzer) extendPropertiesWithArrayType(properties *[]Property, + prop *models.Property, input map[string]any, propName string, +) error { + value, ok := input[propName] + if !ok { + // skip any primitive prop that's not set + return nil + } + + var err error + value, err = typedSliceToUntyped(value) + if err != nil { + return fmt.Errorf("extend properties with array type: %w", err) + } + + values, ok := value.([]any) + if !ok { + // skip any primitive prop that's not set + return errors.New("analyze array prop: expected array prop") + } + + property, err := a.analyzeArrayProp(prop, values) + if err != nil { + return fmt.Errorf("analyze array prop: %w", err) + } + if property == nil { + return nil + } + + *properties = append(*properties, *property) + return nil +} + +// extendPropertiesWithPrimitive mutates the passed in properties, by extending +// it with an additional property - if applicable +func (a *Analyzer) extendPropertiesWithPrimitive(properties *[]Property, + prop *models.Property, input map[string]any, propName string, +) error { + var property *Property + var err error + + value, ok := input[propName] + if !ok { + // skip any primitive prop that's not set + return nil + } + property, err = a.analyzePrimitiveProp(prop, value) + if err != nil { + return fmt.Errorf("analyze primitive prop: %w", err) + } + if property == nil { + return nil + } + + *properties = append(*properties, *property) + return nil +} + +func (a *Analyzer) analyzeArrayProp(prop *models.Property, values []any) (*Property, error) { + var items []Countable + hasFilterableIndex := HasFilterableIndex(prop) + hasSearchableIndex := HasSearchableIndex(prop) + 
hasRangeableIndex := HasRangeableIndex(prop) + + switch dt := schema.DataType(prop.DataType[0]); dt { + case schema.DataTypeTextArray: + hasFilterableIndex = hasFilterableIndex && !a.isFallbackToSearchable() + in, err := stringsFromValues(prop, values) + if err != nil { + return nil, err + } + items = a.TextArray(prop.Tokenization, in) + case schema.DataTypeIntArray: + in := make([]int64, len(values)) + for i, value := range values { + if asJsonNumber, ok := value.(json.Number); ok { + var err error + value, err = asJsonNumber.Float64() + if err != nil { + return nil, err + } + } + + if asFloat, ok := value.(float64); ok { + // unmarshaling from json into a dynamic schema will assume every number + // is a float64 + value = int64(asFloat) + } + + asInt, ok := value.(int64) + if !ok { + return nil, fmt.Errorf("expected property %s to be of type int64, but got %T", prop.Name, value) + } + in[i] = asInt + } + + var err error + items, err = a.IntArray(in) + if err != nil { + return nil, fmt.Errorf("analyze property %s: %w", prop.Name, err) + } + case schema.DataTypeNumberArray: + in := make([]float64, len(values)) + for i, value := range values { + if asJsonNumber, ok := value.(json.Number); ok { + var err error + value, err = asJsonNumber.Float64() + if err != nil { + return nil, err + } + } + + asFloat, ok := value.(float64) + if !ok { + return nil, fmt.Errorf("expected property %s to be of type float64, but got %T", prop.Name, value) + } + in[i] = asFloat + } + + var err error + items, err = a.FloatArray(in) // convert to int before analyzing + if err != nil { + return nil, fmt.Errorf("analyze property %s: %w", prop.Name, err) + } + case schema.DataTypeBooleanArray: + in := make([]bool, len(values)) + for i, value := range values { + asBool, ok := value.(bool) + if !ok { + return nil, fmt.Errorf("expected property %s to be of type bool, but got %T", prop.Name, value) + } + in[i] = asBool + } + + var err error + items, err = a.BoolArray(in) // convert to int before 
analyzing + if err != nil { + return nil, fmt.Errorf("analyze property %s: %w", prop.Name, err) + } + case schema.DataTypeDateArray: + in := make([]int64, len(values)) + for i, value := range values { + // dates can be either a date-string or directly a time object. Try to parse both + if asTime, okTime := value.(time.Time); okTime { + in[i] = asTime.UnixNano() + } else if asString, okString := value.(string); okString { + parsedTime, err := time.Parse(time.RFC3339Nano, asString) + if err != nil { + return nil, fmt.Errorf("parse time: %w", err) + } + in[i] = parsedTime.UnixNano() + } else { + return nil, fmt.Errorf("expected property %s to be a time-string or time object, but got %T", prop.Name, value) + } + } + + var err error + items, err = a.IntArray(in) + if err != nil { + return nil, fmt.Errorf("analyze property %s: %w", prop.Name, err) + } + case schema.DataTypeUUIDArray: + parsed, err := validation.ParseUUIDArray(values) + if err != nil { + return nil, fmt.Errorf("parse uuid array: %w", err) + } + + items, err = a.UUIDArray(parsed) + if err != nil { + return nil, fmt.Errorf("analyze property %s: %w", prop.Name, err) + } + + default: + // ignore unsupported prop type + return nil, nil + } + + return &Property{ + Name: prop.Name, + Items: items, + Length: len(values), + HasFilterableIndex: hasFilterableIndex, + HasSearchableIndex: hasSearchableIndex, + HasRangeableIndex: hasRangeableIndex, + }, nil +} + +func stringsFromValues(prop *models.Property, values []any) ([]string, error) { + in := make([]string, len(values)) + for i, value := range values { + asString, ok := value.(string) + if !ok { + return nil, fmt.Errorf("expected property %s to be of type string, but got %T", prop.Name, value) + } + in[i] = asString + } + return in, nil +} + +func (a *Analyzer) analyzePrimitiveProp(prop *models.Property, value any) (*Property, error) { + var items []Countable + propertyLength := -1 // will be overwritten for string/text, signals not to add the other types. 
+ hasFilterableIndex := HasFilterableIndex(prop) + hasSearchableIndex := HasSearchableIndex(prop) + hasRangeableIndex := HasRangeableIndex(prop) + + switch dt := schema.DataType(prop.DataType[0]); dt { + case schema.DataTypeText: + hasFilterableIndex = hasFilterableIndex && !a.isFallbackToSearchable() + asString, ok := value.(string) + if !ok { + return nil, fmt.Errorf("expected property %s to be of type string, but got %T", prop.Name, value) + } + items = a.Text(prop.Tokenization, asString) + propertyLength = utf8.RuneCountInString(asString) + case schema.DataTypeInt: + if asFloat, ok := value.(float64); ok { + // unmarshaling from json into a dynamic schema will assume every number + // is a float64 + value = int64(asFloat) + } + + if asInt, ok := value.(int); ok { + // when merging an existing object we may retrieve an untyped int + value = int64(asInt) + } + + asInt, ok := value.(int64) + if !ok { + return nil, fmt.Errorf("expected property %s to be of type int64, but got %T", prop.Name, value) + } + + var err error + items, err = a.Int(asInt) + if err != nil { + return nil, fmt.Errorf("analyze property %s: %w", prop.Name, err) + } + case schema.DataTypeNumber: + asFloat, ok := value.(float64) + if !ok { + return nil, fmt.Errorf("expected property %s to be of type float64, but got %T", prop.Name, value) + } + + var err error + items, err = a.Float(asFloat) // convert to int before analyzing + if err != nil { + return nil, fmt.Errorf("analyze property %s: %w", prop.Name, err) + } + case schema.DataTypeBoolean: + asBool, ok := value.(bool) + if !ok { + return nil, fmt.Errorf("expected property %s to be of type bool, but got %T", prop.Name, value) + } + + var err error + items, err = a.Bool(asBool) // convert to int before analyzing + if err != nil { + return nil, fmt.Errorf("analyze property %s: %w", prop.Name, err) + } + case schema.DataTypeDate: + var err error + if asString, ok := value.(string); ok { + // for example when patching the date may have been 
loaded as a string + value, err = time.Parse(time.RFC3339Nano, asString) + if err != nil { + return nil, fmt.Errorf("parse stringified timestamp: %w", err) + } + } + asTime, ok := value.(time.Time) + if !ok { + return nil, fmt.Errorf("expected property %s to be time.Time, but got %T", prop.Name, value) + } + + items, err = a.Int(asTime.UnixNano()) + if err != nil { + return nil, fmt.Errorf("analyze property %s: %w", prop.Name, err) + } + case schema.DataTypeUUID: + var err error + + if asString, ok := value.(string); ok { + // for example when patching the uuid may have been loaded as a string + value, err = uuid.Parse(asString) + if err != nil { + return nil, fmt.Errorf("parse stringified uuid: %w", err) + } + } + + asUUID, ok := value.(uuid.UUID) + if !ok { + return nil, fmt.Errorf("expected property %s to be uuid.UUID, but got %T", prop.Name, value) + } + + items, err = a.UUID(asUUID) + if err != nil { + return nil, fmt.Errorf("analyze property %s: %w", prop.Name, err) + } + default: + // ignore unsupported prop type + return nil, nil + } + + return &Property{ + Name: prop.Name, + Items: items, + Length: propertyLength, + HasFilterableIndex: hasFilterableIndex, + HasSearchableIndex: hasSearchableIndex, + HasRangeableIndex: hasRangeableIndex, + }, nil +} + +// extendPropertiesWithReference extends the specified properties arrays with +// either 1 or 2 entries: If the ref is not set, only the ref-count property +// will be added. 
If the ref is set the ref-prop itself will also be added and +// contain all references as values +func (a *Analyzer) extendPropertiesWithReference(properties *[]Property, + prop *models.Property, input map[string]any, propName string, +) error { + value, ok := input[propName] + if !ok { + // explicitly set zero-value, so we can index for "ref not set" + value = make(models.MultipleRef, 0) + } + + var asRefs models.MultipleRef + asRefs, ok = value.(models.MultipleRef) + if !ok { + // due to the fix introduced in https://github.com/weaviate/weaviate/pull/2320, + // MultipleRef's can appear as empty []any when no actual refs are provided for + // an object's reference property. + // + // if we encounter []any, assume it indicates an empty ref prop, and skip it. + _, ok := value.([]any) + if !ok { + return fmt.Errorf("expected property %q to be of type models.MutlipleRef,"+ + " but got %T", prop.Name, value) + } + return nil + } + + property, err := a.analyzeRefPropCount(prop, asRefs) + if err != nil { + return fmt.Errorf("ref count: %w", err) + } + + *properties = append(*properties, *property) + + if len(asRefs) == 0 { + return nil + } + + property, err = a.analyzeRefProp(prop, asRefs) + if err != nil { + return fmt.Errorf("refs: %w", err) + } + + *properties = append(*properties, *property) + return nil +} + +func (a *Analyzer) analyzeRefPropCount(prop *models.Property, + value models.MultipleRef, +) (*Property, error) { + items, err := a.RefCount(value) + if err != nil { + return nil, fmt.Errorf("analyze ref-property %q: %w", prop.Name, err) + } + + return &Property{ + Name: helpers.MetaCountProp(prop.Name), + Items: items, + Length: len(value), + HasFilterableIndex: HasFilterableIndex(prop), + HasSearchableIndex: HasSearchableIndex(prop), + HasRangeableIndex: HasRangeableIndex(prop), + }, nil +} + +func (a *Analyzer) analyzeRefProp(prop *models.Property, + value models.MultipleRef, +) (*Property, error) { + items, err := a.Ref(value) + if err != nil { + return 
nil, fmt.Errorf("analyze ref-property %q: %w", prop.Name, err) + } + + return &Property{ + Name: prop.Name, + Items: items, + Length: len(value), + HasFilterableIndex: HasFilterableIndex(prop), + HasSearchableIndex: HasSearchableIndex(prop), + HasRangeableIndex: HasRangeableIndex(prop), + }, nil +} + +func typedSliceToUntyped(in any) ([]any, error) { + switch typed := in.(type) { + case []any: + // nothing to do + return typed, nil + case []string: + return convertToUntyped[string](typed), nil + case []int: + return convertToUntyped[int](typed), nil + case []time.Time: + return convertToUntyped[time.Time](typed), nil + case []bool: + return convertToUntyped[bool](typed), nil + case []float64: + return convertToUntyped[float64](typed), nil + case []uuid.UUID: + return convertToUntyped[uuid.UUID](typed), nil + default: + return nil, errors.Errorf("unsupported type %T", in) + } +} + +func convertToUntyped[T comparable](in []T) []any { + out := make([]any, len(in)) + for i := range out { + out[i] = in[i] + } + return out +} + +// Indicates whether property should be indexed +// Index holds document ids with property of/containing particular value +// and number of its occurrences in that property +// (index created using bucket of StrategyMapCollection) +func HasSearchableIndex(prop *models.Property) bool { + switch dt, _ := schema.AsPrimitive(prop.DataType); dt { + case schema.DataTypeText, schema.DataTypeTextArray: + // by default property has searchable index only for text/text[] props + if prop.IndexSearchable == nil { + return true + } + return *prop.IndexSearchable + default: + return false + } +} + +// Indicates whether property should be indexed +// Index holds document ids with property of/containing particular value +// (index created using bucket of StrategyRoaringSet) +func HasFilterableIndex(prop *models.Property) bool { + // by default property has filterable index + if prop.IndexFilterable == nil { + return true + } + return *prop.IndexFilterable +} + 
+func HasRangeableIndex(prop *models.Property) bool { + switch dt, _ := schema.AsPrimitive(prop.DataType); dt { + case schema.DataTypeInt, schema.DataTypeNumber, schema.DataTypeDate: + if prop.IndexRangeFilters == nil { + return false + } + return *prop.IndexRangeFilters + default: + return false + } +} + +func HasAnyInvertedIndex(prop *models.Property) bool { + return HasFilterableIndex(prop) || HasSearchableIndex(prop) || HasRangeableIndex(prop) +} + +const ( + // always + HasFilterableIndexIdProp = true + HasSearchableIndexIdProp = false + HasRangeableIndexIdProp = false + + // only if index.invertedIndexConfig.IndexTimestamps set + HasFilterableIndexTimestampProp = true + HasSearchableIndexTimestampProp = false + HasRangeableIndexTimestampProp = false + + // only if property.indexFilterable or property.indexSearchable set + HasFilterableIndexMetaCount = true + HasSearchableIndexMetaCount = false + HasRangeableIndexMetaCount = false + + // only if index.invertedIndexConfig.IndexNullState set + // and either property.indexFilterable or property.indexSearchable set + HasFilterableIndexPropNull = true + HasSearchableIndexPropNull = false + HasRangeableIndexPropNull = false + + // only if index.invertedIndexConfig.IndexPropertyLength set + // and either property.indexFilterable or property.indexSearchable set + HasFilterableIndexPropLength = true + HasSearchableIndexPropLength = false + HasRangeableIndexPropLength = false +) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/objects_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/objects_test.go new file mode 100644 index 0000000000000000000000000000000000000000..096767c34617504d21e6f48e3250399825fd9491 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/objects_test.go @@ -0,0 +1,987 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ 
\___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "fmt" + "reflect" + "strconv" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + ent "github.com/weaviate/weaviate/entities/inverted" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +func TestAnalyzeObject(t *testing.T) { + a := NewAnalyzer(nil) + + t.Run("with multiple properties", func(t *testing.T) { + id1 := uuid.New() + id2 := uuid.New() + sch := map[string]interface{}{ + "description": "I am great!", + "email": "john@doe.com", + "about_me": "I like reading sci-fi books", + "profession": "Mechanical Engineer", + "id1": id1, // correctly parsed + "id2": id2.String(), // untyped + "idArray1": []uuid.UUID{id1}, // correctly parsed + "idArray2": []any{id2.String()}, // untyped + } + + uuid := "2609f1bc-7693-48f3-b531-6ddc52cd2501" + props := []*models.Property{ + { + Name: "description", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "email", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "about_me", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationLowercase, + }, + { + Name: "profession", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationField, + }, + { + Name: "id1", + DataType: []string{"uuid"}, + }, + { + Name: "id2", + DataType: []string{"uuid"}, + }, + { + Name: "idArray1", + DataType: []string{"uuid[]"}, + }, + { + Name: "idArray2", + DataType: []string{"uuid[]"}, + }, + } + res, err := a.Object(sch, props, strfmt.UUID(uuid)) + require.Nil(t, err) + + 
expectedDescription := []Countable{ + { + Data: []byte("i"), + TermFrequency: float32(1), + }, + { + Data: []byte("am"), + TermFrequency: float32(1), + }, + { + Data: []byte("great"), + TermFrequency: float32(1), + }, + } + + expectedEmail := []Countable{ + { + Data: []byte("john@doe.com"), + TermFrequency: float32(1), + }, + } + + expectedAboutMe := []Countable{ + { + Data: []byte("i"), + TermFrequency: float32(1), + }, + { + Data: []byte("like"), + TermFrequency: float32(1), + }, + { + Data: []byte("reading"), + TermFrequency: float32(1), + }, + { + Data: []byte("sci-fi"), + TermFrequency: float32(1), + }, + { + Data: []byte("books"), + TermFrequency: float32(1), + }, + } + + expectedProfession := []Countable{ + { + Data: []byte("Mechanical Engineer"), + TermFrequency: float32(1), + }, + } + + expectedUUID := []Countable{ + { + Data: []byte(uuid), + TermFrequency: 0, + }, + } + + expectedID1 := []Countable{ + { + Data: []byte(id1[:]), + TermFrequency: 0, + }, + } + + expectedID2 := []Countable{ + { + Data: []byte(id2[:]), + TermFrequency: 0, + }, + } + + expectedIDArray1 := []Countable{ + { + Data: []byte(id1[:]), + TermFrequency: 0, + }, + } + + expectedIDArray2 := []Countable{ + { + Data: []byte(id2[:]), + TermFrequency: 0, + }, + } + + require.Len(t, res, 9) + var actualDescription []Countable + var actualEmail []Countable + var actualAboutMe []Countable + var actualProfession []Countable + var actualUUID []Countable + var actualID1 []Countable + var actualID2 []Countable + var actualIDArray1 []Countable + var actualIDArray2 []Countable + + for _, elem := range res { + if elem.Name == "email" { + actualEmail = elem.Items + } + + if elem.Name == "description" { + actualDescription = elem.Items + } + + if elem.Name == "about_me" { + actualAboutMe = elem.Items + } + + if elem.Name == "profession" { + actualProfession = elem.Items + } + + if elem.Name == "_id" { + actualUUID = elem.Items + } + + if elem.Name == "id1" { + actualID1 = elem.Items + } + + if elem.Name 
== "id2" { + actualID2 = elem.Items + } + + if elem.Name == "idArray1" { + actualIDArray1 = elem.Items + } + + if elem.Name == "idArray2" { + actualIDArray2 = elem.Items + } + } + + assert.ElementsMatch(t, expectedEmail, actualEmail, res) + assert.ElementsMatch(t, expectedDescription, actualDescription, res) + assert.ElementsMatch(t, expectedAboutMe, actualAboutMe, res) + assert.ElementsMatch(t, expectedProfession, actualProfession, res) + assert.ElementsMatch(t, expectedUUID, actualUUID, res) + assert.ElementsMatch(t, expectedID1, actualID1, res) + assert.ElementsMatch(t, expectedID2, actualID2, res) + assert.ElementsMatch(t, expectedIDArray1, actualIDArray1, res) + assert.ElementsMatch(t, expectedIDArray2, actualIDArray2, res) + }) + + t.Run("with array properties", func(t *testing.T) { + sch := map[string]interface{}{ + "descriptions": []interface{}{"I am great!", "I am also great!"}, + "emails": []interface{}{"john@doe.com", "john2@doe.com"}, + "about_me": []interface{}{"I like reading sci-fi books", "I like playing piano"}, + "professions": []interface{}{"Mechanical Engineer", "Marketing Analyst"}, + "integers": []interface{}{int64(1), int64(2), int64(3), int64(4)}, + "numbers": []interface{}{float64(1.1), float64(2.2), float64(3.0), float64(4)}, + } + + uuid := "2609f1bc-7693-48f3-b531-6ddc52cd2501" + props := []*models.Property{ + { + Name: "descriptions", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "emails", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "about_me", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationLowercase, + }, + { + Name: "professions", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationField, + }, + { + Name: "integers", + DataType: []string{"int[]"}, + }, + { + Name: "numbers", + DataType: 
[]string{"number[]"}, + }, + } + res, err := a.Object(sch, props, strfmt.UUID(uuid)) + require.Nil(t, err) + + expectedDescriptions := []Countable{ + { + Data: []byte("i"), + TermFrequency: float32(2), + }, + { + Data: []byte("am"), + TermFrequency: float32(2), + }, + { + Data: []byte("great"), + TermFrequency: float32(2), + }, + { + Data: []byte("also"), + TermFrequency: float32(1), + }, + } + + expectedEmails := []Countable{ + { + Data: []byte("john@doe.com"), + TermFrequency: float32(1), + }, + { + Data: []byte("john2@doe.com"), + TermFrequency: float32(1), + }, + } + + expectedAboutMe := []Countable{ + { + Data: []byte("i"), + TermFrequency: float32(2), + }, + { + Data: []byte("like"), + TermFrequency: float32(2), + }, + { + Data: []byte("reading"), + TermFrequency: float32(1), + }, + { + Data: []byte("sci-fi"), + TermFrequency: float32(1), + }, + { + Data: []byte("books"), + TermFrequency: float32(1), + }, + { + Data: []byte("playing"), + TermFrequency: float32(1), + }, + { + Data: []byte("piano"), + TermFrequency: float32(1), + }, + } + + expectedProfessions := []Countable{ + { + Data: []byte("Mechanical Engineer"), + TermFrequency: float32(1), + }, + { + Data: []byte("Marketing Analyst"), + TermFrequency: float32(1), + }, + } + + expectedIntegers := []Countable{ + { + Data: mustGetByteIntNumber(1), + }, + { + Data: mustGetByteIntNumber(2), + }, + { + Data: mustGetByteIntNumber(3), + }, + { + Data: mustGetByteIntNumber(4), + }, + } + + expectedNumbers := []Countable{ + { + Data: mustGetByteFloatNumber(1.1), + }, + { + Data: mustGetByteFloatNumber(2.2), + }, + { + Data: mustGetByteFloatNumber(3.0), + }, + { + Data: mustGetByteFloatNumber(4), + }, + } + + expectedUUID := []Countable{ + { + Data: []byte(uuid), + TermFrequency: 0, + }, + } + + assert.Len(t, res, 7) + var actualDescriptions []Countable + var actualEmails []Countable + var actualAboutMe []Countable + var actualProfessions []Countable + var actualIntegers []Countable + var actualNumbers []Countable 
+ var actualUUID []Countable + + for _, elem := range res { + if elem.Name == "emails" { + actualEmails = elem.Items + } + + if elem.Name == "descriptions" { + actualDescriptions = elem.Items + } + + if elem.Name == "about_me" { + actualAboutMe = elem.Items + } + + if elem.Name == "professions" { + actualProfessions = elem.Items + } + + if elem.Name == "integers" { + actualIntegers = elem.Items + } + + if elem.Name == "numbers" { + actualNumbers = elem.Items + } + + if elem.Name == "_id" { + actualUUID = elem.Items + } + } + + assert.ElementsMatch(t, expectedEmails, actualEmails, res) + assert.ElementsMatch(t, expectedDescriptions, actualDescriptions, res) + assert.ElementsMatch(t, expectedAboutMe, actualAboutMe, res) + assert.ElementsMatch(t, expectedProfessions, actualProfessions, res) + assert.ElementsMatch(t, expectedIntegers, actualIntegers, res) + assert.ElementsMatch(t, expectedNumbers, actualNumbers, res) + assert.ElementsMatch(t, expectedUUID, actualUUID, res) + }) + + t.Run("with refProps", func(t *testing.T) { + t.Run("with a single ref set in the object schema", func(t *testing.T) { + beacon := strfmt.URI( + "weaviate://localhost/c563d7fa-4a36-4eff-9f39-af1e1db276c4") + schema := map[string]interface{}{ + "myRef": models.MultipleRef{ + &models.SingleRef{ + Beacon: beacon, + }, + }, + } + + uuid := "2609f1bc-7693-48f3-b531-6ddc52cd2501" + props := []*models.Property{ + { + Name: "myRef", + DataType: []string{"RefClass"}, + }, + } + res, err := a.Object(schema, props, strfmt.UUID(uuid)) + require.Nil(t, err) + + expectedRefCount := []Countable{ + {Data: []uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}}, + } + + expectedRef := []Countable{ + {Data: []byte(beacon)}, + } + + expectedUUID := []Countable{ + { + Data: []byte(uuid), + TermFrequency: 0, + }, + } + + require.Len(t, res, 3) + var actualRefCount []Countable + var actualUUID []Countable + var actualRef []Countable + + for _, elem := range res { + switch elem.Name { + case 
helpers.MetaCountProp("myRef"): + actualRefCount = elem.Items + case "_id": + actualUUID = elem.Items + case "myRef": + actualRef = elem.Items + } + } + + assert.ElementsMatch(t, expectedRefCount, actualRefCount, res) + assert.ElementsMatch(t, expectedUUID, actualUUID, res) + assert.ElementsMatch(t, expectedRef, actualRef, res) + }) + + t.Run("with multiple refs set in the object schema", func(t *testing.T) { + beacon1 := strfmt.URI( + "weaviate://localhost/c563d7fa-4a36-4eff-9f39-af1e1db276c4") + beacon2 := strfmt.URI( + "weaviate://localhost/49fe5d33-0b52-4189-8e8d-4268427c4317") + + schema := map[string]interface{}{ + "myRef": models.MultipleRef{ + {Beacon: beacon1}, + {Beacon: beacon2}, + }, + } + + uuid := "2609f1bc-7693-48f3-b531-6ddc52cd2501" + props := []*models.Property{ + { + Name: "myRef", + DataType: []string{"RefClass"}, + }, + } + res, err := a.Object(schema, props, strfmt.UUID(uuid)) + require.Nil(t, err) + + expectedRefCount := []Countable{ + {Data: []uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}}, + } + + expectedRef := []Countable{ + {Data: []byte(beacon1)}, + {Data: []byte(beacon2)}, + } + + expectedUUID := []Countable{ + { + Data: []byte(uuid), + TermFrequency: 0, + }, + } + + require.Len(t, res, 3) + var actualRefCount []Countable + var actualUUID []Countable + var actualRef []Countable + + for _, elem := range res { + switch elem.Name { + case helpers.MetaCountProp("myRef"): + actualRefCount = elem.Items + case "_id": + actualUUID = elem.Items + case "myRef": + actualRef = elem.Items + } + } + + assert.ElementsMatch(t, expectedRefCount, actualRefCount, res) + assert.ElementsMatch(t, expectedUUID, actualUUID, res) + assert.ElementsMatch(t, expectedRef, actualRef, res) + }) + + t.Run("with the ref omitted in the object schema", func(t *testing.T) { + schema := map[string]interface{}{} + + uuid := "2609f1bc-7693-48f3-b531-6ddc52cd2501" + props := []*models.Property{ + { + Name: "myRef", + DataType: []string{"RefClass"}, + }, + } + res, 
err := a.Object(schema, props, strfmt.UUID(uuid)) + require.Nil(t, err) + + expectedRefCount := []Countable{ + {Data: []uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + } + + expectedUUID := []Countable{ + { + Data: []byte(uuid), + TermFrequency: 0, + }, + } + + require.Len(t, res, 2) + var actualRefCount []Countable + var actualUUID []Countable + + for _, elem := range res { + if elem.Name == helpers.MetaCountProp("myRef") { + actualRefCount = elem.Items + } + if elem.Name == "_id" { + actualUUID = elem.Items + } + } + + assert.ElementsMatch(t, expectedRefCount, actualRefCount, res) + assert.ElementsMatch(t, expectedUUID, actualUUID, res) + }) + + // due to the fix introduced in https://github.com/weaviate/weaviate/pull/2320, + // MultipleRef's can appear as empty []interface{} when no actual refs are provided for + // an object's reference property. + // + // this test asserts that reference properties do not break when they are unmarshalled + // as empty interface{} slices. + t.Run("when rep prop is stored as empty interface{} slice", func(t *testing.T) { + uuid := "cf768bb0-03d8-4464-8f54-f787cf174c01" + name := "Transformers" + sch := map[string]interface{}{ + "name": name, + "reference": []interface{}{}, + } + + props := []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "reference", + DataType: []string{"SomeClass"}, + }, + } + res, err := a.Object(sch, props, strfmt.UUID(uuid)) + require.Nil(t, err) + + expectedUUID := []Countable{ + { + Data: []byte(uuid), + TermFrequency: 0, + }, + } + + expectedName := []Countable{ + { + Data: []byte(name), + TermFrequency: 1, + }, + } + + require.Len(t, res, 2) + var actualUUID []Countable + var actualName []Countable + + for _, elem := range res { + switch elem.Name { + case "_id": + actualUUID = elem.Items + case "name": + actualName = elem.Items + } + } + + assert.ElementsMatch(t, expectedUUID, 
actualUUID, res) + assert.ElementsMatch(t, expectedName, actualName, res) + }) + }) + + t.Run("when objects are indexed by timestamps", func(t *testing.T) { + sch := map[string]interface{}{ + "description": "pretty ok if you ask me", + "_creationTimeUnix": 1650551406404, + "_lastUpdateTimeUnix": 1650551406404, + } + + uuid := strfmt.UUID("2609f1bc-7693-48f3-b531-6ddc52cd2501") + props := []*models.Property{ + { + Name: "description", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + }, + } + + res, err := a.Object(sch, props, uuid) + require.Nil(t, err) + require.Len(t, res, 4) + + expected := []Property{ + { + Name: "description", + Items: []Countable{ + {Data: []byte("pretty"), TermFrequency: 1}, + {Data: []byte("ok"), TermFrequency: 1}, + {Data: []byte("if"), TermFrequency: 1}, + {Data: []byte("you"), TermFrequency: 1}, + {Data: []byte("ask"), TermFrequency: 1}, + {Data: []byte("me"), TermFrequency: 1}, + }, + HasFilterableIndex: true, + HasSearchableIndex: true, + }, + { + Name: "_id", + Items: []Countable{{Data: []byte("2609f1bc-7693-48f3-b531-6ddc52cd2501")}}, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "_creationTimeUnix", + Items: []Countable{{Data: []byte("1650551406404")}}, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + { + Name: "_lastUpdateTimeUnix", + Items: []Countable{{Data: []byte("1650551406404")}}, + HasFilterableIndex: true, + HasSearchableIndex: false, + }, + } + + for i := range res { + assert.Equal(t, expected[i].Name, res[i].Name) + assert.Equal(t, expected[i].HasFilterableIndex, res[i].HasFilterableIndex) + assert.Equal(t, expected[i].HasSearchableIndex, res[i].HasSearchableIndex) + assert.ElementsMatch(t, expected[i].Items, res[i].Items) + } + }) +} + +func TestConvertSliceToUntyped(t *testing.T) { + tests := []struct { + name string + input interface{} + expectedErr error + }{ + { + name: "interface{} slice", + input: 
[]interface{}{map[string]interface{}{}}, + }, + { + name: "string slice", + input: []string{"some", "slice"}, + }, + { + name: "int slice", + input: []int{1, 2, 3, 4, 5}, + }, + { + name: "time slice", + input: []time.Time{time.Now(), time.Now(), time.Now()}, + }, + { + name: "bool slice", + input: []bool{false}, + }, + { + name: "float64 slice", + input: []float64{1.2, 53555, 4.123, 2, 7.8877887, 0.0001}, + }, + { + name: "empty slice", + input: []string{}, + }, + { + name: "unsupported uint8 slice", + input: []uint8{1, 2, 3, 4, 5}, + expectedErr: fmt.Errorf("unsupported type []uint8"), + }, + { + name: "unsupported struct{}", + input: struct{}{}, + expectedErr: fmt.Errorf("unsupported type struct {}"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + output, err := typedSliceToUntyped(test.input) + if test.expectedErr != nil { + assert.EqualError(t, err, test.expectedErr.Error()) + } else { + require.Nil(t, err) + assert.Len(t, output, reflect.ValueOf(test.input).Len()) + assert.IsType(t, []interface{}{}, output) + } + }) + } +} + +func TestIndexInverted(t *testing.T) { + vFalse := false + vTrue := true + + t.Run("has filterable index", func(t *testing.T) { + type testCase struct { + name string + indexFilterable *bool + dataType schema.DataType + + expextedFilterable bool + } + + testCases := []testCase{ + { + name: "int, filterable null", + indexFilterable: nil, + dataType: schema.DataTypeInt, + + expextedFilterable: true, + }, + { + name: "int, filterable false", + indexFilterable: &vFalse, + dataType: schema.DataTypeInt, + + expextedFilterable: false, + }, + { + name: "int, filterable true", + indexFilterable: &vTrue, + dataType: schema.DataTypeInt, + + expextedFilterable: true, + }, + { + name: "text, filterable null", + indexFilterable: nil, + dataType: schema.DataTypeText, + + expextedFilterable: true, + }, + { + name: "text, filterable false", + indexFilterable: &vFalse, + dataType: schema.DataTypeText, + + 
expextedFilterable: false, + }, + { + name: "text, filterable true", + indexFilterable: &vTrue, + dataType: schema.DataTypeText, + + expextedFilterable: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + hasFilterableIndex := HasFilterableIndex(&models.Property{ + Name: "prop", + DataType: tc.dataType.PropString(), + IndexFilterable: tc.indexFilterable, + }) + + assert.Equal(t, tc.expextedFilterable, hasFilterableIndex) + }) + } + }) + + t.Run("has searchable index", func(t *testing.T) { + type testCase struct { + name string + indexSearchable *bool + dataType schema.DataType + + expextedSearchable bool + } + + testCases := []testCase{ + { + name: "int, searchable null", + indexSearchable: nil, + dataType: schema.DataTypeInt, + + expextedSearchable: false, + }, + { + name: "int, searchable false", + indexSearchable: &vFalse, + dataType: schema.DataTypeInt, + + expextedSearchable: false, + }, + { + name: "int, searchable true", + indexSearchable: &vTrue, + dataType: schema.DataTypeInt, + + expextedSearchable: false, + }, + { + name: "text, searchable null", + indexSearchable: nil, + dataType: schema.DataTypeText, + + expextedSearchable: true, + }, + { + name: "text, searchable false", + indexSearchable: &vFalse, + dataType: schema.DataTypeText, + + expextedSearchable: false, + }, + { + name: "text, searchable true", + indexSearchable: &vTrue, + dataType: schema.DataTypeText, + + expextedSearchable: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + hasSearchableIndex := HasSearchableIndex(&models.Property{ + Name: "prop", + DataType: tc.dataType.PropString(), + IndexSearchable: tc.indexSearchable, + }) + + assert.Equal(t, tc.expextedSearchable, hasSearchableIndex) + }) + } + }) + + t.Run("has rangeable index", func(t *testing.T) { + b2s := func(b *bool) string { + if b == nil { + return "nil" + } + return strconv.FormatBool(*b) + } + rangeableDataTypes := map[schema.DataType]struct{}{ + 
schema.DataTypeInt: {}, + schema.DataTypeNumber: {}, + schema.DataTypeDate: {}, + } + + t.Run("supported types", func(t *testing.T) { + for dataType := range rangeableDataTypes { + for indexRangeFilters, expectedRangeFilters := range map[*bool]bool{ + nil: false, // turned off by default + &vFalse: false, + &vTrue: true, + } { + t.Run(fmt.Sprintf("rangeable_%s_%v", dataType, b2s(indexRangeFilters)), func(t *testing.T) { + hasRangeableIndex := HasRangeableIndex(&models.Property{ + Name: "prop", + DataType: dataType.PropString(), + IndexRangeFilters: indexRangeFilters, + }) + + assert.Equal(t, expectedRangeFilters, hasRangeableIndex) + }) + } + } + }) + + t.Run("not supported types", func(t *testing.T) { + for _, dataType := range schema.PrimitiveDataTypes { + if _, ok := rangeableDataTypes[dataType]; ok { + continue + } + + for _, indexRangeFilters := range []*bool{nil, &vFalse, &vTrue} { + t.Run(fmt.Sprintf("rangeable_%s_%v", dataType, b2s(indexRangeFilters)), func(t *testing.T) { + hasRangeableIndex := HasRangeableIndex(&models.Property{ + Name: "prop", + DataType: dataType.PropString(), + IndexRangeFilters: indexRangeFilters, + }) + + assert.False(t, hasRangeableIndex) + }) + } + } + }) + }) +} + +func mustGetByteIntNumber(in int) []byte { + out, err := ent.LexicographicallySortableInt64(int64(in)) + if err != nil { + panic(err) + } + return out +} + +func mustGetByteFloatNumber(in float64) []byte { + out, err := ent.LexicographicallySortableFloat64(in) + if err != nil { + panic(err) + } + return out +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/prop_length_tracker.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/prop_length_tracker.go new file mode 100644 index 0000000000000000000000000000000000000000..9d83c0211e1859c56faccbbb11fb2dfd979a8e0a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/prop_length_tracker.go @@ -0,0 +1,440 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ 
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "os" + "sync" + + "github.com/pkg/errors" +) + +// Page Design +// | Bytes | Description | +// | --------- | ------------------------------------------------ | +// | start | page is now 0 +// | 0-1 | uint16 pointer to last index byte +// | 2-3 | uint16 pointer for property name length +// | 4-n | property name +// | ... | repeat length+pointer pattern +// | 3584-3840 | second property buckets (64 buckets of float32) +// | 3840-4096 | first property buckets +// | repeat | page is now 1, repeat all of above +// +// Fixed Assumptions: +// - First two bytes always used to indicate end of index, minimal value is 02, +// as the first possible value with index length=0 is after the two bytes +// themselves. +// - 64 buckets of float32 per property (=256B per prop), excluding the index +// - One index row is always 4+len(propName), consisting of a uint16 prop name +// length pointer, the name itself and an offset pointer pointing to the start +// (first byte) of the buckets +// +// The counter to the last index byte is only an uint16, so it can at maximum address 65535. This will overflow when the +// 16th page is added (eg at page=15). To avoid a crash an error is returned in this case, but we will need to change +// the byteformat to fix this. +type PropertyLengthTracker struct { + file *os.File + path string + pages []byte + sync.Mutex +} + +func NewPropertyLengthTracker(path string) (pt *PropertyLengthTracker, rerr error) { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o666) + if err != nil { + return nil, err + } + + // The lifetime of the `f` exceeds this constructor as we store the open file for later use in PropertyLengthTracker. 
+ // invariant: We close `f` **only** if any error happened after successfully opening the file. To avoid leaking open file descriptor. + // NOTE: This `defer` works even with `err` being shadowed in the whole function because defer checks for named `rerr` return value. + defer func() { + if rerr != nil { + f.Close() + } + }() + + stat, err := f.Stat() + if err != nil { + return nil, err + } + + t := &PropertyLengthTracker{ + pages: nil, + file: f, + path: path, + } + + if stat.Size() > 0 { + // the file has existed before, we need to initialize with its content, we + // can read the entire contents into memory + existingPages, err := io.ReadAll(f) + if err != nil { + return nil, errors.Wrap(err, "read initial count from file") + } + + if len(existingPages)%4096 != 0 { + return nil, errors.Errorf( + "failed sanity check, prop len tracker file %s has length %d", path, + len(existingPages)) + } + + t.pages = existingPages + } else { + // this is the first time this is being created, initialize with an empty + // page + t.pages = make([]byte, 4096) + // set initial end-of-index offset to 2 + binary.LittleEndian.PutUint16(t.pages[0:2], 2) + } + + return t, nil +} + +func (t *PropertyLengthTracker) BucketCount(propName string, bucket uint16) (uint16, error) { + t.Lock() + defer t.Unlock() + + page, offset, ok := t.propExists(propName) + if !ok { + return 0, fmt.Errorf("property %v does not exist in OldPropertyLengthTracker", propName) + } + + offset = offset + page*4096 + + o := offset + (bucket * 4) + v := binary.LittleEndian.Uint32(t.pages[o : o+4]) + count := math.Float32frombits(v) + + return uint16(count), nil +} + +func (t *PropertyLengthTracker) PropertyNames() []string { + var names []string + pages := len(t.pages) / int(4096) + for page := 0; page < pages; page++ { + pageStart := page * int(4096) + + relativeEOI := binary.LittleEndian.Uint16(t.pages[pageStart : pageStart+2]) // t.uint16At(pageStart) + EOI := pageStart + int(relativeEOI) + + offset := 
int(pageStart) + 2 + for offset < EOI { + propNameLength := int(binary.LittleEndian.Uint16(t.pages[offset : offset+2])) // int(t.uint16At(offset)) + offset += 2 + + propName := t.pages[offset : offset+propNameLength] + offset += propNameLength + + offset += 2 + + names = append(names, string(propName)) + } + } + return names +} + +func (t *PropertyLengthTracker) TrackProperty(propName string, value float32) error { + t.Lock() + defer t.Unlock() + + var page uint16 + var relBucketOffset uint16 + if p, o, ok := t.propExists(propName); ok { + page = p + relBucketOffset = o + } else { + var err error + page, relBucketOffset, err = t.addProperty(propName) + if err != nil { + return err + } + } + + bucketOffset := page*4096 + relBucketOffset + t.bucketFromValue(value)*4 + + v := binary.LittleEndian.Uint32(t.pages[bucketOffset : bucketOffset+4]) + currentValue := math.Float32frombits(v) + currentValue += 1 + v = math.Float32bits(currentValue) + binary.LittleEndian.PutUint32(t.pages[bucketOffset:bucketOffset+4], v) + return nil +} + +func (t *PropertyLengthTracker) UnTrackProperty(propName string, value float32) error { + t.Lock() + defer t.Unlock() + + var page uint16 + var relBucketOffset uint16 + if p, o, ok := t.propExists(propName); ok { + page = p + relBucketOffset = o + } else { + return fmt.Errorf("property %v does not exist in OldPropertyLengthTracker", propName) + } + + bucketOffset := page*4096 + relBucketOffset + t.bucketFromValue(value)*4 + + v := binary.LittleEndian.Uint32(t.pages[bucketOffset : bucketOffset+4]) + currentValue := math.Float32frombits(v) + currentValue -= 1 + v = math.Float32bits(currentValue) + binary.LittleEndian.PutUint32(t.pages[bucketOffset:bucketOffset+4], v) + return nil +} + +// propExists returns page number, relative offset on page, and a bool whether +// the prop existed at all. 
The first two values have no meaning if the latter +// is false +func (t *PropertyLengthTracker) propExists(needle string) (uint16, uint16, bool) { + pages := len(t.pages) / 4096 + for page := 0; page < pages; page++ { + pageStart := page * 4096 + + relativeEOI := binary.LittleEndian.Uint16(t.pages[pageStart : pageStart+2]) + EOI := pageStart + int(relativeEOI) + + offset := int(pageStart) + 2 + for offset < EOI { + propNameLength := int(binary.LittleEndian.Uint16( + t.pages[offset : offset+2])) + offset += 2 + + propName := t.pages[offset : offset+propNameLength] + offset += propNameLength + bucketPointer := binary.LittleEndian.Uint16( + t.pages[offset : offset+2]) + offset += 2 + + if string(propName) == needle { + return uint16(page), bucketPointer, true + } + + } + } + return 0, 0, false +} + +func (t *PropertyLengthTracker) addProperty(propName string) (uint16, uint16, error) { + page := uint16(0) + + for { + propNameBytes := []byte(propName) + t.createPageIfNotExists(page) + pageStart := page * 4096 + lastBucketOffset := pageStart + 4096 + + relativeOffset := binary.LittleEndian.Uint16(t.pages[pageStart : pageStart+2]) + offset := pageStart + relativeOffset + if relativeOffset != 2 { + // relative offset is other than 2, so there are also props in. This + // means we can take the value of offset-2 to read the bucket offset + lastBucketOffset = pageStart + binary.LittleEndian. 
+ Uint16(t.pages[offset-2:offset]) + } + + if !t.canPageFit(propNameBytes, offset, lastBucketOffset) { + page++ + // overflow of uint16 variable that tracks the size of the tracker + if page > 15 { + return 0, 0, fmt.Errorf("could not add property %v, to PropertyLengthTracker, because the total"+ + "length of all properties is too long", propName) + } + continue + } + + propNameLength := uint16(len(propNameBytes)) + binary.LittleEndian.PutUint16(t.pages[offset:offset+2], propNameLength) + offset += 2 + copy(t.pages[offset:offset+propNameLength], propNameBytes) + offset += propNameLength + + newBucketOffset := lastBucketOffset - 256 - pageStart + binary.LittleEndian.PutUint16(t.pages[offset:offset+2], newBucketOffset) + offset += 2 + + // update end of index offset for page, since the prop name index has + // now grown + binary.LittleEndian.PutUint16(t.pages[pageStart:pageStart+2], offset-pageStart) + return page, newBucketOffset, nil + } +} + +func (t *PropertyLengthTracker) canPageFit(propName []byte, + offset uint16, lastBucketOffset uint16, +) bool { + // lastBucketOffset represents the end of the writable area, offset + // represents the start, which means we can take the delta to see // how + // much space is left on this page + spaceLeft := lastBucketOffset - offset + + // we need to write 256 bytes for the buckets, plus two pointers of uint16 + spaceNeeded := uint16(len(propName)+4) + 256 + + return spaceLeft >= spaceNeeded +} + +func (t *PropertyLengthTracker) bucketFromValue(value float32) uint16 { + if value <= 5.00 { + return uint16(value) - 1 + } + + bucket := int(math.Log(float64(value)/4.0)/math.Log(1.25) + 4) + if bucket > 63 { + return 64 + } + return uint16(bucket) +} + +func (t *PropertyLengthTracker) valueFromBucket(bucket uint16) float32 { + if bucket <= 5 { + return float32(bucket + 1) + } + + return float32(4 * math.Pow(1.25, float64(bucket)-3.5)) +} + +func (t *PropertyLengthTracker) PropertyMean(propName string) (float32, error) { + t.Lock() 
+ defer t.Unlock() + + page, offset, ok := t.propExists(propName) + if !ok { + return 0, nil + } + + sum := float32(0) + totalCount := float32(0) + bucket := uint16(0) + + offset = offset + page*4096 + for o := offset; o < offset+256; o += 4 { + v := binary.LittleEndian.Uint32(t.pages[o : o+4]) + count := math.Float32frombits(v) + sum += float32(t.valueFromBucket(bucket)) * count + totalCount += count + + bucket++ + } + + if totalCount == 0 { + return 0, nil + } + + return sum / totalCount, nil +} + +func (t *PropertyLengthTracker) PropertyTally(propName string) (int, int, float32, error) { + t.Lock() + defer t.Unlock() + + page, offset, ok := t.propExists(propName) + if !ok { + return 0, 0, 0, nil + } + + sum := float32(0) + totalCount := float32(0) + bucket := uint16(0) + + offset = offset + page*4096 + for o := offset; o < offset+256; o += 4 { + v := binary.LittleEndian.Uint32(t.pages[o : o+4]) + count := math.Float32frombits(v) + sum += float32(t.valueFromBucket(bucket)) * count + totalCount += count + + bucket++ + } + + if totalCount == 0 { + return 0, 0, 0, nil + } + + return int(sum), int(totalCount), sum / totalCount, nil +} + +func (t *PropertyLengthTracker) createPageIfNotExists(page uint16) { + if uint16(len(t.pages))/4096-1 < page { + // we need to grow the page buffer + newPages := make([]byte, uint64(page)*4096+4096) + copy(newPages[:len(t.pages)], t.pages) + + // the new page must have the correct offset initialized + binary.LittleEndian.PutUint16(newPages[page*4096:page*4096+2], 2) + t.pages = newPages + } +} + +func (t *PropertyLengthTracker) Flush() error { + t.Lock() + defer t.Unlock() + + if err := t.file.Truncate(int64(len(t.pages))); err != nil { + return errors.Wrap(err, "truncate prop tracker file to correct length") + } + + if _, err := t.file.Seek(0, io.SeekStart); err != nil { + return errors.Wrap(err, "seek to beginning of prop tracker file") + } + + if _, err := t.file.Write(t.pages); err != nil { + return errors.Wrap(err, "flush page 
content to disk") + } + + return nil +} + +func (t *PropertyLengthTracker) Close() error { + if err := t.Flush(); err != nil { + return errors.Wrap(err, "flush before closing") + } + + t.Lock() + defer t.Unlock() + + if err := t.file.Close(); err != nil { + return errors.Wrap(err, "close prop length tracker file") + } + + t.pages = nil + + return nil +} + +func (t *PropertyLengthTracker) Drop() error { + t.Lock() + defer t.Unlock() + + if err := t.file.Close(); err != nil { + _ = err + // explicitly ignore error + } + + t.pages = nil + + if err := os.Remove(t.path); err != nil { + return errors.Wrap(err, "remove prop length tracker state from disk") + } + + return nil +} + +func (t *PropertyLengthTracker) FileName() string { + return t.file.Name() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/prop_length_tracker_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/prop_length_tracker_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9f4e6ab0ae0ade1f4e6e998f6e131e741cb3b72f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/prop_length_tracker_test.go @@ -0,0 +1,682 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "fmt" + "path" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_PropertyLengthTracker(t *testing.T) { + dirName := t.TempDir() + trackerPath := path.Join(dirName, "my_test_shard") + l := logrus.New() + + // This test suite doesn't actually test persistence, there is a separate + // one. However, we still need to supply a valid path. 
Since nothing is ever + // written, we can use the same one for each sub-test without them + // accidentally sharing state. + + t.Run("single prop", func(t *testing.T) { + type test struct { + values []float32 + name string + floatCompare bool + } + + tests := []test{ + { + values: []float32{2, 2, 3, 100, 100, 500, 7}, + name: "mixed_values", + floatCompare: true, + }, + { + values: []float32{ + 1000, 1200, 1000, 1300, 800, 2000, 2050, + 2070, 900, + }, + name: "high_values", + floatCompare: true, + }, + { + values: []float32{ + 60000, 50000, 65000, + }, + name: "very_high_values", + floatCompare: true, + }, + { + values: []float32{ + 1, 2, 4, 3, 4, 2, 1, 5, 6, 7, 8, 2, 7, 2, 3, 5, + 6, 3, 5, 9, 3, 4, 8, + }, + name: "very_low_values", + floatCompare: true, + }, + { + values: []float32{0, 0}, + name: "zeros", + floatCompare: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + tracker, err := NewJsonShardMetaData(trackerPath+test.name, l) + require.Nil(t, err) + + actualMean := float32(0) + for _, v := range test.values { + tracker.TrackProperty("my-very-first-prop", v) + actualMean += v + } + actualMean = actualMean / float32(len(test.values)) + + res, err := tracker.PropertyMean("my-very-first-prop") + require.Nil(t, err) + + if test.floatCompare { + assert.InEpsilon(t, actualMean, res, 0.1) + } else { + assert.Equal(t, actualMean, res) + } + require.Nil(t, tracker.Close()) + }) + } + }) + + t.Run("test untrack", func(t *testing.T) { + tracker, err := NewJsonShardMetaData(trackerPath, l) + require.Nil(t, err) + + tracker.TrackProperty("test-prop", 1) + tracker.TrackProperty("test-prop", 2) + tracker.TrackProperty("test-prop", 3) + tracker.Flush() + + sum, count, mean, err := tracker.PropertyTally("test-prop") + require.Nil(t, err) + assert.Equal(t, 6, sum) + assert.Equal(t, 3, count) + assert.InEpsilon(t, 2, mean, 0.1) + + tracker.UnTrackProperty("test-prop", 2) + sum, count, mean, err = tracker.PropertyTally("test-prop") + 
require.Nil(t, err) + assert.Equal(t, 4, sum) + assert.Equal(t, 2, count) + assert.InEpsilon(t, 2, mean, 0.1) + + require.Nil(t, tracker.Flush()) + + tracker.UnTrackProperty("test-prop", 1) + sum, count, mean, err = tracker.PropertyTally("test-prop") + require.Nil(t, err) + assert.Equal(t, 3, sum) + assert.Equal(t, 1, count) + assert.InEpsilon(t, 3, mean, 0.1) + + require.Nil(t, tracker.Flush()) + + require.Nil(t, tracker.Close()) + }) + + t.Run("multiple properties (can all fit on one page)", func(t *testing.T) { + type prop struct { + values []float32 + propName string + } + + props := []prop{ + { + values: []float32{2, 2, 3, 100, 100, 500, 7}, + propName: "property-numero-uno", + }, { + values: []float32{ + 1000, 1200, 1000, 1300, 800, 2000, 2050, + 2070, 900, + }, + propName: "the-second-of-the-properties", + }, { + values: []float32{ + 60000, 50000, 65000, + }, + propName: "property_nummer_DREI", + }, + } + + // This time we use a single tracker + tracker, err := NewJsonShardMetaData(trackerPath, l) + require.Nil(t, err) + + for _, prop := range props { + for _, v := range prop.values { + tracker.TrackProperty(prop.propName, v) + } + } + + for _, prop := range props { + actualMean := float32(0) + for _, v := range prop.values { + actualMean += v + } + actualMean = actualMean / float32(len(prop.values)) + + res, err := tracker.PropertyMean(prop.propName) + require.Nil(t, err) + + assert.InEpsilon(t, actualMean, res, 0.1) + } + + require.Nil(t, tracker.Close()) + }) + + t.Run("with more properties that can fit on one page", func(t *testing.T) { + // This time we use a single tracker + tracker, err := NewJsonShardMetaData(trackerPath, l) + require.Nil(t, err) + + create20PropsAndVerify(t, tracker) + + require.Nil(t, tracker.Close()) + }) +} + +func create20PropsAndVerify(t *testing.T, tracker *JsonShardMetaData) { + type prop struct { + values []float32 + propName string + } + + // the most props we could ever fit on a single page is 16 if there was no + // 
index, which is impossible. This means the practical max is 15, so at + // least 5 props should overflow to the second page. + propCount := 20 + props := make([]prop, propCount) + + for i := range props { + props[i] = prop{ + values: []float32{1, 4, 3, 17}, + propName: fmt.Sprintf("prop_%d", i), + } + } + + for _, prop := range props { + for _, v := range prop.values { + tracker.TrackProperty(prop.propName, v) + } + } + + for _, prop := range props { + actualMean := float32(0) + for _, v := range prop.values { + actualMean += v + } + actualMean = actualMean / float32(len(prop.values)) + + res, err := tracker.PropertyMean(prop.propName) + require.Nil(t, err) + + assert.InEpsilon(t, actualMean, res, 0.1) + } + + // modify a prop on page 2 and verify + tracker.TrackProperty("prop_19", 24) + actualMeanForProp20 := float32(1+4+3+17+25) / 5.0 + res, err := tracker.PropertyMean("prop_19") + require.Nil(t, err) + + assert.InEpsilon(t, actualMeanForProp20, res, 0.1) +} + +func Test_PropertyLengthTracker_Persistence(t *testing.T) { + dirName := t.TempDir() + + path := path.Join(dirName, "my_test_shard") + + var tracker *JsonShardMetaData + l := logrus.New() + + t.Run("initializing an empty tracker, no file present", func(t *testing.T) { + tr, err := NewJsonShardMetaData(path, l) + require.Nil(t, err) + tracker = tr + }) + + t.Run("importing multi-page data and verifying", func(t *testing.T) { + create20PropsAndVerify(t, tracker) + require.Nil(t, tracker.Flush()) + }) + + var dupeTracker *JsonShardMetaData + t.Run("initializing a new tracker from the same file", func(t *testing.T) { + tr, err := NewJsonShardMetaData(path, l) + require.Nil(t, err) + dupeTracker = tr + }) + + t.Run("verify data is correct after read from disk (duplicate tracker test)", func(t *testing.T) { + // root page + actualMeanForProp0 := float32(1+4+3+17) / 4.0 + res, err := dupeTracker.PropertyMean("prop_0") + require.Nil(t, err) + assert.InEpsilon(t, actualMeanForProp0, res, 0.1) + + // later page + 
actualMeanForProp20 := float32(1+4+3+17+25) / 5.0 + res, err = dupeTracker.PropertyMean("prop_19") + require.Nil(t, err) + assert.InEpsilon(t, actualMeanForProp20, res, 0.1) + }) + + t.Run("commit the state to disk", func(t *testing.T) { + require.Nil(t, tracker.Flush()) + }) + + t.Run("shut down the tracker", func(t *testing.T) { + require.Nil(t, tracker.Close()) + }) + + t.Run("catch use after free propmean", func(t *testing.T) { + _, err := tracker.PropertyMean("prop_0") + require.NotNil(t, err) + require.ErrorContains(t, err, "tracker is closed") + }) + + t.Run("catch use after free trackproperty", func(t *testing.T) { + err := tracker.TrackProperty("prop_0", 1.0) + require.NotNil(t, err) + require.ErrorContains(t, err, "tracker is closed") + }) + + var secondTracker *JsonShardMetaData + t.Run("initializing a new tracker from the same file", func(t *testing.T) { + tr, err := NewJsonShardMetaData(path, l) + require.Nil(t, err) + secondTracker = tr + }) + + t.Run("verify data is correct after read from disk (after close test)", func(t *testing.T) { + // root page + actualMeanForProp0 := float32(1+4+3+17) / 4.0 + res, err := secondTracker.PropertyMean("prop_0") + require.Nil(t, err) + assert.InEpsilon(t, actualMeanForProp0, res, 0.1) + + // later page + actualMeanForProp20 := float32(1+4+3+17+25) / 5.0 + res, err = secondTracker.PropertyMean("prop_19") + require.Nil(t, err) + assert.InEpsilon(t, actualMeanForProp20, res, 0.1) + }) +} + +// Testing the switch from the old property length tracker to the new one +func TestFormatConversion(t *testing.T) { + dirName := t.TempDir() + + path := path.Join(dirName, "my_test_shard") + + var tracker *PropertyLengthTracker + + t.Run("initializing an empty tracker, no file present", func(t *testing.T) { + tr, err := NewPropertyLengthTracker(path) + require.Nil(t, err) + tracker = tr + }) + + t.Run("importing multi-page data and verifying", func(t *testing.T) { + create20PropsAndVerify_old(t, tracker) + }) + + t.Run("commit the 
state to disk", func(t *testing.T) { + require.Nil(t, tracker.Flush()) + }) + + t.Run("shut down the tracker", func(t *testing.T) { + require.Nil(t, tracker.Close()) + }) + + var newTracker *JsonShardMetaData + l := logrus.New() + + t.Run("initializing a new tracker from the same file", func(t *testing.T) { + tr, err := NewJsonShardMetaData(path, l) + require.Nil(t, err) + newTracker = tr + }) + + t.Run("verify data is correct after read from disk(format conversion)", func(t *testing.T) { + // root page + actualMeanForProp0 := float32(1+4+3+17) / 4.0 + res, err := newTracker.PropertyMean("prop_0") + require.Nil(t, err) + assert.InEpsilon(t, actualMeanForProp0, res, 0.1) + + // later page + actualMeanForProp20 := float32(1+4+3+17+25) / 5.0 + res, err = newTracker.PropertyMean("prop_19") + require.Nil(t, err) + assert.InEpsilon(t, actualMeanForProp20, res, 0.1) + + res, err = newTracker.PropertyMean("prop_22") + require.Nil(t, err) + assert.EqualValues(t, res, 0) + sum, count, average, _ := newTracker.PropertyTally("prop_22") + assert.EqualValues(t, 0, sum) + assert.EqualValues(t, 3, count) + assert.EqualValues(t, 0, average) + }) +} + +func create20PropsAndVerify_old(t *testing.T, tracker *PropertyLengthTracker) { + type prop struct { + values []float32 + propName string + } + + // the most props we could ever fit on a single page is 16 if there was no + // index, which is impossible. This means the practical max is 15, so at + // least 5 props should overflow to the second page. 
+ propCount := 20 + props := make([]prop, propCount) + + for i := range props { + props[i] = prop{ + values: []float32{1, 4, 3, 17}, + propName: fmt.Sprintf("prop_%d", i), + } + } + + for _, prop := range props { + for _, v := range prop.values { + tracker.TrackProperty(prop.propName, v) + } + } + + tracker.TrackProperty("prop_22", 0) + tracker.TrackProperty("prop_22", 0) + tracker.TrackProperty("prop_22", 0) + + for _, prop := range props { + actualMean := float32(0) + for _, v := range prop.values { + actualMean += v + } + actualMean = actualMean / float32(len(prop.values)) + + res, err := tracker.PropertyMean(prop.propName) + require.Nil(t, err) + + assert.InEpsilon(t, actualMean, res, 0.1) + } + + // modify a prop on page 2 and verify + tracker.TrackProperty("prop_19", 24) + actualMeanForProp20 := float32(1+4+3+17+25) / 5.0 + res, err := tracker.PropertyMean("prop_19") + require.Nil(t, err) + + assert.InEpsilon(t, actualMeanForProp20, res, 0.1) + + res, err = tracker.PropertyMean("prop_22") + require.Nil(t, err) + assert.EqualValues(t, res, 0) + + sum, _, average, _ := tracker.PropertyTally("prop_22") + assert.EqualValues(t, 0, sum) + // assert.EqualValues(t, 3, count) + assert.EqualValues(t, 0, average) +} + +// Test the old property length tracker + +func TestOldPropertyLengthTracker(t *testing.T) { + dirName := t.TempDir() + trackerPath := path.Join(dirName, "my_test_shard") + + // This test suite doesn't actually test persistence, there is a separate + // one. However, we still need to supply a valid path. Since nothing is ever + // written, we can use the same one for each sub-test without them + // accidentally sharing state. 
+ + t.Run("single prop", func(t *testing.T) { + type test struct { + values []float32 + name string + floatCompare bool + } + + tests := []test{ + { + values: []float32{2, 2, 3, 100, 100, 500, 7}, + name: "mixed_values", + floatCompare: true, + }, { + values: []float32{ + 1000, 1200, 1000, 1300, 800, 2000, 2050, + 2070, 900, + }, + name: "high_values", + floatCompare: true, + }, { + values: []float32{ + 60000, 50000, 65000, + }, + name: "very_high_values", + floatCompare: true, + }, { + values: []float32{ + 1, 2, 4, 3, 4, 2, 1, 5, 6, 7, 8, 2, 7, 2, 3, 5, + 6, 3, 5, 9, 3, 4, 8, + }, + name: "very_low_values", + floatCompare: true, + }, { + values: []float32{0, 0}, + name: "zeros", + floatCompare: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + tracker, err := NewPropertyLengthTracker(trackerPath + test.name) + require.Nil(t, err) + + actualMean := float32(0) + for _, v := range test.values { + tracker.TrackProperty("my-very-first-prop", v) + actualMean += v + } + actualMean = actualMean / float32(len(test.values)) + + res, err := tracker.PropertyMean("my-very-first-prop") + require.Nil(t, err) + + if test.floatCompare { + assert.InEpsilon(t, actualMean, res, 0.1) + } else { + assert.Equal(t, actualMean, res) + } + require.Nil(t, tracker.Close()) + }) + } + }) + + t.Run("test untrack", func(t *testing.T) { + tracker, err := NewPropertyLengthTracker(trackerPath) + require.Nil(t, err) + + tracker.TrackProperty("test-prop", 1) + tracker.TrackProperty("test-prop", 2) + tracker.TrackProperty("test-prop", 3) + tracker.Flush() + + sum, count, mean, err := tracker.PropertyTally("test-prop") + require.Nil(t, err) + assert.Equal(t, 6, sum) + assert.Equal(t, 3, count) + assert.InEpsilon(t, 2, mean, 0.1) + + tracker.UnTrackProperty("test-prop", 2) + sum, count, mean, err = tracker.PropertyTally("test-prop") + require.Nil(t, err) + assert.Equal(t, 4, sum) + assert.Equal(t, 2, count) + assert.InEpsilon(t, 2, mean, 0.1) + + 
tracker.UnTrackProperty("test-prop", 1) + sum, count, mean, err = tracker.PropertyTally("test-prop") + require.Nil(t, err) + assert.Equal(t, 3, sum) + assert.Equal(t, 1, count) + assert.InEpsilon(t, 3, mean, 0.1) + + require.Nil(t, tracker.Close()) + }) + + t.Run("multiple properties (can all fit on one page)", func(t *testing.T) { + type prop struct { + values []float32 + propName string + } + + props := []prop{ + { + values: []float32{2, 2, 3, 100, 100, 500, 7}, + propName: "property-numero-uno", + }, { + values: []float32{ + 1000, 1200, 1000, 1300, 800, 2000, 2050, + 2070, 900, + }, + propName: "the-second-of-the-properties", + }, { + values: []float32{ + 60000, 50000, 65000, + }, + propName: "property_nummer_DREI", + }, + } + + // This time we use a single tracker + tracker, err := NewPropertyLengthTracker(trackerPath) + require.Nil(t, err) + + for _, prop := range props { + for _, v := range prop.values { + tracker.TrackProperty(prop.propName, v) + } + } + + for _, prop := range props { + actualMean := float32(0) + for _, v := range prop.values { + actualMean += v + } + actualMean = actualMean / float32(len(prop.values)) + + res, err := tracker.PropertyMean(prop.propName) + require.Nil(t, err) + + assert.InEpsilon(t, actualMean, res, 0.1) + } + + require.Nil(t, tracker.Close()) + }) + + t.Run("with more properties that can fit on one page", func(t *testing.T) { + // This time we use a single tracker + tracker, err := NewPropertyLengthTracker(trackerPath) + require.Nil(t, err) + + create20PropsAndVerify_old(t, tracker) + + require.Nil(t, tracker.Close()) + }) +} + +func TestOldPropertyLengthTracker_Persistence(t *testing.T) { + dirName := t.TempDir() + + path := path.Join(dirName, "my_test_shard") + + var tracker *PropertyLengthTracker + + t.Run("initializing an empty tracker, no file present", func(t *testing.T) { + tr, err := NewPropertyLengthTracker(path) + require.Nil(t, err) + tracker = tr + }) + + t.Run("importing multi-page data and verifying", func(t 
*testing.T) { + create20PropsAndVerify_old(t, tracker) + }) + + t.Run("commit the state to disk", func(t *testing.T) { + require.Nil(t, tracker.Flush()) + }) + + t.Run("shut down the tracker", func(t *testing.T) { + require.Nil(t, tracker.Close()) + }) + + var secondTracker *PropertyLengthTracker + t.Run("initializing a new tracker from the same file", func(t *testing.T) { + tr, err := NewPropertyLengthTracker(path) + require.Nil(t, err) + secondTracker = tr + }) + + t.Run("verify data is correct after read from disk (old tracker persistence)", func(t *testing.T) { + // root page + actualMeanForProp0 := float32(1+4+3+17) / 4.0 + res, err := secondTracker.PropertyMean("prop_0") + require.Nil(t, err) + assert.InEpsilon(t, actualMeanForProp0, res, 0.1) + + // later page + actualMeanForProp20 := float32(1+4+3+17+25) / 5.0 + res, err = secondTracker.PropertyMean("prop_19") + require.Nil(t, err) + assert.InEpsilon(t, actualMeanForProp20, res, 0.1) + }) + + t.Run("shut down the second tracker", func(t *testing.T) { + require.Nil(t, secondTracker.Close()) + }) +} + +func Test_PropertyLengthTracker_Overflow(t *testing.T) { + dirName := t.TempDir() + path := path.Join(dirName, "my_test_shard") + + tracker, err := NewPropertyLengthTracker(path) + require.Nil(t, err) + + for i := 0; i < 16*15; i++ { + err := tracker.TrackProperty(fmt.Sprintf("prop_%v", i), float32(i)) + require.Nil(t, err) + } + + // Check that property that would cause the internal counter to overflow is not added + err = tracker.TrackProperty("OVERFLOW", float32(123)) + require.NotNil(t, err) + + require.Nil(t, tracker.Close()) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/prop_value_pairs.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/prop_value_pairs.go new file mode 100644 index 0000000000000000000000000000000000000000..be1b623fafcd7245b3eba564c6a98bb821312dc4 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/prop_value_pairs.go @@ -0,0 +1,324 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "context" + "fmt" + "slices" + "strings" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/entities/concurrency" + "github.com/weaviate/weaviate/entities/errorcompounder" + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" +) + +type propValuePair struct { + prop string + operator filters.Operator + + // set for all values that can be served by an inverted index, i.e. 
anything + // that's not a geoRange + value []byte + + // only set if operator=OperatorWithinGeoRange, as that cannot be served by a + // byte value from an inverted index + valueGeoRange *filters.GeoRange + docIDs docBitmap + children []*propValuePair + hasFilterableIndex bool + hasSearchableIndex bool + hasRangeableIndex bool + Class *models.Class // The schema +} + +func newPropValuePair(class *models.Class) (*propValuePair, error) { + if class == nil { + return nil, errors.Errorf("class must not be nil") + } + return &propValuePair{docIDs: newDocBitmap(), Class: class}, nil +} + +func (pv *propValuePair) resolveDocIDs(ctx context.Context, s *Searcher, limit int) (*docBitmap, error) { + if err := ctx.Err(); err != nil { + return nil, err + } + + if pv.operator.OnValue() { + return pv.fetchDocIDs(ctx, s, limit) + } + + ln := len(pv.children) + switch pv.operator { + case filters.OperatorAnd, filters.OperatorOr: + switch ln { + case 0: + return nil, fmt.Errorf("no children for operator %q", pv.operator.Name()) + case 1: + return pv.children[0].resolveDocIDs(ctx, s, limit) + default: + return pv.resolveDocIDsAndOr(ctx, s) + } + + case filters.OperatorNot: + switch ln { + case 0: + return nil, fmt.Errorf("no children for operator %q", pv.operator.Name()) + case 1: + return pv.resolveDocIDsNot(ctx, s) + default: + return nil, fmt.Errorf("too many children for operator %q. Expected 1, given %q", pv.operator.Name(), ln) + } + + default: + return nil, fmt.Errorf("unsupported operator: %s", pv.operator.Name()) + } +} + +func (pv *propValuePair) resolveDocIDsAndOr(ctx context.Context, s *Searcher) (*docBitmap, error) { + // Explicitly set the limit to 0 (=unlimited) as this is a nested filter, + // otherwise we run into situations where each subfilter on their own + // runs into the limit, possibly yielding in "less than limit" results + // after merging. 
+ limit := 0 + + maxN := 32 // number of children to be fetched before merging them + dbmCh := make(chan *docBitmap, maxN) // subresults to merge + resultCh := make(chan *docBitmap, 1) // merge result + var err error + + // merge subresults in separate goroutine using dbmCh (in) and resultCh (out) + enterrors.GoWrapper(func() { + processDocIDs(maxN, pv.operator, dbmCh, resultCh) + }, s.logger) + + outerConcurrencyLimit := concurrency.BudgetFromCtx(ctx, concurrency.NUMCPU) + if outerConcurrencyLimit <= 1 { + // resolve docIDs sequentially in main goroutine + for i, child := range pv.children { + dbm, err2 := child.resolveDocIDs(ctx, s, limit) + if err2 != nil { + // break on first error + err = errors.Wrapf(err, "nested child %d", i) + break + } + dbmCh <- dbm + } + } else { + // resolve docIDs in parallel using goroutines + concurrencyReductionFactor := min(len(pv.children), outerConcurrencyLimit) + + // collect all errors from goroutines (not only 1st one) + ec := errorcompounder.NewSafe() + // use error group's context to skip remaining children after 1st error + eg, gctx := enterrors.NewErrorGroupWithContextWrapper(s.logger, ctx) + eg.SetLimit(outerConcurrencyLimit - 1) + + for i, child := range pv.children { + i, child := i, child + eg.Go(func() error { + if err := gctx.Err(); err != nil { + // some child failed, skip processing + return nil + } + + ctx := concurrency.ContextWithFractionalBudget(ctx, concurrencyReductionFactor, concurrency.NUMCPU) + dbm, err := child.resolveDocIDs(ctx, s, limit) + if err != nil { + err = errors.Wrapf(err, "nested child %d", i) + ec.Add(err) + return err + } + dbmCh <- dbm + return nil + }) + // some child failed, skip remaining children + if gctx.Err() != nil { + break + } + } + errWait := eg.Wait() + err = ec.ToError() + if err == nil { + // if parent context gets expired/cancelled groupcontext might prevent execution of any goroutine, + // making error compounder empty. 
in that case take potencial error (context related) from wait + err = errWait + } + } + + close(dbmCh) + result := <-resultCh + + if err != nil { + result.release() + return nil, fmt.Errorf("nested AND/OR query: %w", err) + } + return result, nil +} + +func (pv *propValuePair) resolveDocIDsNot(ctx context.Context, s *Searcher) (*docBitmap, error) { + // Explicitly set the limit to 0 (=unlimited) as this is a nested filter, + // otherwise we run into situations where each subfilter on their own + // runs into the limit, possibly yielding in "less than limit" results + // after merging. + limit := 0 + + dbm, err := pv.children[0].resolveDocIDs(ctx, s, limit) + if err != nil { + return nil, fmt.Errorf("nested NOT query: %w", err) + } + bm := dbm.docIDs + defer dbm.release() + + dbm.docIDs, dbm.release = s.bitmapFactory.GetBitmap() + dbm.docIDs.AndNotConc(bm, concurrency.SROAR_MERGE) + return dbm, nil +} + +// processDocIDs merges received from dbmCh channel docBitmaps and sends result to resultCh channel. +// Children are merged in batches of size [maxN] +func processDocIDs(maxN int, operator filters.Operator, dbmCh <-chan *docBitmap, resultCh chan<- *docBitmap) { + dbms := make([]*docBitmap, 0, maxN) + var result *docBitmap + defer func() { + if result == nil { + empty := newDocBitmap() + result = &empty + } + resultCh <- result + }() + + for dbm := range dbmCh { + dbms = append(dbms, dbm) + // merge if [maxN] children is received + if len(dbms) == maxN { + dbms = mergeDocIDs(operator, dbms) + } + } + // merge remaining children + if dbms = mergeDocIDs(operator, dbms); len(dbms) > 0 { + // merged result is first element of docBitmaps slice + result = dbms[0] + } +} + +// mergeDocIDs merges provided docBitmaps using given operator. +// It mutates given docBitmaps slice, by changing its length to 1 and putting +// merge result as first element. +// If slice of size 0 or 1 is provided, it is returned without any change. 
+// Merge is performed starting from bitmap with most containers for OR operator +// or starting from bitmap with least containers for AND operator. +func mergeDocIDs(operator filters.Operator, dbms []*docBitmap) []*docBitmap { + if len(dbms) <= 1 { + return dbms + } + + var mergeFn func(*sroar.Bitmap, int) *sroar.Bitmap + if operator == filters.OperatorOr { + // biggest to smallest, so smaller bitmaps are merged into biggest one, + // minimising chance of expanding destination bitmap (memory allocations) + slices.SortFunc(dbms, func(dbma, dbmb *docBitmap) int { + return -dbma.docIDs.CompareNumKeys(dbmb.docIDs) + }) + mergeFn = dbms[0].docIDs.OrConc + } else { + // smallest to biggest, so data is removed from smallest bitmap + // allowing bigger bitmaps to be garbage collected asap + slices.SortFunc(dbms, func(dbma, dbmb *docBitmap) int { + return dbma.docIDs.CompareNumKeys(dbmb.docIDs) + }) + mergeFn = dbms[0].docIDs.AndConc + } + + for i := 1; i < len(dbms); i++ { + mergeFn(dbms[i].docIDs, concurrency.SROAR_MERGE) + // release resources of docBitmaps merged into 1st one + dbms[i].release() + } + + return dbms[:1] +} + +func (pv *propValuePair) fetchDocIDs(ctx context.Context, s *Searcher, limit int) (*docBitmap, error) { + // TODO text_rbm_inverted_index find better way check whether prop len + if strings.HasSuffix(pv.prop, filters.InternalPropertyLength) && + !pv.Class.InvertedIndexConfig.IndexPropertyLength { + return nil, errors.Errorf("Property length must be indexed to be filterable! add `IndexPropertyLength: true` to the invertedIndexConfig in %v. Geo-coordinates, phone numbers and data blobs are not supported by property length.", pv.Class.Class) + } + + if pv.operator == filters.OperatorIsNull && !pv.Class.InvertedIndexConfig.IndexNullState { + return nil, errors.Errorf("Nullstate must be indexed to be filterable! 
Add `indexNullState: true` to the invertedIndexConfig") + } + + if (pv.prop == filters.InternalPropCreationTimeUnix || + pv.prop == filters.InternalPropLastUpdateTimeUnix) && + !pv.Class.InvertedIndexConfig.IndexTimestamps { + return nil, errors.Errorf("Timestamps must be indexed to be filterable! Add `IndexTimestamps: true` to the InvertedIndexConfig in %v", pv.Class.Class) + } + + bucketName := pv.getBucketName() + if bucketName == "" { + return nil, errors.Errorf("bucket for prop %s not found - is it indexed?", pv.prop) + } + + b := s.store.Bucket(bucketName) + + // TODO: I think we can delete this check entirely. The bucket will never be nill, and routines should now check if their particular feature is active in the schema. However, not all those routines have checks yet. + if b == nil && pv.operator != filters.OperatorWithinGeoRange { + // a nil bucket is ok for a WithinGeoRange filter, as this query is not + // served by the inverted index, but propagated to a secondary index in + // .docPointers() + return nil, errors.Errorf("bucket for prop %s not found - is it indexed?", pv.prop) + } + + dbm, err := s.docBitmap(ctx, b, limit, pv) + if err != nil { + return nil, err + } + return &dbm, nil +} + +func (pv *propValuePair) getBucketName() string { + if pv.hasRangeableIndex { + switch pv.operator { + // decide whether handle equal/not_equal with rangeable index + case filters.OperatorGreaterThan, + filters.OperatorGreaterThanEqual, + filters.OperatorLessThan, + filters.OperatorLessThanEqual: + return helpers.BucketRangeableFromPropNameLSM(pv.prop) + default: + } + } + if pv.hasFilterableIndex { + return helpers.BucketFromPropNameLSM(pv.prop) + } + // fallback equal/not_equal to rangeable + if pv.hasRangeableIndex { + switch pv.operator { + case filters.OperatorEqual, + filters.OperatorNotEqual: + return helpers.BucketRangeableFromPropNameLSM(pv.prop) + default: + } + } + if pv.hasSearchableIndex { + return helpers.BucketSearchableFromPropNameLSM(pv.prop) + } + 
return "" +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/row_reader.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/row_reader.go new file mode 100644 index 0000000000000000000000000000000000000000..2b9ab0c0782671e64fee16b103528666a18b9a2b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/row_reader.go @@ -0,0 +1,247 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/concurrency" + "github.com/weaviate/weaviate/entities/filters" +) + +// RowReader reads one or many row(s) depending on the specified operator +type RowReader struct { + value []byte + bucket *lsmkv.Bucket + operator filters.Operator + keyOnly bool + bitmapFactory *roaringset.BitmapFactory +} + +// If keyOnly is set, the RowReader will request key-only cursors wherever +// cursors are used, the specified value arguments in the ReadFn will always be +// nil +func NewRowReader(bucket *lsmkv.Bucket, value []byte, operator filters.Operator, + keyOnly bool, bitmapFactory *roaringset.BitmapFactory, +) *RowReader { + return &RowReader{ + bucket: bucket, + value: value, + operator: operator, + keyOnly: keyOnly, + bitmapFactory: bitmapFactory, + } +} + +// Read a row using the specified ReadFn. 
If RowReader was created with +// keysOnly==true, the values argument in the readFn will always be nil on all +// requests involving cursors +func (rr *RowReader) Read(ctx context.Context, readFn ReadFn) error { + switch rr.operator { + case filters.OperatorEqual: + return rr.equal(ctx, readFn) + case filters.OperatorNotEqual: + return rr.notEqual(ctx, readFn) + case filters.OperatorGreaterThan: + return rr.greaterThan(ctx, readFn, false) + case filters.OperatorGreaterThanEqual: + return rr.greaterThan(ctx, readFn, true) + case filters.OperatorLessThan: + return rr.lessThan(ctx, readFn, false) + case filters.OperatorLessThanEqual: + return rr.lessThan(ctx, readFn, true) + case filters.OperatorLike: + return rr.like(ctx, readFn) + case filters.OperatorIsNull: // we need to fetch a row with a given value (there is only nil and !nil) and can reuse equal to get the correct row + return rr.equal(ctx, readFn) + default: + return fmt.Errorf("operator %v not supported", rr.operator) + } +} + +// equal is a special case, as we don't need to iterate, but just read a single +// row +func (rr *RowReader) equal(ctx context.Context, readFn ReadFn) error { + v, err := rr.equalHelper(ctx) + if err != nil { + return err + } + + _, err = readFn(rr.value, rr.transformToBitmap(v), noopRelease) + return err +} + +func (rr *RowReader) notEqual(ctx context.Context, readFn ReadFn) error { + v, err := rr.equalHelper(ctx) + if err != nil { + return err + } + + // Invert the Equal results for an efficient NotEqual + inverted, release := rr.bitmapFactory.GetBitmap() + inverted.AndNotConc(rr.transformToBitmap(v), concurrency.SROAR_MERGE) + _, err = readFn(rr.value, inverted, release) + return err +} + +// greaterThan reads from the specified value to the end. 
The first row is only +// included if allowEqual==true, otherwise it starts with the next one +func (rr *RowReader) greaterThan(ctx context.Context, readFn ReadFn, + allowEqual bool, +) error { + c := rr.newCursor() + defer c.Close() + + for k, v := c.Seek(rr.value); k != nil; k, v = c.Next() { + if err := ctx.Err(); err != nil { + return err + } + + if bytes.Equal(k, rr.value) && !allowEqual { + continue + } + + continueReading, err := readFn(k, rr.transformToBitmap(v), noopRelease) + if err != nil { + return err + } + + if !continueReading { + break + } + } + + return nil +} + +// lessThan reads from the very begging to the specified value. The last +// matching row is only included if allowEqual==true, otherwise it ends one +// prior to that. +func (rr *RowReader) lessThan(ctx context.Context, readFn ReadFn, + allowEqual bool, +) error { + c := rr.newCursor() + defer c.Close() + + for k, v := c.First(); k != nil && bytes.Compare(k, rr.value) != 1; k, v = c.Next() { + if err := ctx.Err(); err != nil { + return err + } + + if bytes.Equal(k, rr.value) && !allowEqual { + continue + } + + continueReading, err := readFn(k, rr.transformToBitmap(v), noopRelease) + if err != nil { + return err + } + + if !continueReading { + break + } + } + + return nil +} + +func (rr *RowReader) like(ctx context.Context, readFn ReadFn) error { + like, err := parseLikeRegexp(rr.value) + if err != nil { + return fmt.Errorf("parse like value: %w", err) + } + + c := rr.newCursor() + defer c.Close() + + var ( + initialK []byte + initialV [][]byte + ) + + if like.optimizable { + initialK, initialV = c.Seek(like.min) + } else { + initialK, initialV = c.First() + } + + for k, v := initialK, initialV; k != nil; k, v = c.Next() { + if err := ctx.Err(); err != nil { + return err + } + + if like.optimizable { + // if the query is optimizable, i.e. 
it doesn't start with a wildcard, we + // can abort once we've moved past the point where the fixed characters + // no longer match + if len(k) < len(like.min) { + break + } + + if bytes.Compare(like.min, k[:len(like.min)]) == -1 { + break + } + } + + if !like.regexp.Match(k) { + continue + } + + continueReading, err := readFn(k, rr.transformToBitmap(v), noopRelease) + if err != nil { + return err + } + + if !continueReading { + break + } + } + + return nil +} + +// newCursor will either return a regular cursor - or a key-only cursor if +// keyOnly==true +func (rr *RowReader) newCursor() *lsmkv.CursorSet { + if rr.keyOnly { + return rr.bucket.SetCursorKeyOnly() + } + + return rr.bucket.SetCursor() +} + +func (rr *RowReader) transformToBitmap(ids [][]byte) *sroar.Bitmap { + out := sroar.NewBitmap() + for _, asBytes := range ids { + out.Set(binary.LittleEndian.Uint64(asBytes)) + } + return out +} + +// equalHelper exists, because the Equal and NotEqual operators share this functionality +func (rr *RowReader) equalHelper(ctx context.Context) ([][]byte, error) { + if err := ctx.Err(); err != nil { + return nil, err + } + + v, err := rr.bucket.SetList(rr.value) + if err != nil { + return nil, err + } + return v, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/row_reader_frequency.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/row_reader_frequency.go new file mode 100644 index 0000000000000000000000000000000000000000..ef43437922b8f8a3e9316dbf90f77213f846801e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/row_reader_frequency.go @@ -0,0 +1,264 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/concurrency" + "github.com/weaviate/weaviate/entities/filters" +) + +// RowReaderFrequency reads one or many row(s) depending on the specified operator +type RowReaderFrequency struct { + value []byte + bucket *lsmkv.Bucket + operator filters.Operator + keyOnly bool + shardVersion uint16 + bitmapFactory *roaringset.BitmapFactory +} + +func NewRowReaderFrequency(bucket *lsmkv.Bucket, value []byte, + operator filters.Operator, keyOnly bool, shardVersion uint16, + bitmapFactory *roaringset.BitmapFactory, +) *RowReaderFrequency { + return &RowReaderFrequency{ + bucket: bucket, + value: value, + operator: operator, + keyOnly: keyOnly, + shardVersion: shardVersion, + bitmapFactory: bitmapFactory, + } +} + +func (rr *RowReaderFrequency) Read(ctx context.Context, readFn ReadFn) error { + switch rr.operator { + case filters.OperatorEqual: + return rr.equal(ctx, readFn) + case filters.OperatorNotEqual: + return rr.notEqual(ctx, readFn) + case filters.OperatorGreaterThan: + return rr.greaterThan(ctx, readFn, false) + case filters.OperatorGreaterThanEqual: + return rr.greaterThan(ctx, readFn, true) + case filters.OperatorLessThan: + return rr.lessThan(ctx, readFn, false) + case filters.OperatorLessThanEqual: + return rr.lessThan(ctx, readFn, true) + case filters.OperatorLike: + return rr.like(ctx, readFn) + default: + return fmt.Errorf("operator %v supported", rr.operator) + } +} + +// equal is a special case, as we don't need to iterate, but just read a single +// row +func (rr *RowReaderFrequency) equal(ctx context.Context, readFn ReadFn) error { + v, err := rr.equalHelper(ctx) + if err != nil { + return err + } + + _, err = readFn(rr.value, rr.transformToBitmap(v), 
noopRelease) + return err +} + +func (rr *RowReaderFrequency) notEqual(ctx context.Context, readFn ReadFn) error { + v, err := rr.equalHelper(ctx) + if err != nil { + return err + } + + // Invert the Equal results for an efficient NotEqual + inverted, release := rr.bitmapFactory.GetBitmap() + inverted.AndNotConc(rr.transformToBitmap(v), concurrency.SROAR_MERGE) + _, err = readFn(rr.value, inverted, release) + return err +} + +// greaterThan reads from the specified value to the end. The first row is only +// included if allowEqual==true, otherwise it starts with the next one +func (rr *RowReaderFrequency) greaterThan(ctx context.Context, readFn ReadFn, + allowEqual bool, +) error { + c := rr.newCursor() + defer c.Close() + + for k, v := c.Seek(ctx, rr.value); k != nil; k, v = c.Next(ctx) { + if err := ctx.Err(); err != nil { + return err + } + + if bytes.Equal(k, rr.value) && !allowEqual { + continue + } + + continueReading, err := readFn(k, rr.transformToBitmap(v), noopRelease) + if err != nil { + return err + } + + if !continueReading { + break + } + } + + return nil +} + +// lessThan reads from the very begging to the specified value. The last +// matching row is only included if allowEqual==true, otherwise it ends one +// prior to that. 
+func (rr *RowReaderFrequency) lessThan(ctx context.Context, readFn ReadFn, + allowEqual bool, +) error { + c := rr.newCursor() + defer c.Close() + + for k, v := c.First(ctx); k != nil && bytes.Compare(k, rr.value) != 1; k, v = c.Next(ctx) { + if err := ctx.Err(); err != nil { + return err + } + + if bytes.Equal(k, rr.value) && !allowEqual { + continue + } + + continueReading, err := readFn(k, rr.transformToBitmap(v), noopRelease) + if err != nil { + return err + } + + if !continueReading { + break + } + } + + return nil +} + +func (rr *RowReaderFrequency) like(ctx context.Context, readFn ReadFn) error { + like, err := parseLikeRegexp(rr.value) + if err != nil { + return fmt.Errorf("parse like value: %w", err) + } + + // TODO: don't we need to check here if this is a doc id vs a object search? + // Or is this not a problem because the latter removes duplicates anyway? + c := rr.newCursor(lsmkv.MapListAcceptDuplicates()) + defer c.Close() + + var ( + initialK []byte + initialV []lsmkv.MapPair + ) + + if like.optimizable { + initialK, initialV = c.Seek(ctx, like.min) + } else { + initialK, initialV = c.First(ctx) + } + + for k, v := initialK, initialV; k != nil; k, v = c.Next(ctx) { + if err := ctx.Err(); err != nil { + return err + } + + if like.optimizable { + // if the query is optimizable, i.e. 
it doesn't start with a wildcard, we + // can abort once we've moved past the point where the fixed characters + // no longer match + if len(k) < len(like.min) { + break + } + + if bytes.Compare(like.min, k[:len(like.min)]) == -1 { + break + } + } + + if !like.regexp.Match(k) { + continue + } + + continueReading, err := readFn(k, rr.transformToBitmap(v), noopRelease) + if err != nil { + return err + } + + if !continueReading { + break + } + } + + return nil +} + +// newCursor will either return a regular cursor - or a key-only cursor if +// keyOnly==true +func (rr *RowReaderFrequency) newCursor( + opts ...lsmkv.MapListOption, +) *lsmkv.CursorMap { + if rr.shardVersion < 2 { + opts = append(opts, lsmkv.MapListLegacySortingRequired()) + } + + if rr.keyOnly { + return rr.bucket.MapCursorKeyOnly(opts...) + } + + return rr.bucket.MapCursor(opts...) +} + +func (rr *RowReaderFrequency) transformToBitmap(pairs []lsmkv.MapPair) *sroar.Bitmap { + out := sroar.NewBitmap() + for _, pair := range pairs { + // this entry has a frequency, but that's only used for bm25, not for + // pure filtering, so we can ignore it here + if rr.shardVersion < 2 { + out.Set(binary.LittleEndian.Uint64(pair.Key)) + } else { + out.Set(binary.BigEndian.Uint64(pair.Key)) + } + } + return out +} + +// equalHelper exists, because the Equal and NotEqual operators share this functionality +func (rr *RowReaderFrequency) equalHelper(ctx context.Context) (v []lsmkv.MapPair, err error) { + if err = ctx.Err(); err != nil { + return + } + + if rr.shardVersion < 2 { + v, err = rr.bucket.MapList(ctx, rr.value, lsmkv.MapListAcceptDuplicates(), + lsmkv.MapListLegacySortingRequired()) + if err != nil { + return + } + } else { + v, err = rr.bucket.MapList(ctx, rr.value, lsmkv.MapListAcceptDuplicates()) + if err != nil { + return + } + } + return +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/row_reader_roaring_set.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/row_reader_roaring_set.go new file mode 100644 index 0000000000000000000000000000000000000000..e483e5fb3eccc3b46645b6498915e9dd588f93e2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/row_reader_roaring_set.go @@ -0,0 +1,242 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "bytes" + "context" + "fmt" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/concurrency" + "github.com/weaviate/weaviate/entities/filters" +) + +// RowReaderRoaringSet reads one or many row(s) depending on the specified +// operator +type RowReaderRoaringSet struct { + value []byte + operator filters.Operator + newCursor func() lsmkv.CursorRoaringSet + getter func(key []byte) (*sroar.Bitmap, func(), error) + bitmapFactory *roaringset.BitmapFactory +} + +// If keyOnly is set, the RowReaderRoaringSet will request key-only cursors +// wherever cursors are used, the specified value arguments in the +// ReadFn will always be empty +func NewRowReaderRoaringSet(bucket *lsmkv.Bucket, value []byte, operator filters.Operator, + keyOnly bool, bitmapFactory *roaringset.BitmapFactory, +) *RowReaderRoaringSet { + getter := bucket.RoaringSetGet + newCursor := bucket.CursorRoaringSet + if keyOnly { + newCursor = bucket.CursorRoaringSetKeyOnly + } + + return &RowReaderRoaringSet{ + value: value, + operator: operator, + newCursor: newCursor, + getter: getter, + bitmapFactory: bitmapFactory, + } +} + +// ReadFn will be called 1..n times per match. 
This means it will also +// be called on a non-match, in this case v == empty bitmap. +// It is up to the caller to decide if that is an error case or not. +// +// Note that because what we are parsing is an inverted index row, it can +// sometimes become confusing what a key and value actually resembles. The +// variables k and v are the literal row key and value. So this means, the +// data-value as in "less than 17" where 17 would be the "value" is in the key +// variable "k". The value will contain bitmap with docIDs having value "k" +// +// The boolean return argument is a way to stop iteration (e.g. when a limit is +// reached) without producing an error. In normal operation always return true, +// if false is returned once, the loop is broken. +type ReadFn func(k []byte, v *sroar.Bitmap, release func()) (bool, error) + +// Read a row using the specified ReadFn. If RowReader was created with +// keysOnly==true, the values argument in the readFn will always be nil on all +// requests involving cursors +func (rr *RowReaderRoaringSet) Read(ctx context.Context, readFn ReadFn) error { + switch rr.operator { + case filters.OperatorEqual, filters.OperatorIsNull: + return rr.equal(ctx, readFn) + case filters.OperatorNotEqual: + return rr.notEqual(ctx, readFn) + case filters.OperatorGreaterThan: + return rr.greaterThan(ctx, readFn, false) + case filters.OperatorGreaterThanEqual: + return rr.greaterThan(ctx, readFn, true) + case filters.OperatorLessThan: + return rr.lessThan(ctx, readFn, false) + case filters.OperatorLessThanEqual: + return rr.lessThan(ctx, readFn, true) + case filters.OperatorLike: + return rr.like(ctx, readFn) + default: + return fmt.Errorf("operator %v not supported", rr.operator) + } +} + +// equal is a special case, as we don't need to iterate, but just read a single +// row +func (rr *RowReaderRoaringSet) equal(ctx context.Context, + readFn ReadFn, +) error { + v, eqRelease, err := rr.equalHelper(ctx) + if err != nil { + return err + } + + _, err 
= readFn(rr.value, v, eqRelease) + return err +} + +func (rr *RowReaderRoaringSet) notEqual(ctx context.Context, + readFn ReadFn, +) error { + v, eqRelease, err := rr.equalHelper(ctx) + if err != nil { + return err + } + defer eqRelease() + + inverted, release := rr.bitmapFactory.GetBitmap() + inverted.AndNotConc(v, concurrency.SROAR_MERGE) + _, err = readFn(rr.value, inverted, release) + return err +} + +// greaterThan reads from the specified value to the end. The first row is only +// included if allowEqual==true, otherwise it starts with the next one +func (rr *RowReaderRoaringSet) greaterThan(ctx context.Context, + readFn ReadFn, allowEqual bool, +) error { + c := rr.newCursor() + defer c.Close() + + for k, v := c.Seek(rr.value); k != nil; k, v = c.Next() { + if err := ctx.Err(); err != nil { + return err + } + + if bytes.Equal(k, rr.value) && !allowEqual { + continue + } + + if continueReading, err := readFn(k, v, noopRelease); err != nil { + return err + } else if !continueReading { + break + } + } + + return nil +} + +// lessThan reads from the very begging to the specified value. The last +// matching row is only included if allowEqual==true, otherwise it ends one +// prior to that. 
+func (rr *RowReaderRoaringSet) lessThan(ctx context.Context, + readFn ReadFn, allowEqual bool, +) error { + c := rr.newCursor() + defer c.Close() + + for k, v := c.First(); k != nil && bytes.Compare(k, rr.value) < 1; k, v = c.Next() { + if err := ctx.Err(); err != nil { + return err + } + + if bytes.Equal(k, rr.value) && !allowEqual { + continue + } + + if continueReading, err := readFn(k, v, noopRelease); err != nil { + return err + } else if !continueReading { + break + } + } + + return nil +} + +func (rr *RowReaderRoaringSet) like(ctx context.Context, + readFn ReadFn, +) error { + like, err := parseLikeRegexp(rr.value) + if err != nil { + return fmt.Errorf("parse like value: %w", err) + } + + c := rr.newCursor() + defer c.Close() + + var ( + initialK []byte + initialV *sroar.Bitmap + likeMinLen int + ) + + if like.optimizable { + initialK, initialV = c.Seek(like.min) + likeMinLen = len(like.min) + } else { + initialK, initialV = c.First() + } + + for k, v := initialK, initialV; k != nil; k, v = c.Next() { + if err := ctx.Err(); err != nil { + return err + } + + if like.optimizable { + // if the query is optimizable, i.e. 
it doesn't start with a wildcard, we + // can abort once we've moved past the point where the fixed characters + // no longer match + if len(k) < likeMinLen { + break + } + if bytes.Compare(like.min, k[:likeMinLen]) == -1 { + break + } + } + + if !like.regexp.Match(k) { + continue + } + + if continueReading, err := readFn(k, v, noopRelease); err != nil { + return err + } else if !continueReading { + break + } + } + + return nil +} + +// equalHelper exists, because the Equal and NotEqual operators share this functionality +func (rr *RowReaderRoaringSet) equalHelper(ctx context.Context) (*sroar.Bitmap, func(), error) { + if err := ctx.Err(); err != nil { + return nil, noopRelease, err + } + + return rr.getter(rr.value) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/row_reader_roaring_set_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/row_reader_roaring_set_test.go new file mode 100644 index 0000000000000000000000000000000000000000..de5d91e8f7c3230a62b54f102dfe4b7afa635b81 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/row_reader_roaring_set_test.go @@ -0,0 +1,301 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "bytes" + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/filters" + entlsmkv "github.com/weaviate/weaviate/entities/lsmkv" +) + +const maxDocID = 33333333 + +func TestRowReaderRoaringSet(t *testing.T) { + data := []kvData{ + {"aaa", []uint64{1, 2, 3}}, + {"bbb", []uint64{11, 22, 33}}, + {"ccc", []uint64{111, 222, 333}}, + {"ddd", []uint64{1111, 2222, 3333}}, + {"eee", []uint64{11111, 22222, 33333}}, + {"fff", []uint64{111111, 222222, 333333}}, + {"ggg", []uint64{1111111, 2222222, 3333333}}, + {"hhh", []uint64{11111111, 2222222, 33333333}}, + } + ctx := context.Background() + + testcases := []struct { + name string + value string + operator filters.Operator + expected []kvData + }{ + { + name: "equal 'ggg' value", + value: "ggg", + operator: filters.OperatorEqual, + expected: []kvData{ + {"ggg", []uint64{1111111, 2222222, 3333333}}, + }, + }, + { + name: "not equal 'ccc' value", + value: "ccc", + operator: filters.OperatorNotEqual, + expected: []kvData{ + {"ccc", func() []uint64 { + bm := sroar.Prefill(maxDocID) + for _, x := range []uint64{111, 222, 333} { + bm.Remove(x) + } + return bm.ToArray() + }()}, + }, + }, + { + name: "not equal non-matching value", + value: "fgh", + operator: filters.OperatorNotEqual, + expected: []kvData{}, + }, + { + name: "greater than 'ddd' value", + value: "ddd", + operator: filters.OperatorGreaterThan, + expected: []kvData{ + {"eee", []uint64{11111, 22222, 33333}}, + {"fff", []uint64{111111, 222222, 333333}}, + {"ggg", []uint64{1111111, 2222222, 3333333}}, + {"hhh", []uint64{11111111, 2222222, 33333333}}, + }, + }, + { + name: "greater than equal 'ddd' value", + value: "ddd", + operator: filters.OperatorGreaterThanEqual, + expected: []kvData{ + {"ddd", 
[]uint64{1111, 2222, 3333}}, + {"eee", []uint64{11111, 22222, 33333}}, + {"fff", []uint64{111111, 222222, 333333}}, + {"ggg", []uint64{1111111, 2222222, 3333333}}, + {"hhh", []uint64{11111111, 2222222, 33333333}}, + }, + }, + { + name: "greater than non-matching value", + value: "fgh", + operator: filters.OperatorGreaterThan, + expected: []kvData{ + {"ggg", []uint64{1111111, 2222222, 3333333}}, + {"hhh", []uint64{11111111, 2222222, 33333333}}, + }, + }, + { + name: "greater than equal non-matching value", + value: "fgh", + operator: filters.OperatorGreaterThanEqual, + expected: []kvData{ + {"ggg", []uint64{1111111, 2222222, 3333333}}, + {"hhh", []uint64{11111111, 2222222, 33333333}}, + }, + }, + { + name: "less than 'eee' value", + value: "eee", + operator: filters.OperatorLessThan, + expected: []kvData{ + {"aaa", []uint64{1, 2, 3}}, + {"bbb", []uint64{11, 22, 33}}, + {"ccc", []uint64{111, 222, 333}}, + {"ddd", []uint64{1111, 2222, 3333}}, + }, + }, + { + name: "less than equal 'eee' value", + value: "eee", + operator: filters.OperatorLessThanEqual, + expected: []kvData{ + {"aaa", []uint64{1, 2, 3}}, + {"bbb", []uint64{11, 22, 33}}, + {"ccc", []uint64{111, 222, 333}}, + {"ddd", []uint64{1111, 2222, 3333}}, + {"eee", []uint64{11111, 22222, 33333}}, + }, + }, + { + name: "less than non-matching value", + value: "fgh", + operator: filters.OperatorLessThan, + expected: []kvData{ + {"aaa", []uint64{1, 2, 3}}, + {"bbb", []uint64{11, 22, 33}}, + {"ccc", []uint64{111, 222, 333}}, + {"ddd", []uint64{1111, 2222, 3333}}, + {"eee", []uint64{11111, 22222, 33333}}, + {"fff", []uint64{111111, 222222, 333333}}, + }, + }, + { + name: "less than equal non-matching value", + value: "fgh", + operator: filters.OperatorLessThanEqual, + expected: []kvData{ + {"aaa", []uint64{1, 2, 3}}, + {"bbb", []uint64{11, 22, 33}}, + {"ccc", []uint64{111, 222, 333}}, + {"ddd", []uint64{1111, 2222, 3333}}, + {"eee", []uint64{11111, 22222, 33333}}, + {"fff", []uint64{111111, 222222, 333333}}, + }, + }, 
+ { + name: "like '*b' value", + value: "*b", + operator: filters.OperatorLike, + expected: []kvData{ + {"bbb", []uint64{11, 22, 33}}, + }, + }, + { + name: "like 'h*' value", + value: "h*", + operator: filters.OperatorLike, + expected: []kvData{ + {"hhh", []uint64{11111111, 2222222, 33333333}}, + }, + }, + } + + for _, tc := range testcases { + type readResult struct { + k []byte + v *sroar.Bitmap + r func() + } + + t.Run(tc.name, func(t *testing.T) { + result := []readResult{} + rowReader := createRowReaderRoaringSet([]byte(tc.value), tc.operator, data) + rowReader.Read(ctx, func(k []byte, v *sroar.Bitmap, release func()) (bool, error) { + result = append(result, readResult{k, v, release}) + return true, nil + }) + + assert.Len(t, result, len(tc.expected)) + for i, expectedKV := range tc.expected { + assert.Equal(t, []byte(expectedKV.k), result[i].k) + assert.Equal(t, len(expectedKV.v), result[i].v.GetCardinality()) + for _, expectedV := range expectedKV.v { + assert.True(t, result[i].v.Contains(expectedV)) + } + result[i].r() + } + }) + + t.Run(tc.name+" with 3 results limit", func(t *testing.T) { + limit := 3 + expected := tc.expected + if len(tc.expected) > limit { + expected = tc.expected[:limit] + } + + result := []readResult{} + rowReader := createRowReaderRoaringSet([]byte(tc.value), tc.operator, data) + rowReader.Read(ctx, func(k []byte, v *sroar.Bitmap, release func()) (bool, error) { + result = append(result, readResult{k, v, release}) + if len(result) >= limit { + return false, nil + } + return true, nil + }) + + assert.Len(t, result, len(expected)) + for i, expectedKV := range expected { + assert.Equal(t, []byte(expectedKV.k), result[i].k) + assert.Equal(t, len(expectedKV.v), result[i].v.GetCardinality()) + for _, expectedV := range expectedKV.v { + assert.True(t, result[i].v.Contains(expectedV)) + } + result[i].r() + } + }) + } +} + +type kvData struct { + k string + v []uint64 +} + +type dummyCursorRoaringSet struct { + data []kvData + pos int + 
closed bool +} + +func (c *dummyCursorRoaringSet) First() ([]byte, *sroar.Bitmap) { + c.pos = 0 + return c.Next() +} + +func (c *dummyCursorRoaringSet) Next() ([]byte, *sroar.Bitmap) { + bm := sroar.NewBitmap() + if c.pos >= len(c.data) { + return nil, bm + } + pos := c.pos + c.pos++ + bm.SetMany(c.data[pos].v) + return []byte(c.data[pos].k), bm +} + +func (c *dummyCursorRoaringSet) Seek(key []byte) ([]byte, *sroar.Bitmap) { + pos := -1 + for i := 0; i < len(c.data); i++ { + if bytes.Compare([]byte(c.data[i].k), key) >= 0 { + pos = i + break + } + } + if pos < 0 { + return nil, sroar.NewBitmap() + } + c.pos = pos + return c.Next() +} + +func (c *dummyCursorRoaringSet) Close() { + c.closed = true +} + +func createRowReaderRoaringSet(value []byte, operator filters.Operator, data []kvData) *RowReaderRoaringSet { + return &RowReaderRoaringSet{ + value: value, + operator: operator, + newCursor: func() lsmkv.CursorRoaringSet { return &dummyCursorRoaringSet{data: data} }, + getter: func(key []byte) (*sroar.Bitmap, func(), error) { + for i := 0; i < len(data); i++ { + if bytes.Equal([]byte(data[i].k), key) { + return roaringset.NewBitmap(data[i].v...), noopRelease, nil + } + } + return nil, noopRelease, entlsmkv.NotFound + }, + bitmapFactory: roaringset.NewBitmapFactory(roaringset.NewBitmapBufPoolNoop(), func() uint64 { return maxDocID }), + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher.go new file mode 100644 index 0000000000000000000000000000000000000000..936b5e61e9770980a54047f5daa7a40580e35140 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher.go @@ -0,0 +1,982 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "context" + "encoding/binary" + "fmt" + "strconv" + "time" + + "github.com/weaviate/weaviate/entities/concurrency" + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/stopwords" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/propertyspecific" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/adapters/repos/db/sorter" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/inverted" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/entities/tokenizer" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/config/runtime" +) + +type Searcher struct { + logger logrus.FieldLogger + store *lsmkv.Store + getClass func(string) *models.Class + classSearcher ClassSearcher // to allow recursive searches on ref-props + propIndices propertyspecific.Indices + stopwords stopwords.StopwordDetector + shardVersion uint16 + isFallbackToSearchable IsFallbackToSearchable + tenant string + // nestedCrossRefLimit limits the number of nested cross refs returned for a query + nestedCrossRefLimit int64 + bitmapFactory *roaringset.BitmapFactory +} + +func NewSearcher(logger logrus.FieldLogger, store *lsmkv.Store, + getClass func(string) *models.Class, propIndices propertyspecific.Indices, + classSearcher ClassSearcher, stopwords stopwords.StopwordDetector, + shardVersion uint16, isFallbackToSearchable IsFallbackToSearchable, + tenant string, 
nestedCrossRefLimit int64, bitmapFactory *roaringset.BitmapFactory, +) *Searcher { + return &Searcher{ + logger: logger, + store: store, + getClass: getClass, + propIndices: propIndices, + classSearcher: classSearcher, + stopwords: stopwords, + shardVersion: shardVersion, + isFallbackToSearchable: isFallbackToSearchable, + tenant: tenant, + nestedCrossRefLimit: nestedCrossRefLimit, + bitmapFactory: bitmapFactory, + } +} + +// Objects returns a list of full objects +func (s *Searcher) Objects(ctx context.Context, limit int, + filter *filters.LocalFilter, sort []filters.Sort, additional additional.Properties, + className schema.ClassName, properties []string, + disableInvertedSorter *runtime.DynamicValue[bool], +) ([]*storobj.Object, error) { + ctx = concurrency.CtxWithBudget(ctx, concurrency.TimesNUMCPU(2)) + beforeFilters := time.Now() + allowList, err := s.docIDs(ctx, filter, className, limit) + if err != nil { + return nil, err + } + defer allowList.Close() + helpers.AnnotateSlowQueryLog(ctx, "build_allow_list_took", time.Since(beforeFilters)) + helpers.AnnotateSlowQueryLog(ctx, "allow_list_doc_ids_count", allowList.Len()) + + var it docIDsIterator + if len(sort) > 0 { + beforeSort := time.Now() + docIDs, err := s.sort(ctx, limit, sort, allowList, className, disableInvertedSorter) + if err != nil { + return nil, fmt.Errorf("sort doc ids: %w", err) + } + helpers.AnnotateSlowQueryLog(ctx, "sort_doc_ids_took", time.Since(beforeSort)) + it = newSliceDocIDsIterator(docIDs) + } else { + it = allowList.Iterator() + } + + beforeObjects := time.Now() + defer func() { + helpers.AnnotateSlowQueryLog(ctx, "objects_by_doc_ids_took", time.Since(beforeObjects)) + }() + return s.objectsByDocID(ctx, it, additional, limit, properties) +} + +func (s *Searcher) sort(ctx context.Context, limit int, sort []filters.Sort, + docIDs helpers.AllowList, className schema.ClassName, + disableInvertedSorter *runtime.DynamicValue[bool], +) ([]uint64, error) { + lsmSorter, err := 
sorter.NewLSMSorter(s.store, s.getClass, className, disableInvertedSorter) + if err != nil { + return nil, err + } + return lsmSorter.SortDocIDs(ctx, limit, sort, docIDs) +} + +func (s *Searcher) objectsByDocID(ctx context.Context, it docIDsIterator, + additional additional.Properties, limit int, properties []string, +) ([]*storobj.Object, error) { + bucket := s.store.Bucket(helpers.ObjectsBucketLSM) + if bucket == nil { + return nil, fmt.Errorf("objects bucket not found") + } + + // Prevent unbounded iteration + if limit == 0 { + limit = int(config.DefaultQueryMaximumResults) + } + outlen := it.Len() + if outlen > limit { + outlen = limit + } + + out := make([]*storobj.Object, outlen) + docIDBytes := make([]byte, 8) + + propertyPaths := make([][]string, len(properties)) + for j := range properties { + propertyPaths[j] = []string{properties[j]} + } + + props := &storobj.PropertyExtraction{ + PropertyPaths: propertyPaths, + } + + deletedIds := sroar.NewBitmap() + deletedCount := 0 + handleDeletedId := func(id uint64) { + if deletedIds.Set(id) { + if deletedCount++; deletedCount >= 1024 { + s.bitmapFactory.Remove(deletedIds) + deletedIds = sroar.NewBitmap().CloneToBuf(deletedIds.ToBuffer()) + deletedCount = 0 + } + } + } + defer func() { + if deletedCount > 0 { + s.bitmapFactory.Remove(deletedIds) + } + }() + + i := 0 + loop := 0 + for docID, ok := it.Next(); ok; docID, ok = it.Next() { + if loop%1000 == 0 && ctx.Err() != nil { + return nil, ctx.Err() + } + loop++ + + binary.LittleEndian.PutUint64(docIDBytes, docID) + res, err := bucket.GetBySecondary(0, docIDBytes) + if err != nil { + return nil, err + } + + if res == nil { + handleDeletedId(docID) + continue + } + + var unmarshalled *storobj.Object + if additional.ReferenceQuery { + unmarshalled, err = storobj.FromBinaryUUIDOnly(res) + } else { + unmarshalled, err = storobj.FromBinaryOptional(res, additional, props) + } + if err != nil { + return nil, fmt.Errorf("unmarshal data object at position %d: %w", i, err) + 
} + + out[i] = unmarshalled + i++ + + if i >= limit { + break + } + } + + return out[:i], nil +} + +// DocIDs is similar to Objects, but does not actually resolve the docIDs to +// full objects. Instead it returns the pure object id pointers. They can then +// be used in a secondary index (e.g. vector index) +// +// DocID queries does not contain a limit by design, as we won't know if the limit +// wouldn't remove the item that is most important for the follow up query. +// Imagine the user sets the limit to 1 and the follow-up is a vector search. +// If we already limited the allowList to 1, the vector search would be +// pointless, as only the first element would be allowed, regardless of which +// had the shortest distance +func (s *Searcher) DocIDs(ctx context.Context, filter *filters.LocalFilter, + additional additional.Properties, className schema.ClassName, +) (helpers.AllowList, error) { + ctx = concurrency.CtxWithBudget(ctx, concurrency.TimesNUMCPU(2)) + return s.docIDs(ctx, filter, className, 0) +} + +func (s *Searcher) docIDs(ctx context.Context, filter *filters.LocalFilter, + className schema.ClassName, limit int, +) (helpers.AllowList, error) { + pv, err := s.extractPropValuePair(ctx, filter.Root, className) + if err != nil { + return nil, err + } + + beforeResolve := time.Now() + helpers.AnnotateSlowQueryLog(ctx, "build_allow_list_resolve_len", len(pv.children)) + dbm, err := pv.resolveDocIDs(ctx, s, limit) + if err != nil { + return nil, fmt.Errorf("resolve doc ids for prop/value pair: %w", err) + } + helpers.AnnotateSlowQueryLog(ctx, "build_allow_list_resolve_took", time.Since(beforeResolve)) + + return helpers.NewAllowListCloseableFromBitmap(dbm.docIDs, dbm.release), nil +} + +func (s *Searcher) extractPropValuePair( + ctx context.Context, filter *filters.Clause, className schema.ClassName, +) (*propValuePair, error) { + class := s.getClass(className.String()) + if class == nil { + return nil, fmt.Errorf("class %q not found", className) + } + out, 
err := newPropValuePair(class) + if err != nil { + return nil, fmt.Errorf("new prop value pair: %w", err) + } + if filter.Operands != nil { + // nested filter + children, err := s.extractPropValuePairs(ctx, filter.Operands, className) + if err != nil { + return nil, err + } + out.children = children + out.operator = filter.Operator + return out, nil + } + + switch filter.Operator { + case filters.ContainsAll, filters.ContainsAny, filters.ContainsNone: + return s.extractContains(ctx, filter.On, filter.Value.Type, filter.Value.Value, filter.Operator, class) + default: + // proceed + } + + // on value or non-nested filter + props := filter.On.Slice() + propName := props[0] + + if s.onInternalProp(propName) { + return s.extractInternalProp(propName, filter.Value.Type, filter.Value.Value, filter.Operator, class) + } + + if extractedPropName, ok := schema.IsPropertyLength(propName, 0); ok { + class := s.getClass(schema.ClassName(className).String()) + if class == nil { + return nil, fmt.Errorf("could not find class %s in schema", className) + } + + property, err := schema.GetPropertyByName(class, extractedPropName) + if err != nil { + return nil, err + } + return s.extractPropertyLength(property, filter.Value.Type, filter.Value.Value, filter.Operator, class) + } + + property, err := schema.GetPropertyByName(class, propName) + if err != nil { + return nil, err + } + + if s.onRefProp(property) && len(props) != 1 { + return s.extractReferenceFilter(property, filter, class) + } + + if s.onRefProp(property) && filter.Value.Type == schema.DataTypeInt { + // ref prop and int type is a special case, the user is looking for the + // reference count as opposed to the content + return s.extractReferenceCount(property, filter.Value.Value, filter.Operator, class) + } + + if filter.Operator == filters.OperatorIsNull { + return s.extractPropertyNull(property, filter.Value.Type, filter.Value.Value, filter.Operator, class) + } + + if s.onGeoProp(property) { + return 
s.extractGeoFilter(property, filter.Value.Value, filter.Value.Type, filter.Operator, class) + } + + if s.onUUIDProp(property) { + return s.extractUUIDFilter(property, filter.Value.Value, filter.Value.Type, filter.Operator, class) + } + + if s.onTokenizableProp(property) { + return s.extractTokenizableProp(property, filter.Value.Type, filter.Value.Value, filter.Operator, class) + } + + return s.extractPrimitiveProp(property, filter.Value.Type, filter.Value.Value, filter.Operator, class) +} + +func (s *Searcher) extractPropValuePairs(ctx context.Context, + operands []filters.Clause, className schema.ClassName, +) ([]*propValuePair, error) { + children := make([]*propValuePair, len(operands)) + eg := enterrors.NewErrorGroupWrapper(s.logger) + outerConcurrencyLimit := concurrency.BudgetFromCtx(ctx, concurrency.NUMCPU) + eg.SetLimit(outerConcurrencyLimit) + + concurrencyReductionFactor := min(len(operands), outerConcurrencyLimit) + + for i, clause := range operands { + i, clause := i, clause + eg.Go(func() error { + ctx := concurrency.ContextWithFractionalBudget(ctx, concurrencyReductionFactor, concurrency.NUMCPU) + child, err := s.extractPropValuePair(ctx, &clause, className) + if err != nil { + return fmt.Errorf("nested clause at pos %d: %w", i, err) + } + children[i] = child + + return nil + }, clause) + } + if err := eg.Wait(); err != nil { + return nil, fmt.Errorf("nested query: %w", err) + } + return children, nil +} + +func (s *Searcher) extractReferenceFilter(prop *models.Property, + filter *filters.Clause, class *models.Class, +) (*propValuePair, error) { + ctx := context.TODO() + return newRefFilterExtractor(s.logger, s.classSearcher, filter, class, prop, s.tenant, s.nestedCrossRefLimit). 
+ Do(ctx) +} + +func (s *Searcher) extractPrimitiveProp(prop *models.Property, propType schema.DataType, + value interface{}, operator filters.Operator, class *models.Class, +) (*propValuePair, error) { + var extractValueFn func(in interface{}) ([]byte, error) + switch propType { + case schema.DataTypeBoolean: + extractValueFn = s.extractBoolValue + case schema.DataTypeInt: + extractValueFn = s.extractIntValue + case schema.DataTypeNumber: + extractValueFn = s.extractNumberValue + case schema.DataTypeDate: + extractValueFn = s.extractDateValue + case "": + return nil, fmt.Errorf("data type cannot be empty") + default: + return nil, fmt.Errorf("data type %q not supported in query", propType) + } + + byteValue, err := extractValueFn(value) + if err != nil { + return nil, err + } + + hasFilterableIndex := HasFilterableIndex(prop) + hasSearchableIndex := HasSearchableIndex(prop) + hasRangeableIndex := HasRangeableIndex(prop) + + if !hasFilterableIndex && !hasSearchableIndex && !hasRangeableIndex { + return nil, inverted.NewMissingFilterableIndexError(prop.Name) + } + + return &propValuePair{ + value: byteValue, + prop: prop.Name, + operator: operator, + hasFilterableIndex: hasFilterableIndex, + hasSearchableIndex: hasSearchableIndex, + hasRangeableIndex: hasRangeableIndex, + Class: class, + }, nil +} + +func (s *Searcher) extractReferenceCount(prop *models.Property, value interface{}, + operator filters.Operator, class *models.Class, +) (*propValuePair, error) { + byteValue, err := s.extractIntCountValue(value) + if err != nil { + return nil, err + } + + hasFilterableIndex := HasFilterableIndexMetaCount && HasAnyInvertedIndex(prop) + hasSearchableIndex := HasSearchableIndexMetaCount && HasAnyInvertedIndex(prop) + hasRangeableIndex := HasRangeableIndexMetaCount && HasAnyInvertedIndex(prop) + + if !hasFilterableIndex && !hasSearchableIndex && !hasRangeableIndex { + return nil, inverted.NewMissingFilterableMetaCountIndexError(prop.Name) + } + + return &propValuePair{ + 
value: byteValue, + prop: helpers.MetaCountProp(prop.Name), + operator: operator, + hasFilterableIndex: hasFilterableIndex, + hasSearchableIndex: hasSearchableIndex, + hasRangeableIndex: hasRangeableIndex, + Class: class, + }, nil +} + +func (s *Searcher) extractGeoFilter(prop *models.Property, value interface{}, + valueType schema.DataType, operator filters.Operator, class *models.Class, +) (*propValuePair, error) { + if valueType != schema.DataTypeGeoCoordinates { + return nil, fmt.Errorf("prop %q is of type geoCoordinates, it can only"+ + "be used with geoRange filters", prop.Name) + } + + parsed := value.(filters.GeoRange) + + return &propValuePair{ + value: nil, // not going to be served by an inverted index + valueGeoRange: &parsed, + prop: prop.Name, + operator: operator, + hasFilterableIndex: HasFilterableIndex(prop), + hasSearchableIndex: HasSearchableIndex(prop), + hasRangeableIndex: HasRangeableIndex(prop), + Class: class, + }, nil +} + +func (s *Searcher) extractUUIDFilter(prop *models.Property, value interface{}, + valueType schema.DataType, operator filters.Operator, class *models.Class, +) (*propValuePair, error) { + var byteValue []byte + + switch valueType { + case schema.DataTypeText: + asStr, ok := value.(string) + if !ok { + return nil, fmt.Errorf("expected to see uuid as string in filter, got %T", value) + } + parsed, err := uuid.Parse(asStr) + if err != nil { + return nil, fmt.Errorf("parse uuid string: %w", err) + } + byteValue = parsed[:] + default: + return nil, fmt.Errorf("prop %q is of type uuid, the uuid to filter "+ + "on must be specified as a string (e.g. 
valueText:)", prop.Name) + } + + hasFilterableIndex := HasFilterableIndex(prop) + hasSearchableIndex := HasSearchableIndex(prop) + hasRangeableIndex := HasRangeableIndex(prop) + + if !hasFilterableIndex && !hasSearchableIndex && !hasRangeableIndex { + return nil, inverted.NewMissingFilterableIndexError(prop.Name) + } + + return &propValuePair{ + value: byteValue, + prop: prop.Name, + operator: operator, + hasFilterableIndex: hasFilterableIndex, + hasSearchableIndex: hasSearchableIndex, + hasRangeableIndex: hasRangeableIndex, + Class: class, + }, nil +} + +func (s *Searcher) extractInternalProp(propName string, propType schema.DataType, value interface{}, + operator filters.Operator, class *models.Class, +) (*propValuePair, error) { + switch propName { + case filters.InternalPropBackwardsCompatID, filters.InternalPropID: + return s.extractIDProp(propName, propType, value, operator, class) + case filters.InternalPropCreationTimeUnix, filters.InternalPropLastUpdateTimeUnix: + return s.extractTimestampProp(propName, propType, value, operator, class) + default: + return nil, fmt.Errorf( + "failed to extract internal prop, unsupported internal prop '%s'", propName) + } +} + +func (s *Searcher) extractIDProp(propName string, propType schema.DataType, + value interface{}, operator filters.Operator, class *models.Class, +) (*propValuePair, error) { + var byteValue []byte + + switch propType { + case schema.DataTypeText: + v, ok := value.(string) + if !ok { + return nil, fmt.Errorf("expected value to be string, got '%T'", value) + } + byteValue = []byte(v) + default: + return nil, fmt.Errorf( + "failed to extract id prop, unsupported type '%T' for prop '%s'", propType, propName) + } + + return &propValuePair{ + value: byteValue, + prop: filters.InternalPropID, + operator: operator, + hasFilterableIndex: HasFilterableIndexIdProp, + hasSearchableIndex: HasSearchableIndexIdProp, + Class: class, + }, nil +} + +func (s *Searcher) extractTimestampProp(propName string, propType 
schema.DataType, value interface{}, + operator filters.Operator, class *models.Class, +) (*propValuePair, error) { + var byteValue []byte + + switch propType { + case schema.DataTypeText: + v, ok := value.(string) + if !ok { + return nil, fmt.Errorf("expected value to be string, got '%T'", value) + } + _, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, fmt.Errorf("expected value to be timestamp, got '%s'", v) + } + byteValue = []byte(v) + case schema.DataTypeDate: + v, ok := value.(string) + if !ok { + return nil, fmt.Errorf("expected value to be string, got '%T'", value) + } + t, err := time.Parse(time.RFC3339, v) + if err != nil { + return nil, fmt.Errorf("trying parse time as RFC3339 string: %w", err) + } + + // if propType is a `valueDate`, we need to convert + // it to ms before fetching. this is the format by + // which our timestamps are indexed + byteValue = []byte(strconv.FormatInt(t.UnixMilli(), 10)) + default: + return nil, fmt.Errorf( + "failed to extract timestamp prop, unsupported type '%T' for prop '%s'", propType, propName) + } + + return &propValuePair{ + value: byteValue, + prop: propName, + operator: operator, + hasFilterableIndex: HasFilterableIndexTimestampProp, // TODO text_rbm_inverted_index & with settings + hasSearchableIndex: HasSearchableIndexTimestampProp, // TODO text_rbm_inverted_index & with settings + Class: class, + }, nil +} + +func (s *Searcher) extractTokenizableProp(prop *models.Property, propType schema.DataType, + value interface{}, operator filters.Operator, class *models.Class, +) (*propValuePair, error) { + var terms []string + + valueString, ok := value.(string) + if !ok { + return nil, fmt.Errorf("expected value to be string, got '%T'", value) + } + + switch propType { + case schema.DataTypeText: + // if the operator is like, we cannot apply the regular text-splitting + // logic as it would remove all wildcard symbols + if operator == filters.OperatorLike { + terms = 
tokenizer.TokenizeWithWildcards(prop.Tokenization, valueString) + } else { + terms = tokenizer.Tokenize(prop.Tokenization, valueString) + } + default: + return nil, fmt.Errorf("expected value type to be text, got %v", propType) + } + + hasFilterableIndex := HasFilterableIndex(prop) && !s.isFallbackToSearchable() + hasSearchableIndex := HasSearchableIndex(prop) + hasRangeableIndex := HasRangeableIndex(prop) + + if !hasFilterableIndex && !hasSearchableIndex && !hasRangeableIndex { + return nil, inverted.NewMissingFilterableIndexError(prop.Name) + } + + propValuePairs := make([]*propValuePair, 0, len(terms)) + for _, term := range terms { + if s.stopwords.IsStopword(term) { + continue + } + propValuePairs = append(propValuePairs, &propValuePair{ + value: []byte(term), + prop: prop.Name, + operator: operator, + hasFilterableIndex: hasFilterableIndex, + hasSearchableIndex: hasSearchableIndex, + hasRangeableIndex: hasRangeableIndex, + Class: class, + }) + } + + if len(propValuePairs) > 1 { + return &propValuePair{operator: filters.OperatorAnd, children: propValuePairs, Class: class}, nil + } + if len(propValuePairs) == 1 { + return propValuePairs[0], nil + } + return nil, fmt.Errorf("invalid search term, only stopwords provided. 
" + + "Stopwords can be configured in class.invertedIndexConfig.stopwords") +} + +func (s *Searcher) extractPropertyLength(prop *models.Property, propType schema.DataType, + value interface{}, operator filters.Operator, class *models.Class, +) (*propValuePair, error) { + var byteValue []byte + + switch propType { + case schema.DataTypeInt: + b, err := s.extractIntValue(value) + if err != nil { + return nil, err + } + byteValue = b + default: + return nil, fmt.Errorf( + "failed to extract length of prop, unsupported type '%T' for length of prop '%s'", propType, prop.Name) + } + + return &propValuePair{ + value: byteValue, + prop: helpers.PropLength(prop.Name), + operator: operator, + hasFilterableIndex: HasFilterableIndexPropLength, // TODO text_rbm_inverted_index & with settings + hasSearchableIndex: HasSearchableIndexPropLength, // TODO text_rbm_inverted_index & with settings + Class: class, + }, nil +} + +func (s *Searcher) extractPropertyNull(prop *models.Property, propType schema.DataType, + value interface{}, operator filters.Operator, class *models.Class, +) (*propValuePair, error) { + var valResult []byte + + switch propType { + case schema.DataTypeBoolean: + b, err := s.extractBoolValue(value) + if err != nil { + return nil, err + } + valResult = b + default: + return nil, fmt.Errorf( + "failed to extract null prop, unsupported type '%T' for null prop '%s'", propType, prop.Name) + } + + return &propValuePair{ + value: valResult, + prop: helpers.PropNull(prop.Name), + operator: operator, + hasFilterableIndex: HasFilterableIndexPropNull, // TODO text_rbm_inverted_index & with settings + hasSearchableIndex: HasSearchableIndexPropNull, // TODO text_rbm_inverted_index & with settings + Class: class, + }, nil +} + +func (s *Searcher) extractContains(ctx context.Context, + path *filters.Path, propType schema.DataType, value interface{}, + operator filters.Operator, class *models.Class, +) (*propValuePair, error) { + var operands []filters.Clause + switch propType 
{ + case schema.DataTypeText, schema.DataTypeTextArray: + valueStringArray, err := s.extractStringArray(value) + if err != nil { + return nil, err + } + operands = getContainsOperands(propType, path, valueStringArray) + case schema.DataTypeInt, schema.DataTypeIntArray: + valueIntArray, err := s.extractIntArray(value) + if err != nil { + return nil, err + } + operands = getContainsOperands(propType, path, valueIntArray) + case schema.DataTypeNumber, schema.DataTypeNumberArray: + valueFloat64Array, err := s.extractFloat64Array(value) + if err != nil { + return nil, err + } + operands = getContainsOperands(propType, path, valueFloat64Array) + case schema.DataTypeBoolean, schema.DataTypeBooleanArray: + valueBooleanArray, err := s.extractBoolArray(value) + if err != nil { + return nil, err + } + operands = getContainsOperands(propType, path, valueBooleanArray) + case schema.DataTypeDate, schema.DataTypeDateArray: + valueDateArray, err := s.extractStringArray(value) + if err != nil { + return nil, err + } + operands = getContainsOperands(propType, path, valueDateArray) + default: + return nil, fmt.Errorf("unsupported type '%T' for '%v' operator", propType, operator) + } + + children, err := s.extractPropValuePairs(ctx, operands, schema.ClassName(class.Class)) + if err != nil { + return nil, err + } + out, err := newPropValuePair(class) + if err != nil { + return nil, fmt.Errorf("new prop value pair: %w", err) + } + + out.children = children + out.Class = class + + switch operator { + case filters.ContainsAll: + out.operator = filters.OperatorAnd + case filters.ContainsAny: + out.operator = filters.OperatorOr + case filters.ContainsNone: + out.operator = filters.OperatorOr + + parent, err := newPropValuePair(class) + if err != nil { + return nil, fmt.Errorf("new prop value pair: %w", err) + } + parent.operator = filters.OperatorNot + parent.children = []*propValuePair{out} + parent.Class = class + out = parent + default: + return nil, fmt.Errorf("unknown contains operator 
%v", operator) + } + return out, nil +} + +// TODO: repeated calls to on... aren't too efficient because we iterate over +// the schema each time, might be smarter to have a single method that +// determines the type and then we switch based on the result. However, the +// effect of that should be very small unless the schema is absolutely massive. +func (s *Searcher) onRefProp(property *models.Property) bool { + return schema.IsRefDataType(property.DataType) +} + +// TODO: repeated calls to on... aren't too efficient because we iterate over +// the schema each time, might be smarter to have a single method that +// determines the type and then we switch based on the result. However, the +// effect of that should be very small unless the schema is absolutely massive. +func (s *Searcher) onGeoProp(prop *models.Property) bool { + return schema.DataType(prop.DataType[0]) == schema.DataTypeGeoCoordinates +} + +// Note: A UUID prop is a user-specified prop of type UUID. This has nothing to +// do with the primary ID of an object which happens to always be a UUID in +// Weaviate v1 +// +// TODO: repeated calls to on... aren't too efficient because we iterate over +// the schema each time, might be smarter to have a single method that +// determines the type and then we switch based on the result. However, the +// effect of that should be very small unless the schema is absolutely massive. 
+func (s *Searcher) onUUIDProp(prop *models.Property) bool { + switch dt, _ := schema.AsPrimitive(prop.DataType); dt { + case schema.DataTypeUUID, schema.DataTypeUUIDArray: + return true + default: + return false + } +} + +func (s *Searcher) onInternalProp(propName string) bool { + return filters.IsInternalProperty(schema.PropertyName(propName)) +} + +func (s *Searcher) onTokenizableProp(prop *models.Property) bool { + switch dt, _ := schema.AsPrimitive(prop.DataType); dt { + case schema.DataTypeText, schema.DataTypeTextArray: + return true + default: + return false + } +} + +func (s *Searcher) extractStringArray(value interface{}) ([]string, error) { + switch v := value.(type) { + case []string: + return v, nil + case []interface{}: + vals := make([]string, len(v)) + for i := range v { + val, ok := v[i].(string) + if !ok { + return nil, fmt.Errorf("value[%d] type should be string but is %T", i, v[i]) + } + vals[i] = val + } + return vals, nil + default: + return nil, fmt.Errorf("value type should be []string but is %T", value) + } +} + +func (s *Searcher) extractIntArray(value interface{}) ([]int, error) { + switch v := value.(type) { + case []int: + return v, nil + case []interface{}: + vals := make([]int, len(v)) + for i := range v { + // in this case all number values are unmarshalled to float64, so we need to cast to float64 + // and then make int + val, ok := v[i].(float64) + if !ok { + return nil, fmt.Errorf("value[%d] type should be float64 but is %T", i, v[i]) + } + vals[i] = int(val) + } + return vals, nil + default: + return nil, fmt.Errorf("value type should be []int but is %T", value) + } +} + +func (s *Searcher) extractFloat64Array(value interface{}) ([]float64, error) { + switch v := value.(type) { + case []float64: + return v, nil + case []interface{}: + vals := make([]float64, len(v)) + for i := range v { + val, ok := v[i].(float64) + if !ok { + return nil, fmt.Errorf("value[%d] type should be float64 but is %T", i, v[i]) + } + vals[i] = val + } + 
return vals, nil + default: + return nil, fmt.Errorf("value type should be []float64 but is %T", value) + } +} + +func (s *Searcher) extractBoolArray(value interface{}) ([]bool, error) { + switch v := value.(type) { + case []bool: + return v, nil + case []interface{}: + vals := make([]bool, len(v)) + for i := range v { + val, ok := v[i].(bool) + if !ok { + return nil, fmt.Errorf("value[%d] type should be bool but is %T", i, v[i]) + } + vals[i] = val + } + return vals, nil + default: + return nil, fmt.Errorf("value type should be []bool but is %T", value) + } +} + +func getContainsOperands[T any](propType schema.DataType, path *filters.Path, values []T) []filters.Clause { + operands := make([]filters.Clause, len(values)) + for i := range values { + operands[i] = filters.Clause{ + Operator: filters.OperatorEqual, + On: path, + Value: &filters.Value{ + Type: propType, + Value: values[i], + }, + } + } + return operands +} + +type docIDsIterator interface { + Next() (uint64, bool) + Len() int +} + +type sliceDocIDsIterator struct { + docIDs []uint64 + pos int +} + +func newSliceDocIDsIterator(docIDs []uint64) docIDsIterator { + return &sliceDocIDsIterator{docIDs: docIDs, pos: 0} +} + +func (it *sliceDocIDsIterator) Next() (uint64, bool) { + if it.pos >= len(it.docIDs) { + return 0, false + } + pos := it.pos + it.pos++ + return it.docIDs[pos], true +} + +func (it *sliceDocIDsIterator) Len() int { + return len(it.docIDs) +} + +type docBitmap struct { + docIDs *sroar.Bitmap + release func() +} + +// newUninitializedDocBitmap can be used whenever we can be sure that the first +// user of the docBitmap will set or replace the bitmap, such as a row reader +func newUninitializedDocBitmap() docBitmap { + return docBitmap{} +} + +func newDocBitmap() docBitmap { + return docBitmap{docIDs: sroar.NewBitmap(), release: func() {}} +} + +func (dbm *docBitmap) count() int { + if dbm.docIDs == nil { + return 0 + } + return dbm.docIDs.GetCardinality() +} + +func (dbm *docBitmap) IDs() 
[]uint64 { + if dbm.docIDs == nil { + return []uint64{} + } + return dbm.docIDs.ToArray() +} + +func (dbm *docBitmap) IDsWithLimit(limit int) []uint64 { + card := dbm.docIDs.GetCardinality() + if limit >= card { + return dbm.IDs() + } + + out := make([]uint64, limit) + for i := range out { + // safe to ignore error, it can only error if the index is >= cardinality + // which we have already ruled out + out[i], _ = dbm.docIDs.Select(uint64(i)) + } + + return out +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_doc_bitmap.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_doc_bitmap.go new file mode 100644 index 0000000000000000000000000000000000000000..e1dfdbaedcaffd7a9657c605738e1151e8b74615 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_doc_bitmap.go @@ -0,0 +1,228 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "context" + "encoding/binary" + "fmt" + "time" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/concurrency" + "github.com/weaviate/weaviate/entities/filters" +) + +var noopRelease = func() {} + +func (s *Searcher) docBitmap(ctx context.Context, b *lsmkv.Bucket, limit int, + pv *propValuePair, +) (bm docBitmap, err error) { + before := time.Now() + strategy := "geo" + defer func() { + took := time.Since(before) + vals := map[string]any{ + "prop": pv.prop, + "operator": pv.operator, + "took": took, + "took_string": took.String(), + "value": pv.value, + "count": bm.count(), + "strategy": strategy, + } + + helpers.AnnotateSlowQueryLogAppend(ctx, "build_allow_list_doc_bitmap", vals) + }() + + // geo props cannot be served by the inverted index and they require an + // external index. 
So, instead of trying to serve this chunk of the filter + // request internally, we can pass it to an external geo index + if pv.operator == filters.OperatorWithinGeoRange { + bm, err = s.docBitmapGeo(ctx, pv) + return + } + strategy = b.Strategy() + + // all other operators perform operations on the inverted index which we + // can serve directly + switch b.Strategy() { + case lsmkv.StrategySetCollection: + bm, err = s.docBitmapInvertedSet(ctx, b, limit, pv) + case lsmkv.StrategyRoaringSet: + bm, err = s.docBitmapInvertedRoaringSet(ctx, b, limit, pv) + case lsmkv.StrategyRoaringSetRange: + bm, err = s.docBitmapInvertedRoaringSetRange(ctx, b, pv) + case lsmkv.StrategyMapCollection: + bm, err = s.docBitmapInvertedMap(ctx, b, limit, pv) + case lsmkv.StrategyInverted: // TODO amourao, check + bm, err = s.docBitmapInvertedMap(ctx, b, limit, pv) + default: + return docBitmap{}, fmt.Errorf("property '%s' is neither filterable nor searchable nor rangeable", pv.prop) + } + + return +} + +func (s *Searcher) docBitmapInvertedRoaringSet(ctx context.Context, b *lsmkv.Bucket, + limit int, pv *propValuePair, +) (docBitmap, error) { + out := newUninitializedDocBitmap() + isEmpty := true + var readFn ReadFn = func(k []byte, docIDs *sroar.Bitmap, release func()) (bool, error) { + if isEmpty { + out.docIDs = docIDs + out.release = release + isEmpty = false + } else { + concurrencyBudget := concurrency.BudgetFromCtx(ctx, concurrency.SROAR_MERGE) + out.docIDs.OrConc(docIDs, concurrencyBudget) + release() + } + + // NotEqual requires the full set of potentially existing doc ids + if pv.operator == filters.OperatorNotEqual { + return true, nil + } + + if limit > 0 && out.docIDs.GetCardinality() >= limit { + return false, nil + } + return true, nil + } + + rr := NewRowReaderRoaringSet(b, pv.value, pv.operator, false, s.bitmapFactory) + if err := rr.Read(ctx, readFn); err != nil { + return out, fmt.Errorf("read row: %w", err) + } + + if isEmpty { + return newDocBitmap(), nil + } + return 
out, nil +} + +func (s *Searcher) docBitmapInvertedRoaringSetRange(ctx context.Context, b *lsmkv.Bucket, + pv *propValuePair, +) (docBitmap, error) { + if len(pv.value) != 8 { + return newDocBitmap(), fmt.Errorf("readerRoaringSetRange: invalid value length %d, should be 8 bytes", len(pv.value)) + } + + reader := b.ReaderRoaringSetRange() + defer reader.Close() + + docIds, release, err := reader.Read(ctx, binary.BigEndian.Uint64(pv.value), pv.operator) + if err != nil { + return newDocBitmap(), fmt.Errorf("readerRoaringSetRange: %w", err) + } + + out := newUninitializedDocBitmap() + out.docIDs = docIds + out.release = release + return out, nil +} + +func (s *Searcher) docBitmapInvertedSet(ctx context.Context, b *lsmkv.Bucket, + limit int, pv *propValuePair, +) (docBitmap, error) { + out := newUninitializedDocBitmap() + isEmpty := true + var readFn ReadFn = func(k []byte, ids *sroar.Bitmap, release func()) (bool, error) { + if isEmpty { + out.docIDs = ids + out.release = release + isEmpty = false + } else { + concurrencyBudget := concurrency.BudgetFromCtx(ctx, concurrency.SROAR_MERGE) + out.docIDs.OrConc(ids, concurrencyBudget) + release() + } + + // NotEqual requires the full set of potentially existing doc ids + if pv.operator == filters.OperatorNotEqual { + return true, nil + } + + if limit > 0 && out.docIDs.GetCardinality() >= limit { + return false, nil + } + return true, nil + } + + rr := NewRowReader(b, pv.value, pv.operator, false, s.bitmapFactory) + if err := rr.Read(ctx, readFn); err != nil { + return out, fmt.Errorf("read row: %w", err) + } + + if isEmpty { + return newDocBitmap(), nil + } + return out, nil +} + +func (s *Searcher) docBitmapInvertedMap(ctx context.Context, b *lsmkv.Bucket, + limit int, pv *propValuePair, +) (docBitmap, error) { + out := newUninitializedDocBitmap() + isEmpty := true + var readFn ReadFn = func(k []byte, ids *sroar.Bitmap, release func()) (bool, error) { + if isEmpty { + out.docIDs = ids + out.release = release + isEmpty = 
false + } else { + concurrencyBudget := concurrency.BudgetFromCtx(ctx, concurrency.SROAR_MERGE) + out.docIDs.OrConc(ids, concurrencyBudget) + release() + } + + // NotEqual requires the full set of potentially existing doc ids + if pv.operator == filters.OperatorNotEqual { + return true, nil + } + + if limit > 0 && out.docIDs.GetCardinality() >= limit { + return false, nil + } + return true, nil + } + + rr := NewRowReaderFrequency(b, pv.value, pv.operator, false, s.shardVersion, s.bitmapFactory) + if err := rr.Read(ctx, readFn); err != nil { + return out, fmt.Errorf("read row: %w", err) + } + + if isEmpty { + return newDocBitmap(), nil + } + return out, nil +} + +func (s *Searcher) docBitmapGeo(ctx context.Context, pv *propValuePair) (docBitmap, error) { + out := newDocBitmap() + propIndex, ok := s.propIndices.ByProp(pv.prop) + + if !ok { + return out, nil + } + + res, err := propIndex.GeoIndex.WithinRange(ctx, *pv.valueGeoRange) + if err != nil { + return out, fmt.Errorf("geo index range search on prop %q: %w", pv.prop, err) + } + + out.docIDs.SetMany(res) + return out, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ed4936cfddab2f074650b6b022a901e9aa80ed07 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_integration_test.go @@ -0,0 +1,797 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package inverted + +import ( + "bytes" + "context" + "encoding/binary" + "math" + "strings" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/config" +) + +func TestObjects(t *testing.T) { + var ( + dirName = t.TempDir() + logger, _ = test.NewNullLogger() + propName = "inverted-with-frequency" + charSet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + charRepeat = 50 + multiplier = 10 + numObjects = len(charSet) * multiplier + docIDCounter = uint64(0) + ) + + store, err := lsmkv.New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer func() { assert.Nil(t, err) }() + + t.Run("create buckets", func(t *testing.T) { + require.Nil(t, store.CreateOrLoadBucket(context.Background(), helpers.ObjectsBucketLSM, + lsmkv.WithStrategy(lsmkv.StrategyReplace), lsmkv.WithSecondaryIndices(1))) + require.NotNil(t, store.Bucket(helpers.ObjectsBucketLSM)) + + require.Nil(t, store.CreateOrLoadBucket(context.Background(), + helpers.BucketSearchableFromPropNameLSM(propName), + lsmkv.WithStrategy(lsmkv.StrategyMapCollection))) + require.NotNil(t, store.Bucket(helpers.BucketSearchableFromPropNameLSM(propName))) + }) + + type testCase struct { + 
targetChar uint8 + object *storobj.Object + } + tests := make([]testCase, numObjects) + + t.Run("put objects and build test cases", func(t *testing.T) { + for i := 0; i < numObjects; i++ { + targetChar := charSet[i%len(charSet)] + prop := repeatString(string(targetChar), charRepeat) + obj := storobj.Object{ + MarshallerVersion: 1, + Object: models.Object{ + ID: strfmt.UUID(uuid.NewString()), + Class: className, + Properties: map[string]interface{}{ + propName: prop, + }, + }, + DocID: docIDCounter, + } + docIDCounter++ + putObject(t, store, &obj, propName, []byte(prop)) + tests[i] = testCase{ + targetChar: targetChar, + object: &obj, + } + } + }) + + t.Run("run tests", func(t *testing.T) { + bitmapFactory := roaringset.NewBitmapFactory(roaringset.NewBitmapBufPoolNoop(), newFakeMaxIDGetter(docIDCounter)) + + searcher := NewSearcher(logger, store, createSchema().GetClass, nil, nil, + fakeStopwordDetector{}, 2, func() bool { return false }, "", + config.DefaultQueryNestedCrossReferenceLimit, bitmapFactory) + + t.Run("NotEqual", func(t *testing.T) { + t.Parallel() + for _, test := range tests { + filter := &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorNotEqual, + On: &filters.Path{ + Class: className, + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: repeatString(string(test.targetChar), charRepeat), + Type: schema.DataTypeText, + }, + }} + objs, err := searcher.Objects(context.Background(), numObjects, + filter, nil, additional.Properties{}, className, []string{propName}, nil) + assert.Nil(t, err) + assert.Len(t, objs, numObjects-multiplier) + } + }) + t.Run("Equal", func(t *testing.T) { + t.Parallel() + for _, test := range tests { + filter := &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: className, + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: repeatString(string(test.targetChar), charRepeat), + Type: 
schema.DataTypeText, + }, + }} + objs, err := searcher.Objects(context.Background(), numObjects, + filter, nil, additional.Properties{}, className, []string{propName}, nil) + assert.Nil(t, err) + assert.Len(t, objs, multiplier) + } + }) + }) + + t.Run("ids of deleted documents are removed from bitmap factory", func(t *testing.T) { + maxDocID := docIDCounter - 1 + maxDocIDWithNonExistentIds := maxDocID + 10 + + maxDocIdGetterWithNonExistentIds := newFakeMaxIDGetter(maxDocIDWithNonExistentIds) + bitmapFactory := roaringset.NewBitmapFactory(roaringset.NewBitmapBufPoolNoop(), maxDocIdGetterWithNonExistentIds) + + docIDsToRemove := map[uint64]strfmt.UUID{} + + searcher := NewSearcher(logger, store, createSchema().GetClass, nil, nil, + fakeStopwordDetector{}, 2, func() bool { return false }, "", + config.DefaultQueryNestedCrossReferenceLimit, bitmapFactory) + + t.Run("sanity check", func(t *testing.T) { + bm, release := bitmapFactory.GetBitmap() + defer release() + + require.Equal(t, int(maxDocIDWithNonExistentIds)+1, bm.GetCardinality()) + require.Equal(t, maxDocIDWithNonExistentIds, bm.Maximum()) + }) + + t.Run("Equal", func(t *testing.T) { + filter := &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: className, + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: repeatString(string(tests[0].targetChar), charRepeat), + Type: schema.DataTypeText, + }, + }} + objects, err := searcher.Objects(context.Background(), numObjects, + filter, nil, additional.Properties{}, className, []string{propName}, nil) + assert.NoError(t, err) + assert.Len(t, objects, multiplier) + + t.Run("all elements found, no changes in bitmap factory expected", func(t *testing.T) { + bm, release := bitmapFactory.GetBitmap() + defer release() + + require.Equal(t, int(maxDocIDWithNonExistentIds+1), bm.GetCardinality()) + require.Equal(t, maxDocIDWithNonExistentIds, bm.Maximum()) + }) + + for i, n := 0, multiplier/2; 
i < n; i++ { + docIDsToRemove[objects[i].DocID] = objects[i].ID() + } + }) + + t.Run("NotEqual", func(t *testing.T) { + filter := &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorNotEqual, + On: &filters.Path{ + Class: className, + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: repeatString(string(tests[0].targetChar), charRepeat), + Type: schema.DataTypeText, + }, + }} + _, err := searcher.Objects(context.Background(), numObjects, + filter, nil, additional.Properties{}, className, []string{propName}, nil) + assert.NoError(t, err) + + t.Run("some elements not found, ids removed from bitmap factory", func(t *testing.T) { + bm, release := bitmapFactory.GetBitmap() + defer release() + + require.Equal(t, int(maxDocID)+1, bm.GetCardinality()) + require.Equal(t, maxDocID, bm.Maximum()) + }) + }) + + t.Run("Equal with removed docIDs", func(t *testing.T) { + bucket := store.Bucket(helpers.ObjectsBucketLSM) + + docIDBytes := make([]byte, 8) + for docID, ID := range docIDsToRemove { + binary.LittleEndian.PutUint64(docIDBytes, docID) + err := bucket.Delete([]byte(ID), lsmkv.WithSecondaryKey(0, docIDBytes)) + require.NoError(t, err) + } + + filter := &filters.LocalFilter{Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: className, + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: repeatString(string(tests[0].targetChar), charRepeat), + Type: schema.DataTypeText, + }, + }} + _, err := searcher.Objects(context.Background(), numObjects, + filter, nil, additional.Properties{}, className, []string{propName}, nil) + assert.NoError(t, err) + + t.Run("some elements not found, ids removed from bitmap factory", func(t *testing.T) { + bm, release := bitmapFactory.GetBitmap() + defer release() + + require.Equal(t, int(maxDocID)+1-len(docIDsToRemove), bm.GetCardinality()) + require.Equal(t, maxDocID, bm.Maximum()) + }) + }) + }) +} + +func TestDocIDs(t *testing.T) { 
+ var ( + dirName = t.TempDir() + logger, _ = test.NewNullLogger() + propName = "inverted-with-frequency" + charSet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + charRepeat = 3 + multiplier = 100 + numObjects = len(charSet) * multiplier + docIDCounter = uint64(0) + ) + store, err := lsmkv.New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.Nil(t, err) + defer func() { assert.Nil(t, err) }() + + t.Run("create buckets", func(t *testing.T) { + require.Nil(t, store.CreateOrLoadBucket(context.Background(), helpers.ObjectsBucketLSM, + lsmkv.WithStrategy(lsmkv.StrategyReplace), lsmkv.WithSecondaryIndices(1))) + require.NotNil(t, store.Bucket(helpers.ObjectsBucketLSM)) + + require.Nil(t, store.CreateOrLoadBucket(context.Background(), + helpers.BucketSearchableFromPropNameLSM(propName), + lsmkv.WithStrategy(lsmkv.StrategyMapCollection))) + require.NotNil(t, store.Bucket(helpers.BucketSearchableFromPropNameLSM(propName))) + }) + + t.Run("put objects", func(t *testing.T) { + for i := 0; i < numObjects; i++ { + targetChar := charSet[i%len(charSet)] + prop := repeatString(string(targetChar), charRepeat) + obj := storobj.Object{ + MarshallerVersion: 1, + Object: models.Object{ + ID: strfmt.UUID(uuid.NewString()), + Class: className, + Properties: map[string]interface{}{ + propName: prop, + }, + }, + DocID: docIDCounter, + } + docIDCounter++ + putObject(t, store, &obj, propName, []byte(prop)) + } + }) + + bitmapFactory := roaringset.NewBitmapFactory(roaringset.NewBitmapBufPoolNoop(), newFakeMaxIDGetter(docIDCounter-1)) + + searcher := NewSearcher(logger, store, createSchema().GetClass, nil, nil, + fakeStopwordDetector{}, 2, func() bool { return false }, "", + config.DefaultQueryNestedCrossReferenceLimit, bitmapFactory) + + type testCase struct { + expectedMatches int + filter filters.LocalFilter + } + tests := []testCase{ + { + filter: filters.LocalFilter{ + Root: &filters.Clause{ + 
Operator: filters.OperatorNotEqual, + On: &filters.Path{ + Class: className, + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: "[[[", + Type: schema.DataTypeText, + }, + }, + }, + expectedMatches: numObjects, + }, + { + filter: filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorNotEqual, + On: &filters.Path{ + Class: className, + Property: schema.PropertyName(propName), + }, + Value: &filters.Value{ + Value: "AAA", + Type: schema.DataTypeText, + }, + }, + }, + expectedMatches: (len(charSet) - 1) * multiplier, + }, + } + + for _, tc := range tests { + allow, err := searcher.DocIDs(context.Background(), &tc.filter, additional.Properties{}, className) + require.Nil(t, err) + assert.Equal(t, tc.expectedMatches, allow.Len()) + allow.Close() + } +} + +// lifted from Shard::pairPropertyWithFrequency to emulate Bucket::MapSet functionality +func pairPropWithFreq(docID uint64, freq, propLen float32) lsmkv.MapPair { + buf := make([]byte, 16) + + binary.BigEndian.PutUint64(buf[0:8], docID) + binary.LittleEndian.PutUint32(buf[8:12], math.Float32bits(freq)) + binary.LittleEndian.PutUint32(buf[12:16], math.Float32bits(propLen)) + + return lsmkv.MapPair{ + Key: buf[:8], + Value: buf[8:], + } +} + +func putObject(t *testing.T, store *lsmkv.Store, obj *storobj.Object, propName string, data []byte) { + b, err := obj.MarshalBinary() + require.Nil(t, err) + + keyBuf := bytes.NewBuffer(nil) + binary.Write(keyBuf, binary.LittleEndian, &obj.DocID) + docIDBytes := keyBuf.Bytes() + + bucket := store.Bucket(helpers.ObjectsBucketLSM) + err = bucket.Put([]byte(obj.ID()), b, lsmkv.WithSecondaryKey(0, docIDBytes)) + require.Nil(t, err) + + propBucketName := helpers.BucketSearchableFromPropNameLSM(propName) + propBucket := store.Bucket(propBucketName) + err = propBucket.MapSet(data, pairPropWithFreq(obj.DocID, 1, float32(len(data)))) + require.Nil(t, err) +} + +func repeatString(s string, n int) string { + sb := strings.Builder{} + for i := 
0; i < n; i++ { + sb.WriteString(s) + } + return sb.String() +} + +func TestSearcher_ResolveDocIds(t *testing.T) { + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + docIdSet1 := []uint64{7, 8, 9, 10, 11} + docIdSet2 := []uint64{1, 3, 5, 7, 9, 11} + docIdSet3 := []uint64{1, 3, 5, 7, 9} + + fakeInvertedIndex := []struct { + val string + ids []uint64 + }{ + {val: "set1_1", ids: docIdSet1}, + {val: "set1_2", ids: docIdSet1}, + {val: "set1_3", ids: docIdSet1}, + {val: "set2", ids: docIdSet2}, + {val: "set3", ids: docIdSet3}, + } + + var searcher *Searcher + propName := "inverted-text-roaringset" + + t.Run("import data", func(tt *testing.T) { + store, err := lsmkv.New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop(), + cyclemanager.NewCallbackGroupNoop()) + require.NoError(t, err) + t.Cleanup(func() { store.Shutdown(context.Background()) }) // cleanup in outer test + + maxDocID := uint64(12) + bitmapFactory := roaringset.NewBitmapFactory(roaringset.NewBitmapBufPoolNoop(), newFakeMaxIDGetter(maxDocID)) + searcher = NewSearcher(logger, store, createSchema().GetClass, nil, nil, + fakeStopwordDetector{}, 2, func() bool { return false }, "", + config.DefaultQueryNestedCrossReferenceLimit, bitmapFactory) + + bucketName := helpers.BucketFromPropNameLSM(propName) + require.NoError(tt, store.CreateOrLoadBucket(context.Background(), bucketName, + lsmkv.WithStrategy(lsmkv.StrategyRoaringSet), + lsmkv.WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()), + )) + bucket := store.Bucket(bucketName) + + for _, entry := range fakeInvertedIndex { + require.NoError(tt, bucket.RoaringSetAddList([]byte(entry.val), entry.ids)) + } + require.Nil(tt, bucket.FlushAndSwitch()) + }) + + equalOperand := func(val string) filters.Clause { + return filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{Class: className, Property: schema.PropertyName(propName)}, + Value: &filters.Value{Value: val, Type: 
schema.DataTypeText}, + } + } + testCases := []struct { + name string + filter *filters.LocalFilter + expectedIds []uint64 + }{ + { + name: "AND, different sets", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + equalOperand("set1_1"), + equalOperand("set2"), + equalOperand("set3"), + }, + }, + }, + expectedIds: []uint64{7, 9}, + }, + { + name: "OR, different sets", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + equalOperand("set1_1"), + equalOperand("set2"), + equalOperand("set3"), + }, + }, + }, + expectedIds: []uint64{1, 3, 5, 7, 8, 9, 10, 11}, + }, + { + name: "NOT, set1", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorNot, + Operands: []filters.Clause{ + equalOperand("set1_1"), + }, + }, + }, + expectedIds: []uint64{0, 1, 2, 3, 4, 5, 6, 12}, + }, + { + name: "NOT, set2", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorNot, + Operands: []filters.Clause{ + equalOperand("set2"), + }, + }, + }, + expectedIds: []uint64{0, 2, 4, 6, 8, 10, 12}, + }, + { + name: "NOT, set3", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorNot, + Operands: []filters.Clause{ + equalOperand("set3"), + }, + }, + }, + expectedIds: []uint64{0, 2, 4, 6, 8, 10, 11, 12}, + }, + { + name: "NOT/AND, different sets", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorNot, + Operands: []filters.Clause{ + { + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + equalOperand("set1_1"), + equalOperand("set2"), + equalOperand("set3"), + }, + }, + }, + }, + }, + expectedIds: []uint64{0, 1, 2, 3, 4, 5, 6, 8, 10, 11, 12}, + }, + { + name: "AND/NOT, different sets", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: 
filters.OperatorNot, + Operands: []filters.Clause{ + equalOperand("set1_1"), + }, + }, + { + Operator: filters.OperatorNot, + Operands: []filters.Clause{ + equalOperand("set2"), + }, + }, + { + Operator: filters.OperatorNot, + Operands: []filters.Clause{ + equalOperand("set3"), + }, + }, + }, + }, + }, + expectedIds: []uint64{0, 2, 4, 6, 12}, + }, + { + name: "NOT/OR, different sets", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorNot, + Operands: []filters.Clause{ + { + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + equalOperand("set1_1"), + equalOperand("set2"), + equalOperand("set3"), + }, + }, + }, + }, + }, + expectedIds: []uint64{0, 2, 4, 6, 12}, + }, + { + name: "OR/NOT, different sets", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorNot, + Operands: []filters.Clause{ + equalOperand("set1_1"), + }, + }, + { + Operator: filters.OperatorNot, + Operands: []filters.Clause{ + equalOperand("set2"), + }, + }, + { + Operator: filters.OperatorNot, + Operands: []filters.Clause{ + equalOperand("set3"), + }, + }, + }, + }, + }, + expectedIds: []uint64{0, 1, 2, 3, 4, 5, 6, 8, 10, 11, 12}, + }, + { + name: "AND, same sets", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + equalOperand("set1_1"), + equalOperand("set1_2"), + equalOperand("set1_3"), + }, + }, + }, + expectedIds: docIdSet1, + }, + { + name: "OR, same sets", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + equalOperand("set1_1"), + equalOperand("set1_2"), + equalOperand("set1_3"), + }, + }, + }, + expectedIds: docIdSet1, + }, + { + name: "AND, single child lvl 1", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + equalOperand("set2"), + }, + 
}, + }, + expectedIds: docIdSet2, + }, + { + name: "OR, single child lvl 1", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + equalOperand("set2"), + }, + }, + }, + expectedIds: docIdSet2, + }, + { + name: "AND/OR, single child lvl 2", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + equalOperand("set3"), + }, + }, + }, + }, + }, + expectedIds: docIdSet3, + }, + { + name: "OR/AND, single child lvl 2", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + equalOperand("set3"), + }, + }, + }, + }, + }, + expectedIds: docIdSet3, + }, + { + name: "AND, single children, different sets", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + equalOperand("set1_1"), + equalOperand("set2"), + equalOperand("set3"), + }, + }, + }, + }, + }, + expectedIds: []uint64{7, 9}, + }, + { + name: "OR, single children, different sets", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + equalOperand("set1_1"), + equalOperand("set2"), + equalOperand("set3"), + }, + }, + }, + }, + }, + expectedIds: []uint64{1, 3, 5, 7, 8, 9, 10, 11}, + }, + { + name: "AND, single children, same sets", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + { + Operator: filters.OperatorAnd, + Operands: []filters.Clause{ + equalOperand("set1_1"), + equalOperand("set1_2"), + equalOperand("set1_3"), + }, + }, + }, + }, + }, + expectedIds: 
docIdSet1, + }, + { + name: "OR, single children, same sets", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + { + Operator: filters.OperatorOr, + Operands: []filters.Clause{ + equalOperand("set1_1"), + equalOperand("set1_2"), + equalOperand("set1_3"), + }, + }, + }, + }, + }, + expectedIds: docIdSet1, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + allowList, err := searcher.DocIDs(context.Background(), tc.filter, additional.Properties{}, className) + assert.NoError(t, err) + assert.ElementsMatch(t, tc.expectedIds, allowList.Slice()) + allowList.Close() + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_ref_filter.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_ref_filter.go new file mode 100644 index 0000000000000000000000000000000000000000..832b7711c8060336b3570646531cd5cfb1e89ce3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_ref_filter.go @@ -0,0 +1,253 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "context" + "fmt" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" +) + +// a helper tool to extract the uuid beacon for any matching reference +type refFilterExtractor struct { + logger logrus.FieldLogger + classSearcher ClassSearcher + filter *filters.Clause + class *models.Class + property *models.Property + tenant string + limit int64 +} + +// ClassSearcher is anything that allows a root-level ClassSearch +type ClassSearcher interface { + Search(ctx context.Context, + params dto.GetParams) ([]search.Result, error) + GetQueryMaximumResults() int +} + +func newRefFilterExtractor(logger logrus.FieldLogger, classSearcher ClassSearcher, + filter *filters.Clause, class *models.Class, property *models.Property, tenant string, limit int64, +) *refFilterExtractor { + return &refFilterExtractor{ + logger: logger, + classSearcher: classSearcher, + filter: filter, + class: class, + property: property, + tenant: tenant, + limit: limit, + } +} + +func (r *refFilterExtractor) Do(ctx context.Context) (*propValuePair, error) { + if err := r.validate(); err != nil { + return nil, errors.Wrap(err, "invalid usage") + } + + ids, err := r.fetchIDs(ctx) + if err != nil { + return nil, errors.Wrap(err, "nested request to fetch matching IDs") + } + + if len(ids) > r.classSearcher.GetQueryMaximumResults() { + r.logger. + WithField("nested_reference_results", len(ids)). + WithField("query_maximum_results", r.classSearcher.GetQueryMaximumResults()). + Warnf("Number of found nested reference results exceeds configured QUERY_MAXIMUM_RESULTS. 
" + + "This may result in search performance degradation or even out of memory errors.") + } + + return r.resultsToPropValuePairs(ids) +} + +func (r *refFilterExtractor) paramsForNestedRequest() (dto.GetParams, error) { + return dto.GetParams{ + Filters: r.innerFilter(), + ClassName: r.filter.On.Child.Class.String(), + Pagination: &filters.Pagination{ + Offset: 0, + // Limit can be set to dynamically with QUERY_NESTED_CROSS_REFERENCE_LIMIT + Limit: int(r.limit), + }, + // set this to indicate that this is a sub-query, so we do not need + // to perform the same search limits cutoff check that we do with + // the root query + AdditionalProperties: additional.Properties{ReferenceQuery: true}, + Tenant: r.tenant, + IsRefOrigin: true, + }, nil +} + +func (r *refFilterExtractor) innerFilter() *filters.LocalFilter { + return &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: r.filter.Operator, + On: r.filter.On.Child, + Value: r.filter.Value, + }, + } +} + +type classUUIDPair struct { + class string + id strfmt.UUID +} + +func (r *refFilterExtractor) fetchIDs(ctx context.Context) ([]classUUIDPair, error) { + params, err := r.paramsForNestedRequest() + if err != nil { + return nil, err + } + + res, err := r.classSearcher.Search(ctx, params) + if err != nil { + return nil, err + } + + out := make([]classUUIDPair, len(res)) + for i, elem := range res { + out[i] = classUUIDPair{class: elem.ClassName, id: elem.ID} + } + + return out, nil +} + +func (r *refFilterExtractor) resultsToPropValuePairs(ids []classUUIDPair, +) (*propValuePair, error) { + switch len(ids) { + case 0: + return r.emptyPropValuePair(), nil + case 1: + return r.backwardCompatibleIDToPropValuePair(ids[0]) + default: + return r.chainedIDsToPropValuePair(ids) + } +} + +func (r *refFilterExtractor) emptyPropValuePair() *propValuePair { + return &propValuePair{ + prop: r.property.Name, + value: nil, + operator: filters.OperatorEqual, + hasFilterableIndex: HasFilterableIndex(r.property), + 
hasSearchableIndex: HasSearchableIndex(r.property), + hasRangeableIndex: HasRangeableIndex(r.property), + Class: r.class, + } +} + +// Because we still support the old beacon format that did not include the +// class yet, we cannot be sure about which format we will find in the +// database. Typically we would now build a filter, such as value==beacon. +// However, this beacon would either have the old format or the new format. +// Depending on which format was used during importing, one would match and the +// other wouldn't. +// +// As a workaround we can use an OR filter to allow both, such as +// ( value==beacon_old_format OR value==beacon_new_format ) +func (r *refFilterExtractor) backwardCompatibleIDToPropValuePair(p classUUIDPair) (*propValuePair, error) { + // this workaround is already implemented in the chained ID case, so we can + // simply pass it through: + return r.chainedIDsToPropValuePair([]classUUIDPair{p}) +} + +func (r *refFilterExtractor) idToPropValuePairWithValue(v []byte, + hasFilterableIndex, hasSearchableIndex bool, +) (*propValuePair, error) { + return &propValuePair{ + prop: r.property.Name, + value: v, + operator: filters.OperatorEqual, + hasFilterableIndex: hasFilterableIndex, + hasSearchableIndex: hasSearchableIndex, + Class: r.class, + }, nil +} + +// chain multiple alternatives using an OR operator +func (r *refFilterExtractor) chainedIDsToPropValuePair(ids []classUUIDPair) (*propValuePair, error) { + hasFilterableIndex := HasFilterableIndex(r.property) + hasSearchableIndex := HasSearchableIndex(r.property) + hasRangeableIndex := HasRangeableIndex(r.property) + + children, err := r.idsToPropValuePairs(ids, hasFilterableIndex, hasSearchableIndex) + if err != nil { + return nil, err + } + + return &propValuePair{ + prop: r.property.Name, + operator: filters.OperatorOr, + children: children, + hasFilterableIndex: hasFilterableIndex, + hasSearchableIndex: hasSearchableIndex, + hasRangeableIndex: hasRangeableIndex, + Class: r.class, + }, 
nil +} + +// Use both new format with class name in the beacon, as well as the old +// format. Since the results will be OR'ed anyway, this is safe todo. +// +// The additional lookups and OR-merge operations have a cost, therefore this +// backward-compatible logic should be removed, as soon as we can be sure that +// no more class-less beacons exist. Most likely this will be the case with the +// next breaking change, such as v2.0.0. +func (r *refFilterExtractor) idsToPropValuePairs(ids []classUUIDPair, + hasFilterableIndex, hasSearchableIndex bool, +) ([]*propValuePair, error) { + // This makes it safe to access the first element later on without further + // checks + if len(ids) == 0 { + return nil, nil + } + + out := make([]*propValuePair, len(ids)*2) + bb := crossref.NewBulkBuilderWithEstimates(len(ids)*2, ids[0].class, 1.25) + for i, id := range ids { + // future-proof way + pv, err := r.idToPropValuePairWithValue(bb.ClassAndID(id.class, id.id), hasFilterableIndex, hasSearchableIndex) + if err != nil { + return nil, err + } + + out[i*2] = pv + + // backward-compatible way + pv, err = r.idToPropValuePairWithValue(bb.LegacyIDOnly(id.id), hasFilterableIndex, hasSearchableIndex) + if err != nil { + return nil, err + } + + out[(i*2)+1] = pv + } + + return out, nil +} + +func (r *refFilterExtractor) validate() error { + if len(r.filter.On.Slice())%2 != 1 { + return fmt.Errorf("path must have an odd number of segments") + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_test.go new file mode 100644 index 0000000000000000000000000000000000000000..57c96ec73835e1180406c0abad0776664488d9bb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_test.go @@ -0,0 +1,133 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | 
|| __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/sroar" +) + +func TestDocBitmap(t *testing.T) { + t.Run("empty doc bitmap", func(t *testing.T) { + dbm := newDocBitmap() + + assert.Equal(t, 0, dbm.count()) + assert.Empty(t, dbm.IDs()) + }) + + t.Run("filled doc bitmap", func(t *testing.T) { + ids := []uint64{1, 2, 3, 4, 5} + + dbm := newDocBitmap() + dbm.docIDs.SetMany(ids) + + assert.Equal(t, 5, dbm.count()) + assert.ElementsMatch(t, ids, dbm.IDs()) + }) +} + +func TestDocBitmap_IDsWithLimit(t *testing.T) { + type test struct { + name string + limit int + input []uint64 + expectedOutput []uint64 + } + + tests := []test{ + { + name: "empty bitmap, positive limit", + input: []uint64{}, + limit: 7, + expectedOutput: []uint64{}, + }, + { + name: "limit matches bitmap cardinality", + input: []uint64{2, 4, 6, 8, 10}, + limit: 5, + expectedOutput: []uint64{2, 4, 6, 8, 10}, + }, + { + name: "limit less than cardinality", + input: []uint64{2, 4, 6, 8, 10}, + limit: 3, + expectedOutput: []uint64{2, 4, 6}, + }, + { + name: "limit higher than cardinality", + input: []uint64{2, 4, 6, 8, 10}, + limit: 10, + expectedOutput: []uint64{2, 4, 6, 8, 10}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + dbm := docBitmap{ + docIDs: sroar.NewBitmap(), + } + + dbm.docIDs.SetMany(test.input) + + res := dbm.IDsWithLimit(test.limit) + assert.Equal(t, test.expectedOutput, res) + }) + } +} + +func TestDocIDsIterator_Slice(t *testing.T) { + t.Run("iterator empty slice", func(t *testing.T) { + it := newSliceDocIDsIterator([]uint64{}) + + id1, ok1 := it.Next() + + assert.Equal(t, 0, it.Len()) + assert.False(t, ok1) + assert.Equal(t, uint64(0), id1) + }) + + t.Run("iterator step by step", func(t *testing.T) { + it := 
newSliceDocIDsIterator([]uint64{3, 1, 0, 2}) + + id1, ok1 := it.Next() + id2, ok2 := it.Next() + id3, ok3 := it.Next() + id4, ok4 := it.Next() + id5, ok5 := it.Next() + + assert.Equal(t, 4, it.Len()) + assert.True(t, ok1) + assert.Equal(t, uint64(3), id1) + assert.True(t, ok2) + assert.Equal(t, uint64(1), id2) + assert.True(t, ok3) + assert.Equal(t, uint64(0), id3) + assert.True(t, ok4) + assert.Equal(t, uint64(2), id4) + assert.False(t, ok5) + assert.Equal(t, uint64(0), id5) + }) + + t.Run("iterator in loop", func(t *testing.T) { + it := newSliceDocIDsIterator([]uint64{3, 1, 0, 2}) + ids := []uint64{} + + for id, ok := it.Next(); ok; id, ok = it.Next() { + ids = append(ids, id) + } + + assert.Equal(t, 4, it.Len()) + assert.Equal(t, []uint64{3, 1, 0, 2}, ids) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_value_extractors.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_value_extractors.go new file mode 100644 index 0000000000000000000000000000000000000000..daeac780e88c961c21e93181b3217e750ad513f6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted/searcher_value_extractors.go @@ -0,0 +1,91 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package inverted + +import ( + "bytes" + "encoding/binary" + "fmt" + "time" + + "github.com/pkg/errors" + ent "github.com/weaviate/weaviate/entities/inverted" +) + +func (s *Searcher) extractNumberValue(in interface{}) ([]byte, error) { + value, ok := in.(float64) + if !ok { + return nil, fmt.Errorf("expected value to be float64, got %T", in) + } + + return ent.LexicographicallySortableFloat64(value) +} + +// assumes an untyped int and stores as string-formatted int64 +func (s *Searcher) extractIntValue(in interface{}) ([]byte, error) { + value, ok := in.(int) + if !ok { + return nil, fmt.Errorf("expected value to be int, got %T", in) + } + + return ent.LexicographicallySortableInt64(int64(value)) +} + +// assumes an untyped int and stores as string-formatted int64 +func (s *Searcher) extractIntCountValue(in interface{}) ([]byte, error) { + value, ok := in.(int) + if !ok { + return nil, fmt.Errorf("expected value to be int, got %T", in) + } + + return ent.LexicographicallySortableUint64(uint64(value)) +} + +// assumes an untyped bool and stores as bool64 +func (s *Searcher) extractBoolValue(in interface{}) ([]byte, error) { + value, ok := in.(bool) + if !ok { + return nil, fmt.Errorf("expected value to be bool, got %T", in) + } + + buf := bytes.NewBuffer(nil) + if err := binary.Write(buf, binary.LittleEndian, value); err != nil { + return nil, errors.Wrap(err, "encode bool as binary") + } + + return buf.Bytes(), nil +} + +// assumes a time.Time date and stores as string-formatted int64, if it +// encounters a string it tries to parse it as a time.Time +func (s *Searcher) extractDateValue(in interface{}) ([]byte, error) { + var asInt64 int64 + + switch t := in.(type) { + case string: + parsed, err := time.Parse(time.RFC3339, t) + if err != nil { + return nil, errors.Wrap(err, "trying parse time as RFC3339 string") + } + + asInt64 = parsed.UnixNano() + + case time.Time: + asInt64 = t.UnixNano() + + default: + return nil, 
fmt.Errorf("expected value to be time.Time (or parseable string)"+ + ", got %T", in) + } + + return ent.LexicographicallySortableInt64(asInt64) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_index_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_index_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..927417e9abc4160f5c6c536061bd83b4201b5032 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_index_integration_test.go @@ -0,0 +1,1433 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "testing" + "time" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/memwatch" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func TestIndexByTimestampsNullStatePropLength_AddClass(t *testing.T) { + dirName := t.TempDir() + vFalse := false + vTrue := true + + class := &models.Class{ + Class: "TestClass", + 
VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: &models.InvertedIndexConfig{ + CleanupIntervalSeconds: 60, + Stopwords: &models.StopwordConfig{ + Preset: "none", + }, + IndexTimestamps: true, + IndexNullState: true, + IndexPropertyLength: true, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + Properties: []*models.Property{ + { + Name: "initialWithIINil", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "initialWithIITrue", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + }, + { + Name: "initialWithoutII", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + }, + }, + } + shardState := singleShardState() + logger := logrus.New() + schemaGetter := &fakeSchemaGetter{shardState: shardState, schema: schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + }} + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: []*models.Class{class}}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, 
mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + + migrator := NewMigrator(repo, logger, "node1") + + require.Nil(t, migrator.AddProperty(context.Background(), class.Class, &models.Property{ + Name: "updateWithIINil", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + })) + require.Nil(t, migrator.AddProperty(context.Background(), class.Class, &models.Property{ + Name: "updateWithIITrue", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + IndexFilterable: &vTrue, + IndexSearchable: &vTrue, + })) + require.Nil(t, migrator.AddProperty(context.Background(), class.Class, &models.Property{ + Name: "updateWithoutII", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + IndexFilterable: &vFalse, + IndexSearchable: &vFalse, + })) + + t.Run("check for additional buckets", func(t *testing.T) { + for _, idx := range migrator.db.indices { + idx.ForEachShard(func(_ string, shd ShardLike) error { + createBucket := shd.Store().Bucket("property__creationTimeUnix") + assert.NotNil(t, createBucket) + + updateBucket := shd.Store().Bucket("property__lastUpdateTimeUnix") + assert.NotNil(t, updateBucket) + + cases := 
[]struct { + prop string + compareFunc func(t assert.TestingT, object interface{}, msgAndArgs ...interface{}) bool + }{ + {prop: "initialWithIINil", compareFunc: assert.NotNil}, + {prop: "initialWithIITrue", compareFunc: assert.NotNil}, + {prop: "initialWithoutII", compareFunc: assert.Nil}, + {prop: "updateWithIINil", compareFunc: assert.NotNil}, + {prop: "updateWithIITrue", compareFunc: assert.NotNil}, + {prop: "updateWithoutII", compareFunc: assert.Nil}, + } + for _, tt := range cases { + tt.compareFunc(t, shd.Store().Bucket("property_"+tt.prop+filters.InternalNullIndex)) + tt.compareFunc(t, shd.Store().Bucket("property_"+tt.prop+filters.InternalPropertyLength)) + } + return nil + }) + } + }) + + t.Run("Add Objects", func(t *testing.T) { + testID1 := strfmt.UUID("a0b55b05-bc5b-4cc9-b646-1452d1390a62") + objWithProperty := &models.Object{ + ID: testID1, + Class: "TestClass", + Properties: map[string]interface{}{"initialWithIINil": "0", "initialWithIITrue": "0", "initialWithoutII": "1", "updateWithIINil": "2", "updateWithIITrue": "2", "updateWithoutII": "3"}, + } + vec := []float32{1, 2, 3} + require.Nil(t, repo.PutObject(context.Background(), objWithProperty, vec, nil, nil, nil, 0)) + + testID2 := strfmt.UUID("a0b55b05-bc5b-4cc9-b646-1452d1390a63") + objWithoutProperty := &models.Object{ + ID: testID2, + Class: "TestClass", + Properties: map[string]interface{}{}, + } + require.Nil(t, repo.PutObject(context.Background(), objWithoutProperty, vec, nil, nil, nil, 0)) + + testID3 := strfmt.UUID("a0b55b05-bc5b-4cc9-b646-1452d1390a64") + objWithNilProperty := &models.Object{ + ID: testID3, + Class: "TestClass", + Properties: map[string]interface{}{"initialWithIINil": nil, "initialWithIITrue": nil, "initialWithoutII": nil, "updateWithIINil": nil, "updateWithIITrue": nil, "updateWithoutII": nil}, + } + require.Nil(t, repo.PutObject(context.Background(), objWithNilProperty, vec, nil, nil, nil, 0)) + }) + + t.Run("delete class", func(t *testing.T) { + require.Nil(t, 
migrator.DropClass(context.Background(), class.Class, false)) + for _, idx := range migrator.db.indices { + idx.ForEachShard(func(name string, shd ShardLike) error { + require.Nil(t, shd.Store().Bucket("property__creationTimeUnix")) + require.Nil(t, shd.Store().Bucket("property_name"+filters.InternalNullIndex)) + require.Nil(t, shd.Store().Bucket("property_name"+filters.InternalPropertyLength)) + return nil + }) + } + }) +} + +func TestIndexNullState_GetClass(t *testing.T) { + dirName := t.TempDir() + + testID1 := strfmt.UUID("a0b55b05-bc5b-4cc9-b646-1452d1390a62") + testID2 := strfmt.UUID("65be32cc-bb74-49c7-833e-afb14f957eae") + refID1 := strfmt.UUID("f2e42a9f-e0b5-46bd-8a9c-e70b6330622c") + refID2 := strfmt.UUID("92d5920c-1c20-49da-9cdc-b765813e175b") + + var repo *DB + var schemaGetter *fakeSchemaGetter + + t.Run("init repo", func(t *testing.T) { + shardState := singleShardState() + schemaGetter = &fakeSchemaGetter{ + shardState: shardState, + schema: schema.Schema{ + Objects: &models.Schema{}, + }, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{}).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, 
mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + var err error + repo, err = New(logrus.New(), "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + }) + + defer repo.Shutdown(testCtx()) + + t.Run("add classes", func(t *testing.T) { + class := &models.Class{ + Class: "TestClass", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: &models.InvertedIndexConfig{ + IndexNullState: true, + IndexTimestamps: true, + IndexPropertyLength: true, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationField, + }, + }, + } + + refClass := &models.Class{ + Class: "RefClass", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: &models.InvertedIndexConfig{ + IndexTimestamps: true, + IndexPropertyLength: true, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationField, + }, + { + Name: "toTest", + DataType: []string{"TestClass"}, + }, + }, + } + + migrator := NewMigrator(repo, repo.logger, "node1") + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + err = migrator.AddClass(context.Background(), refClass) + require.Nil(t, err) + 
schemaGetter.schema.Objects.Classes = append(schemaGetter.schema.Objects.Classes, class, refClass) + }) + + t.Run("insert test objects", func(t *testing.T) { + vec := []float32{1, 2, 3} + for _, obj := range []*models.Object{ + { + ID: testID1, + Class: "TestClass", + Properties: map[string]interface{}{ + "name": "object1", + }, + }, + { + ID: testID2, + Class: "TestClass", + Properties: map[string]interface{}{ + "name": nil, + }, + }, + { + ID: refID1, + Class: "RefClass", + Properties: map[string]interface{}{ + "name": "ref1", + "toTest": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/TestClass/%s", testID1)), + }, + }, + }, + }, + { + ID: refID2, + Class: "RefClass", + Properties: map[string]interface{}{ + "name": "ref2", + "toTest": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/TestClass/%s", testID2)), + }, + }, + }, + }, + } { + err := repo.PutObject(context.Background(), obj, vec, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + + t.Run("check buckets exist", func(t *testing.T) { + index := repo.indices["testclass"] + n := 0 + index.ForEachShard(func(_ string, shard ShardLike) error { + bucketNull := shard.Store().Bucket(helpers.BucketFromPropNameNullLSM("name")) + require.NotNil(t, bucketNull) + n++ + return nil + }) + require.Equal(t, 1, n) + }) + + type testCase struct { + name string + filter *filters.LocalFilter + expectedIds []strfmt.UUID + } + + t.Run("get object with null filters", func(t *testing.T) { + testCases := []testCase{ + { + name: "is null", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorIsNull, + On: &filters.Path{ + Class: "TestClass", + Property: "name", + }, + Value: &filters.Value{ + Value: false, + Type: schema.DataTypeBoolean, + }, + }, + }, + expectedIds: []strfmt.UUID{testID1}, + }, + { + name: "is not null", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: 
filters.OperatorIsNull, + On: &filters.Path{ + Class: "TestClass", + Property: "name", + }, + Value: &filters.Value{ + Value: true, + Type: schema.DataTypeBoolean, + }, + }, + }, + expectedIds: []strfmt.UUID{testID2}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "TestClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: tc.filter, + }) + require.Nil(t, err) + require.Len(t, res, len(tc.expectedIds)) + + ids := make([]strfmt.UUID, len(res)) + for i := range res { + ids[i] = res[i].ID + } + assert.ElementsMatch(t, ids, tc.expectedIds) + }) + } + }) + + t.Run("get referencing object with null filters", func(t *testing.T) { + testCases := []testCase{ + { + name: "is null", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorIsNull, + On: &filters.Path{ + Class: "RefClass", + Property: "toTest", + Child: &filters.Path{ + Class: "TestClass", + Property: "name", + }, + }, + Value: &filters.Value{ + Value: false, + Type: schema.DataTypeBoolean, + }, + }, + }, + expectedIds: []strfmt.UUID{refID1}, + }, + { + name: "is not null", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorIsNull, + On: &filters.Path{ + Class: "RefClass", + Property: "toTest", + Child: &filters.Path{ + Class: "TestClass", + Property: "name", + }, + }, + Value: &filters.Value{ + Value: true, + Type: schema.DataTypeBoolean, + }, + }, + }, + expectedIds: []strfmt.UUID{refID2}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "RefClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: tc.filter, + }) + require.Nil(t, err) + require.Len(t, res, len(tc.expectedIds)) + + ids := make([]strfmt.UUID, len(res)) + for i := range res { + ids[i] = res[i].ID + } + assert.ElementsMatch(t, ids, tc.expectedIds) + }) 
+ } + }) +} + +func TestIndexPropLength_GetClass(t *testing.T) { + dirName := t.TempDir() + + testID1 := strfmt.UUID("a0b55b05-bc5b-4cc9-b646-1452d1390a62") + testID2 := strfmt.UUID("65be32cc-bb74-49c7-833e-afb14f957eae") + refID1 := strfmt.UUID("f2e42a9f-e0b5-46bd-8a9c-e70b6330622c") + refID2 := strfmt.UUID("92d5920c-1c20-49da-9cdc-b765813e175b") + + var repo *DB + var schemaGetter *fakeSchemaGetter + + t.Run("init repo", func(t *testing.T) { + shardState := singleShardState() + schemaGetter = &fakeSchemaGetter{ + shardState: shardState, + schema: schema.Schema{ + Objects: &models.Schema{}, + }, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + var err error + repo, err = New(logrus.New(), "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, 
&fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + }) + + defer repo.Shutdown(testCtx()) + + t.Run("add classes", func(t *testing.T) { + class := &models.Class{ + Class: "TestClass", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: &models.InvertedIndexConfig{ + IndexPropertyLength: true, + IndexTimestamps: true, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationField, + }, + { + Name: "int_array", + DataType: schema.DataTypeIntArray.PropString(), + }, + }, + } + + refClass := &models.Class{ + Class: "RefClass", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: &models.InvertedIndexConfig{ + IndexTimestamps: true, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationField, + }, + { + Name: "toTest", + DataType: []string{"TestClass"}, + }, + }, + } + + migrator := NewMigrator(repo, repo.logger, "node1") + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + err = migrator.AddClass(context.Background(), refClass) + require.Nil(t, err) + schemaGetter.schema.Objects.Classes = append(schemaGetter.schema.Objects.Classes, class, refClass) + }) + + t.Run("insert test objects", func(t *testing.T) { + vec := []float32{1, 2, 3} + for _, obj := range []*models.Object{ + { + ID: testID1, + Class: "TestClass", + Properties: map[string]interface{}{ + "name": "short", + "int_array": []float64{}, + }, + }, + { + ID: testID2, + Class: "TestClass", + Properties: map[string]interface{}{ + "name": 
"muchLongerName", + "int_array": []float64{1, 2, 3}, + }, + }, + { + ID: refID1, + Class: "RefClass", + Properties: map[string]interface{}{ + "name": "ref1", + "toTest": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/TestClass/%s", testID1)), + }, + }, + }, + }, + { + ID: refID2, + Class: "RefClass", + Properties: map[string]interface{}{ + "name": "ref2", + "toTest": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/TestClass/%s", testID2)), + }, + }, + }, + }, + } { + err := repo.PutObject(context.Background(), obj, vec, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + + t.Run("check buckets exist", func(t *testing.T) { + index := repo.indices["testclass"] + n := 0 + index.ForEachShard(func(_ string, shard ShardLike) error { + bucketPropLengthName := shard.Store().Bucket(helpers.BucketFromPropNameLengthLSM("name")) + require.NotNil(t, bucketPropLengthName) + bucketPropLengthIntArray := shard.Store().Bucket(helpers.BucketFromPropNameLengthLSM("int_array")) + require.NotNil(t, bucketPropLengthIntArray) + n++ + return nil + }) + require.Equal(t, 1, n) + }) + + type testCase struct { + name string + filter *filters.LocalFilter + expectedIds []strfmt.UUID + } + + t.Run("get object with prop length filters", func(t *testing.T) { + testCases := []testCase{ + { + name: "name length = 5", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "TestClass", + Property: "len(name)", + }, + Value: &filters.Value{ + Value: 5, + Type: schema.DataTypeInt, + }, + }, + }, + expectedIds: []strfmt.UUID{testID1}, + }, + { + name: "name length >= 6", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorGreaterThanEqual, + On: &filters.Path{ + Class: "TestClass", + Property: "len(name)", + }, + Value: &filters.Value{ + Value: 6, + Type: schema.DataTypeInt, + }, + }, + }, + expectedIds: 
[]strfmt.UUID{testID2}, + }, + { + name: "array length = 0", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "TestClass", + Property: "len(int_array)", + }, + Value: &filters.Value{ + Value: 0, + Type: schema.DataTypeInt, + }, + }, + }, + expectedIds: []strfmt.UUID{testID1}, + }, + { + name: "array length < 4", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorLessThan, + On: &filters.Path{ + Class: "TestClass", + Property: "len(int_array)", + }, + Value: &filters.Value{ + Value: 4, + Type: schema.DataTypeInt, + }, + }, + }, + expectedIds: []strfmt.UUID{testID1, testID2}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "TestClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: tc.filter, + }) + require.Nil(t, err) + require.Len(t, res, len(tc.expectedIds)) + + ids := make([]strfmt.UUID, len(res)) + for i := range res { + ids[i] = res[i].ID + } + assert.ElementsMatch(t, ids, tc.expectedIds) + }) + } + }) + + t.Run("get referencing object with prop length filters", func(t *testing.T) { + testCases := []testCase{ + { + name: "name length = 5", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "RefClass", + Property: "toTest", + Child: &filters.Path{ + Class: "TestClass", + Property: "len(name)", + }, + }, + Value: &filters.Value{ + Value: 5, + Type: schema.DataTypeInt, + }, + }, + }, + expectedIds: []strfmt.UUID{refID1}, + }, + { + name: "name length >= 6", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorGreaterThanEqual, + On: &filters.Path{ + Class: "RefClass", + Property: "toTest", + Child: &filters.Path{ + Class: "TestClass", + Property: "len(name)", + }, + }, + Value: &filters.Value{ + Value: 6, + Type: schema.DataTypeInt, + }, + 
}, + }, + expectedIds: []strfmt.UUID{refID2}, + }, + { + name: "array length = 0", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "RefClass", + Property: "toTest", + Child: &filters.Path{ + Class: "TestClass", + Property: "len(int_array)", + }, + }, + Value: &filters.Value{ + Value: 0, + Type: schema.DataTypeInt, + }, + }, + }, + expectedIds: []strfmt.UUID{refID1}, + }, + { + name: "array length < 4", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorLessThan, + On: &filters.Path{ + Class: "RefClass", + Property: "toTest", + Child: &filters.Path{ + Class: "TestClass", + Property: "len(int_array)", + }, + }, + Value: &filters.Value{ + Value: 4, + Type: schema.DataTypeInt, + }, + }, + }, + expectedIds: []strfmt.UUID{refID1, refID2}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "RefClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: tc.filter, + }) + require.Nil(t, err) + require.Len(t, res, len(tc.expectedIds)) + + ids := make([]strfmt.UUID, len(res)) + for i := range res { + ids[i] = res[i].ID + } + assert.ElementsMatch(t, ids, tc.expectedIds) + }) + } + }) +} + +func TestIndexByTimestamps_GetClass(t *testing.T) { + dirName := t.TempDir() + + time1 := time.Now() + time2 := time1.Add(-time.Hour) + timestamp1 := time1.UnixMilli() + timestamp2 := time2.UnixMilli() + + testID1 := strfmt.UUID("a0b55b05-bc5b-4cc9-b646-1452d1390a62") + testID2 := strfmt.UUID("65be32cc-bb74-49c7-833e-afb14f957eae") + refID1 := strfmt.UUID("f2e42a9f-e0b5-46bd-8a9c-e70b6330622c") + refID2 := strfmt.UUID("92d5920c-1c20-49da-9cdc-b765813e175b") + + var repo *DB + var schemaGetter *fakeSchemaGetter + + t.Run("init repo", func(t *testing.T) { + shardState := singleShardState() + schemaGetter = &fakeSchemaGetter{ + shardState: shardState, + schema: schema.Schema{ + 
Objects: &models.Schema{}, + }, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + var err error + repo, err = New(logrus.New(), "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + }) + + defer repo.Shutdown(testCtx()) + + t.Run("add classes", func(t *testing.T) { + class := &models.Class{ + Class: "TestClass", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: &models.InvertedIndexConfig{ + IndexTimestamps: true, + IndexPropertyLength: true, + UsingBlockMaxWAND: 
config.DefaultUsingBlockMaxWAND, + }, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationField, + }, + }, + } + + refClass := &models.Class{ + Class: "RefClass", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: &models.InvertedIndexConfig{ + IndexTimestamps: true, + IndexPropertyLength: true, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationField, + }, + { + Name: "toTest", + DataType: []string{"TestClass"}, + }, + }, + } + + migrator := NewMigrator(repo, repo.logger, "node1") + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + err = migrator.AddClass(context.Background(), refClass) + require.Nil(t, err) + schemaGetter.schema.Objects.Classes = append(schemaGetter.schema.Objects.Classes, class, refClass) + }) + + t.Run("insert test objects", func(t *testing.T) { + vec := []float32{1, 2, 3} + for _, obj := range []*models.Object{ + { + ID: testID1, + Class: "TestClass", + CreationTimeUnix: timestamp1, + LastUpdateTimeUnix: timestamp1, + Properties: map[string]interface{}{ + "name": "object1", + }, + }, + { + ID: testID2, + Class: "TestClass", + CreationTimeUnix: timestamp2, + LastUpdateTimeUnix: timestamp2, + Properties: map[string]interface{}{ + "name": "object2", + }, + }, + { + ID: refID1, + Class: "RefClass", + Properties: map[string]interface{}{ + "name": "ref1", + "toTest": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/TestClass/%s", testID1)), + }, + }, + }, + }, + { + ID: refID2, + Class: "RefClass", + Properties: map[string]interface{}{ + "name": "ref2", + "toTest": models.MultipleRef{ + &models.SingleRef{ + Beacon: strfmt.URI(fmt.Sprintf("weaviate://localhost/TestClass/%s", testID2)), + }, + }, + }, + }, + } { + 
err := repo.PutObject(context.Background(), obj, vec, nil, nil, nil, 0) + require.Nil(t, err) + } + }) + + t.Run("check buckets exist", func(t *testing.T) { + index := repo.indices["testclass"] + n := 0 + index.ForEachShard(func(_ string, shard ShardLike) error { + bucketCreated := shard.Store().Bucket("property_" + filters.InternalPropCreationTimeUnix) + require.NotNil(t, bucketCreated) + bucketUpdated := shard.Store().Bucket("property_" + filters.InternalPropLastUpdateTimeUnix) + require.NotNil(t, bucketUpdated) + n++ + return nil + }) + require.Equal(t, 1, n) + }) + + type testCase struct { + name string + filter *filters.LocalFilter + expectedIds []strfmt.UUID + } + + t.Run("get object with timestamp filters", func(t *testing.T) { + testCases := []testCase{ + { + name: "by creation timestamp 1", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "TestClass", + Property: "_creationTimeUnix", + }, + Value: &filters.Value{ + Value: fmt.Sprint(timestamp1), + Type: schema.DataTypeText, + }, + }, + }, + expectedIds: []strfmt.UUID{testID1}, + }, + { + name: "by creation timestamp 2", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "TestClass", + Property: "_creationTimeUnix", + }, + Value: &filters.Value{ + Value: fmt.Sprint(timestamp2), + Type: schema.DataTypeText, + }, + }, + }, + expectedIds: []strfmt.UUID{testID2}, + }, + { + name: "by creation date 1", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + // since RFC3339 is limited to seconds, + // >= operator is used to match object with timestamp containing milliseconds + Operator: filters.OperatorGreaterThanEqual, + On: &filters.Path{ + Class: "TestClass", + Property: "_creationTimeUnix", + }, + Value: &filters.Value{ + Value: time1.Format(time.RFC3339), + Type: schema.DataTypeDate, + }, + }, + }, + expectedIds: []strfmt.UUID{testID1}, + }, + { + name: "by 
creation date 2", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + // since RFC3339 is limited to seconds, + // >= operator is used to match object with timestamp containing milliseconds + Operator: filters.OperatorGreaterThanEqual, + On: &filters.Path{ + Class: "TestClass", + Property: "_creationTimeUnix", + }, + Value: &filters.Value{ + Value: time2.Format(time.RFC3339), + Type: schema.DataTypeDate, + }, + }, + }, + expectedIds: []strfmt.UUID{testID1, testID2}, + }, + + { + name: "by updated timestamp 1", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "TestClass", + Property: "_lastUpdateTimeUnix", + }, + Value: &filters.Value{ + Value: fmt.Sprint(timestamp1), + Type: schema.DataTypeText, + }, + }, + }, + expectedIds: []strfmt.UUID{testID1}, + }, + { + name: "by updated timestamp 2", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "TestClass", + Property: "_lastUpdateTimeUnix", + }, + Value: &filters.Value{ + Value: fmt.Sprint(timestamp2), + Type: schema.DataTypeText, + }, + }, + }, + expectedIds: []strfmt.UUID{testID2}, + }, + { + name: "by updated date 1", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + // since RFC3339 is limited to seconds, + // >= operator is used to match object with timestamp containing milliseconds + Operator: filters.OperatorGreaterThanEqual, + On: &filters.Path{ + Class: "TestClass", + Property: "_lastUpdateTimeUnix", + }, + Value: &filters.Value{ + Value: time1.Format(time.RFC3339), + Type: schema.DataTypeDate, + }, + }, + }, + expectedIds: []strfmt.UUID{testID1}, + }, + { + name: "by updated date 2", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + // since RFC3339 is limited to seconds, + // >= operator is used to match object with timestamp containing milliseconds + Operator: filters.OperatorGreaterThanEqual, + On: &filters.Path{ + Class: "TestClass", + 
Property: "_lastUpdateTimeUnix", + }, + Value: &filters.Value{ + Value: time2.Format(time.RFC3339), + Type: schema.DataTypeDate, + }, + }, + }, + expectedIds: []strfmt.UUID{testID1, testID2}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "TestClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: tc.filter, + }) + require.Nil(t, err) + require.Len(t, res, len(tc.expectedIds)) + + ids := make([]strfmt.UUID, len(res)) + for i := range res { + ids[i] = res[i].ID + } + assert.ElementsMatch(t, ids, tc.expectedIds) + }) + } + }) + + t.Run("get referencing object with timestamp filters", func(t *testing.T) { + testCases := []testCase{ + { + name: "by creation timestamp 1", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "RefClass", + Property: "toTest", + Child: &filters.Path{ + Class: "TestClass", + Property: "_creationTimeUnix", + }, + }, + Value: &filters.Value{ + Value: fmt.Sprint(timestamp1), + Type: schema.DataTypeText, + }, + }, + }, + expectedIds: []strfmt.UUID{refID1}, + }, + { + name: "by creation timestamp 2", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "RefClass", + Property: "toTest", + Child: &filters.Path{ + Class: "TestClass", + Property: "_creationTimeUnix", + }, + }, + Value: &filters.Value{ + Value: fmt.Sprint(timestamp2), + Type: schema.DataTypeText, + }, + }, + }, + expectedIds: []strfmt.UUID{refID2}, + }, + { + name: "by creation date 1", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + // since RFC3339 is limited to seconds, + // >= operator is used to match object with timestamp containing milliseconds + Operator: filters.OperatorGreaterThanEqual, + On: &filters.Path{ + Class: "RefClass", + Property: "toTest", + Child: &filters.Path{ + Class: "TestClass", + Property: 
"_creationTimeUnix", + }, + }, + Value: &filters.Value{ + Value: time1.Format(time.RFC3339), + Type: schema.DataTypeDate, + }, + }, + }, + expectedIds: []strfmt.UUID{refID1}, + }, + { + name: "by creation date 2", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + // since RFC3339 is limited to seconds, + // >= operator is used to match object with timestamp containing milliseconds + Operator: filters.OperatorGreaterThanEqual, + On: &filters.Path{ + Class: "RefClass", + Property: "toTest", + Child: &filters.Path{ + Class: "TestClass", + Property: "_creationTimeUnix", + }, + }, + Value: &filters.Value{ + Value: time2.Format(time.RFC3339), + Type: schema.DataTypeDate, + }, + }, + }, + expectedIds: []strfmt.UUID{refID1, refID2}, + }, + + { + name: "by updated timestamp 1", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "RefClass", + Property: "toTest", + Child: &filters.Path{ + Class: "TestClass", + Property: "_lastUpdateTimeUnix", + }, + }, + Value: &filters.Value{ + Value: fmt.Sprint(timestamp1), + Type: schema.DataTypeText, + }, + }, + }, + expectedIds: []strfmt.UUID{refID1}, + }, + { + name: "by updated timestamp 2", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: "RefClass", + Property: "toTest", + Child: &filters.Path{ + Class: "TestClass", + Property: "_lastUpdateTimeUnix", + }, + }, + Value: &filters.Value{ + Value: fmt.Sprint(timestamp2), + Type: schema.DataTypeText, + }, + }, + }, + expectedIds: []strfmt.UUID{refID2}, + }, + { + name: "by updated date 1", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + // since RFC3339 is limited to seconds, + // >= operator is used to match object with timestamp containing milliseconds + Operator: filters.OperatorGreaterThanEqual, + On: &filters.Path{ + Class: "RefClass", + Property: "toTest", + Child: &filters.Path{ + Class: "TestClass", + Property: 
"_lastUpdateTimeUnix", + }, + }, + Value: &filters.Value{ + Value: time1.Format(time.RFC3339), + Type: schema.DataTypeDate, + }, + }, + }, + expectedIds: []strfmt.UUID{refID1}, + }, + { + name: "by updated date 2", + filter: &filters.LocalFilter{ + Root: &filters.Clause{ + // since RFC3339 is limited to seconds, + // >= operator is used to match object with timestamp containing milliseconds + Operator: filters.OperatorGreaterThanEqual, + On: &filters.Path{ + Class: "RefClass", + Property: "toTest", + Child: &filters.Path{ + Class: "TestClass", + Property: "_lastUpdateTimeUnix", + }, + }, + Value: &filters.Value{ + Value: time2.Format(time.RFC3339), + Type: schema.DataTypeDate, + }, + }, + }, + expectedIds: []strfmt.UUID{refID1, refID2}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: "RefClass", + Pagination: &filters.Pagination{Limit: 10}, + Filters: tc.filter, + }) + require.Nil(t, err) + require.Len(t, res, len(tc.expectedIds)) + + ids := make([]strfmt.UUID, len(res)) + for i := range res { + ids[i] = res[i].ID + } + assert.ElementsMatch(t, ids, tc.expectedIds) + }) + } + }) +} + +// Cannot filter for property length without enabling in the InvertedIndexConfig +func TestFilterPropertyLengthError(t *testing.T) { + class := createClassWithEverything(false, false) + migrator, repo, schemaGetter := createRepo(t) + defer repo.Shutdown(context.Background()) + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + LengthFilter := &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + On: &filters.Path{ + Class: schema.ClassName(carClass.Class), + Property: "len(" + schema.PropertyName(class.Properties[0].Name) + ")", + }, + Value: &filters.Value{ + 
Value: 1, + Type: dtInt, + }, + }, + } + + params := dto.GetParams{ + ClassName: class.Class, + Pagination: &filters.Pagination{Limit: 5}, + Filters: LengthFilter, + } + _, err = repo.Search(context.Background(), params) + require.NotNil(t, err) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_migrator_filter_to_search.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_migrator_filter_to_search.go new file mode 100644 index 0000000000000000000000000000000000000000..329b33031324d24e72a78e6b41aa018455e985da --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_migrator_filter_to_search.go @@ -0,0 +1,406 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "os" + "path" + "sync" + + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/storagestate" + "github.com/weaviate/weaviate/usecases/schema" +) + +type filterableToSearchableMigrator struct { + logger logrus.FieldLogger + files *filterableToSearchableMigrationFiles + schemaGetter schema.SchemaGetter + indexes map[string]*Index +} + +func newFilterableToSearchableMigrator(migrator *Migrator) *filterableToSearchableMigrator { + return &filterableToSearchableMigrator{ + logger: migrator.logger, + files: newFilterableToSearchableMigrationFiles(migrator.db.config.RootPath), + schemaGetter: 
migrator.db.schemaGetter, + indexes: migrator.db.indices, + } +} + +func (m *filterableToSearchableMigrator) migrate(ctx context.Context) error { + // only properties with both Filterable and Searchable indexing enabled + // filterable bucket has map strategy (when it should have roaring set strategy) + // searchable bucket does not exist (in fact weaviate will create empty one with map strategy) + + // if flag exists, no class/property needs fixing + if m.files.existsMigrationSkipFlag() { + m.log().Debug("migration skip flag set, skipping migration") + return nil + } + + migrationState, err := m.files.loadMigrationState() + if err != nil { + m.log().WithError(err).Error("loading migrated state") + return errors.Wrap(err, "loading migrated state") + } + + migrationStateUpdated := false + updateLock := new(sync.Mutex) + + m.log().Debug("starting migration") + + eg := enterrors.NewErrorGroupWrapper(m.logger) + eg.SetLimit(_NUMCPU * 2) + for _, index := range m.indexes { + index := index + + eg.Go(func() error { + migratedProps, err := m.migrateClass(ctx, index, m.schemaGetter.ReadOnlyClass) + if err != nil { + m.logIndex(index).WithError(err).Error("failed migrating class") + return errors.Wrap(err, "failed migrating class") + } + if len(migratedProps) == 0 { + return nil + } + + updateLock.Lock() + defer updateLock.Unlock() + + migrationState.MissingFilterableClass2Props[index.Config.ClassName.String()] = migratedProps + migrationStateUpdated = true + return nil + }, index.ID()) + } + + err = eg.Wait() + if err != nil { + m.log().WithError(err).Error("failed migrating classes") + } + + // save state regardless of previous error + if migrationStateUpdated { + m.log().Debug("saving migration state") + if err := m.files.saveMigrationState(migrationState); err != nil { + m.log().WithError(err).Error("failed saving migration state") + return errors.Wrap(err, "failed saving migration state") + } + } + + if err != nil { + return errors.Wrap(err, "failed migrating classes") 
+ } + + if err := m.files.createMigrationSkipFlag(); err != nil { + m.log().WithError(err).Error("failed creating migration skip flag") + return errors.Wrap(err, "failed creating migration skip flag") + } + + m.log().Debug("finished migration") + return nil +} + +func (m *filterableToSearchableMigrator) switchShardsToFallbackMode(ctx context.Context) error { + m.log().Debug("starting switching fallback mode") + + migrationState, err := m.files.loadMigrationState() + if err != nil { + m.log().WithError(err).Error("loading migrated state") + return errors.Wrap(err, "loading migrated state") + } + + if len(migrationState.MissingFilterableClass2Props) == 0 { + m.log().Debug("no missing filterable indexes, fallback mode skipped") + return nil + } + + for _, index := range m.indexes { + if _, ok := migrationState.MissingFilterableClass2Props[index.Config.ClassName.String()]; !ok { + continue + } + index.ForEachShard(func(name string, shard ShardLike) error { + m.logShard(shard).Debug("setting fallback mode for shard") + shard.setFallbackToSearchable(true) + return nil + }) + } + + m.log().Debug("finished switching fallback mode") + return nil +} + +func (m *filterableToSearchableMigrator) migrateClass(ctx context.Context, index *Index, getClass func(string) *models.Class) (map[string]struct{}, error) { + m.logIndex(index).Debug("started migration of index") + + className := index.Config.ClassName.String() + class := getClass(className) + if class == nil { + return nil, fmt.Errorf("could not find class %s in schema", className) + } + + shard2PropsToFix := map[string]map[string]struct{}{} + uniquePropsToFix := map[string]struct{}{} + for _, prop := range class.Properties { + if !(inverted.HasFilterableIndex(prop) && inverted.HasSearchableIndex(prop)) { + continue + } + if err := index.ForEachShard(func(name string, shard ShardLike) error { + if toFix, err := m.isPropToFix(ctx, prop, shard); toFix { + if _, ok := shard2PropsToFix[shard.Name()]; !ok { + 
shard2PropsToFix[shard.Name()] = map[string]struct{}{} + } + shard2PropsToFix[shard.Name()][prop.Name] = struct{}{} + uniquePropsToFix[prop.Name] = struct{}{} + } else if err != nil { + m.logShard(shard).WithError(err).Error("failed discovering props to fix") + return errors.Wrap(err, "failed discovering props to fix") + } + return nil + }); err != nil { + return nil, err + } + } + + m.logIndex(index). + WithField("number_of_props", len(uniquePropsToFix)). + WithField("props", m.uniquePropsToSlice(uniquePropsToFix)). + Debug("found properties to fix") + + if len(uniquePropsToFix) == 0 { + return nil, nil + } + + eg := enterrors.NewErrorGroupWrapper(m.logger) + eg.SetLimit(_NUMCPU) + for shardName, props := range shard2PropsToFix { + shard := index.shards.Load(shardName) + props := props + + eg.Go(func() error { + if err := m.migrateShard(ctx, shard, props); err != nil { + m.logShard(shard).WithError(err).Error("failed migrating shard") + return errors.Wrap(err, "failed migrating shard") + } + return nil + }, "shard:", shard, "props:", props) + } + if err := eg.Wait(); err != nil { + return nil, err + } + + m.logIndex(index).Debug("finished migration of index") + return uniquePropsToFix, nil +} + +func (m *filterableToSearchableMigrator) migrateShard(ctx context.Context, shard ShardLike, + props map[string]struct{}, +) error { + m.logShard(shard).Debug("started migration of shard") + + m.pauseStoreActivity(ctx, shard) + defer m.resumeStoreActivity(ctx, shard) + + for propName := range props { + srcBucketName := helpers.BucketFromPropNameLSM(propName) + dstBucketName := helpers.BucketSearchableFromPropNameLSM(propName) + + m.logShard(shard). + WithField("bucketSrc", srcBucketName). + WithField("bucketDst", dstBucketName). + WithField("prop", propName). 
+ Debug("replacing buckets") + + if err := shard.Store().ReplaceBuckets(ctx, dstBucketName, srcBucketName); err != nil { + return err + } + } + + m.logShard(shard).Debug("finished migration of shard") + return nil +} + +func (m *filterableToSearchableMigrator) isPropToFix(ctx context.Context, prop *models.Property, shard ShardLike) (bool, error) { + bucketFilterable := shard.Store().Bucket(helpers.BucketFromPropNameLSM(prop.Name)) + if bucketFilterable != nil && + bucketFilterable.Strategy() == lsmkv.StrategyMapCollection && + bucketFilterable.DesiredStrategy() == lsmkv.StrategyRoaringSet { + + bucketSearchable := shard.Store().Bucket(helpers.BucketSearchableFromPropNameLSM(prop.Name)) + if bucketSearchable != nil && + bucketSearchable.Strategy() == lsmkv.StrategyMapCollection { + + if m.isEmptyMapBucket(ctx, bucketSearchable) { + return true, nil + } + return false, fmt.Errorf("searchable bucket is not empty") + } else { + return false, fmt.Errorf("searchable bucket should have map strategy") + } + } + return false, nil +} + +func (m *filterableToSearchableMigrator) isEmptyMapBucket(ctx context.Context, bucket *lsmkv.Bucket) bool { + cur := bucket.MapCursorKeyOnly() + defer cur.Close() + + key, _ := cur.First(ctx) + return key == nil +} + +func (m *filterableToSearchableMigrator) pauseStoreActivity( + ctx context.Context, shard ShardLike, +) error { + m.logShard(shard).Debug("pausing store activity") + + if err := shard.Store().PauseCompaction(ctx); err != nil { + return errors.Wrapf(err, "failed pausing compaction for shard '%s'", shard.ID()) + } + if err := shard.Store().FlushMemtables(ctx); err != nil { + return errors.Wrapf(err, "failed flushing memtables for shard '%s'", shard.ID()) + } + if err := shard.Store().UpdateBucketsStatus(storagestate.StatusReadOnly); err != nil { + return errors.Wrapf(err, "failed pausing compaction for shard '%s'", shard.ID()) + } + + m.logShard(shard).Debug("paused store activity") + return nil +} + +func (m 
*filterableToSearchableMigrator) resumeStoreActivity( + ctx context.Context, shard ShardLike, +) error { + m.logShard(shard).Debug("resuming store activity") + + if err := shard.Store().ResumeCompaction(ctx); err != nil { + return errors.Wrapf(err, "failed resuming compaction for shard '%s'", shard.ID()) + } + if err := shard.Store().UpdateBucketsStatus(storagestate.StatusReady); err != nil { + return errors.Wrapf(err, "failed resuming compaction for shard '%s'", shard.ID()) + } + m.logShard(shard).Debug("resumed store activity") + return nil +} + +func (m *filterableToSearchableMigrator) log() *logrus.Entry { + return m.logger.WithField("action", "inverted filter2search migration") +} + +func (m *filterableToSearchableMigrator) logIndex(index *Index) *logrus.Entry { + return m.log().WithField("index", index.ID()) +} + +func (m *filterableToSearchableMigrator) logShard(shard ShardLike) *logrus.Entry { + return m.logIndex(shard.Index()).WithField("shard", shard.ID()) +} + +func (m *filterableToSearchableMigrator) uniquePropsToSlice(uniqueProps map[string]struct{}) []string { + props := make([]string, 0, len(uniqueProps)) + for prop := range uniqueProps { + props = append(props, prop) + } + return props +} + +type filterableToSearchableMigrationState struct { + MissingFilterableClass2Props map[string]map[string]struct{} + CreatedFilterableClass2Props map[string]map[string]struct{} +} + +type filterableToSearchableMigrationFiles struct { + flagFileName string + stateFileName string +} + +func newFilterableToSearchableMigrationFiles(rootPath string) *filterableToSearchableMigrationFiles { + return &filterableToSearchableMigrationFiles{ + flagFileName: path.Join(rootPath, "migration1.19.filter2search.skip.flag"), + stateFileName: path.Join(rootPath, "migration1.19.filter2search.state"), + } +} + +func (mf *filterableToSearchableMigrationFiles) loadMigrationState() (*filterableToSearchableMigrationState, error) { + f, err := os.OpenFile(mf.stateFileName, 
os.O_RDWR|os.O_CREATE, 0o666) + if err != nil { + return nil, err + } + defer f.Close() + + buf := new(bytes.Buffer) + if _, err := buf.ReadFrom(f); err != nil { + return nil, err + } + bytes := buf.Bytes() + + state := filterableToSearchableMigrationState{ + MissingFilterableClass2Props: map[string]map[string]struct{}{}, + CreatedFilterableClass2Props: map[string]map[string]struct{}{}, + } + + if len(bytes) > 0 { + if err := json.Unmarshal(bytes, &state); err != nil { + return nil, err + } + } + return &state, nil +} + +func (mf *filterableToSearchableMigrationFiles) saveMigrationState(state *filterableToSearchableMigrationState) error { + bytes, err := json.Marshal(state) + if err != nil { + return err + } + + fileNameTemp := mf.stateFileName + ".temp" + f, err := os.Create(fileNameTemp) + if err != nil { + return err + } + + _, err = f.Write(bytes) + f.Close() + if err != nil { + return err + } + + err = os.Rename(fileNameTemp, mf.stateFileName) + if err != nil { + return err + } + return nil +} + +func (mf *filterableToSearchableMigrationFiles) existsMigrationSkipFlag() bool { + _, err := os.Stat(mf.flagFileName) + return err == nil +} + +func (mf *filterableToSearchableMigrationFiles) createMigrationSkipFlag() error { + f, err := os.Create(mf.flagFileName) + if err != nil { + return err + } + f.Close() + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer.go new file mode 100644 index 0000000000000000000000000000000000000000..05b13d2bd81012d7f3867813a912cf90085cb073 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer.go @@ -0,0 +1,473 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/storagestate" + "github.com/weaviate/weaviate/entities/storobj" +) + +type ShardInvertedReindexTask interface { + GetPropertiesToReindex(ctx context.Context, shard ShardLike, + ) ([]ReindexableProperty, error) + // right now only OnResume is needed, but in the future more + // callbacks could be added + // (like OnPrePauseStore, OnPostPauseStore, OnPreResumeStore, etc) + OnPostResumeStore(ctx context.Context, shard ShardLike) error + ObjectsIterator(shard ShardLike) objectsIterator +} + +type objectsIterator func(ctx context.Context, fn func(object *storobj.Object) error) error + +type ReindexableProperty struct { + PropertyName string + IndexType PropertyIndexType + NewIndex bool // is new index, there is no bucket to replace with + DesiredStrategy string + BucketOptions []lsmkv.BucketOption +} + +type ShardInvertedReindexer struct { + logger logrus.FieldLogger + shard ShardLike + + tasks []ShardInvertedReindexTask + class *models.Class +} + +func NewShardInvertedReindexer(shard ShardLike, logger logrus.FieldLogger) *ShardInvertedReindexer { + class := shard.Index().getSchema.ReadOnlyClass(shard.Index().Config.ClassName.String()) + if class == nil { + return nil + } + + return &ShardInvertedReindexer{ + logger: logger, + shard: shard, + tasks: []ShardInvertedReindexTask{}, + class: class, + } +} + +func (r *ShardInvertedReindexer) AddTask(task ShardInvertedReindexTask) { + r.tasks = append(r.tasks, task) +} + +func (r *ShardInvertedReindexer) Do(ctx context.Context) error { + for _, task := range r.tasks { + if err := r.checkContextExpired(ctx, 
"remaining tasks skipped due to context canceled"); err != nil { + return err + } + if err := r.doTask(ctx, task); err != nil { + return err + } + } + return nil +} + +func (r *ShardInvertedReindexer) doTask(ctx context.Context, task ShardInvertedReindexTask) error { + reindexProperties, err := task.GetPropertiesToReindex(ctx, r.shard) + if err != nil { + r.logError(err, "failed getting reindex properties") + return errors.Wrapf(err, "failed getting reindex properties") + } + if len(reindexProperties) == 0 { + r.logger. + WithField("action", "inverted_reindex"). + WithField("index", r.shard.Index().ID()). + WithField("shard", r.shard.ID()). + Debug("no properties to reindex") + return nil + } + + if err := r.checkContextExpired(ctx, "pausing store stopped due to context canceled"); err != nil { + return err + } + + if err := r.pauseStoreActivity(ctx); err != nil { + r.logError(err, "failed pausing store activity") + return err + } + + bucketsToReindex := make([]string, len(reindexProperties)) + for i, reindexProperty := range reindexProperties { + if err := r.checkContextExpired(ctx, "creating temp buckets stopped due to context canceled"); err != nil { + return err + } + + if !isIndexTypeSupportedByStrategy(reindexProperty.IndexType, reindexProperty.DesiredStrategy) { + err := fmt.Errorf("strategy '%s' is not supported for given index type '%d", + reindexProperty.DesiredStrategy, reindexProperty.IndexType) + r.logError(err, "invalid strategy") + return err + } + + // TODO verify if property indeed need reindex before creating buckets + // (is filterable / is searchable / null or prop length index enabled) + bucketsToReindex[i] = r.bucketName(reindexProperty.PropertyName, reindexProperty.IndexType) + if err := r.createTempBucket(ctx, bucketsToReindex[i], reindexProperty.DesiredStrategy, + reindexProperty.BucketOptions...); err != nil { + r.logError(err, "failed creating temporary bucket") + return err + } + r.logger. + WithField("action", "inverted_reindex"). 
+ WithField("shard", r.shard.Name()). + WithField("property", reindexProperty.PropertyName). + WithField("strategy", reindexProperty.DesiredStrategy). + WithField("index_type", reindexProperty.IndexType). + Debug("created temporary bucket") + } + + if err := r.reindexProperties(ctx, reindexProperties, task.ObjectsIterator(r.shard)); err != nil { + r.logError(err, "failed reindexing properties") + return errors.Wrapf(err, "failed reindexing properties on shard '%s'", r.shard.Name()) + } + + for i := range bucketsToReindex { + if err := r.checkContextExpired(ctx, "replacing buckets stopped due to context canceled"); err != nil { + return err + } + tempBucketName := helpers.TempBucketFromBucketName(bucketsToReindex[i]) + tempBucket := r.shard.Store().Bucket(tempBucketName) + tempBucket.FlushMemtable() + tempBucket.UpdateStatus(storagestate.StatusReadOnly) + + if reindexProperties[i].NewIndex { + if err := r.shard.Store().RenameBucket(ctx, tempBucketName, bucketsToReindex[i]); err != nil { + r.logError(err, "failed renaming buckets") + return err + } + + r.logger. + WithField("action", "inverted_reindex"). + WithField("shard", r.shard.Name()). + WithField("bucket", bucketsToReindex[i]). + WithField("temp_bucket", tempBucketName). + Debug("renamed bucket") + } else { + if err := r.shard.Store().ReplaceBuckets(ctx, bucketsToReindex[i], tempBucketName); err != nil { + r.logError(err, "failed replacing buckets") + return err + } + + r.logger. + WithField("action", "inverted_reindex"). + WithField("shard", r.shard.Name()). + WithField("bucket", bucketsToReindex[i]). + WithField("temp_bucket", tempBucketName). 
+ Debug("replaced buckets") + } + } + + if err := r.checkContextExpired(ctx, "resuming store stopped due to context canceled"); err != nil { + return err + } + + if err := r.resumeStoreActivity(ctx, task); err != nil { + r.logError(err, "failed resuming store activity") + return err + } + + return nil +} + +func (r *ShardInvertedReindexer) pauseStoreActivity(ctx context.Context) error { + if err := r.shard.Store().PauseCompaction(ctx); err != nil { + return errors.Wrapf(err, "failed pausing compaction for shard '%s'", r.shard.Name()) + } + if err := r.shard.Store().FlushMemtables(ctx); err != nil { + return errors.Wrapf(err, "failed flushing memtables for shard '%s'", r.shard.Name()) + } + if err := r.shard.Store().UpdateBucketsStatus(storagestate.StatusReadOnly); err != nil { + return errors.Wrapf(err, "failed pausing compaction for shard '%s'", r.shard.ID()) + } + + r.logger. + WithField("action", "inverted_reindex"). + WithField("shard", r.shard.Name()). + Debug("paused store activity") + + return nil +} + +func (r *ShardInvertedReindexer) resumeStoreActivity(ctx context.Context, task ShardInvertedReindexTask) error { + if err := r.shard.Store().ResumeCompaction(ctx); err != nil { + return errors.Wrapf(err, "failed resuming compaction for shard '%s'", r.shard.Name()) + } + if err := r.shard.Store().UpdateBucketsStatus(storagestate.StatusReady); err != nil { + return errors.Wrapf(err, "failed resuming compaction for shard '%s'", r.shard.ID()) + } + if err := task.OnPostResumeStore(ctx, r.shard); err != nil { + return errors.Wrap(err, "failed OnPostResumeStore") + } + + r.logger. + WithField("action", "inverted_reindex"). + WithField("shard", r.shard.Name()). 
+ Debug("resumed store activity") + + return nil +} + +func (r *ShardInvertedReindexer) createTempBucket(ctx context.Context, name string, + strategy string, options ...lsmkv.BucketOption, +) error { + tempName := helpers.TempBucketFromBucketName(name) + index := r.shard.Index() + bucketOptions := append(options, + lsmkv.WithStrategy(strategy), + lsmkv.WithMinMMapSize(index.Config.MinMMapSize), + lsmkv.WithMinWalThreshold(index.Config.MaxReuseWalSize), + ) + + if err := r.shard.Store().CreateBucket(ctx, tempName, bucketOptions...); err != nil { + return errors.Wrapf(err, "failed creating temp bucket '%s'", tempName) + } + return nil +} + +func (r *ShardInvertedReindexer) reindexProperties(ctx context.Context, reindexableProperties []ReindexableProperty, + objectsIterator objectsIterator, +) error { + checker := newReindexablePropertyChecker(reindexableProperties, r.class) + + r.logger. + WithField("action", "inverted_reindex"). + WithField("shard", r.shard.Name()). + Debug("starting populating indexes") + + i := 0 + if err := objectsIterator(ctx, func(object *storobj.Object) error { + // check context expired every 50k objects + if i%50_000 == 0 && i != 0 { + if err := r.checkContextExpired(ctx, "iterating through objects stopped due to context canceled"); err != nil { + return err + } + r.logger. + WithField("action", "inverted_reindex"). + WithField("shard", r.shard.Name()). 
+ Debugf("iterating through objects: %d done", i) + } + docID := object.DocID + properties, nilProperties, err := r.shard.AnalyzeObject(object) + if err != nil { + return errors.Wrapf(err, "failed analyzying object") + } + + for _, property := range properties { + if err := r.handleProperty(ctx, checker, docID, property); err != nil { + return errors.Wrapf(err, "failed reindexing property '%s' of object '%d'", property.Name, docID) + } + } + for _, nilProperty := range nilProperties { + if err := r.handleNilProperty(ctx, checker, docID, nilProperty); err != nil { + return errors.Wrapf(err, "failed reindexing property '%s' of object '%d'", nilProperty.Name, docID) + } + } + + i++ + return nil + }); err != nil { + return err + } + + r.logger. + WithField("action", "inverted_reindex"). + WithField("shard", r.shard.Name()). + Debugf("iterating through objects: %d done", i) + + return nil +} + +func (r *ShardInvertedReindexer) handleProperty(ctx context.Context, checker *reindexablePropertyChecker, + docID uint64, property inverted.Property, +) error { + // skip internal properties (_id etc) + if isInternalProperty(property) { + return nil + } + + if isMetaCountProperty(property) { + propName := strings.TrimSuffix(property.Name, "__meta_count") + if checker.isReindexable(propName, IndexTypePropMetaCount) { + schemaProp := checker.getSchemaProp(propName) + if inverted.HasFilterableIndex(schemaProp) { + bucketMeta := r.tempBucket(propName, IndexTypePropMetaCount) + if bucketMeta == nil { + return fmt.Errorf("no bucket for prop '%s' meta found", propName) + } + for _, item := range property.Items { + if err := r.shard.addToPropertySetBucket(bucketMeta, docID, item.Data); err != nil { + return errors.Wrapf(err, "failed adding to prop '%s' meta bucket", property.Name) + } + } + } + } + return nil + } + + schemaProp := checker.getSchemaProp(property.Name) + if checker.isReindexable(property.Name, IndexTypePropValue) && + inverted.HasFilterableIndex(schemaProp) { + + 
bucketValue := r.tempBucket(property.Name, IndexTypePropValue) + if bucketValue == nil { + return fmt.Errorf("no bucket for prop '%s' value found", property.Name) + } + for _, item := range property.Items { + if err := r.shard.addToPropertySetBucket(bucketValue, docID, item.Data); err != nil { + return errors.Wrapf(err, "failed adding to prop '%s' value bucket", property.Name) + } + } + } + if checker.isReindexable(property.Name, IndexTypePropSearchableValue) && + inverted.HasSearchableIndex(schemaProp) { + + bucketSearchableValue := r.tempBucket(property.Name, IndexTypePropSearchableValue) + if bucketSearchableValue == nil { + return fmt.Errorf("no bucket searchable for prop '%s' value found", property.Name) + } + propLen := float32(len(property.Items)) + for _, item := range property.Items { + pair := r.shard.pairPropertyWithFrequency(docID, item.TermFrequency, propLen) + if err := r.shard.addToPropertyMapBucket(bucketSearchableValue, pair, item.Data); err != nil { + return errors.Wrapf(err, "failed adding to prop '%s' value bucket", property.Name) + } + } + } + + // properties where defining a length does not make sense (floats etc.) 
have a negative entry as length + if r.shard.Index().invertedIndexConfig.IndexPropertyLength && + checker.isReindexable(property.Name, IndexTypePropLength) && + property.Length >= 0 { + + key, err := bucketKeyPropertyLength(property.Length) + if err != nil { + return errors.Wrapf(err, "failed creating key for prop '%s' length", property.Name) + } + bucketLength := r.tempBucket(property.Name, IndexTypePropLength) + if bucketLength == nil { + return fmt.Errorf("no bucket for prop '%s' length found", property.Name) + } + if err := r.shard.addToPropertySetBucket(bucketLength, docID, key); err != nil { + return errors.Wrapf(err, "failed adding to prop '%s' length bucket", property.Name) + } + } + + if r.shard.Index().invertedIndexConfig.IndexNullState && + checker.isReindexable(property.Name, IndexTypePropNull) { + + key, err := bucketKeyPropertyNull(property.Length == 0) + if err != nil { + return errors.Wrapf(err, "failed creating key for prop '%s' null", property.Name) + } + bucketNull := r.tempBucket(property.Name, IndexTypePropNull) + if bucketNull == nil { + return fmt.Errorf("no bucket for prop '%s' null found", property.Name) + } + if err := r.shard.addToPropertySetBucket(bucketNull, docID, key); err != nil { + return errors.Wrapf(err, "failed adding to prop '%s' null bucket", property.Name) + } + } + + return nil +} + +func (r *ShardInvertedReindexer) handleNilProperty(ctx context.Context, checker *reindexablePropertyChecker, + docID uint64, nilProperty inverted.NilProperty, +) error { + if r.shard.Index().invertedIndexConfig.IndexPropertyLength && + checker.isReindexable(nilProperty.Name, IndexTypePropLength) && + nilProperty.AddToPropertyLength { + + key, err := bucketKeyPropertyLength(0) + if err != nil { + return errors.Wrapf(err, "failed creating key for prop '%s' length", nilProperty.Name) + } + bucketLength := r.tempBucket(nilProperty.Name, IndexTypePropLength) + if bucketLength == nil { + return fmt.Errorf("no bucket for prop '%s' length found", 
nilProperty.Name) + } + if err := r.shard.addToPropertySetBucket(bucketLength, docID, key); err != nil { + return errors.Wrapf(err, "failed adding to prop '%s' length bucket", nilProperty.Name) + } + } + + if r.shard.Index().invertedIndexConfig.IndexNullState && + checker.isReindexable(nilProperty.Name, IndexTypePropNull) { + + key, err := bucketKeyPropertyNull(true) + if err != nil { + return errors.Wrapf(err, "failed creating key for prop '%s' null", nilProperty.Name) + } + bucketNull := r.tempBucket(nilProperty.Name, IndexTypePropNull) + if bucketNull == nil { + return fmt.Errorf("no bucket for prop '%s' null found", nilProperty.Name) + } + if err := r.shard.addToPropertySetBucket(bucketNull, docID, key); err != nil { + return errors.Wrapf(err, "failed adding to prop '%s' null bucket", nilProperty.Name) + } + } + + return nil +} + +func (r *ShardInvertedReindexer) bucketName(propName string, indexType PropertyIndexType) string { + checkSupportedPropertyIndexType(indexType) + + switch indexType { + case IndexTypePropValue: + return helpers.BucketFromPropNameLSM(propName) + case IndexTypePropSearchableValue: + return helpers.BucketSearchableFromPropNameLSM(propName) + case IndexTypePropLength: + return helpers.BucketFromPropNameLengthLSM(propName) + case IndexTypePropNull: + return helpers.BucketFromPropNameNullLSM(propName) + case IndexTypePropMetaCount: + return helpers.BucketFromPropNameMetaCountLSM(propName) + default: + return "" + } +} + +func (r *ShardInvertedReindexer) tempBucket(propName string, indexType PropertyIndexType) *lsmkv.Bucket { + tempBucketName := helpers.TempBucketFromBucketName(r.bucketName(propName, indexType)) + return r.shard.Store().Bucket(tempBucketName) +} + +func (r *ShardInvertedReindexer) checkContextExpired(ctx context.Context, msg string) error { + if ctx.Err() != nil { + r.logError(ctx.Err(), "%v", msg) + return errors.Wrapf(ctx.Err(), "%v", msg) + } + return nil +} + +func (r *ShardInvertedReindexer) logError(err error, msg 
string, args ...interface{}) { + r.logger. + WithField("action", "inverted_reindex"). + WithField("shard", r.shard.Name()). + WithError(err). + Errorf(msg, args...) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_index_types.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_index_types.go new file mode 100644 index 0000000000000000000000000000000000000000..64b5f5d13ca64a30c45b9b0c3a752a5adc394cc1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_index_types.go @@ -0,0 +1,58 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + +type PropertyIndexType uint8 + +const ( + IndexTypePropValue PropertyIndexType = iota + 1 + IndexTypePropLength + IndexTypePropNull + IndexTypePropSearchableValue + IndexTypePropMetaCount +) + +func isSupportedPropertyIndexType(indexType PropertyIndexType) bool { + switch indexType { + case IndexTypePropValue, + IndexTypePropLength, + IndexTypePropNull, + IndexTypePropMetaCount, + IndexTypePropSearchableValue: + return true + default: + return false + } +} + +func checkSupportedPropertyIndexType(indexType PropertyIndexType) { + if !isSupportedPropertyIndexType(indexType) { + panic("unsupported property index type") + } +} + +// Some index types are supported by specific strategies only +// Method ensures both index type and strategy work together +func isIndexTypeSupportedByStrategy(indexType PropertyIndexType, strategy string) bool { + switch indexType { + case IndexTypePropLength, + IndexTypePropNull, + IndexTypePropMetaCount, + IndexTypePropValue: + return lsmkv.IsExpectedStrategy(strategy, 
lsmkv.StrategySetCollection, lsmkv.StrategyRoaringSet) + case IndexTypePropSearchableValue: + return lsmkv.IsExpectedStrategy(strategy, lsmkv.StrategyMapCollection) + } + return false +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_map_to_blockmax.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_map_to_blockmax.go new file mode 100644 index 0000000000000000000000000000000000000000..0aaf0c50dcec157b0d58cdc2f16b631aa29355a3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_map_to_blockmax.go @@ -0,0 +1,2040 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/additional" + entcfg "github.com/weaviate/weaviate/entities/config" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/config" + schema "github.com/weaviate/weaviate/usecases/schema" +) + +func NewShardInvertedReindexTaskMapToBlockmax(logger logrus.FieldLogger, + swapBuckets, unswapBuckets, tidyBuckets, reloadShards, rollback, conditionalStart bool, + processingDuration, pauseDuration time.Duration, perObjectDelay time.Duration, concurrency int, + cptSelected []config.CollectionPropsTenants, schemaManager *schema.Manager, +) 
*ShardReindexTask_MapToBlockmax { + name := "MapToBlockmax" + keyParser := &UuidKeyParser{} + objectsIteratorAsync := uuidObjectsIteratorAsync + + logger = logger.WithField("task", name) + newReindexTracker := func(lsmPath string) (mapToBlockmaxReindexTracker, error) { + rt := NewFileMapToBlockmaxReindexTracker(lsmPath, keyParser) + if err := rt.init(); err != nil { + return nil, err + } + return rt, nil + } + + selectionEnabled := false + var selectedPropsByCollection, selectedShardsByCollection map[string]map[string]struct{} + if count := len(cptSelected); count > 0 { + selectionEnabled = true + selectedPropsByCollection = make(map[string]map[string]struct{}, count) + selectedShardsByCollection = make(map[string]map[string]struct{}, count) + + for _, cpt := range cptSelected { + var props, shards map[string]struct{} + if countp := len(cpt.Props); countp > 0 { + props = make(map[string]struct{}, countp) + for _, prop := range cpt.Props { + props[prop] = struct{}{} + } + } + if counts := len(cpt.Tenants); counts > 0 { + shards = make(map[string]struct{}, counts) + for _, shard := range cpt.Tenants { + shards[shard] = struct{}{} + } + } + selectedPropsByCollection[cpt.Collection] = props + selectedShardsByCollection[cpt.Collection] = shards + } + } + + config := mapToBlockmaxConfig{ + swapBuckets: swapBuckets, + unswapBuckets: unswapBuckets, + tidyBuckets: tidyBuckets, + reloadShards: reloadShards, + rollback: rollback, + concurrency: concurrency, + conditionalStart: conditionalStart, + memtableOptBlockmaxFactor: 4, + processingDuration: processingDuration, + pauseDuration: pauseDuration, + checkProcessingEveryNoObjects: 1000, + selectionEnabled: selectionEnabled, + selectedPropsByCollection: selectedPropsByCollection, + selectedShardsByCollection: selectedShardsByCollection, + perObjectDelay: perObjectDelay, + } + + logger.WithField("config", fmt.Sprintf("%+v", config)).Debug("task created") + + return &ShardReindexTask_MapToBlockmax{ + name: name, + logger: 
logger, + newReindexTracker: newReindexTracker, + keyParser: keyParser, + objectsIteratorAsync: objectsIteratorAsync, + schemaManager: schemaManager, + config: config, + } +} + +type ShardReindexTask_MapToBlockmax struct { + name string + logger logrus.FieldLogger + newReindexTracker func(lsmPath string) (mapToBlockmaxReindexTracker, error) + keyParser indexKeyParser + objectsIteratorAsync objectsIteratorAsync + config mapToBlockmaxConfig + schemaManager *schema.Manager +} + +type mapToBlockmaxConfig struct { + swapBuckets bool + unswapBuckets bool + tidyBuckets bool + reloadShards bool + rollback bool + conditionalStart bool + concurrency int + memtableOptBlockmaxFactor int + processingDuration time.Duration + pauseDuration time.Duration + perObjectDelay time.Duration + checkProcessingEveryNoObjects int + selectionEnabled bool + selectedPropsByCollection map[string]map[string]struct{} + selectedShardsByCollection map[string]map[string]struct{} +} + +func (t *ShardReindexTask_MapToBlockmax) Name() string { + return t.name +} + +func (t *ShardReindexTask_MapToBlockmax) OnBeforeLsmInit(ctx context.Context, shard *Shard) (err error) { + collectionName := shard.Index().Config.ClassName.String() + shardName := shard.Name() + logger := t.logger.WithFields(map[string]any{ + "collection": collectionName, + "shard": shardName, + "method": "OnBeforeLsmInit", + }) + logger.Info("starting") + defer func(started time.Time) { + logger = logger.WithField("took", time.Since(started)) + if err != nil { + logger.WithError(err).Error("finished with error") + } else { + logger.Info("finished") + } + }(time.Now()) + + if !t.isShardSelected(collectionName, shardName) { + logger.Debug("different collection/shard selected. 
nothing to do") + return nil + } + + rt, err := t.newReindexTracker(shard.pathLSM()) + if err != nil { + err = fmt.Errorf("creating reindex tracker: %w", err) + return + } + + rt.checkOverrides(logger, &t.config) + + if rt.IsRollback() { + // make it so it "survives" the rt.reset() + t.config.rollback = true + } + + if rt.IsReset() && rt.IsTidied() { + rt.reset() + err = fmt.Errorf("reset was manually triggered") + return + } + + if t.config.conditionalStart && !rt.HasStartCondition() { + err = fmt.Errorf("conditional start is set, but file trigger is not found") + return + } + + props, err := t.readPropsToReindex(rt) + if err != nil { + err = fmt.Errorf("reading reindexable props: %w", err) + return + } + + if t.config.rollback { + logger.Debugf("rollback started: config=%v, runtime=%v", t.config.rollback, rt.IsRollback()) + + if rt.IsTidied() { + err = fmt.Errorf("rollback: searchable map buckets are deleted, can not restore") + return + } + if rt.IsSwapped() { + if err = t.unswapIngestAndMapBuckets(ctx, logger, shard, rt, props); err != nil { + err = fmt.Errorf("rollback: unswapping buckets: %w", err) + return + } + } + if err = t.removeReindexBucketsDirs(ctx, logger, shard, props); err != nil { + err = fmt.Errorf("rollback: removing reindex buckets: %w", err) + return + } + if err = t.removeIngestBucketsDirs(ctx, logger, shard, props); err != nil { + err = fmt.Errorf("rollback: removing ingest buckets: %w", err) + return + } + if err = rt.reset(); err != nil { + err = fmt.Errorf("rollback: removing migration files: %w", err) + return + } + + logger.Debug("rollback completed") + return nil + } + + if len(props) == 0 { + logger.Debug("no props read. nothing to do") + return nil + } + + if err = ctx.Err(); err != nil { + err = fmt.Errorf("context check (1): %w / %w", err, context.Cause(ctx)) + return + } + + isMerged := rt.IsMerged() + if !isMerged && rt.IsReindexed() { + logger.Debug("reindexed, not merged. 
merging buckets") + + if err = t.mergeReindexAndIngestBuckets(ctx, logger, shard, rt, props); err != nil { + err = fmt.Errorf("merging reindex and ingest buckets:%w", err) + return + } + isMerged = true + } + + if err = ctx.Err(); err != nil { + err = fmt.Errorf("context check (2): %w / %w", err, context.Cause(ctx)) + return + } + + isSwapped := rt.IsSwapped() + isTidied := rt.IsTidied() + if isMerged { + if isSwapped { + if t.config.unswapBuckets { + if isTidied { + logger.Debug("swapped and tidied. can not be unswapped") + } else { + logger.Debug("swapped, not tidied. unswapping buckets") + + if err = t.unswapIngestAndMapBuckets(ctx, logger, shard, rt, props); err != nil { + err = fmt.Errorf("unswapping ingest and map buckets:%w", err) + return + } + isSwapped = false + } + } + } else { + if t.config.swapBuckets { + logger.Debug("merged, not swapped. swapping buckets") + + if err = t.swapIngestAndMapBuckets(ctx, logger, shard, rt, props); err != nil { + err = fmt.Errorf("swapping ingest and map buckets:%w", err) + return + } + isSwapped = true + } + } + } + + if err = ctx.Err(); err != nil { + err = fmt.Errorf("context check (3): %w / %w", err, context.Cause(ctx)) + return + } + + if isSwapped { + if isTidied { + logger.Debug("tidied. nothing to do") + return nil + } + + if t.config.tidyBuckets { + logger.Debug("swapped, not tidied. 
tidying buckets") + + if err = t.tidyMapBuckets(ctx, logger, shard, rt, props); err != nil { + err = fmt.Errorf("tidying map buckets:%w", err) + return + } + } + } + + return nil +} + +func (t *ShardReindexTask_MapToBlockmax) OnAfterLsmInit(ctx context.Context, shard *Shard) (err error) { + collectionName := shard.Index().Config.ClassName.String() + shardName := shard.Name() + logger := t.logger.WithFields(map[string]any{ + "collection": collectionName, + "shard": shardName, + "method": "OnAfterLsmInit", + }) + logger.Info("starting") + defer func(started time.Time) { + logger = logger.WithField("took", time.Since(started)) + if err != nil { + logger.WithError(err).Error("finished with error") + } else { + logger.Info("finished") + } + }(time.Now()) + + // skip shard only if not started or rollback requested + // otherwise double writes have to be enabled if migration was already started + isShardSelected := t.isShardSelected(collectionName, shardName) + + if t.config.rollback && isShardSelected { + logger.Debug("rollback. nothing to do") + return nil + } + + rt, err := t.newReindexTracker(shard.pathLSM()) + if err != nil { + err = fmt.Errorf("creating reindex tracker: %w", err) + return + } + + rt.checkOverrides(logger, &t.config) + + if rt.IsRollback() { + logger.Debug("rollback. nothing to do") + return + } + + if t.config.conditionalStart && !rt.HasStartCondition() { + err = fmt.Errorf("conditional start is set, but file trigger is not found") + return + } + + isStarted := rt.IsStarted() + if !isStarted && !isShardSelected { + logger.Debug("different collection/shard selected. nothing to do") + return nil + } + + props, err := t.getPropsToReindex(shard, rt) + if err != nil { + err = fmt.Errorf("getting reindexable props: %w", err) + return + } + logger.WithField("props", props).Debug("props found") + if len(props) == 0 { + logger.Debug("no props found. 
nothing to do") + return nil + } + + if !isStarted { + if err = rt.markStarted(time.Now()); err != nil { + err = fmt.Errorf("marking reindex started: %w", err) + return + } + } + + if rt.IsSwapped() { + if !rt.IsTidied() { + logger.Debug("swapped, not tidied. starting map buckets") + + if err = t.loadMapSearchBuckets(ctx, logger, shard, props); err != nil { + err = fmt.Errorf("starting map buckets:%w", err) + return + } + if err = t.duplicateToMapBuckets(shard, props); err != nil { + err = fmt.Errorf("duplicating map buckets:%w", err) + return + } + } + } else { + isMerged := rt.IsMerged() + if isMerged { + logger.Debug("merged, not swapped. starting ingest buckets") + } else { + if !rt.IsReindexed() { + logger.Debug("not reindexed. starting reindex buckets") + + if err = t.loadReindexSearchBuckets(ctx, logger, shard, props); err != nil { + err = fmt.Errorf("starting reindex buckets: %w", err) + return + } + } + + logger.Debug("not merged. starting ingest buckets") + } + + shard.markSearchableBlockmaxProperties(props...) 
+ + // since reindex bucket will be merged into ingest bucket with reindex segments being before ingest, + // ingest segments should not be compacted and tombstones should be kept + if err = t.loadIngestSearchBuckets(ctx, logger, shard, props, !isMerged, !isMerged); err != nil { + err = fmt.Errorf("starting ingest buckets:%w", err) + return + } + if err = t.duplicateToIngestBuckets(shard, props); err != nil { + err = fmt.Errorf("duplicating ingest buckets:%w", err) + return + } + } + + return nil +} + +func (t *ShardReindexTask_MapToBlockmax) OnAfterLsmInitAsync(ctx context.Context, shard ShardLike, +) (rerunAt time.Time, reloadShard bool, err error) { + collectionName := shard.Index().Config.ClassName.String() + shardName := shard.Name() + logger := t.logger.WithFields(map[string]any{ + "collection": collectionName, + "shard": shardName, + "method": "OnAfterLsmInitAsync", + }) + logger.Info("starting") + defer func(started time.Time) { + logger = logger.WithField("took", time.Since(started)) + if err != nil { + logger.WithError(err).Error("finished with error") + } else { + logger.Info("finished") + } + }(time.Now()) + + zerotime := time.Time{} + + if !t.isShardSelected(collectionName, shardName) { + logger.Debug("different collection/shard selected. 
nothing to do") + return zerotime, false, nil + } + + rt, err := t.newReindexTracker(shard.pathLSM()) + if err != nil { + err = fmt.Errorf("creating reindex tracker: %w", err) + return zerotime, false, err + } + + rt.checkOverrides(logger, &t.config) + + // rollback initiated by the user after restart, stop double writes + if rt.IsRollback() { + logger.Debug("rollback started") + props, err2 := t.readPropsToReindex(rt) + if err2 != nil { + err = fmt.Errorf("reading reindexable props for rollback: %w", err2) + return zerotime, false, err + } + err = nil + + if !rt.IsSwapped() { + err = t.unloadReindexBuckets(ctx, logger, shard, props) + if err != nil { + err = fmt.Errorf("unloading reindex buckets: %w", err) + return zerotime, false, err + } + logger.Info("reindex buckets unloaded") + err = t.unloadIngestBuckets(ctx, logger, shard, props) + if err != nil { + err = fmt.Errorf("unloading ingest buckets: %w", err) + return zerotime, false, err + } + logger.Info("ingest buckets unloaded") + } else { + logger.Warnf("inverted bucket is being used for search, will not be unloaded: %s. Rollback will proceed on restart", shard.Name()) + } + // return early to stop ingestion + return zerotime, false, nil + } + + if t.config.rollback { + logger.Debug("rollback. nothing to do") + return zerotime, false, nil + } + + if t.config.conditionalStart && !rt.HasStartCondition() { + err = fmt.Errorf("conditional start is set, but file trigger is not found") + return zerotime, false, err + } + + props, err := t.readPropsToReindex(rt) + if err != nil { + err = fmt.Errorf("reading reindexable props: %w", err) + return zerotime, false, err + } + + if rt.IsPaused() { + logger.Debug("paused. 
waiting for resuming") + return time.Now().Add(t.config.pauseDuration), false, nil + } + + if rt.IsTidied() { + err = updateToBlockMaxInvertedIndexConfig(ctx, t.schemaManager, shard.Index().Config.ClassName.String()) + if err != nil { + err = fmt.Errorf("updating inverted index config: %w", err) + } + return zerotime, false, err + } + + if len(props) == 0 { + logger.Debug("no props read. nothing to do") + return zerotime, false, nil + } + + if rt.IsReindexed() { + logger.Debug("reindexed. nothing to do") + return zerotime, false, nil + } + + var reindexStarted time.Time + if !rt.IsStarted() { + err = fmt.Errorf("missing reindex started") + return zerotime, false, err + } else if reindexStarted, err = rt.getStarted(); err != nil { + err = fmt.Errorf("getting reindex started: %w", err) + return zerotime, false, err + } + + var lastStoredKey indexKey + if lastStoredKey, _, err = rt.GetProgress(); err != nil { + err = fmt.Errorf("getting reindex progress: %w", err) + return zerotime, false, err + } + + logger.WithFields(map[string]any{ + "last_stored_key": lastStoredKey, + "reindex_started": reindexStarted, + }).Debug("reindexing") + + if err = ctx.Err(); err != nil { + err = fmt.Errorf("context check (1): %w / %w", err, context.Cause(ctx)) + return zerotime, false, err + } + + processedCount := 0 + indexedCount := 0 + lastProcessedKey := lastStoredKey.Clone() + + defer func() { + if err != nil && !bytes.Equal(lastStoredKey.Bytes(), lastProcessedKey.Bytes()) { + logger.WithField("last_processed_key", lastProcessedKey).Debug("marking progress on error") + rt.markProgress(lastProcessedKey, processedCount, indexedCount) + } + }() + + store := shard.Store() + propExtraction := storobj.NewPropExtraction() + bucketsByPropName := map[string]*lsmkv.Bucket{} + for _, prop := range props { + propExtraction.Add(prop) + bucketName := t.reindexBucketName(prop) + bucketsByPropName[prop] = store.Bucket(bucketName) + } + + breakCh := make(chan bool, 1) + breakCh <- false + finished := 
false + + err = store.PauseObjectBucketCompaction(ctx) + if err != nil { + return zerotime, false, err + } + defer store.ResumeObjectBucketCompaction(ctx) + + processingStarted, mdCh := t.objectsIteratorAsync(logger, shard, lastStoredKey, t.keyParser.FromBytes, + propExtraction, reindexStarted, breakCh) + + for md := range mdCh { + if md == nil { + finished = true + } else if md.err != nil { + err = md.err + return zerotime, false, err + } else if err = ctx.Err(); err != nil { + breakCh <- true + err = fmt.Errorf("context check (loop): %w / %w", err, context.Cause(ctx)) + return zerotime, false, err + } else { + if len(md.props) > 0 { + for _, invprop := range md.props { + if bucket, ok := bucketsByPropName[invprop.Name]; ok { + propLen := t.calcPropLenInverted(invprop.Items) + for _, item := range invprop.Items { + pair := shard.pairPropertyWithFrequency(md.docID, item.TermFrequency, propLen) + if err := shard.addToPropertyMapBucket(bucket, pair, item.Data); err != nil { + breakCh <- true + err = fmt.Errorf("adding object '%s' prop '%s': %w", md.key.String(), invprop.Name, err) + return zerotime, false, err + } + } + } + } + indexedCount++ + } + processedCount++ + lastProcessedKey = md.key + + breakCh <- processedCount%t.config.checkProcessingEveryNoObjects == 0 && (time.Since(processingStarted) > t.config.processingDuration || rt.IsPaused()) + time.Sleep(t.config.perObjectDelay) + } + } + if !bytes.Equal(lastStoredKey.Bytes(), lastProcessedKey.Bytes()) { + if err := rt.markProgress(lastProcessedKey, processedCount, indexedCount); err != nil { + err = fmt.Errorf("marking reindex progress: %w", err) + return zerotime, false, err + } + lastStoredKey = lastProcessedKey.Clone() + } + if finished { + if err = rt.markReindexed(); err != nil { + err = fmt.Errorf("marking reindexed: %w", err) + return zerotime, false, err + } + return zerotime, t.config.reloadShards, nil + } + return time.Now().Add(t.config.pauseDuration), false, nil +} + +func (t 
*ShardReindexTask_MapToBlockmax) mergeReindexAndIngestBuckets(ctx context.Context, + logger logrus.FieldLogger, shard ShardLike, rt mapToBlockmaxReindexTracker, props []string, +) error { + lsmPath := shard.pathLSM() + segmentPathsToMove := [][2]string{} + bucketPathsToRemove := make([]string, 0, len(props)) + lock := new(sync.Mutex) + + eg, gctx := enterrors.NewErrorGroupWithContextWrapper(logger, ctx) + eg.SetLimit(t.config.concurrency) + for i := range props { + propName := props[i] + + eg.Go(func() error { + reindexBucketName := t.reindexBucketName(propName) + reindexBucketPath := filepath.Join(lsmPath, reindexBucketName) + ingestBucketName := t.ingestBucketName(propName) + ingestBucketPath := filepath.Join(lsmPath, ingestBucketName) + + for { + propSegmentPathsToMove, needsRecover, err := t.getSegmentPathsToMove(reindexBucketPath, ingestBucketPath) + if err != nil { + return fmt.Errorf("buckets '%s' & '%s': %w", reindexBucketName, ingestBucketName, err) + } + + if needsRecover { + if err := t.recoverReindexBucket(gctx, logger, shard, reindexBucketName); err != nil { + return err + } + } else { + lock.Lock() + bucketPathsToRemove = append(bucketPathsToRemove, reindexBucketPath) + segmentPathsToMove = append(segmentPathsToMove, propSegmentPathsToMove...) + lock.Unlock() + return nil + } + } + }) + } + if err := eg.Wait(); err != nil { + return err + } + + logger.WithField("segments_to_move", segmentPathsToMove).WithField("buckets_to_remove", bucketPathsToRemove). 
+ Debug("merging reindex and ingest buckets") + + eg, _ = enterrors.NewErrorGroupWithContextWrapper(logger, ctx) + eg.SetLimit(t.config.concurrency) + for i := range segmentPathsToMove { + i := i + eg.Go(func() error { + return os.Rename(segmentPathsToMove[i][0], segmentPathsToMove[i][1]) + }) + } + if err := eg.Wait(); err != nil { + return fmt.Errorf("moving segment: %w", err) + } + + eg, _ = enterrors.NewErrorGroupWithContextWrapper(logger, ctx) + eg.SetLimit(t.config.concurrency) + for i := range bucketPathsToRemove { + i := i + eg.Go(func() error { + return os.RemoveAll(bucketPathsToRemove[i]) + }) + } + if err := eg.Wait(); err != nil { + return fmt.Errorf("removing bucket: %w", err) + } + + if err := rt.markMerged(); err != nil { + return fmt.Errorf("marking reindex merged: %w", err) + } + return nil +} + +func (t *ShardReindexTask_MapToBlockmax) getSegmentPathsToMove(bucketPathSrc, bucketPathDst string, +) ([][2]string, bool, error) { + segmentPaths := [][2]string{} + needsRecover := false + + err := filepath.WalkDir(bucketPathSrc, func(path string, d os.DirEntry, err error) error { + if d.IsDir() { + return nil + } + if t.isSegmentDb(d.Name()) || t.isSegmentBloom(d.Name()) { + ext := filepath.Ext(d.Name()) + id := strings.TrimSuffix(strings.TrimPrefix(d.Name(), "segment-"), ext) + timestamp, err := strconv.ParseInt(id, 10, 64) + if err != nil { + return err + } + timestampPast := time.Unix(0, timestamp).AddDate(-23, 0, 0).UnixNano() + + segmentPaths = append(segmentPaths, [2]string{ + path, filepath.Join(bucketPathDst, fmt.Sprintf("segment-%d%s", timestampPast, ext)), + }) + } else if t.isSegmentWal(d.Name()) { + if info, err := d.Info(); err != nil { + return err + } else if info.Size() > 0 { + needsRecover = true + return filepath.SkipAll + } + } + return nil + }) + if err != nil { + return nil, false, err + } + if needsRecover { + return nil, true, nil + } + return segmentPaths, false, nil +} + +func (t *ShardReindexTask_MapToBlockmax) 
swapIngestAndMapBuckets(ctx context.Context, + logger logrus.FieldLogger, shard ShardLike, rt mapToBlockmaxReindexTracker, props []string, +) error { + lsmPath := shard.pathLSM() + + eg, _ := enterrors.NewErrorGroupWithContextWrapper(logger, ctx) + eg.SetLimit(t.config.concurrency) + for i := range props { + propName := props[i] + + if !rt.IsSwappedProp(props[i]) { + eg.Go(func() error { + bucketName := helpers.BucketSearchableFromPropNameLSM(propName) + bucketPath := filepath.Join(lsmPath, bucketName) + ingestBucketName := t.ingestBucketName(propName) + ingestBucketPath := filepath.Join(lsmPath, ingestBucketName) + mapBucketName := t.mapBucketName(propName) + mapBucketPath := filepath.Join(lsmPath, mapBucketName) + + logger.WithFields(map[string]any{ + "bucket": bucketName, + "ingest_bucket": ingestBucketName, + "map_bucket": mapBucketName, + }).Debug("swapping buckets") + + if err := os.Rename(bucketPath, mapBucketPath); err != nil { + return err + } + if err := os.Rename(ingestBucketPath, bucketPath); err != nil { + return err + } + if err := rt.markSwappedProp(propName); err != nil { + return fmt.Errorf("marking reindex swapped for '%s': %w", propName, err) + } + return nil + }) + } + } + if err := eg.Wait(); err != nil { + return err + } + if err := rt.markSwapped(); err != nil { + return fmt.Errorf("marking reindex swapped: %w", err) + } + + logger.Debug("swapped searchable buckets") + + return nil +} + +func (t *ShardReindexTask_MapToBlockmax) unswapIngestAndMapBuckets(ctx context.Context, + logger logrus.FieldLogger, shard ShardLike, rt mapToBlockmaxReindexTracker, props []string, +) error { + lsmPath := shard.pathLSM() + + eg, _ := enterrors.NewErrorGroupWithContextWrapper(logger, ctx) + eg.SetLimit(t.config.concurrency) + for i := range props { + propName := props[i] + + if rt.IsSwappedProp(props[i]) { + eg.Go(func() error { + bucketName := helpers.BucketSearchableFromPropNameLSM(propName) + bucketPath := filepath.Join(lsmPath, bucketName) + 
ingestBucketName := t.ingestBucketName(propName) + ingestBucketPath := filepath.Join(lsmPath, ingestBucketName) + mapBucketName := t.mapBucketName(propName) + mapBucketPath := filepath.Join(lsmPath, mapBucketName) + + logger.WithFields(map[string]any{ + "bucket": bucketName, + "ingest_bucket": ingestBucketName, + "map_bucket": mapBucketName, + }).Debug("unswapping buckets") + + if err := os.Rename(bucketPath, ingestBucketPath); err != nil { + return err + } + if err := os.Rename(mapBucketPath, bucketPath); err != nil { + return err + } + if err := rt.unmarkSwappedProp(propName); err != nil { + return fmt.Errorf("unmarking reindex swapped for '%s': %w", propName, err) + } + return nil + }) + } + } + if err := eg.Wait(); err != nil { + return err + } + if err := rt.unmarkSwapped(); err != nil { + return fmt.Errorf("unmarking reindex swapped: %w", err) + } + + logger.Debug("unswapped searchable buckets") + + return nil +} + +func (t *ShardReindexTask_MapToBlockmax) tidyMapBuckets(ctx context.Context, + logger logrus.FieldLogger, shard ShardLike, rt mapToBlockmaxReindexTracker, props []string, +) error { + lsmPath := shard.pathLSM() + + eg, _ := enterrors.NewErrorGroupWithContextWrapper(logger, ctx) + eg.SetLimit(t.config.concurrency) + for i := range props { + propName := props[i] + + eg.Go(func() error { + bucketName := t.mapBucketName(propName) + bucketPath := filepath.Join(lsmPath, bucketName) + if err := os.RemoveAll(bucketPath); err != nil { + return err + } + return nil + }) + } + + if err := eg.Wait(); err != nil { + return err + } + if err := rt.markTidied(); err != nil { + return fmt.Errorf("marking reindex tidied: %w", err) + } + return nil +} + +func (t *ShardReindexTask_MapToBlockmax) loadReindexSearchBuckets(ctx context.Context, + logger logrus.FieldLogger, shard ShardLike, props []string, +) error { + bucketOpts := t.bucketOptions(shard, lsmkv.StrategyInverted, false, false, t.config.memtableOptBlockmaxFactor) + return t.loadBuckets(ctx, logger, shard, 
props, t.reindexBucketName, bucketOpts) +} + +func (t *ShardReindexTask_MapToBlockmax) loadIngestSearchBuckets(ctx context.Context, + logger logrus.FieldLogger, shard ShardLike, props []string, + keepLevelCompaction, keepTombstones bool, +) error { + bucketOpts := t.bucketOptions(shard, lsmkv.StrategyInverted, keepLevelCompaction, keepTombstones, t.config.memtableOptBlockmaxFactor) + return t.loadBuckets(ctx, logger, shard, props, t.ingestBucketName, bucketOpts) +} + +func (t *ShardReindexTask_MapToBlockmax) loadMapSearchBuckets(ctx context.Context, + logger logrus.FieldLogger, shard ShardLike, props []string, +) error { + bucketOpts := t.bucketOptions(shard, lsmkv.StrategyMapCollection, false, false, 1) + return t.loadBuckets(ctx, logger, shard, props, t.mapBucketName, bucketOpts) +} + +func (t *ShardReindexTask_MapToBlockmax) loadBuckets(ctx context.Context, + logger logrus.FieldLogger, shard ShardLike, props []string, bucketNamer func(string) string, + bucketOpts []lsmkv.BucketOption, +) error { + store := shard.Store() + + eg, gctx := enterrors.NewErrorGroupWithContextWrapper(logger, ctx) + eg.SetLimit(t.config.concurrency) + for i := range props { + propName := props[i] + + eg.Go(func() error { + bucketName := bucketNamer(propName) + logger.WithField("bucket", bucketName).Debug("loading bucket") + if err := store.CreateOrLoadBucket(gctx, bucketName, bucketOpts...); err != nil { + return err + } + logger.WithField("bucket", bucketName).Debug("bucket loaded") + return nil + }) + } + + if err := eg.Wait(); err != nil { + return err + } + return nil +} + +func (t *ShardReindexTask_MapToBlockmax) unloadIngestBuckets(ctx context.Context, + logger logrus.FieldLogger, shard ShardLike, props []string, +) error { + return t.unloadBuckets(ctx, logger, shard, props, t.ingestBucketName) +} + +func (t *ShardReindexTask_MapToBlockmax) unloadReindexBuckets(ctx context.Context, + logger logrus.FieldLogger, shard ShardLike, props []string, +) error { + return 
t.unloadBuckets(ctx, logger, shard, props, t.reindexBucketName) +} + +func (t *ShardReindexTask_MapToBlockmax) unloadBuckets(ctx context.Context, + logger logrus.FieldLogger, shard ShardLike, props []string, bucketNamer func(string) string, +) error { + store := shard.Store() + + eg, gctx := enterrors.NewErrorGroupWithContextWrapper(logger, ctx) + eg.SetLimit(t.config.concurrency) + for i := range props { + propName := props[i] + + eg.Go(func() error { + bucketName := bucketNamer(propName) + logger.WithField("bucket", bucketName).Debug("loading bucket") + if err := store.ShutdownBucket(gctx, bucketName); err != nil { + return err + } + logger.WithField("bucket", bucketName).Debug("bucket loaded") + return nil + }) + } + + if err := eg.Wait(); err != nil { + return err + } + return nil +} + +func (t *ShardReindexTask_MapToBlockmax) recoverReindexBucket(ctx context.Context, + logger logrus.FieldLogger, shard ShardLike, bucketName string, +) error { + store := shard.Store() + bucketOpts := t.bucketOptions(shard, lsmkv.StrategyInverted, true, false, t.config.memtableOptBlockmaxFactor) + + logger.WithField("bucket", bucketName).Debug("recover wals, loading bucket") + if err := store.CreateOrLoadBucket(ctx, bucketName, bucketOpts...); err != nil { + return fmt.Errorf("bucket '%s': %w", bucketName, err) + } + logger.WithField("bucket", bucketName).Debug("recover wals, shutting down bucket") + if err := store.ShutdownBucket(ctx, bucketName); err != nil { + return fmt.Errorf("bucket '%s': %w", bucketName, err) + } + logger.WithField("bucket", bucketName).Debug("recover wals, shut down bucket") + + return nil +} + +func (t *ShardReindexTask_MapToBlockmax) removeReindexBucketsDirs(ctx context.Context, logger logrus.FieldLogger, + shard ShardLike, props []string, +) error { + return t.removeBucketsDirs(ctx, logger, shard, props, t.reindexBucketName) +} + +func (t *ShardReindexTask_MapToBlockmax) removeIngestBucketsDirs(ctx context.Context, logger logrus.FieldLogger, + shard 
ShardLike, props []string, +) error { + return t.removeBucketsDirs(ctx, logger, shard, props, t.ingestBucketName) +} + +func (t *ShardReindexTask_MapToBlockmax) removeBucketsDirs(ctx context.Context, logger logrus.FieldLogger, + shard ShardLike, props []string, bucketNamer func(string) string, +) error { + lsmPath := shard.pathLSM() + eg, _ := enterrors.NewErrorGroupWithContextWrapper(logger, ctx) + eg.SetLimit(t.config.concurrency) + for i := range props { + propName := props[i] + + eg.Go(func() error { + bucketName := bucketNamer(propName) + bucketPath := filepath.Join(lsmPath, bucketName) + + logger.WithField("bucket", bucketName).Debug("removing bucket") + + return os.RemoveAll(bucketPath) + }) + } + return eg.Wait() +} + +func (t *ShardReindexTask_MapToBlockmax) duplicateToIngestBuckets(shard *Shard, props []string, +) error { + return t.duplicateToBuckets(shard, props, t.ingestBucketName, t.calcPropLenInverted) +} + +func (t *ShardReindexTask_MapToBlockmax) duplicateToMapBuckets(shard *Shard, props []string, +) error { + return t.duplicateToBuckets(shard, props, t.mapBucketName, t.calcPropLenMap) +} + +func (t *ShardReindexTask_MapToBlockmax) duplicateToBuckets(shard *Shard, props []string, + bucketNamer func(string) string, calcPropLen func([]inverted.Countable) float32, +) error { + propsByName := map[string]struct{}{} + for i := range props { + propsByName[props[i]] = struct{}{} + } + + shard.registerAddToPropertyValueIndex(func(s *Shard, docID uint64, property *inverted.Property) error { + if !property.HasSearchableIndex { + return nil + } + if _, ok := propsByName[property.Name]; !ok { + return nil + } + + bucketName := bucketNamer(property.Name) + bucket := s.store.Bucket(bucketName) + propLen := calcPropLen(property.Items) + for _, item := range property.Items { + pair := s.pairPropertyWithFrequency(docID, item.TermFrequency, propLen) + if err := s.addToPropertyMapBucket(bucket, pair, item.Data); err != nil { + return fmt.Errorf("adding prop '%s' to 
bucket '%s': %w", item.Data, bucketName, err) + } + } + return nil + }) + shard.registerDeleteFromPropertyValueIndex(func(s *Shard, docID uint64, property *inverted.Property) error { + if !property.HasSearchableIndex { + return nil + } + if _, ok := propsByName[property.Name]; !ok { + return nil + } + + bucketName := bucketNamer(property.Name) + bucket := s.store.Bucket(bucketName) + for _, item := range property.Items { + if err := s.deleteInvertedIndexItemWithFrequencyLSM(bucket, item, docID); err != nil { + return fmt.Errorf("deleting prop '%s' from bucket '%s': %w", item.Data, bucketName, err) + } + } + return nil + }) + + return nil +} + +func (t *ShardReindexTask_MapToBlockmax) isSegmentDb(filename string) bool { + return strings.HasPrefix(filename, "segment-") && strings.HasSuffix(filename, ".db") +} + +func (t *ShardReindexTask_MapToBlockmax) isSegmentBloom(filename string) bool { + return strings.HasPrefix(filename, "segment-") && strings.HasSuffix(filename, ".bloom") +} + +func (t *ShardReindexTask_MapToBlockmax) isSegmentWal(filename string) bool { + return strings.HasPrefix(filename, "segment-") && strings.HasSuffix(filename, ".wal") +} + +func (t *ShardReindexTask_MapToBlockmax) calcPropLenMap(items []inverted.Countable) float32 { + return float32(len(items)) +} + +func (t *ShardReindexTask_MapToBlockmax) calcPropLenInverted(items []inverted.Countable) float32 { + propLen := float32(0) + for _, item := range items { + propLen += item.TermFrequency + } + return propLen +} + +func (t *ShardReindexTask_MapToBlockmax) bucketOptions(shard ShardLike, strategy string, + keepLevelCompaction, keepTombstones bool, memtableOptFactor int, +) []lsmkv.BucketOption { + index := shard.Index() + + opts := []lsmkv.BucketOption{ + lsmkv.WithDirtyThreshold(time.Duration(index.Config.MemtablesFlushDirtyAfter) * time.Second), + lsmkv.WithDynamicMemtableSizing( + index.Config.MemtablesInitialSizeMB*memtableOptFactor, + index.Config.MemtablesMaxSizeMB*memtableOptFactor, + 
index.Config.MemtablesMinActiveSeconds*memtableOptFactor, + index.Config.MemtablesMaxActiveSeconds*memtableOptFactor, + ), + lsmkv.WithPread(index.Config.AvoidMMap), + lsmkv.WithAllocChecker(index.allocChecker), + lsmkv.WithMaxSegmentSize(index.Config.MaxSegmentSize), + lsmkv.WithSegmentsChecksumValidationEnabled(index.Config.LSMEnableSegmentsChecksumValidation), + lsmkv.WithStrategy(strategy), + lsmkv.WithKeepLevelCompaction(keepLevelCompaction), + lsmkv.WithKeepTombstones(keepTombstones), + lsmkv.WithWriteSegmentInfoIntoFileName(index.Config.SegmentInfoIntoFileNameEnabled), + lsmkv.WithWriteMetadata(index.Config.WriteMetadataFilesEnabled), + } + + if strategy == lsmkv.StrategyMapCollection && shard.Versioner().Version() < 2 { + opts = append(opts, lsmkv.WithLegacyMapSorting()) + } + + return opts +} + +func (t *ShardReindexTask_MapToBlockmax) reindexBucketName(propName string) string { + return helpers.BucketSearchableFromPropNameLSM(propName) + "__blockmax_reindex" +} + +func (t *ShardReindexTask_MapToBlockmax) ingestBucketName(propName string) string { + return helpers.BucketSearchableFromPropNameLSM(propName) + "__blockmax_ingest" +} + +func (t *ShardReindexTask_MapToBlockmax) mapBucketName(propName string) string { + return helpers.BucketSearchableFromPropNameLSM(propName) + "__blockmax_map" +} + +func (t *ShardReindexTask_MapToBlockmax) findPropsToReindex(shard ShardLike) (props []string, save bool) { + collectionName := shard.Index().Config.ClassName.String() + shardName := shard.Name() + propNames := []string{} + + if !t.isShardSelected(collectionName, shardName) { + return propNames, false + } + + checkPropSelected := func(propName string) bool { return true } + if t.config.selectionEnabled { + if selectedProps := t.config.selectedPropsByCollection[collectionName]; len(selectedProps) > 0 { + checkPropSelected = func(propName string) bool { + _, ok := selectedProps[propName] + return ok + } + } + } + + for name, bucket := range 
shard.Store().GetBucketsByName() { + if bucket.Strategy() == lsmkv.StrategyMapCollection { + propName, indexType := GetPropNameAndIndexTypeFromBucketName(name) + + if indexType == IndexTypePropSearchableValue && checkPropSelected(propName) { + propNames = append(propNames, propName) + } + } + } + return propNames, true +} + +func (t *ShardReindexTask_MapToBlockmax) getPropsToReindex(shard ShardLike, rt mapToBlockmaxReindexTracker) ([]string, error) { + if rt.HasProps() { + props, err := rt.GetProps() + if err != nil { + return nil, err + } + return props, nil + } + props, save := t.findPropsToReindex(shard) + if save { + if err := rt.saveProps(props); err != nil { + return nil, err + } + } + return props, nil +} + +func (t *ShardReindexTask_MapToBlockmax) readPropsToReindex(rt mapToBlockmaxReindexTracker) ([]string, error) { + if rt.HasProps() { + props, err := rt.GetProps() + if err != nil { + return nil, err + } + return props, nil + } + return []string{}, nil +} + +func (t *ShardReindexTask_MapToBlockmax) isShardSelected(collectionName, shardName string) bool { + if t.config.selectionEnabled { + selectedShards, isCollectionSelected := t.config.selectedShardsByCollection[collectionName] + if !isCollectionSelected { + return false + } + + if len(selectedShards) > 0 { + if _, isShardSelected := selectedShards[shardName]; !isShardSelected { + return false + } + } + } + return true +} + +// ----------------------------------------------------------------------------- + +type migrationData struct { + key indexKey + docID uint64 + props []inverted.Property + err error +} + +type objectsIteratorAsync func(logger logrus.FieldLogger, shard ShardLike, lastKey indexKey, keyParse func([]byte) indexKey, propExtraction *storobj.PropertyExtraction, reindexStarted time.Time, breakCh <-chan bool, +) (time.Time, <-chan *migrationData) + +func uuidObjectsIteratorAsync(logger logrus.FieldLogger, shard ShardLike, lastKey indexKey, keyParse func([]byte) indexKey, + propExtraction 
*storobj.PropertyExtraction, reindexStarted time.Time, breakCh <-chan bool, +) (time.Time, <-chan *migrationData) { + startedCh := make(chan time.Time) + mdCh := make(chan *migrationData) + + enterrors.GoWrapper(func() { + cursor := shard.Store().Bucket(helpers.ObjectsBucketLSM).CursorOnDisk() + defer cursor.Close() + + startedCh <- time.Now() // after cursor created (necessary locks acquired) + addProps := additional.Properties{} + + var k, v []byte + if lastKey == nil { + k, v = cursor.First() + } else { + key := lastKey.Bytes() + k, v = cursor.Seek(key) + if bytes.Equal(k, key) { + k, v = cursor.Next() + } + } + + for ; k != nil; k, v = cursor.Next() { + ik := keyParse(k) + obj, err := storobj.FromBinaryOptional(v, addProps, propExtraction) + if err != nil { + mdCh <- &migrationData{err: fmt.Errorf("unmarshalling object '%s': %w", ik.String(), err)} + break + } + + if obj.LastUpdateTimeUnix() < reindexStarted.UnixMilli() { + props, _, err := shard.AnalyzeObject(obj) + if err != nil { + mdCh <- &migrationData{err: fmt.Errorf("analyzing object '%s': %w", ik.String(), err)} + break + } + + if <-breakCh { + break + } + mdCh <- &migrationData{key: ik.Clone(), props: props, docID: obj.DocID} + } else { + if <-breakCh { + break + } + mdCh <- &migrationData{key: ik.Clone()} + } + } + if k == nil { + <-breakCh + mdCh <- nil + } + close(mdCh) + }, logger) + + return <-startedCh, mdCh +} + +// ----------------------------------------------------------------------------- + +type mapToBlockmaxReindexTracker interface { + HasStartCondition() bool + IsStarted() bool + markStarted(time.Time) error + getStarted() (time.Time, error) + + markProgress(lastProcessedKey indexKey, processedCount, indexedCount int) error + GetProgress() (indexKey, *time.Time, error) + + IsReindexed() bool + markReindexed() error + + IsMerged() bool + markMerged() error + + IsSwapped() bool + markSwapped() error + unmarkSwapped() error + IsSwappedProp(propName string) bool + markSwappedProp(propName 
string) error + unmarkSwappedProp(propName string) error + + IsTidied() bool + markTidied() error + + HasProps() bool + GetProps() ([]string, error) + saveProps([]string) error + + IsPaused() bool + IsRollback() bool + IsReset() bool + + reset() error + + checkOverrides(logger logrus.FieldLogger, config *mapToBlockmaxConfig) +} + +func NewFileMapToBlockmaxReindexTracker(lsmPath string, keyParser indexKeyParser) *fileMapToBlockmaxReindexTracker { + return &fileMapToBlockmaxReindexTracker{ + progressCheckpoint: 1, + keyParser: keyParser, + config: fileReindexTrackerConfig{ + filenameStart: "start.mig", + filenameStarted: "started.mig", + filenameProgress: "progress.mig", + filenameReindexed: "reindexed.mig", + filenameMerged: "merged.mig", + filenameSwapped: "swapped.mig", + filenameTidied: "tidied.mig", + filenameProperties: "properties.mig", + filenameRollback: "rollback.mig", + filenameReset: "reset.mig", + filenamePaused: "paused.mig", + filenameOverrides: "overrides.mig", + migrationPath: filepath.Join(lsmPath, ".migrations", "searchable_map_to_blockmax"), + }, + } +} + +type fileMapToBlockmaxReindexTracker struct { + progressCheckpoint int + keyParser indexKeyParser + config fileReindexTrackerConfig +} + +type fileReindexTrackerConfig struct { + filenameStart string + filenameStarted string + filenameProgress string + filenameReindexed string + filenameMerged string + filenameSwapped string + filenameTidied string + filenameProperties string + filenameRollback string + filenameReset string + filenamePaused string + filenameOverrides string + migrationPath string +} + +func (t *fileMapToBlockmaxReindexTracker) init() error { + if err := os.MkdirAll(t.config.migrationPath, 0o777); err != nil { + return err + } + return nil +} + +func (t *fileMapToBlockmaxReindexTracker) HasStartCondition() bool { + return t.fileExists(t.config.filenameStart) +} + +func (t *fileMapToBlockmaxReindexTracker) IsStarted() bool { + return t.fileExists(t.config.filenameStarted) +} + 
+func (t *fileMapToBlockmaxReindexTracker) markStarted(started time.Time) error { + return t.createFile(t.config.filenameStarted, []byte(t.encodeTime(started))) +} + +func (t *fileMapToBlockmaxReindexTracker) getTime(filePath string) (time.Time, error) { + path := t.filepath(filePath) + content, err := os.ReadFile(path) + if err != nil { + return time.Time{}, err + } + return t.decodeTime(string(content)) +} + +func (t *fileMapToBlockmaxReindexTracker) getStarted() (time.Time, error) { + return t.getTime(t.config.filenameStarted) +} + +func (t *fileMapToBlockmaxReindexTracker) findLastProgressFile() (string, error) { + prefix := t.config.filenameProgress + "." + expectedLen := len(prefix) + 9 // 9 digits + + lastProgressFilename := "" + err := filepath.WalkDir(t.config.migrationPath, func(path string, d os.DirEntry, err error) error { + // skip parent and children dirs + if path != t.config.migrationPath { + if d.IsDir() { + return filepath.SkipDir + } + if name := d.Name(); len(name) == expectedLen && strings.HasPrefix(name, prefix) { + lastProgressFilename = name + } + } + return nil + }) + + return lastProgressFilename, err +} + +func (t *fileMapToBlockmaxReindexTracker) markProgress(lastProcessedKey indexKey, processedCount, indexedCount int) error { + filename := fmt.Sprintf("%s.%09d", t.config.filenameProgress, t.progressCheckpoint) + content := strings.Join([]string{ + t.encodeTime(time.Now()), + lastProcessedKey.String(), + fmt.Sprintf("all %d", processedCount), + fmt.Sprintf("idx %d", indexedCount), + }, "\n") + + if err := t.createFile(filename, []byte(content)); err != nil { + return err + } + t.progressCheckpoint++ + return nil +} + +func (t *fileMapToBlockmaxReindexTracker) GetProgress() (indexKey, *time.Time, error) { + filename, err := t.findLastProgressFile() + if err != nil { + return nil, nil, err + } + if filename == "" { + return t.keyParser.FromBytes(nil), nil, nil + } + + checkpoint, err := strconv.Atoi(strings.TrimPrefix(filename, 
t.config.filenameProgress+".")) + if err != nil { + return nil, nil, err + } + + path := t.filepath(filename) + content, err := os.ReadFile(path) + if err != nil { + return nil, nil, err + } + + split := strings.Split(string(content), "\n") + key, err := t.keyParser.FromString(split[1]) + if err != nil { + return nil, nil, err + } + + timeStr := strings.TrimSpace(split[0]) + if timeStr == "" { + return key, nil, fmt.Errorf("progress file '%s' is empty", filename) + } + + tm, err := t.decodeTime(timeStr) + if err != nil { + return nil, nil, fmt.Errorf("decoding time from '%s': %w", timeStr, err) + } + + t.progressCheckpoint = checkpoint + 1 + return key, &tm, nil +} + +func (t *fileMapToBlockmaxReindexTracker) parseProgressFile(filename string) (lastProcessedKey indexKey, tm time.Time, allCount int, idxCount int, err error) { + progressFilePath := filename + progressFile, err := os.ReadFile(progressFilePath) + if err != nil { + err = fmt.Errorf("failed to read %s: %w", progressFilePath, err) + return + } + + if len(progressFile) == 0 { + err = fmt.Errorf("progress file %s is empty", progressFilePath) + return + } + + progressFileFields := strings.Split(string(progressFile), "\n") + if len(progressFileFields) != 4 { + err = fmt.Errorf("progress file %s has unexpected format, expected 4 lines, got %d", progressFilePath, len(progressFileFields)) + return + } + + tm, err = t.decodeTime(strings.TrimSpace(progressFileFields[0])) + if err != nil { + err = fmt.Errorf("failed to parse timestamp from %s: %w", progressFilePath, err) + return + } + + lastProcessedKey, err = t.keyParser.FromString(progressFileFields[1]) + if err != nil { + err = fmt.Errorf("failed to parse last processed key from %s: %w", progressFilePath, err) + return + } + + allCount, err = strconv.Atoi(strings.Split(progressFileFields[2], " ")[1]) + if err != nil { + err = fmt.Errorf("failed to parse objects migrated count from %s: %w", progressFilePath, err) + return + } + + idxCount, err = 
strconv.Atoi(strings.Split(progressFileFields[3], " ")[1]) + if err != nil { + err = fmt.Errorf("failed to parse index count from %s: %w", progressFilePath, err) + return + } + + return +} + +func (t *fileMapToBlockmaxReindexTracker) GetMigratedCount() (objectsMigratedCountTotal int, snapshots []map[string]string, err error) { + snapshots = make([]map[string]string, 0) + files, err := os.ReadDir(t.config.migrationPath) + objectsMigratedCountTotal = 0 + progressCount := 0 + + if err != nil { + return + } + for _, file := range files { + if strings.HasPrefix(file.Name(), "progress.mig.") { + snapshot := map[string]string{ + "checkpoint": strings.TrimPrefix(file.Name(), "progress.mig."), + } + progressCount++ + progressFilePath := t.config.migrationPath + "/" + file.Name() + key, tm, allCount, idxCount, err2 := t.parseProgressFile(progressFilePath) + if err2 != nil { + err = fmt.Errorf("failed to parse progress file %s: %w", progressFilePath, err2) + return + } + + objectsMigratedCountTotal += allCount + snapshot["lastProcessedKey"] = key.String() + snapshot["timestamp"] = tm.Format(time.RFC3339) + snapshot["allCount"] = fmt.Sprintf("%d", allCount) + snapshot["idxCount"] = fmt.Sprintf("%d", idxCount) + snapshots = append(snapshots, snapshot) + } + } + return +} + +func (t *fileMapToBlockmaxReindexTracker) IsReindexed() bool { + return t.fileExists(t.config.filenameReindexed) +} + +func (t *fileMapToBlockmaxReindexTracker) markReindexed() error { + return t.createFile(t.config.filenameReindexed, []byte(t.encodeTimeNow())) +} + +func (t *fileMapToBlockmaxReindexTracker) getReindexed() (time.Time, error) { + return t.getTime(t.config.filenameReindexed) +} + +func (t *fileMapToBlockmaxReindexTracker) IsMerged() bool { + return t.fileExists(t.config.filenameMerged) +} + +func (t *fileMapToBlockmaxReindexTracker) markMerged() error { + return t.createFile(t.config.filenameMerged, []byte(t.encodeTimeNow())) +} + +func (t *fileMapToBlockmaxReindexTracker) getMerged() 
(time.Time, error) { + return t.getTime(t.config.filenameMerged) +} + +func (t *fileMapToBlockmaxReindexTracker) IsSwappedProp(propName string) bool { + return t.fileExists(t.config.filenameSwapped + "." + propName) +} + +func (t *fileMapToBlockmaxReindexTracker) markSwappedProp(propName string) error { + return t.createFile(t.config.filenameSwapped+"."+propName, []byte(t.encodeTimeNow())) +} + +func (t *fileMapToBlockmaxReindexTracker) unmarkSwappedProp(propName string) error { + return t.removeFile(t.config.filenameSwapped + "." + propName) +} + +func (t *fileMapToBlockmaxReindexTracker) IsSwapped() bool { + return t.fileExists(t.config.filenameSwapped) +} + +func (t *fileMapToBlockmaxReindexTracker) markSwapped() error { + return t.createFile(t.config.filenameSwapped, []byte(t.encodeTimeNow())) +} + +func (t *fileMapToBlockmaxReindexTracker) unmarkSwapped() error { + return t.removeFile(t.config.filenameSwapped) +} + +func (t *fileMapToBlockmaxReindexTracker) getSwapped() (time.Time, error) { + return t.getTime(t.config.filenameSwapped) +} + +func (t *fileMapToBlockmaxReindexTracker) IsTidied() bool { + return t.fileExists(t.config.filenameTidied) +} + +func (t *fileMapToBlockmaxReindexTracker) getTidied() (time.Time, error) { + return t.getTime(t.config.filenameTidied) +} + +func (t *fileMapToBlockmaxReindexTracker) markTidied() error { + return t.createFile(t.config.filenameTidied, []byte(t.encodeTimeNow())) +} + +func (t *fileMapToBlockmaxReindexTracker) filepath(filename string) string { + return filepath.Join(t.config.migrationPath, filename) +} + +func (t *fileMapToBlockmaxReindexTracker) fileExists(filename string) bool { + if _, err := os.Stat(t.filepath(filename)); err == nil { + return true + } else if errors.Is(err, os.ErrNotExist) { + return false + } + return false +} + +func (t *fileMapToBlockmaxReindexTracker) createFile(filename string, content []byte) error { + path := t.filepath(filename) + file, err := os.OpenFile(path, 
os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o777) + if err != nil { + return err + } + defer file.Close() + + if len(content) > 0 { + _, err = file.Write(content) + return err + } + return nil +} + +func (t *fileMapToBlockmaxReindexTracker) removeFile(filename string) error { + if err := os.Remove(t.filepath(filename)); err != nil { + if !errors.Is(err, os.ErrNotExist) { + return err + } + } + return nil +} + +func (t *fileMapToBlockmaxReindexTracker) encodeTimeNow() string { + return t.encodeTime(time.Now()) +} + +func (t *fileMapToBlockmaxReindexTracker) encodeTime(tm time.Time) string { + return tm.UTC().Format(time.RFC3339Nano) +} + +func (t *fileMapToBlockmaxReindexTracker) decodeTime(tm string) (time.Time, error) { + return time.Parse(time.RFC3339Nano, tm) +} + +func (t *fileMapToBlockmaxReindexTracker) HasProps() bool { + return t.fileExists(t.config.filenameProperties) +} + +func (t *fileMapToBlockmaxReindexTracker) saveProps(propNames []string) error { + props := []byte(strings.Join(propNames, ",")) + return t.createFile(t.config.filenameProperties, props) +} + +func (t *fileMapToBlockmaxReindexTracker) GetProps() ([]string, error) { + content, err := os.ReadFile(t.filepath(t.config.filenameProperties)) + if err != nil { + return nil, err + } + if len(content) == 0 { + return []string{}, nil + } + return strings.Split(strings.TrimSpace(string(content)), ","), nil +} + +func (t *fileMapToBlockmaxReindexTracker) IsReset() bool { + return t.fileExists(t.config.filenameReset) +} + +func (t *fileMapToBlockmaxReindexTracker) reset() error { + return os.RemoveAll(t.config.migrationPath) +} + +func (t *fileMapToBlockmaxReindexTracker) IsRollback() bool { + return t.fileExists(t.config.filenameRollback) +} + +func (t *fileMapToBlockmaxReindexTracker) IsPaused() bool { + return t.fileExists(t.config.filenamePaused) +} + +func (t *fileMapToBlockmaxReindexTracker) GetStatusStrings() (status string, message string, action string) { + if !t.IsStarted() { + status = "not 
started" + message = "reindexing not started" + action = "enable relevant REINDEX_MAP_TO_BLOCKMAX_* env vars" + if t.HasStartCondition() { + message = "reindexing will start on next restart" + action = "restart" + } + return + } + message = "reindexing started" + action = "wait" + + if !t.HasProps() { + status = "computing properties" + message = "computing properties to reindex" + return + } + + count, _, err := t.GetMigratedCount() + if err != nil { + status = "error" + message = fmt.Sprintf("failed to get migrated count: %v", err) + return + } + + status = "in progress" + + if count == 0 { + message = "reindexing just started, no snapshots yet" + } + + if t.IsReindexed() { + status = "reindexed" + message = "reindexing done, needs restart to merge buckets" + action = "restart" + } + + if t.IsMerged() { + status = "merged" + message = "reindexing done, buckets merged" + action = "restart" + } + + if t.IsSwapped() { + status = "swapped" + message = "reindexing done, buckets swapped" + action = "restart" + } + + if t.IsPaused() { + status = "paused" + message = "reindexing paused, needs resume or rollback" + action = "resume or rollback" + } + + if t.IsRollback() { + status = "rollback" + message = "reindexing rollback in progress, will finish on next restart" + action = "restart" + } + + if t.IsTidied() { + status = "tidied" + message = "reindexing done, buckets tidied" + action = "nothing to do" + } + + return +} + +func (t *fileMapToBlockmaxReindexTracker) GetTimes() map[string]string { + times := map[string]string{} + + started, err := t.getStarted() + if err != nil { + times["started"] = "" + } else { + times["started"] = t.encodeTime(started) + } + _, tm, _ := t.GetProgress() + if tm == nil { + times["reindexSnapshot"] = "" + } else { + times["reindexSnapshot"] = t.encodeTime(*tm) + } + + reindexed, err := t.getReindexed() + if err != nil { + times["reindexFinished"] = "" + } else { + times["reindexFinished"] = t.encodeTime(reindexed) + } + merged, err := 
t.getMerged() + if err != nil { + times["merged"] = "" + } else { + times["merged"] = t.encodeTime(merged) + } + + swapped, err := t.getSwapped() + if err != nil { + times["swapped"] = "" + } else { + times["swapped"] = t.encodeTime(swapped) + } + + tidied, err := t.getTidied() + if err != nil { + times["tidied"] = "" + } else { + times["tidied"] = t.encodeTime(tidied) + } + + return times +} + +func (t *fileMapToBlockmaxReindexTracker) checkOverrides(logger logrus.FieldLogger, config *mapToBlockmaxConfig) { + if !t.fileExists(t.config.filenameOverrides) { + return + } + if config == nil { + return + } + content, err := os.ReadFile(t.filepath(t.config.filenameOverrides)) + if err != nil { + return + } + lines := strings.Split(strings.TrimSpace(string(content)), "\n") + if len(lines) == 0 { + return + } + + for _, line := range lines { + parts := strings.SplitN(line, "=", 2) + if len(parts) != 2 { + logger.WithField("line", line).Warn("invalid override line, expected 'key=value'") + continue + } + key := strings.TrimSpace(parts[0]) + value := strings.TrimSpace(parts[1]) + logger.WithFields(logrus.Fields{ + "key": key, + "value": value, + }).Info("processing override") + + switch key { + case "swapBuckets": + config.swapBuckets = entcfg.Enabled(value) + case "unswapBuckets": + config.unswapBuckets = entcfg.Enabled(value) + case "tidyBuckets": + config.tidyBuckets = entcfg.Enabled(value) + case "reloadShards": + config.reloadShards = entcfg.Enabled(value) + case "rollback": + config.rollback = entcfg.Enabled(value) + case "conditionalStart": + config.conditionalStart = entcfg.Enabled(value) + case "concurrency": + concurrency, err := strconv.Atoi(value) + if err != nil { + logger.WithField("value", value).Warn("invalid concurrency value, must be an integer") + continue + } + if concurrency <= 0 { + logger.WithField("value", value).Warn("invalid concurrency value, must be greater than 0") + continue + } + config.concurrency = concurrency + case 
"memtableOptBlockmaxFactor": + memtableOptBlockmaxFactor, err := strconv.Atoi(value) + if err != nil { + logger.WithField("value", value).Warn("invalid memtableOptBlockmaxFactor value, must be an integer") + continue + } + if memtableOptBlockmaxFactor <= 0 { + logger.WithField("value", value).Warn("invalid memtableOptBlockmaxFactor value, must be greater than 0") + continue + } + config.memtableOptBlockmaxFactor = memtableOptBlockmaxFactor + case "processingDuration": + processingDuration, err := time.ParseDuration(value) + if err != nil { + logger.WithField("value", value).Warnf("invalid processingDuration value: %v", err) + continue + } + if processingDuration <= 0 { + logger.WithField("value", value).Warn("invalid processingDuration value, must be greater than 0") + continue + } + config.processingDuration = processingDuration + case "pauseDuration": + pauseDuration, err := time.ParseDuration(value) + if err != nil { + logger.WithField("value", value).Warnf("invalid pauseDuration value: %v", err) + continue + } + if pauseDuration <= 0 { + logger.WithField("value", value).Warn("invalid pauseDuration value, must be greater than 0") + continue + } + config.pauseDuration = pauseDuration + case "perObjectDelay": + perObjectDelay, err := time.ParseDuration(value) + if err != nil { + logger.WithField("value", value).Warnf("invalid perObjectDelay value: %v", err) + continue + } + if perObjectDelay < 0 { + logger.WithField("value", value).Warn("invalid perObjectDelay value, must be greater than or equal to 0") + continue + } + config.perObjectDelay = perObjectDelay + case "checkProcessingEveryNoObjects": + checkProcessingEveryNoObjects, err := strconv.Atoi(value) + if err != nil { + logger.WithField("value", value).Warnf("invalid checkProcessingEveryNoObjects value: %v", err) + continue + } + if checkProcessingEveryNoObjects <= 0 { + logger.WithField("value", value).Warn("invalid checkProcessingEveryNoObjects value, must be greater than 0") + continue + } + 
config.checkProcessingEveryNoObjects = checkProcessingEveryNoObjects + default: + logger.WithField("key", key).Warnf("unknown override key, ignoring: %s", key) + continue + } + } + + logger.WithField("config", fmt.Sprintf("%+v", config)).Debug("reindex config overrides applied") +} + +// ----------------------------------------------------------------------------- + +type indexKey interface { + String() string + Bytes() []byte + Clone() indexKey +} + +type uuidBytes []byte + +func (b uuidBytes) String() string { + if b == nil { + return "nil" + } + uid, err := uuid.FromBytes(b) + if err != nil { + return err.Error() + } + return uid.String() +} + +func (b uuidBytes) Bytes() []byte { + return b +} + +func (b uuidBytes) Clone() indexKey { + buf := make([]byte, len(b)) + copy(buf, b) + return uuidBytes(buf) +} + +// type uint64Bytes []byte + +// func (b uint64Bytes) String() string { +// if b == nil { +// return "nil" +// } +// return fmt.Sprint(binary.LittleEndian.Uint64(b)) +// } + +// func (b uint64Bytes) Bytes() []byte { +// return b +// } + +// func (b uint64Bytes) Clone() indexKey { +// buf := make([]byte, len(b)) +// copy(buf, b) +// return uint64Bytes(buf) +// } + +// ----------------------------------------------------------------------------- + +type indexKeyParser interface { + FromString(key string) (indexKey, error) + FromBytes(key []byte) indexKey +} + +type UuidKeyParser struct{} + +func (p *UuidKeyParser) FromString(key string) (indexKey, error) { + uid, err := uuid.Parse(key) + if err != nil { + return nil, err + } + buf, err := uid.MarshalBinary() + if err != nil { + return nil, err + } + return uuidBytes(buf), nil +} + +func (p *UuidKeyParser) FromBytes(key []byte) indexKey { + return uuidBytes(key) +} + +// type uint64KeyParser struct{} + +// func (p *uint64KeyParser) FromString(key string) (indexKey, error) { +// u, err := strconv.ParseUint(key, 10, 64) +// if err != nil { +// return nil, err +// } +// buf := make([]byte, 8) +// 
binary.LittleEndian.PutUint64(buf, u) +// return uint64Bytes(buf), nil +// } + +// func (p *uint64KeyParser) FromBytes(key []byte) indexKey { +// return uint64Bytes(key) +// } diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_missing_text_filterable.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_missing_text_filterable.go new file mode 100644 index 0000000000000000000000000000000000000000..806f3499189f729c6276dcbab96fd679cbd9b076 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_missing_text_filterable.go @@ -0,0 +1,124 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" +) + +type shardInvertedReindexTaskMissingTextFilterable struct { + logger logrus.FieldLogger + files *filterableToSearchableMigrationFiles + stateLock *sync.RWMutex + + migrationState *filterableToSearchableMigrationState +} + +func newShardInvertedReindexTaskMissingTextFilterable(migrator *Migrator, +) *shardInvertedReindexTaskMissingTextFilterable { + return &shardInvertedReindexTaskMissingTextFilterable{ + logger: migrator.logger, + files: newFilterableToSearchableMigrationFiles(migrator.db.config.RootPath), + stateLock: new(sync.RWMutex), + } +} + +func (t *shardInvertedReindexTaskMissingTextFilterable) init() error { + migrationState, err := t.files.loadMigrationState() + if err != nil { + return errors.Wrap(err, "failed loading migration state") + } + + t.migrationState = migrationState + return nil +} + +func (t 
*shardInvertedReindexTaskMissingTextFilterable) GetPropertiesToReindex(ctx context.Context, + shard ShardLike, +) ([]ReindexableProperty, error) { + reindexableProperties := []ReindexableProperty{} + + t.stateLock.RLock() + className := shard.Index().Config.ClassName.String() + props, ok := t.migrationState.MissingFilterableClass2Props[className] + t.stateLock.RUnlock() + + if !ok || len(props) == 0 { + return reindexableProperties, nil + } + + bucketOptions := []lsmkv.BucketOption{ + lsmkv.WithDirtyThreshold(time.Duration(shard.Index().Config.MemtablesFlushDirtyAfter) * time.Second), + } + + for propName := range props { + bucketNameSearchable := helpers.BucketSearchableFromPropNameLSM(propName) + bucketNameFilterable := helpers.BucketFromPropNameLSM(propName) + + bucketSearchable := shard.Store().Bucket(bucketNameSearchable) + bucketFilterable := shard.Store().Bucket(bucketNameFilterable) + + // exists bucket searchable of strategy map and either of + // - exists empty filterable bucket of strategy roaring set + // (weaviate was restrated after filterable to searchable migration) + // - filterable bucket does not exist + // (indexing comes right after filterable to searchable migration) + if bucketSearchable != nil && + bucketSearchable.Strategy() == lsmkv.StrategyMapCollection { + + if bucketFilterable == nil { + reindexableProperties = append(reindexableProperties, ReindexableProperty{ + PropertyName: propName, + IndexType: IndexTypePropValue, + DesiredStrategy: lsmkv.StrategyRoaringSet, + NewIndex: true, + BucketOptions: bucketOptions, + }) + } else if bucketFilterable.Strategy() == lsmkv.StrategyRoaringSet { + reindexableProperties = append(reindexableProperties, ReindexableProperty{ + PropertyName: propName, + IndexType: IndexTypePropValue, + DesiredStrategy: lsmkv.StrategyRoaringSet, + BucketOptions: bucketOptions, + }) + } + } + } + + return reindexableProperties, nil +} + +func (t *shardInvertedReindexTaskMissingTextFilterable) 
updateMigrationStateAndSave(classCreatedFilterable string) error { + t.stateLock.Lock() + defer t.stateLock.Unlock() + + t.migrationState.CreatedFilterableClass2Props[classCreatedFilterable] = t.migrationState.MissingFilterableClass2Props[classCreatedFilterable] + delete(t.migrationState.MissingFilterableClass2Props, classCreatedFilterable) + return t.files.saveMigrationState(t.migrationState) +} + +func (t *shardInvertedReindexTaskMissingTextFilterable) OnPostResumeStore(ctx context.Context, shard ShardLike) error { + // turn off fallback mode immediately after creating filterable index and resuming store's activity + shard.setFallbackToSearchable(false) + return nil +} + +func (t *shardInvertedReindexTaskMissingTextFilterable) ObjectsIterator(shard ShardLike) objectsIterator { + return shard.Store().Bucket(helpers.ObjectsBucketLSM).IterateObjects +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_set_to_roaringset.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_set_to_roaringset.go new file mode 100644 index 0000000000000000000000000000000000000000..8fe0c6cdf6f43aec39e0454e3caa369e19a0b7e2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_set_to_roaringset.go @@ -0,0 +1,81 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "time" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" +) + +type ShardInvertedReindexTaskSetToRoaringSet struct{} + +func (t *ShardInvertedReindexTaskSetToRoaringSet) GetPropertiesToReindex(ctx context.Context, + shard ShardLike, +) ([]ReindexableProperty, error) { + reindexableProperties := []ReindexableProperty{} + + bucketOptions := []lsmkv.BucketOption{ + lsmkv.WithDirtyThreshold(time.Duration(shard.Index().Config.MemtablesFlushDirtyAfter) * time.Second), + } + + for name, bucket := range shard.Store().GetBucketsByName() { + if bucket.Strategy() == lsmkv.StrategySetCollection && + bucket.DesiredStrategy() == lsmkv.StrategyRoaringSet { + + propName, indexType := GetPropNameAndIndexTypeFromBucketName(name) + switch indexType { + case IndexTypePropValue: + reindexableProperties = append(reindexableProperties, + ReindexableProperty{ + PropertyName: propName, + IndexType: IndexTypePropValue, + DesiredStrategy: lsmkv.StrategyRoaringSet, + BucketOptions: bucketOptions, + }, + ) + case IndexTypePropLength: + reindexableProperties = append(reindexableProperties, + ReindexableProperty{ + PropertyName: propName, + IndexType: IndexTypePropLength, + DesiredStrategy: lsmkv.StrategyRoaringSet, + BucketOptions: bucketOptions, + }, + ) + case IndexTypePropNull: + reindexableProperties = append(reindexableProperties, + ReindexableProperty{ + PropertyName: propName, + IndexType: IndexTypePropNull, + DesiredStrategy: lsmkv.StrategyRoaringSet, + BucketOptions: bucketOptions, + }, + ) + default: + // skip remaining + } + } + } + + return reindexableProperties, nil +} + +func (t *ShardInvertedReindexTaskSetToRoaringSet) OnPostResumeStore(ctx context.Context, shard ShardLike) error { + return nil +} + +func (t *ShardInvertedReindexTaskSetToRoaringSet) ObjectsIterator(shard ShardLike) objectsIterator { + return 
shard.Store().Bucket(helpers.ObjectsBucketLSM).IterateObjects +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_specified_index.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_specified_index.go new file mode 100644 index 0000000000000000000000000000000000000000..23c2304109b9cd62129e7794aabdb151a10c3d8f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_specified_index.go @@ -0,0 +1,152 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "time" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/storobj" +) + +type ShardInvertedReindexTask_SpecifiedIndex struct { + classNamesWithPropertyNames map[string]map[string]struct{} +} + +func (t *ShardInvertedReindexTask_SpecifiedIndex) GetPropertiesToReindex(ctx context.Context, + shard ShardLike, +) ([]ReindexableProperty, error) { + reindexableProperties := []ReindexableProperty{} + + // shard of selected class + props, ok := t.classNamesWithPropertyNames[shard.Index().Config.ClassName.String()] + if !ok { + return reindexableProperties, nil + } + + bucketOptions := []lsmkv.BucketOption{ + lsmkv.WithDirtyThreshold(time.Duration(shard.Index().Config.MemtablesFlushDirtyAfter) * time.Second), + } + + for name := range shard.Store().GetBucketsByName() { + // skip non prop buckets + switch name { + case helpers.ObjectsBucketLSM: + case helpers.VectorsBucketLSM: + case helpers.VectorsCompressedBucketLSM: + case helpers.DimensionsBucketLSM: + continue + } + + propName, indexType 
:= GetPropNameAndIndexTypeFromBucketName(name) + if _, ok := props[propName]; !ok { + continue + } + + switch indexType { + case IndexTypePropValue: + reindexableProperties = append(reindexableProperties, + ReindexableProperty{ + PropertyName: propName, + IndexType: IndexTypePropValue, + DesiredStrategy: lsmkv.StrategyRoaringSet, + BucketOptions: bucketOptions, + }, + ) + case IndexTypePropSearchableValue: + reindexableProperties = append(reindexableProperties, + ReindexableProperty{ + PropertyName: propName, + IndexType: IndexTypePropSearchableValue, + DesiredStrategy: lsmkv.StrategyMapCollection, + BucketOptions: bucketOptions, + }, + ) + case IndexTypePropLength: + reindexableProperties = append(reindexableProperties, + ReindexableProperty{ + PropertyName: propName, + IndexType: IndexTypePropLength, + DesiredStrategy: lsmkv.StrategyRoaringSet, + BucketOptions: bucketOptions, + }, + ) + case IndexTypePropNull: + reindexableProperties = append(reindexableProperties, + ReindexableProperty{ + PropertyName: propName, + IndexType: IndexTypePropNull, + DesiredStrategy: lsmkv.StrategyRoaringSet, + BucketOptions: bucketOptions, + }, + ) + case IndexTypePropMetaCount: + reindexableProperties = append(reindexableProperties, + ReindexableProperty{ + PropertyName: propName, + IndexType: IndexTypePropMetaCount, + DesiredStrategy: lsmkv.StrategyRoaringSet, + BucketOptions: bucketOptions, + }, + ) + default: + // skip remaining + } + + } + + return reindexableProperties, nil +} + +func (t *ShardInvertedReindexTask_SpecifiedIndex) OnPostResumeStore(ctx context.Context, shard ShardLike) error { + return nil +} + +func (t *ShardInvertedReindexTask_SpecifiedIndex) ObjectsIterator(shard ShardLike) objectsIterator { + class := shard.Index().Config.ClassName.String() + props, ok := t.classNamesWithPropertyNames[class] + if !ok || len(props) == 0 { + return nil + } + + propertyPaths := make([][]string, 0, len(props)) + for prop := range props { + propertyPaths = append(propertyPaths, 
[]string{prop}) + } + + propsExtraction := &storobj.PropertyExtraction{ + PropertyPaths: propertyPaths, + } + + objectsBucket := shard.Store().Bucket(helpers.ObjectsBucketLSM) + return func(ctx context.Context, fn func(object *storobj.Object) error) error { + cursor := objectsBucket.Cursor() + defer cursor.Close() + + i := 0 + for k, v := cursor.First(); k != nil; k, v = cursor.Next() { + obj, err := storobj.FromBinaryOptional(v, additional.Properties{}, propsExtraction) + if err != nil { + return fmt.Errorf("cannot unmarshal object %d, %w", i, err) + } + if err := fn(obj); err != nil { + return fmt.Errorf("callback on object '%d' failed: %w", obj.DocID, err) + } + i++ + } + return nil + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_utils.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_utils.go new file mode 100644 index 0000000000000000000000000000000000000000..d193c28ee182619e589ea5d8e2337caa07f1eabe --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_utils.go @@ -0,0 +1,92 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "regexp" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +func GetPropNameAndIndexTypeFromBucketName(bucketName string) (string, PropertyIndexType) { + propRegexpGroup := "(?P.*)" + + types := []struct { + indexType PropertyIndexType + bucketNameFn func(string) string + }{ + { + IndexTypePropNull, + helpers.BucketFromPropNameNullLSM, + }, + { + IndexTypePropLength, + helpers.BucketFromPropNameLengthLSM, + }, + { + IndexTypePropSearchableValue, + helpers.BucketSearchableFromPropNameLSM, + }, + { + IndexTypePropMetaCount, + helpers.BucketFromPropNameMetaCountLSM, + }, + { + IndexTypePropValue, + helpers.BucketFromPropNameLSM, + }, + } + + for _, t := range types { + r, err := regexp.Compile("^" + t.bucketNameFn(propRegexpGroup) + "$") + if err != nil { + continue + } + matches := r.FindStringSubmatch(bucketName) + if len(matches) > 0 { + return matches[r.SubexpIndex("propName")], t.indexType + } + } + return "", 0 +} + +type reindexablePropertyChecker struct { + reindexables map[string]map[PropertyIndexType]struct{} + props map[string]*models.Property +} + +func newReindexablePropertyChecker(reindexableProperties []ReindexableProperty, class *models.Class) *reindexablePropertyChecker { + reindexables := map[string]map[PropertyIndexType]struct{}{} + props := map[string]*models.Property{} + for _, property := range reindexableProperties { + if _, ok := reindexables[property.PropertyName]; !ok { + reindexables[property.PropertyName] = map[PropertyIndexType]struct{}{} + } + reindexables[property.PropertyName][property.IndexType] = struct{}{} + props[property.PropertyName], _ = schema.GetPropertyByName(class, property.PropertyName) + } + return &reindexablePropertyChecker{reindexables, props} +} + +func (c *reindexablePropertyChecker) isReindexable(propName string, indexType 
PropertyIndexType) bool { + if _, ok := c.reindexables[propName]; ok { + _, ok := c.reindexables[propName][indexType] + return ok + } + return false +} + +func (c *reindexablePropertyChecker) getSchemaProp(propName string) *models.Property { + return c.props[propName] +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_v3.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_v3.go new file mode 100644 index 0000000000000000000000000000000000000000..38bdb327bf92fef7aa4528c7fb04457f3fc8fc93 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_v3.go @@ -0,0 +1,565 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/entities/errorcompounder" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/schema" +) + +type ShardReindexerV3 interface { + RunBeforeLsmInit(ctx context.Context, shard *Shard) error + RunAfterLsmInit(ctx context.Context, shard *Shard) error + RunAfterLsmInitAsync(ctx context.Context, shard *Shard) error + Stop(shard *Shard, cause error) +} + +type ShardReindexTaskV3 interface { + Name() string + OnBeforeLsmInit(ctx context.Context, shard *Shard) error + OnAfterLsmInit(ctx context.Context, shard *Shard) error + // TODO alisza:blockmax change to *Shard? 
+ OnAfterLsmInitAsync(ctx context.Context, shard ShardLike) (rerunAt time.Time, reloadShard bool, err error) +} + +// ----------------------------------------------------------------------------- + +func NewShardReindexerV3Noop() *shardReindexerV3Noop { + return &shardReindexerV3Noop{} +} + +type shardReindexerV3Noop struct{} + +func (r *shardReindexerV3Noop) RunBeforeLsmInit(ctx context.Context, shard *Shard) error { + return nil +} + +func (r *shardReindexerV3Noop) RunAfterLsmInit(ctx context.Context, shard *Shard) error { + return nil +} + +func (r *shardReindexerV3Noop) RunAfterLsmInitAsync(ctx context.Context, shard *Shard) error { + return nil +} + +func (r *shardReindexerV3Noop) Stop(shard *Shard, cause error) {} + +// ----------------------------------------------------------------------------- + +func NewShardReindexerV3(ctx context.Context, logger logrus.FieldLogger, + getIndex func(className schema.ClassName) *Index, concurrency int, +) *shardReindexerV3 { + config := shardsReindexerV3Config{ + concurrency: concurrency, + retryOnErrorInterval: 15 * time.Minute, + } + + logger.WithField("config", fmt.Sprintf("%+v", config)).Debug("reindexer created") + + return &shardReindexerV3{ + logger: logger, + ctx: ctx, + getIndex: getIndex, + queue: newShardsQueue(), + lock: new(sync.Mutex), + taskNames: map[string]struct{}{}, + tasks: []ShardReindexTaskV3{}, + waitingTasksPerShard: map[string][]ShardReindexTaskV3{}, + waitingCtxPerShard: map[string]context.Context{}, + waitingCtxCancelPerShard: map[string]context.CancelCauseFunc{}, + processingCtxPerShard: map[string]context.Context{}, + processingCtxCancelPerShard: map[string]context.CancelCauseFunc{}, + config: config, + } +} + +type shardReindexerV3 struct { + logger logrus.FieldLogger + ctx context.Context + getIndex func(className schema.ClassName) *Index + queue *shardsQueue + lock *sync.Mutex + + taskNames map[string]struct{} + tasks []ShardReindexTaskV3 + waitingTasksPerShard 
map[string][]ShardReindexTaskV3 + waitingCtxPerShard map[string]context.Context + waitingCtxCancelPerShard map[string]context.CancelCauseFunc + processingCtxPerShard map[string]context.Context + processingCtxCancelPerShard map[string]context.CancelCauseFunc + + config shardsReindexerV3Config +} + +type shardsReindexerV3Config struct { + concurrency int + retryOnErrorInterval time.Duration +} + +func (r *shardReindexerV3) RegisterTask(task ShardReindexTaskV3) bool { + name := task.Name() + if _, ok := r.taskNames[name]; ok { + return false + } + + r.taskNames[name] = struct{}{} + r.tasks = append(r.tasks, task) + return true +} + +func (r *shardReindexerV3) Init() { + enterrors.GoWrapper(func() { + eg := enterrors.NewErrorGroupWrapper(r.logger) + eg.SetLimit(r.config.concurrency) + + for { + key, tasks, err := r.queue.getWhenReady(r.ctx) + if err != nil { + r.logger.WithError(err).Errorf("failed getting shard key from queue") + return + } + + r.locked(func() { + r.waitingTasksPerShard[key] = tasks + if _, ok := r.waitingCtxPerShard[key]; !ok { + r.waitingCtxPerShard[key], r.waitingCtxCancelPerShard[key] = context.WithCancelCause(r.ctx) + } + }) + + eg.Go(func() error { + var prevProcessingCtx context.Context + var processingCtx context.Context + var processingCtxCancel context.CancelCauseFunc + var tasks []ShardReindexTaskV3 + + r.locked(func() { + tasks = r.waitingTasksPerShard[key] + processingCtx = r.waitingCtxPerShard[key] + processingCtxCancel = r.waitingCtxCancelPerShard[key] + prevProcessingCtx = r.processingCtxPerShard[key] + + delete(r.waitingTasksPerShard, key) + delete(r.waitingCtxPerShard, key) + delete(r.waitingCtxCancelPerShard, key) + + r.processingCtxPerShard[key] = processingCtx + r.processingCtxCancelPerShard[key] = processingCtxCancel + }) + + if processingCtx == nil { + return nil + } + if prevProcessingCtx != nil { + <-prevProcessingCtx.Done() + } + + defer processingCtxCancel(fmt.Errorf("deferred, context cleanup")) + return 
r.runScheduledTask(processingCtx, key, tasks) + }) + } + }, r.logger) +} + +func (r *shardReindexerV3) RunBeforeLsmInit(_ context.Context, shard *Shard) (err error) { + // TODO aliszka:blockmax merge contexts (reindex + incoming)? + mergedCtx := r.ctx + + collectionName := shard.Index().Config.ClassName.String() + logger := r.logger.WithFields(map[string]any{ + "collection": collectionName, + "shard": shard.Name(), + "method": "RunBeforeLsmInit", + }) + logger.Debug("starting") + defer func(started time.Time) { + logger = logger.WithField("took", time.Since(started)) + if err != nil { + logger.WithError(err).Error("finished with error") + } else { + logger.Debug("finished") + } + }(time.Now()) + + if len(r.tasks) == 0 { + return nil + } + + ec := errorcompounder.New() + for i := range r.tasks { + ec.Add(r.tasks[i].OnBeforeLsmInit(mergedCtx, shard)) + } + err = ec.ToError() + return +} + +func (r *shardReindexerV3) RunAfterLsmInit(_ context.Context, shard *Shard) (err error) { + // TODO aliszka:blockmax merge contexts (reindex + incoming)? 
+ mergedCtx := r.ctx + + collectionName := shard.Index().Config.ClassName.String() + logger := r.logger.WithFields(map[string]any{ + "collection": collectionName, + "shard": shard.Name(), + "method": "RunAfterLsmInit", + }) + logger.Debug("starting") + defer func(started time.Time) { + logger = logger.WithField("took", time.Since(started)) + if err != nil { + logger.WithError(err).Error("finished with error") + } else { + logger.Debug("finished") + } + }(time.Now()) + + if len(r.tasks) == 0 { + return nil + } + + ec := errorcompounder.New() + for i := range r.tasks { + ec.Add(r.tasks[i].OnAfterLsmInit(mergedCtx, shard)) + } + err = ec.ToError() + return +} + +func (r *shardReindexerV3) RunAfterLsmInitAsync(_ context.Context, shard *Shard) (err error) { + collectionName := shard.Index().Config.ClassName.String() + logger := r.logger.WithFields(map[string]any{ + "collection": collectionName, + "shard": shard.Name(), + "method": "RunAfterLsmInitAsync", + }) + logger.Debug("starting") + defer func(started time.Time) { + logger = logger.WithField("took", time.Since(started)) + if err != nil { + logger.WithError(err).Error("finished with error") + } else { + logger.Debug("finished") + } + }(time.Now()) + + if len(r.tasks) == 0 { + return nil + } + + key := toIndexShardKeyOfShard(shard) + return r.scheduleTasks(key, r.tasks, time.Now()) +} + +func (r *shardReindexerV3) Stop(shard *Shard, cause error) { + key := toIndexShardKeyOfShard(shard) + r.locked(func() { + if cancel, ok := r.processingCtxCancelPerShard[key]; ok { + cancel(cause) + } + if cancel, ok := r.waitingCtxCancelPerShard[key]; ok { + cancel(cause) + } + r.scheduleTasks(key, nil, time.Time{}) + }) + + collectionName := shard.Index().Config.ClassName.String() + r.logger.WithFields(map[string]any{ + "collection": collectionName, + "shard": shard.Name(), + "cause": cause, + }).Debug("stop reindex tasks requested") +} + +func (r *shardReindexerV3) runScheduledTask(ctx context.Context, key string, tasks 
[]ShardReindexTaskV3) (err error) { + collectionName, shardName := fromIndexShardKey(key) + logger := r.logger.WithFields(map[string]any{ + "collection": collectionName, + "shard": shardName, + "method": "runScheduledTask", + }) + logger.Debug("starting") + defer func(started time.Time) { + logger = logger.WithField("took", time.Since(started)) + if err != nil { + logger.WithError(err).Error("finished with error") + } else { + logger.Debug("finished") + } + }(time.Now()) + + if err = ctx.Err(); err != nil { + err = fmt.Errorf("context check (1): %w / %w", ctx.Err(), context.Cause(ctx)) + return + } + + index := r.getIndex(schema.ClassName(collectionName)) + if index == nil { + // try again later, as we have observed that index can be nil + // for a short period of time after shard is created, but before it is loaded + r.locked(func() { + if ctx.Err() == nil { + r.queue.insert(key, tasks, time.Now().Add(1*time.Minute)) + } + }) + err = fmt.Errorf("index for shard '%s' of collection '%s' not found", shardName, collectionName) + return + } + shard, release, err := index.GetShard(ctx, shardName) + if err != nil { + r.locked(func() { + if ctx.Err() == nil { + r.queue.insert(key, tasks, time.Now().Add(r.config.retryOnErrorInterval)) + } + }) + err = fmt.Errorf("not loaded '%s' of collection '%s': %w", shardName, collectionName, err) + return + } + + rerunAt, reloadShard, err := func() (time.Time, bool, error) { + defer release() + if shard == nil { + return time.Time{}, false, fmt.Errorf("shard '%s' of collection '%s' not found", shardName, collectionName) + } + + if err = ctx.Err(); err != nil { + return time.Time{}, false, fmt.Errorf("context check (2): %w / %w", ctx.Err(), context.Cause(ctx)) + } + + // at this point lazy shard should be loaded (there is no unloading), otherwise [RunAfterLsmInitAsync] + // would not be called and tasks scheduled for shard + return tasks[0].OnAfterLsmInitAsync(ctx, shard) + }() + + scheduleNextTasks := func(ctx context.Context, lastErr 
error) (err error) { + r.locked(func() { + if lastErr != nil { + // schedule tasks only if context not cancelled + if ctx.Err() == nil { + r.scheduleTasks(key, tasks[1:], time.Now()) + } + err = fmt.Errorf("executing task '%s' on shard '%s' of collection '%s': %w", + tasks[0].Name(), shardName, collectionName, lastErr) + return + } + if err = ctx.Err(); err != nil { + err = fmt.Errorf("executing task '%s' on shard '%s' of collection '%s': %w / %w", + tasks[0].Name(), shardName, collectionName, err, context.Cause(ctx)) + return + } + if rerunAt.IsZero() { + r.scheduleTasks(key, tasks[1:], time.Now()) + logger.WithField("task", tasks[0].Name()).Debug("task executed completely") + return + } + r.scheduleTasks(key, tasks, rerunAt) + logger.WithField("task", tasks[0].Name()).Debug("task executed partially, rerun scheduled") + }) + return + } + + // do not reload if error occurred. schedule tasks using shard's individual context + if !reloadShard || err != nil || ctx.Err() != nil { + err = scheduleNextTasks(ctx, err) + return + } + + // reload uninterrupted by context. 
shard's context will be cancelled by shutdown anyway + if err = r.reloadShard(context.Background(), index, shardName); err != nil { + err = fmt.Errorf("reloading shard '%s' of collection '%s': %w", shardName, collectionName, err) + } + // schedule tasks using global context + err = scheduleNextTasks(r.ctx, err) + return +} + +func (r *shardReindexerV3) scheduleTasks(key string, tasks []ShardReindexTaskV3, runAt time.Time) error { + if len(tasks) == 0 { + r.queue.delete(key) + return nil + } + + return r.queue.insert(key, tasks, runAt) +} + +func (r *shardReindexerV3) locked(callback func()) { + r.lock.Lock() + defer r.lock.Unlock() + callback() +} + +func (r *shardReindexerV3) reloadShard(ctx context.Context, index *Index, shardName string) error { + if err := index.IncomingReinitShard(ctx, shardName); err != nil { + return err + } + // force loading shard (if lazy) by getting store + shard, release, err := index.GetShard(ctx, shardName) + if err != nil { + return err + } + defer release() + shard.Store() + + return nil +} + +// ----------------------------------------------------------------------------- + +type shardsQueue struct { + lock *sync.Mutex + runShardQueue *priorityqueue.Queue[string] + tasksPerShard map[string][]ShardReindexTaskV3 + timerCtx context.Context + timerCtxCancel context.CancelFunc +} + +func newShardsQueue() *shardsQueue { + q := &shardsQueue{ + lock: new(sync.Mutex), + runShardQueue: priorityqueue.NewMinWithId[string](16), + tasksPerShard: map[string][]ShardReindexTaskV3{}, + } + q.timerCtx, q.timerCtxCancel = q.infiniteDeadlineCtx() + + return q +} + +func (q *shardsQueue) insert(key string, tasks []ShardReindexTaskV3, runAt time.Time) error { + id := q.timeToId(runAt) + + q.lock.Lock() + defer q.lock.Unlock() + + if _, ok := q.tasksPerShard[key]; ok { + return fmt.Errorf("tasks for shard already added") + } + q.tasksPerShard[key] = tasks + + q.runShardQueue.InsertWithValue(id, 0, key) + // element added as top. 
update deadline context to new (and closest) runAt + if top := q.runShardQueue.Top(); top.Value == key && top.ID == id { + _, cancel := q.timerCtx, q.timerCtxCancel + q.timerCtx, q.timerCtxCancel = q.deadlineCtx(runAt) + cancel() + } + return nil +} + +func (q *shardsQueue) delete(key string) bool { + q.lock.Lock() + defer q.lock.Unlock() + + if _, ok := q.tasksPerShard[key]; !ok { + return false + } + delete(q.tasksPerShard, key) + + if q.runShardQueue.Len() > 0 && q.runShardQueue.Top().Value == key { + _, cancel := q.timerCtx, q.timerCtxCancel + q.runShardQueue.Pop() + if q.runShardQueue.Len() > 0 { + // set timer to next/top shard + tm := q.idToTime(q.runShardQueue.Top().ID) + q.timerCtx, q.timerCtxCancel = q.deadlineCtx(tm) + } else { + // set timer to "infinity" + q.timerCtx, q.timerCtxCancel = q.infiniteDeadlineCtx() + } + cancel() + return true + } + + return q.runShardQueue.DeleteItem(func(item priorityqueue.Item[string]) bool { + return item.Value == key + }) +} + +func (q *shardsQueue) getWhenReady(ctx context.Context) (key string, tasks []ShardReindexTaskV3, err error) { + q.lock.Lock() + timerCtx, timerCtxCancel := q.timerCtx, q.timerCtxCancel + q.lock.Unlock() + + for { + select { + case <-ctx.Done(): + timerCtxCancel() + return "", nil, fmt.Errorf("context check (shardsQueue): %w / %w", ctx.Err(), context.Cause(ctx)) + + case <-timerCtx.Done(): + q.lock.Lock() + // check if this is latest ctx and deadline exceeded. 
if so then top key is to be returned + if q.timerCtx == timerCtx && errors.Is(timerCtx.Err(), context.DeadlineExceeded) { + defer q.lock.Unlock() + + if q.runShardQueue.Len() > 0 { + key := q.runShardQueue.Pop().Value + if q.runShardQueue.Len() > 0 { + // set timer to next/top shard + tm := q.idToTime(q.runShardQueue.Top().ID) + q.timerCtx, q.timerCtxCancel = q.deadlineCtx(tm) + } else { + // set timer to "infinity" + q.timerCtx, q.timerCtxCancel = q.infiniteDeadlineCtx() + } + tasks := q.tasksPerShard[key] + delete(q.tasksPerShard, key) + return key, tasks, nil + } + // should not happen + return "", nil, fmt.Errorf("shards queue empty") + } + + timerCtx, timerCtxCancel = q.timerCtx, q.timerCtxCancel + q.lock.Unlock() + } + } +} + +func (q *shardsQueue) timeToId(tm time.Time) uint64 { + return uint64(-tm.UnixNano()) +} + +func (q *shardsQueue) idToTime(id uint64) time.Time { + nsec := -int64(id) + return time.Unix(0, nsec) +} + +func (q *shardsQueue) deadlineCtx(deadline time.Time) (context.Context, context.CancelFunc) { + return context.WithDeadline(context.Background(), deadline) +} + +func (q *shardsQueue) infiniteDeadlineCtx() (context.Context, context.CancelFunc) { + return q.deadlineCtx(time.Now().Add(10 * 365 * 24 * time.Hour)) +} + +// ----------------------------------------------------------------------------- + +func toIndexShardKeyOfShard(shard ShardLike) string { + return toIndexShardKey(shard.Index().Config.ClassName.String(), shard.Name()) +} + +func toIndexShardKey(collectionName, shardName string) string { + return collectionName + "//" + shardName +} + +func fromIndexShardKey(key string) (string, string) { + s := strings.Split(key, "//") + return s[0], s[1] +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_v3_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_v3_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..e2f6beb52f1b2558b4507663afe3d4f1dc96f7a3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/inverted_reindexer_v3_test.go @@ -0,0 +1,190 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestProcessingQueue(t *testing.T) { + t.Run("single key", func(t *testing.T) { + expKey := "some_key" + expTasks := []ShardReindexTaskV3{&dummyShardReindexTaskV3{name: "t1"}} + interval := 10 * time.Millisecond + expTime := time.Now().Add(interval) + q := newShardsQueue() + + q.insert(expKey, expTasks, expTime) + key, tasks, err := q.getWhenReady(context.Background()) + after := time.Now() + + require.NoError(t, err) + assert.Equal(t, expKey, key) + assert.ElementsMatch(t, expTasks, tasks) + assert.LessOrEqual(t, expTime.UnixNano(), after.UnixNano()) + }) + + t.Run("multiple keys", func(t *testing.T) { + expKey1 := "some_key_1" + expKey2 := "some_key_2" + expKey3 := "some_key_3" + expKey4 := "some_key_4" + expKey5 := "some_key_5" + expTasks1 := []ShardReindexTaskV3{&dummyShardReindexTaskV3{name: "t1"}} + expTasks2 := []ShardReindexTaskV3{&dummyShardReindexTaskV3{name: "t2"}} + expTasks3 := []ShardReindexTaskV3{&dummyShardReindexTaskV3{name: "t3"}} + expTasks4 := []ShardReindexTaskV3{&dummyShardReindexTaskV3{name: "t4"}} + expTasks5 := []ShardReindexTaskV3{&dummyShardReindexTaskV3{name: "t5"}} + interval := 10 * time.Millisecond + expTime1 := time.Now().Add(interval) + expTime2 := time.Now().Add(interval * 2) + expTime3 := time.Now().Add(interval * 3) + expTime4 := time.Now().Add(interval * 4) + expTime5 := time.Now().Add(interval * 5) 
+ + q := newShardsQueue() + + q.insert(expKey4, expTasks4, expTime4) + q.insert(expKey3, expTasks3, expTime3) + q.insert(expKey1, expTasks1, expTime1) + q.insert(expKey5, expTasks5, expTime5) + q.insert(expKey2, expTasks2, expTime2) + + key1, tasks1, err1 := q.getWhenReady(context.Background()) + after1 := time.Now() + key2, tasks2, err2 := q.getWhenReady(context.Background()) + after2 := time.Now() + key3, tasks3, err3 := q.getWhenReady(context.Background()) + after3 := time.Now() + key4, tasks4, err4 := q.getWhenReady(context.Background()) + after4 := time.Now() + key5, tasks5, err5 := q.getWhenReady(context.Background()) + after5 := time.Now() + + require.NoError(t, err1) + assert.Equal(t, expKey1, key1) + assert.ElementsMatch(t, expTasks1, tasks1) + assert.LessOrEqual(t, expTime1.UnixNano(), after1.UnixNano()) + + require.NoError(t, err2) + assert.Equal(t, expKey2, key2) + assert.ElementsMatch(t, expTasks2, tasks2) + assert.LessOrEqual(t, expTime2.UnixNano(), after2.UnixNano()) + + require.NoError(t, err3) + assert.Equal(t, expKey3, key3) + assert.ElementsMatch(t, expTasks3, tasks3) + assert.LessOrEqual(t, expTime3.UnixNano(), after3.UnixNano()) + + require.NoError(t, err4) + assert.Equal(t, expKey4, key4) + assert.ElementsMatch(t, expTasks4, tasks4) + assert.LessOrEqual(t, expTime4.UnixNano(), after4.UnixNano()) + + require.NoError(t, err5) + assert.Equal(t, expKey5, key5) + assert.ElementsMatch(t, expTasks5, tasks5) + assert.LessOrEqual(t, expTime5.UnixNano(), after5.UnixNano()) + }) + + t.Run("multiple keys, cancelled context", func(t *testing.T) { + expKey1 := "some_key_1" + expKey2 := "some_key_2" + expKey3 := "some_key_3" + expKey4 := "some_key_4" + expKey5 := "some_key_5" + interval := 10 * time.Millisecond + expTasks1 := []ShardReindexTaskV3{&dummyShardReindexTaskV3{name: "t1"}} + expTasks2 := []ShardReindexTaskV3{&dummyShardReindexTaskV3{name: "t2"}} + expTasks3 := []ShardReindexTaskV3{&dummyShardReindexTaskV3{name: "t3"}} + expTasks4 := 
[]ShardReindexTaskV3{&dummyShardReindexTaskV3{name: "t4"}} + expTasks5 := []ShardReindexTaskV3{&dummyShardReindexTaskV3{name: "t5"}} + expTime1 := time.Now().Add(interval) + expTime2 := time.Now().Add(interval * 2) + expTime3 := time.Now().Add(interval * 3) + expTime4 := time.Now().Add(interval * 4) + expTime5 := time.Now().Add(interval * 5) + + q := newShardsQueue() + + q.insert(expKey4, expTasks4, expTime4) + q.insert(expKey3, expTasks3, expTime3) + q.insert(expKey1, expTasks1, expTime1) + q.insert(expKey5, expTasks5, expTime5) + q.insert(expKey2, expTasks2, expTime2) + + ctx, cancel := context.WithCancel(context.Background()) + + key1, tasks1, err1 := q.getWhenReady(ctx) + after1 := time.Now() + key2, tasks2, err2 := q.getWhenReady(ctx) + after2 := time.Now() + cancel() + key3, tasks3, err3 := q.getWhenReady(ctx) + after3 := time.Now() + key4, tasks4, err4 := q.getWhenReady(ctx) + after4 := time.Now() + key5, tasks5, err5 := q.getWhenReady(ctx) + after5 := time.Now() + + require.NoError(t, err1) + assert.Equal(t, expKey1, key1) + assert.ElementsMatch(t, expTasks1, tasks1) + assert.LessOrEqual(t, expTime1.UnixNano(), after1.UnixNano()) + + require.NoError(t, err2) + assert.Equal(t, expKey2, key2) + assert.ElementsMatch(t, expTasks2, tasks2) + assert.LessOrEqual(t, expTime2.UnixNano(), after2.UnixNano()) + + require.Error(t, err3) + assert.Empty(t, key3) + assert.Empty(t, tasks3) + assert.Greater(t, expTime3.UnixNano(), after3.UnixNano()) + + require.Error(t, err4) + assert.Empty(t, key4) + assert.Empty(t, tasks4) + assert.Greater(t, expTime3.UnixNano(), after4.UnixNano()) + + require.Error(t, err5) + assert.Empty(t, key5) + assert.Empty(t, tasks5) + assert.Greater(t, expTime3.UnixNano(), after5.UnixNano()) + }) +} + +type dummyShardReindexTaskV3 struct { + name string +} + +func (t *dummyShardReindexTaskV3) Name() string { + return t.name +} + +func (t *dummyShardReindexTaskV3) OnBeforeLsmInit(ctx context.Context, shard *Shard) error { + return nil +} + +func (t 
*dummyShardReindexTaskV3) OnAfterLsmInit(ctx context.Context, shard *Shard) error { + return nil +} + +func (t *dummyShardReindexTaskV3) OnAfterLsmInitAsync(ctx context.Context, shard ShardLike, +) (rerunAt time.Time, reloadShard bool, err error) { + return time.Time{}, false, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/.gitignore b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..f9f7ca8a99dcf0fa43b2293a35b9faf2bbfd8ff1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/.gitignore @@ -0,0 +1 @@ +my-bucket diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree.go new file mode 100644 index 0000000000000000000000000000000000000000..037cbb953cb188c7b770d8970bc5cab337c67d75 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree.go @@ -0,0 +1,396 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/rbtree" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type binarySearchTree struct { + root *binarySearchNode +} + +// returns net additions of insert in bytes, and previous secondary keys +func (t *binarySearchTree) insert(key, value []byte, secondaryKeys [][]byte) (int, [][]byte) { + if t.root == nil { + t.root = &binarySearchNode{ + key: key, + value: value, + secondaryKeys: secondaryKeys, + colourIsRed: false, // root node is always black + } + return len(key) + len(value), nil + } + + addition, newRoot, previousSecondaryKeys := t.root.insert(key, value, secondaryKeys) + if newRoot != nil { + t.root = newRoot + } + t.root.colourIsRed = false // Can be flipped in the process of balancing, but root is always black + + return addition, previousSecondaryKeys +} + +func (t *binarySearchTree) get(key []byte) ([]byte, error) { + if t.root == nil { + return nil, lsmkv.NotFound + } + + return t.root.get(key) +} + +func (t *binarySearchTree) getNode(key []byte) (*binarySearchNode, error) { + if t.root == nil { + return nil, lsmkv.NotFound + } + + return t.root.getNode(key) +} + +func (t *binarySearchTree) setTombstone(key, value []byte, secondaryKeys [][]byte) { + if t.root == nil { + // we need to actively insert a node with a tombstone, even if this node is + // not present because we still need to propagate the delete into the disk + // segments. 
It could refer to an entity which was created in a previous + // segment and is thus unknown to this memtable + t.root = &binarySearchNode{ + key: key, + value: value, + tombstone: true, + secondaryKeys: secondaryKeys, + colourIsRed: false, // root node is always black + } + return + } + + newRoot := t.root.setTombstone(key, value, secondaryKeys) + if newRoot != nil { + t.root = newRoot + } + t.root.colourIsRed = false // Can be flipped in the process of balancing, but root is always black +} + +func (t *binarySearchTree) flattenInOrder() []*binarySearchNode { + if t.root == nil { + return nil + } + + return t.root.flattenInOrder() +} + +type countStats struct { + upsertKeys [][]byte + tombstonedKeys [][]byte +} + +func (c *countStats) hasUpsert(needle []byte) bool { + if c == nil { + return false + } + + for _, hay := range c.upsertKeys { + if bytes.Equal(needle, hay) { + return true + } + } + + return false +} + +func (c *countStats) hasTombstone(needle []byte) bool { + if c == nil { + return false + } + + for _, hay := range c.tombstonedKeys { + if bytes.Equal(needle, hay) { + return true + } + } + + return false +} + +func (t *binarySearchTree) countStats() *countStats { + stats := &countStats{} + if t.root == nil { + return stats + } + + t.root.countStats(stats) + return stats +} + +type binarySearchNode struct { + key []byte + value []byte + secondaryKeys [][]byte + left *binarySearchNode + right *binarySearchNode + parent *binarySearchNode + tombstone bool + colourIsRed bool +} + +func (n *binarySearchNode) Parent() rbtree.Node { + if n == nil { + return nil + } + return n.parent +} + +func (n *binarySearchNode) SetParent(parent rbtree.Node) { + if n == nil { + addNewSearchNodeReceiver(&n) + } + + if parent == nil { + n.parent = nil + return + } + + n.parent = parent.(*binarySearchNode) +} + +func (n *binarySearchNode) Left() rbtree.Node { + if n == nil { + return nil + } + return n.left +} + +func (n *binarySearchNode) SetLeft(left rbtree.Node) { + if n == 
nil { + addNewSearchNodeReceiver(&n) + } + + if left == nil { + n.left = nil + return + } + + n.left = left.(*binarySearchNode) +} + +func (n *binarySearchNode) Right() rbtree.Node { + if n == nil { + return nil + } + return n.right +} + +func (n *binarySearchNode) SetRight(right rbtree.Node) { + if n == nil { + addNewSearchNodeReceiver(&n) + } + + if right == nil { + n.right = nil + return + } + + n.right = right.(*binarySearchNode) +} + +func (n *binarySearchNode) IsRed() bool { + if n == nil { + return false + } + return n.colourIsRed +} + +func (n *binarySearchNode) SetRed(isRed bool) { + n.colourIsRed = isRed +} + +func (n *binarySearchNode) IsNil() bool { + return n == nil +} + +func addNewSearchNodeReceiver(nodePtr **binarySearchNode) { + *nodePtr = &binarySearchNode{} +} + +// returns net additions of insert in bytes +func (n *binarySearchNode) insert(key, value []byte, secondaryKeys [][]byte) (netAdditions int, newRoot *binarySearchNode, previousSecondaryKeys [][]byte) { + if bytes.Equal(key, n.key) { + // since the key already exists, we only need to take the difference + // between the existing value and the new one to determine net change + netAdditions = len(n.value) - len(value) + if netAdditions < 0 { + netAdditions *= -1 + } + + // assign new value to node + n.value = value + + // reset tombstone in case it had one + n.tombstone = false + previousSecondaryKeys = n.secondaryKeys + n.secondaryKeys = secondaryKeys + + newRoot = nil // tree root does not change when replacing node + return + } + + if bytes.Compare(key, n.key) < 0 { + if n.left != nil { + netAdditions, newRoot, previousSecondaryKeys = n.left.insert(key, value, secondaryKeys) + return + } else { + n.left = &binarySearchNode{ + key: key, + value: value, + secondaryKeys: secondaryKeys, + parent: n, + colourIsRed: true, // new nodes are always red, except root node which is handled in the tree itself + } + newRoot = binarySearchNodeFromRB(rbtree.Rebalance(n.left)) + netAdditions = len(key) + 
len(value) + return + } + } else { + if n.right != nil { + netAdditions, newRoot, previousSecondaryKeys = n.right.insert(key, value, secondaryKeys) + return + } else { + n.right = &binarySearchNode{ + key: key, + value: value, + secondaryKeys: secondaryKeys, + parent: n, + colourIsRed: true, + } + netAdditions = len(key) + len(value) + newRoot = binarySearchNodeFromRB(rbtree.Rebalance(n.right)) + return + } + } +} + +func (n *binarySearchNode) get(key []byte) ([]byte, error) { + node, err := n.getNode(key) + if err != nil { + return nil, err + } + return node.value, nil +} + +func (n *binarySearchNode) getNode(key []byte) (*binarySearchNode, error) { + if bytes.Equal(n.key, key) { + if !n.tombstone { + return n, nil + } else { + return nil, errorFromTombstonedValue(n.value) + } + } + + if bytes.Compare(key, n.key) < 0 { + if n.left == nil { + return nil, lsmkv.NotFound + } + + return n.left.getNode(key) + } else { + if n.right == nil { + return nil, lsmkv.NotFound + } + + return n.right.getNode(key) + } +} + +func (n *binarySearchNode) setTombstone(key, value []byte, secondaryKeys [][]byte) *binarySearchNode { + if bytes.Equal(n.key, key) { + n.value = value + n.tombstone = true + n.secondaryKeys = secondaryKeys + return nil + } + + if bytes.Compare(key, n.key) < 0 { + if n.left == nil { + n.left = &binarySearchNode{ + key: key, + value: value, + tombstone: true, + secondaryKeys: secondaryKeys, + parent: n, + colourIsRed: true, + } + return binarySearchNodeFromRB(rbtree.Rebalance(n.left)) + + } + return n.left.setTombstone(key, value, secondaryKeys) + } else { + if n.right == nil { + n.right = &binarySearchNode{ + key: key, + value: value, + tombstone: true, + secondaryKeys: secondaryKeys, + parent: n, + colourIsRed: true, + } + return binarySearchNodeFromRB(rbtree.Rebalance(n.right)) + } + return n.right.setTombstone(key, value, secondaryKeys) + } +} + +func (n *binarySearchNode) flattenInOrder() []*binarySearchNode { + var left []*binarySearchNode + var right 
[]*binarySearchNode + + if n.left != nil { + left = n.left.flattenInOrder() + } + + if n.right != nil { + right = n.right.flattenInOrder() + } + + right = append([]*binarySearchNode{n}, right...) + return append(left, right...) +} + +// This is not very allocation friendly, since we basically need to allocate +// once for each element in the memtable. However, these results can +// potentially be cached, as we don't care about the intermediary results, just +// the net additions. +func (n *binarySearchNode) countStats(stats *countStats) { + if n.tombstone { + stats.tombstonedKeys = append(stats.tombstonedKeys, n.key) + } else { + stats.upsertKeys = append(stats.upsertKeys, n.key) + } + + if n.left != nil { + n.left.countStats(stats) + } + + if n.right != nil { + n.right.countStats(stats) + } +} + +func binarySearchNodeFromRB(rbNode rbtree.Node) (bsNode *binarySearchNode) { + if rbNode == nil { + bsNode = nil + return + } + bsNode = rbNode.(*binarySearchNode) + return +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree_map.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree_map.go new file mode 100644 index 0000000000000000000000000000000000000000..177b6c74ace773d2a060f95e9df8e697df85e6a4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree_map.go @@ -0,0 +1,260 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "sort" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/rbtree" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type binarySearchTreeMap struct { + root *binarySearchNodeMap +} + +func (t *binarySearchTreeMap) insert(key []byte, pair MapPair) { + if t.root == nil { + t.root = &binarySearchNodeMap{ + key: key, + values: []MapPair{pair}, + colourIsRed: false, // root node is always black + } + return + } + + if newRoot := t.root.insert(key, pair); newRoot != nil { + t.root = newRoot + } + t.root.colourIsRed = false // Can be flipped in the process of balancing, but root is always black +} + +func (t *binarySearchTreeMap) get(key []byte) ([]MapPair, error) { + if t.root == nil { + return nil, lsmkv.NotFound + } + + return t.root.get(key) +} + +func (t *binarySearchTreeMap) flattenInOrder() []*binarySearchNodeMap { + if t.root == nil { + return nil + } + + return t.root.flattenInOrder() +} + +type binarySearchNodeMap struct { + key []byte + values []MapPair + left *binarySearchNodeMap + right *binarySearchNodeMap + parent *binarySearchNodeMap + colourIsRed bool +} + +func (n *binarySearchNodeMap) Parent() rbtree.Node { + if n == nil { + return nil + } + return n.parent +} + +func (n *binarySearchNodeMap) SetParent(parent rbtree.Node) { + if n == nil { + addNewSearchNodeMapReceiver(&n) + } + + if parent == nil { + n.parent = nil + return + } + + n.parent = parent.(*binarySearchNodeMap) +} + +func (n *binarySearchNodeMap) Left() rbtree.Node { + if n == nil { + return nil + } + return n.left +} + +func (n *binarySearchNodeMap) SetLeft(left rbtree.Node) { + if n == nil { + addNewSearchNodeMapReceiver(&n) + } + + if left == nil { + n.left = nil + return + } + + n.left = left.(*binarySearchNodeMap) +} + +func (n *binarySearchNodeMap) Right() rbtree.Node { + if n == nil { + return nil + } + return n.right +} + +func (n *binarySearchNodeMap) SetRight(right rbtree.Node) { + if n == 
nil { + addNewSearchNodeMapReceiver(&n) + } + + if right == nil { + n.right = nil + return + } + + n.right = right.(*binarySearchNodeMap) +} + +func (n *binarySearchNodeMap) IsRed() bool { + if n == nil { + return false + } + return n.colourIsRed +} + +func (n *binarySearchNodeMap) SetRed(isRed bool) { + n.colourIsRed = isRed +} + +func (n *binarySearchNodeMap) IsNil() bool { + return n == nil +} + +func addNewSearchNodeMapReceiver(nodePtr **binarySearchNodeMap) { + *nodePtr = &binarySearchNodeMap{} +} + +func (n *binarySearchNodeMap) insert(key []byte, pair MapPair) *binarySearchNodeMap { + if bytes.Equal(key, n.key) { + n.values = append(n.values, pair) + return nil // tree root does not change when replacing node + } + + if bytes.Compare(key, n.key) < 0 { + if n.left != nil { + return n.left.insert(key, pair) + } else { + n.left = &binarySearchNodeMap{ + key: key, + parent: n, + colourIsRed: true, + values: []MapPair{pair}, + } + return binarySearchNodeMapFromRB(rbtree.Rebalance(n.left)) + } + } else { + if n.right != nil { + return n.right.insert(key, pair) + } else { + n.right = &binarySearchNodeMap{ + key: key, + parent: n, + colourIsRed: true, + values: []MapPair{pair}, + } + return binarySearchNodeMapFromRB(rbtree.Rebalance(n.right)) + } + } +} + +func (n *binarySearchNodeMap) get(key []byte) ([]MapPair, error) { + if bytes.Equal(n.key, key) { + return sortAndDedupValues(n.values), nil + } + + if bytes.Compare(key, n.key) < 0 { + if n.left == nil { + return nil, lsmkv.NotFound + } + + return n.left.get(key) + } else { + if n.right == nil { + return nil, lsmkv.NotFound + } + + return n.right.get(key) + } +} + +func (n *binarySearchNodeMap) flattenInOrder() []*binarySearchNodeMap { + var left []*binarySearchNodeMap + var right []*binarySearchNodeMap + + if n.left != nil { + left = n.left.flattenInOrder() + } + + if n.right != nil { + right = n.right.flattenInOrder() + } + + // the values are sorted on read for performance reasons, the assumption is + // that 
while a memtable is open writes a much more common, thus we write map + // KVs unsorted and only sort/dedup them on read. + right = append([]*binarySearchNodeMap{{ + key: n.key, + values: sortAndDedupValues(n.values), + colourIsRed: n.colourIsRed, + }}, right...) + return append(left, right...) +} + +// takes a list of MapPair and sorts it while keeping the original order. Then +// removes redundancies (from updates or deletes after previous inserts) using +// a simple deduplication process. +func sortAndDedupValues(in []MapPair) []MapPair { + out := make([]MapPair, len(in)) + copy(out, in) + + // use SliceStable so that we keep the insert order on duplicates. This is + // important because otherwise we can't dedup them correctly if we don't know + // in which order they came in. + sort.SliceStable(out, func(a, b int) bool { + return bytes.Compare(out[a].Key, out[b].Key) < 0 + }) + + // now deduping is as simple as looking one key ahead - if it's the same key + // simply skip the current element. Meaning "out" will be a subset of + // (sorted) "in". 
+ outIndex := 0 + for inIndex, pair := range out { + // look ahead + if inIndex+1 < len(out) && bytes.Equal(out[inIndex+1].Key, pair.Key) { + continue + } + + out[outIndex] = pair + outIndex++ + } + + return out[:outIndex] +} + +func binarySearchNodeMapFromRB(rbNode rbtree.Node) (bsNode *binarySearchNodeMap) { + if rbNode == nil { + bsNode = nil + return + } + bsNode = rbNode.(*binarySearchNodeMap) + return +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree_map_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree_map_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ffc9113746da100709e5a173da505271f6972542 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree_map_test.go @@ -0,0 +1,212 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_BinarySearchTreeMap(t *testing.T) { + t.Run("single row key, single map key", func(t *testing.T) { + tree := &binarySearchTreeMap{} + rowKey := []byte("rowkey") + + pair1 := MapPair{ + Key: []byte("map-key-1"), + Value: []byte("map-value-1"), + } + + tree.insert(rowKey, pair1) + + res, err := tree.get(rowKey) + require.Nil(t, err) + assert.Equal(t, []MapPair{ + { + Key: []byte("map-key-1"), + Value: []byte("map-value-1"), + }, + }, res) + }) + + t.Run("single row key, updated map value", func(t *testing.T) { + tree := &binarySearchTreeMap{} + rowKey := []byte("rowkey") + + tree.insert(rowKey, MapPair{ + Key: []byte("c"), + Value: []byte("c1"), + }) + + tree.insert(rowKey, MapPair{ + Key: []byte("a"), + Value: []byte("a1"), + }) + + tree.insert(rowKey, MapPair{ + Key: []byte("b"), + Value: []byte("b1"), + }) + + tree.insert(rowKey, MapPair{ + Key: []byte("b"), + Value: []byte("b2"), + }) + + tree.insert(rowKey, MapPair{ + Key: []byte("a"), + Value: []byte("a2"), + }) + + res, err := tree.get(rowKey) + require.Nil(t, err) + assert.Equal(t, []MapPair{ + { + Key: []byte("a"), + Value: []byte("a2"), + }, + { + Key: []byte("b"), + Value: []byte("b2"), + }, + { + Key: []byte("c"), + Value: []byte("c1"), + }, + }, res) + }) + + t.Run("two row keys, updated map value", func(t *testing.T) { + tree := &binarySearchTreeMap{} + rowKey1 := []byte("rowkey") + rowKey2 := []byte("other-rowkey") + + tree.insert(rowKey1, MapPair{ + Key: []byte("c"), + Value: []byte("c1"), + }) + + tree.insert(rowKey1, MapPair{ + Key: []byte("a"), + Value: []byte("a1"), + }) + + tree.insert(rowKey2, MapPair{ + Key: []byte("z"), + Value: []byte("z1"), + }) + + tree.insert(rowKey1, MapPair{ + Key: []byte("b"), + Value: []byte("b1"), + }) + + tree.insert(rowKey2, MapPair{ + Key: []byte("x"), + Value: []byte("x1"), + }) + + 
tree.insert(rowKey1, MapPair{ + Key: []byte("b"), + Value: []byte("b2"), + }) + + tree.insert(rowKey1, MapPair{ + Key: []byte("a"), + Value: []byte("a2"), + }) + + tree.insert(rowKey2, MapPair{ + Key: []byte("x"), + Value: []byte("x2"), + }) + + res, err := tree.get(rowKey1) + require.Nil(t, err) + assert.Equal(t, []MapPair{ + { + Key: []byte("a"), + Value: []byte("a2"), + }, + { + Key: []byte("b"), + Value: []byte("b2"), + }, + { + Key: []byte("c"), + Value: []byte("c1"), + }, + }, res) + + res, err = tree.get(rowKey2) + require.Nil(t, err) + assert.Equal(t, []MapPair{ + { + Key: []byte("x"), + Value: []byte("x2"), + }, + { + Key: []byte("z"), + Value: []byte("z1"), + }, + }, res) + }) + + t.Run("single row key, deleted map values", func(t *testing.T) { + tree := &binarySearchTreeMap{} + rowKey := []byte("rowkey") + + tree.insert(rowKey, MapPair{ + Key: []byte("c"), + Value: []byte("c1"), + }) + + tree.insert(rowKey, MapPair{ + Key: []byte("a"), + Value: []byte("a1"), + }) + + tree.insert(rowKey, MapPair{ + Key: []byte("b"), + Value: []byte("b1"), + }) + + tree.insert(rowKey, MapPair{ + Key: []byte("b"), + Tombstone: true, + }) + + tree.insert(rowKey, MapPair{ + Key: []byte("a"), + Tombstone: true, + }) + + res, err := tree.get(rowKey) + require.Nil(t, err) + assert.Equal(t, []MapPair{ + { + Key: []byte("a"), + Tombstone: true, + }, + { + Key: []byte("b"), + Tombstone: true, + }, + { + Key: []byte("c"), + Value: []byte("c1"), + }, + }, res) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree_multi.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree_multi.go new file mode 100644 index 0000000000000000000000000000000000000000..8444f1b66dacc32d8f8af75b87a6612e57891dcd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree_multi.go @@ -0,0 +1,276 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// 
\ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/rbtree" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type binarySearchTreeMulti struct { + root *binarySearchNodeMulti +} + +type value struct { + value []byte + tombstone bool +} + +func (t *binarySearchTreeMulti) insert(key []byte, values []value) { + if t.root == nil { + t.root = &binarySearchNodeMulti{ + key: key, + values: values, + colourIsRed: false, // root node is always black + } + return + } + + if newRoot := t.root.insert(key, values); newRoot != nil { + t.root = newRoot + } + t.root.colourIsRed = false // Can be flipped in the process of balancing, but root is always black +} + +func (t *binarySearchTreeMulti) get(key []byte) ([]value, error) { + if t.root == nil { + return nil, lsmkv.NotFound + } + + return t.root.get(key) +} + +// // set Tombstone for the entire entry, i.e. all values for this key +// func (t *binarySearchTreeMulti) setTombstone(key []byte) { +// if t.root == nil { +// // we need to actively insert a node with a tombstone, even if this node is +// // not present because we still need to propagate the delete into the disk +// // segments. 
It could refer to an entity which was created in a previous +// // segment and is thus unknown to this memtable +// t.root = &binarySearchNodeMulti{ +// key: key, +// value: nil, +// tombstone: true, +// } +// } + +// t.root.setTombstone(key) +// } + +func (t *binarySearchTreeMulti) flattenInOrder() []*binarySearchNodeMulti { + if t.root == nil { + return nil + } + + return t.root.flattenInOrder() +} + +type binarySearchNodeMulti struct { + key []byte + values []value + left *binarySearchNodeMulti + right *binarySearchNodeMulti + parent *binarySearchNodeMulti + colourIsRed bool +} + +func (n *binarySearchNodeMulti) Parent() rbtree.Node { + if n == nil { + return nil + } + return n.parent +} + +func (n *binarySearchNodeMulti) SetParent(parent rbtree.Node) { + if n == nil { + addNewSearchNodeMultiReceiver(&n) + } + + if parent == nil { + n.parent = nil + return + } + + n.parent = parent.(*binarySearchNodeMulti) +} + +func (n *binarySearchNodeMulti) Left() rbtree.Node { + if n == nil { + return nil + } + return n.left +} + +func (n *binarySearchNodeMulti) SetLeft(left rbtree.Node) { + if n == nil { + addNewSearchNodeMultiReceiver(&n) + } + + if left == nil { + n.left = nil + return + } + + n.left = left.(*binarySearchNodeMulti) +} + +func (n *binarySearchNodeMulti) Right() rbtree.Node { + if n == nil { + return nil + } + return n.right +} + +func (n *binarySearchNodeMulti) SetRight(right rbtree.Node) { + if n == nil { + addNewSearchNodeMultiReceiver(&n) + } + + if right == nil { + n.right = nil + return + } + + n.right = right.(*binarySearchNodeMulti) +} + +func (n *binarySearchNodeMulti) IsRed() bool { + if n == nil { + return false + } + return n.colourIsRed +} + +func (n *binarySearchNodeMulti) SetRed(isRed bool) { + n.colourIsRed = isRed +} + +func (n *binarySearchNodeMulti) IsNil() bool { + return n == nil +} + +func addNewSearchNodeMultiReceiver(nodePtr **binarySearchNodeMulti) { + *nodePtr = &binarySearchNodeMulti{} +} + +func (n *binarySearchNodeMulti) 
insert(key []byte, values []value) *binarySearchNodeMulti { + if bytes.Equal(key, n.key) { + n.values = append(n.values, values...) + return nil + } + + if bytes.Compare(key, n.key) < 0 { + if n.left != nil { + return n.left.insert(key, values) + } else { + n.left = &binarySearchNodeMulti{ + key: key, + values: values, + parent: n, + colourIsRed: true, + } + return binarySearchNodeMultiFromRB(rbtree.Rebalance(n.left)) + } + } else { + if n.right != nil { + return n.right.insert(key, values) + } else { + n.right = &binarySearchNodeMulti{ + key: key, + values: values, + parent: n, + colourIsRed: true, + } + return binarySearchNodeMultiFromRB(rbtree.Rebalance(n.right)) + } + } +} + +func (n *binarySearchNodeMulti) get(key []byte) ([]value, error) { + if bytes.Equal(n.key, key) { + return n.values, nil + } + + if bytes.Compare(key, n.key) < 0 { + if n.left == nil { + return nil, lsmkv.NotFound + } + + return n.left.get(key) + } else { + if n.right == nil { + return nil, lsmkv.NotFound + } + + return n.right.get(key) + } +} + +func binarySearchNodeMultiFromRB(rbNode rbtree.Node) (bsNode *binarySearchNodeMulti) { + if rbNode == nil { + bsNode = nil + return + } + bsNode = rbNode.(*binarySearchNodeMulti) + return +} + +// func (n *binarySearchNodeMulti) setTombstone(key []byte) { +// if bytes.Equal(n.key, key) { +// n.value = nil +// n.tombstone = true +// } + +// if bytes.Compare(key, n.key) < 0 { +// if n.left == nil { +// n.left = &binarySearchNodeMulti{ +// key: key, +// value: nil, +// tombstone: true, +// } +// return +// } + +// n.left.setTombstone(key) +// return +// } else { +// if n.right == nil { +// n.right = &binarySearchNodeMulti{ +// key: key, +// value: nil, +// tombstone: true, +// } +// return +// } + +// n.right.setTombstone(key) +// return +// } +// } + +func (n *binarySearchNodeMulti) flattenInOrder() []*binarySearchNodeMulti { + var left []*binarySearchNodeMulti + var right []*binarySearchNodeMulti + + if n.left != nil { + left = 
n.left.flattenInOrder() + } + + if n.right != nil { + right = n.right.flattenInOrder() + } + + right = append([]*binarySearchNodeMulti{n}, right...) + return append(left, right...) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree_test.go new file mode 100644 index 0000000000000000000000000000000000000000..506824f2cb38670ee2103c6c10f3728a38dd2b75 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/binary_search_tree_test.go @@ -0,0 +1,133 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "crypto/rand" + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +// This test asserts that the *binarySearchTree.insert +// method properly calculates the net additions of a +// new node into the tree +func TestInsertNetAdditions_Replace(t *testing.T) { + t.Run("single node entry", func(t *testing.T) { + tree := &binarySearchTree{} + + key := make([]byte, 8) + val := make([]byte, 8) + + rand.Read(key) + rand.Read(val) + + n, _ := tree.insert(key, val, nil) + require.Equal(t, len(key)+len(val), n) + }) + + t.Run("multiple unique node entries", func(t *testing.T) { + tree := &binarySearchTree{} + + amount := 100 + size := 8 + + var n int + for i := 0; i < amount; i++ { + key := make([]byte, size) + val := make([]byte, size) + + rand.Read(key) + rand.Read(val) + + newAdditions, _ := tree.insert(key, val, nil) + n += newAdditions + } + + require.Equal(t, amount*size*2, n) + }) + + t.Run("multiple non-unique node entries", func(t *testing.T) { + tree := &binarySearchTree{} + + var ( + amount = 100 + keySize = 100 + 
origValSize = 100 + newValSize = origValSize * 100 + keys = make([][]byte, amount) + vals = make([][]byte, amount) + + netAdditions int + ) + + // write the keys and original values + for i := range keys { + key := make([]byte, keySize) + rand.Read(key) + + val := make([]byte, origValSize) + rand.Read(val) + + keys[i], vals[i] = key, val + } + + // make initial inserts + for i := range keys { + currentNetAddition, _ := tree.insert(keys[i], vals[i], nil) + netAdditions += currentNetAddition + } + + // change the values of the existing keys + // with new values of different length + for i := 0; i < amount; i++ { + val := make([]byte, newValSize) + rand.Read(val) + + vals[i] = val + } + + for i := 0; i < amount; i++ { + currentNetAddition, _ := tree.insert(keys[i], vals[i], nil) + netAdditions += currentNetAddition + } + + // Formulas for calculating the total net additions after + // updating the keys with differently sized values + expectedFirstNetAdd := amount * (keySize + origValSize) + expectedSecondNetAdd := (amount * (keySize + newValSize)) - (amount * keySize) - (amount * origValSize) + expectedNetAdditions := expectedFirstNetAdd + expectedSecondNetAdd + + require.Equal(t, expectedNetAdditions, netAdditions) + }) + + // test to assure multiple tombstone nodes are not created when same value is added and deleted multiple times + // https://semi-technology.atlassian.net/browse/WEAVIATE-31 + t.Run("consecutive adding and deleting value does not multiply nodes", func(t *testing.T) { + tree := &binarySearchTree{} + + key := []byte(uuid.New().String()) + value := make([]byte, 100) + rand.Read(value) + + for i := 0; i < 10; i++ { + tree.insert(key, value, nil) + tree.setTombstone(key, nil, nil) + } + + flat := tree.flattenInOrder() + + require.Equal(t, 1, len(flat)) + require.True(t, flat[0].tombstone) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bloom_filter_metrics.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bloom_filter_metrics.go new file mode 100644 index 0000000000000000000000000000000000000000..6a0dcbdeca4110628a1cb0200a87b99c7db9e871 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bloom_filter_metrics.go @@ -0,0 +1,28 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +type bloomFilterMetrics struct { + trueNegative TimeObserver + falsePositive TimeObserver + truePositive TimeObserver +} + +// newBloomFilterMetrics curries the prometheus metrics just once at +// initialization to prevent further allocs on the hot path +func newBloomFilterMetrics(metrics *Metrics) *bloomFilterMetrics { + return &bloomFilterMetrics{ + trueNegative: metrics.BloomFilterObserver("replace", "get_true_negative"), + falsePositive: metrics.BloomFilterObserver("replace", "get_false_positive"), + truePositive: metrics.BloomFilterObserver("replace", "get_true_positive"), + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket.go new file mode 100644 index 0000000000000000000000000000000000000000..30ddb67007d759d420f5dc70f1b0bff4e4e23b5b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket.go @@ -0,0 +1,1878 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "math" + "os" + "path/filepath" + "runtime/debug" + "sort" + "sync" + "time" + + "github.com/weaviate/weaviate/entities/diskio" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/terms" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + entcfg "github.com/weaviate/weaviate/entities/config" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/interval" + "github.com/weaviate/weaviate/entities/lsmkv" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storagestate" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +const FlushAfterDirtyDefault = 60 * time.Second + +type BucketCreator interface { + NewBucket(ctx context.Context, dir, rootDir string, logger logrus.FieldLogger, + metrics *Metrics, compactionCallbacks, flushCallbacks cyclemanager.CycleCallbackGroup, + opts ...BucketOption, + ) (*Bucket, error) +} + +type Bucket struct { + dir string + rootDir string + active *Memtable + flushing *Memtable + disk *SegmentGroup + logger logrus.FieldLogger + + // Lock() means a move from active to flushing is happening, RLock() is + // normal operation + flushLock sync.RWMutex + haltedFlushTimer *interval.BackoffTimer + + minWalThreshold uint64 + walThreshold uint64 + flushDirtyAfter time.Duration + memtableThreshold uint64 + minMMapSize int64 + memtableResizer *memtableSizeAdvisor + strategy string + // Strategy inverted index is supposed to be created with, but existing + // segment 
files were created with different one. + // It can happen when new strategy were introduced to weaviate, but + // files are already created using old implementation. + // Example: RoaringSet strategy replaces CollectionSet strategy. + // Field can be used for migration files of old strategy to newer one. + desiredStrategy string + secondaryIndices uint16 + + // Optional to avoid syscalls + mmapContents bool + writeMetadata bool + + // for backward compatibility + legacyMapSortingBeforeCompaction bool + + flushCallbackCtrl cyclemanager.CycleCallbackCtrl + + status storagestate.Status + statusLock sync.RWMutex + + metrics *Metrics + + // all "replace" buckets support counting through net additions, but not all + // produce a meaningful count. Typically, the only count we're interested in + // is that of the bucket that holds objects + monitorCount bool + + pauseTimer *prometheus.Timer // Times the pause + + // Whether tombstones (set/map/replace types) or deletions (roaringset type) + // should be kept in root segment during compaction process. + // Since segments are immutable, deletions are added as new entries with + // tombstones. Tombstones are by default copied to merged segment, as they + // can refer to keys/values present in previous segments. + // Those tombstones can be removed entirely when merging with root (1st) segment, + // due to lack of previous segments, tombstones may relate to. + // As info about key/value being deleted (based on tombstone presence) may be important + // for some use cases (e.g. replication needs to know if object(ObjectsBucketLSM) was deleted) + // keeping tombstones on compaction is optional + keepTombstones bool + + // Init and use bloom filter for getting key from bucket segments. + // As some buckets can be accessed only with cursor (see flat index), + // where bloom filter is not applicable, it can be disabled. 
+ // ON by default + useBloomFilter bool + + // Net additions keep track of number of elements stored in bucket (of type replace). + // As some buckets don't have to provide Count info (see flat index), + // tracking additions can be disabled. + // ON by default + calcCountNetAdditions bool + + forceCompaction bool + disableCompaction bool + lazySegmentLoading bool + + // if true, don't increase the segment level during compaction. + // useful for migrations, as it allows to merge reindex and ingest buckets + // without discontinuities in segment levels. + keepLevelCompaction bool + + // optionally supplied to prevent starting memory-intensive + // processes when memory pressure is high + allocChecker memwatch.AllocChecker + + // optional segment size limit. If set, a compaction will skip segments that + // sum to more than the specified value. + maxSegmentSize int64 + + // optional segments cleanup interval. If set, segments will be cleaned of + // redundant obsolete data, that was deleted or updated in newer segments + // (currently supported only in buckets of REPLACE strategy) + segmentsCleanupInterval time.Duration + + // optional validation of segment file checksums. Enabling this option + // introduces latency of segment availability, for the tradeoff of + // ensuring segment files have integrity before reading them. + enableChecksumValidation bool + + // keep segments in memory for more performant search + // (currently used by roaringsetrange inverted indexes) + keepSegmentsInMemory bool + + // pool of buffers for bitmaps merges + // (currently used by roaringsetrange inverted indexes) + bitmapBufPool roaringset.BitmapBufPool + + // add information like the level and the strategy into the filename so these things can be checked without loading + // the segment + writeSegmentInfoIntoFileName bool + + bm25Config *models.BM25Config +} + +func NewBucketCreator() *Bucket { return &Bucket{} } + +// NewBucket initializes a new bucket. 
It either loads the state from disk if +// it exists, or initializes new state. +// +// You do not need to ever call NewBucket() yourself, if you are using a +// [Store]. In this case the [Store] can manage buckets for you, using methods +// such as CreateOrLoadBucket(). +func (*Bucket) NewBucket(ctx context.Context, dir, rootDir string, logger logrus.FieldLogger, + metrics *Metrics, compactionCallbacks, flushCallbacks cyclemanager.CycleCallbackGroup, + opts ...BucketOption, +) (*Bucket, error) { + beforeAll := time.Now() + defaultMemTableThreshold := uint64(10 * 1024 * 1024) + defaultWalThreshold := uint64(1024 * 1024 * 1024) + defaultFlushAfterDirty := FlushAfterDirtyDefault + defaultStrategy := StrategyReplace + + if err := os.MkdirAll(dir, 0o700); err != nil { + return nil, err + } + + files, err := diskio.GetFileWithSizes(dir) + if err != nil { + return nil, err + } + + b := &Bucket{ + dir: dir, + rootDir: rootDir, + memtableThreshold: defaultMemTableThreshold, + walThreshold: defaultWalThreshold, + flushDirtyAfter: defaultFlushAfterDirty, + strategy: defaultStrategy, + mmapContents: true, + logger: logger, + metrics: metrics, + useBloomFilter: true, + calcCountNetAdditions: false, + haltedFlushTimer: interval.NewBackoffTimer(), + writeSegmentInfoIntoFileName: false, + } + + for _, opt := range opts { + if err := opt(b); err != nil { + return nil, err + } + } + + if b.memtableResizer != nil { + b.memtableThreshold = uint64(b.memtableResizer.Initial()) + } + + if b.disableCompaction { + compactionCallbacks = cyclemanager.NewCallbackGroupNoop() + } + + b.desiredStrategy = b.strategy + + sg, err := newSegmentGroup(ctx, logger, metrics, + sgConfig{ + dir: dir, + strategy: b.strategy, + mapRequiresSorting: b.legacyMapSortingBeforeCompaction, + monitorCount: b.monitorCount, + mmapContents: b.mmapContents, + keepTombstones: b.keepTombstones, + forceCompaction: b.forceCompaction, + useBloomFilter: b.useBloomFilter, + calcCountNetAdditions: b.calcCountNetAdditions, + 
maxSegmentSize: b.maxSegmentSize, + cleanupInterval: b.segmentsCleanupInterval, + enableChecksumValidation: b.enableChecksumValidation, + keepSegmentsInMemory: b.keepSegmentsInMemory, + MinMMapSize: b.minMMapSize, + bm25config: b.bm25Config, + keepLevelCompaction: b.keepLevelCompaction, + writeSegmentInfoIntoFileName: b.writeSegmentInfoIntoFileName, + writeMetadata: b.writeMetadata, + }, compactionCallbacks, b, files) + if err != nil { + return nil, fmt.Errorf("init disk segments: %w", err) + } + + b.disk = sg + + if b.active == nil { + err = b.setNewActiveMemtable() + if err != nil { + return nil, err + } + } + + id := "bucket/flush/" + b.dir + b.flushCallbackCtrl = flushCallbacks.Register(id, b.flushAndSwitchIfThresholdsMet) + + b.metrics.TrackStartupBucket(beforeAll) + + if err := GlobalBucketRegistry.TryAdd(dir); err != nil { + // prevent accidentally trying to register the same bucket twice + return nil, err + } + + return b, nil +} + +func (b *Bucket) GetDir() string { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + return b.dir +} + +func (b *Bucket) GetRootDir() string { + return b.rootDir +} + +func (b *Bucket) GetStrategy() string { + return b.strategy +} + +func (b *Bucket) GetDesiredStrategy() string { + return b.desiredStrategy +} + +func (b *Bucket) GetSecondaryIndices() uint16 { + return b.secondaryIndices +} + +func (b *Bucket) GetStatus() storagestate.Status { + b.statusLock.RLock() + defer b.statusLock.RUnlock() + + return b.status +} + +func (b *Bucket) GetMemtableThreshold() uint64 { + return b.memtableThreshold +} + +func (b *Bucket) GetWalThreshold() uint64 { + return b.walThreshold +} + +func (b *Bucket) GetFlushCallbackCtrl() cyclemanager.CycleCallbackCtrl { + return b.flushCallbackCtrl +} + +func (b *Bucket) IterateObjects(ctx context.Context, f func(object *storobj.Object) error) error { + cursor := b.Cursor() + defer cursor.Close() + + i := 0 + + for k, v := cursor.First(); k != nil; k, v = cursor.Next() { + obj, err := 
storobj.FromBinary(v) + if err != nil { + return fmt.Errorf("cannot unmarshal object %d, %w", i, err) + } + if err := f(obj); err != nil { + return fmt.Errorf("callback on object '%d' failed: %w", obj.DocID, err) + } + + i++ + } + + return nil +} + +func (b *Bucket) ApplyToObjectDigests(ctx context.Context, + afterInMemCallback func(), f func(object *storobj.Object) error, +) error { + // note: it's important to first create the on disk cursor so to avoid potential double scanning over flushing memtable + onDiskCursor := b.CursorOnDisk() + defer onDiskCursor.Close() + + inmemProcessedDocIDs := make(map[uint64]struct{}) + + // note: read-write access to active and flushing memtable will be blocked only during the scope of this inner function + err := func() error { + defer afterInMemCallback() + + inMemCursor := b.CursorInMem() + defer inMemCursor.Close() + + for k, v := inMemCursor.First(); k != nil; k, v = inMemCursor.Next() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + obj, err := storobj.FromBinaryUUIDOnly(v) + if err != nil { + return fmt.Errorf("cannot unmarshal object: %w", err) + } + if err := f(obj); err != nil { + return fmt.Errorf("callback on object '%d' failed: %w", obj.DocID, err) + } + + inmemProcessedDocIDs[obj.DocID] = struct{}{} + } + } + + return nil + }() + if err != nil { + return err + } + + for k, v := onDiskCursor.First(); k != nil; k, v = onDiskCursor.Next() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + obj, err := storobj.FromBinaryUUIDOnly(v) + if err != nil { + return fmt.Errorf("cannot unmarshal object: %w", err) + } + + if _, ok := inmemProcessedDocIDs[obj.DocID]; ok { + continue + } + + if err := f(obj); err != nil { + return fmt.Errorf("callback on object '%d' failed: %w", obj.DocID, err) + } + } + } + + return nil +} + +func (b *Bucket) IterateMapObjects(ctx context.Context, f func([]byte, []byte, []byte, bool) error) error { + cursor := b.MapCursor() + defer cursor.Close() + + for kList, vList 
:= cursor.First(ctx); kList != nil; kList, vList = cursor.Next(ctx) { + for _, v := range vList { + if err := f(kList, v.Key, v.Value, v.Tombstone); err != nil { + return fmt.Errorf("callback on object '%v' failed: %w", v, err) + } + } + } + + return nil +} + +func (b *Bucket) SetMemtableThreshold(size uint64) { + b.memtableThreshold = size +} + +// Get retrieves the single value for the given key. +// +// Get is specific to ReplaceStrategy and cannot be used with any of the other +// strategies. Use [Bucket.SetList] or [Bucket.MapList] instead. +// +// Get uses the regular or "primary" key for an object. If a bucket has +// secondary indexes, use [Bucket.GetBySecondary] to retrieve an object using +// its secondary key +func (b *Bucket) Get(key []byte) ([]byte, error) { + beforeFlushLock := time.Now() + b.flushLock.RLock() + if time.Since(beforeFlushLock) > 100*time.Millisecond { + b.logger.WithField("duration", time.Since(beforeFlushLock)). + WithField("action", "lsm_bucket_get_acquire_flush_lock"). + Debugf("Waited more than 100ms to obtain a flush lock during get") + } + defer b.flushLock.RUnlock() + + return b.get(key) +} + +func (b *Bucket) get(key []byte) ([]byte, error) { + beforeMemtable := time.Now() + v, err := b.active.get(key) + if time.Since(beforeMemtable) > 100*time.Millisecond { + b.logger.WithField("duration", time.Since(beforeMemtable)). + WithField("action", "lsm_bucket_get_active_memtable"). 
+ Debugf("Waited more than 100ms to retrieve object from memtable") + } + if err == nil { + // item found and no error, return and stop searching, since the strategy + // is replace + return v, nil + } + if errors.Is(err, lsmkv.Deleted) { + // deleted in the mem-table (which is always the latest) means we don't + // have to check the disk segments, return nil now + return nil, nil + } + + if !errors.Is(err, lsmkv.NotFound) { + panic(fmt.Sprintf("unsupported error in bucket.Get: %v\n", err)) + } + + if b.flushing != nil { + beforeFlushMemtable := time.Now() + v, err := b.flushing.get(key) + if time.Since(beforeFlushMemtable) > 100*time.Millisecond { + b.logger.WithField("duration", time.Since(beforeFlushMemtable)). + WithField("action", "lsm_bucket_get_flushing_memtable"). + Debugf("Waited over 100ms to retrieve object from flushing memtable") + } + if err == nil { + // item found and no error, return and stop searching, since the strategy + // is replace + return v, nil + } + if errors.Is(err, lsmkv.Deleted) { + // deleted in the now most recent memtable means we don't have to check + // the disk segments, return nil now + return nil, nil + } + + if !errors.Is(err, lsmkv.NotFound) { + panic("unsupported error in bucket.Get") + } + } + + return b.disk.get(key) +} + +func (b *Bucket) GetErrDeleted(key []byte) ([]byte, error) { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + v, err := b.active.get(key) + if err == nil { + // item found and no error, return and stop searching, since the strategy + // is replace + return v, nil + } + if errors.Is(err, lsmkv.Deleted) { + // deleted in the mem-table (which is always the latest) means we don't + // have to check the disk segments, return nil now + return nil, err + } + + if !errors.Is(err, lsmkv.NotFound) { + panic(fmt.Sprintf("unsupported error in bucket.Get: %v\n", err)) + } + + if b.flushing != nil { + v, err := b.flushing.get(key) + if err == nil { + // item found and no error, return and stop searching, since 
the strategy + // is replace + return v, nil + } + if errors.Is(err, lsmkv.Deleted) { + // deleted in the now most recent memtable means we don't have to check + // the disk segments, return nil now + return nil, err + } + + if !errors.Is(err, lsmkv.NotFound) { + panic("unsupported error in bucket.Get") + } + } + + return b.disk.getErrDeleted(key) +} + +// GetBySecondary retrieves an object using one of its secondary keys. A bucket +// can have an infinite number of secondary keys. Specify the secondary key +// position as the first argument. +// +// A real-life example of secondary keys is the Weaviate object store. Objects +// are stored with the user-facing ID as their primary key and with the doc-id +// (an ever-increasing uint64) as the secondary key. +// +// Similar to [Bucket.Get], GetBySecondary is limited to ReplaceStrategy. No +// equivalent exists for Set and Map, as those do not support secondary +// indexes. +func (b *Bucket) GetBySecondary(pos int, key []byte) ([]byte, error) { + bytes, _, err := b.GetBySecondaryIntoMemory(pos, key, nil) + return bytes, err +} + +// GetBySecondaryWithBuffer is like [Bucket.GetBySecondary], but also takes a +// buffer. It's in the response of the caller to pool the buffer, since the +// bucket does not know when the caller is done using it. The return bytes will +// likely point to the same memory that's part of the buffer. However, if the +// buffer is to small, a larger buffer may also be returned (second arg). +func (b *Bucket) GetBySecondaryWithBuffer(pos int, key []byte, buf []byte) ([]byte, []byte, error) { + bytes, newBuf, err := b.GetBySecondaryIntoMemory(pos, key, buf) + return bytes, newBuf, err +} + +// GetBySecondaryIntoMemory copies into the specified memory, and retrieves +// an object using one of its secondary keys. A bucket +// can have an infinite number of secondary keys. Specify the secondary key +// position as the first argument. 
+// +// A real-life example of secondary keys is the Weaviate object store. Objects +// are stored with the user-facing ID as their primary key and with the doc-id +// (an ever-increasing uint64) as the secondary key. +// +// Similar to [Bucket.Get], GetBySecondary is limited to ReplaceStrategy. No +// equivalent exists for Set and Map, as those do not support secondary +// indexes. +func (b *Bucket) GetBySecondaryIntoMemory(pos int, key []byte, buffer []byte) ([]byte, []byte, error) { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + if pos >= int(b.secondaryIndices) { + return nil, nil, fmt.Errorf("no secondary index at pos %d", pos) + } + + v, err := b.active.getBySecondary(pos, key) + if err == nil { + // item found and no error, return and stop searching, since the strategy + // is replace + return v, buffer, nil + } + if errors.Is(err, lsmkv.Deleted) { + // deleted in the mem-table (which is always the latest) means we don't + // have to check the disk segments, return nil now + return nil, buffer, nil + } + + if !errors.Is(err, lsmkv.NotFound) { + panic("unsupported error in bucket.Get") + } + + if b.flushing != nil { + v, err := b.flushing.getBySecondary(pos, key) + if err == nil { + // item found and no error, return and stop searching, since the strategy + // is replace + return v, buffer, nil + } + if errors.Is(err, lsmkv.Deleted) { + // deleted in the now most recent memtable means we don't have to check + // the disk segments, return nil now + return nil, buffer, nil + } + + if !errors.Is(err, lsmkv.NotFound) { + panic("unsupported error in bucket.Get") + } + } + + k, v, buffer, err := b.disk.getBySecondaryIntoMemory(pos, key, buffer) + if err != nil { + return nil, nil, err + } + + // additional validation to ensure the primary key has not been marked as deleted + pkv, err := b.get(k) + if err != nil { + return nil, nil, err + } else if pkv == nil { + return nil, buffer, nil + } + + return v, buffer, nil +} + +// SetList returns all Set entries 
for a given key. +// +// SetList is specific to the Set Strategy, for Map use [Bucket.MapList], and +// for Replace use [Bucket.Get]. +func (b *Bucket) SetList(key []byte) ([][]byte, error) { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + var out []value + + v, err := b.disk.getCollection(key) + if err != nil && !errors.Is(err, lsmkv.NotFound) { + return nil, err + } + out = v + + if b.flushing != nil { + v, err = b.flushing.getCollection(key) + if err != nil && !errors.Is(err, lsmkv.NotFound) { + return nil, err + } + out = append(out, v...) + + } + + v, err = b.active.getCollection(key) + if err != nil && !errors.Is(err, lsmkv.NotFound) { + return nil, err + } + if len(v) > 0 { + // skip the expensive append operation if there was no memtable + out = append(out, v...) + } + + return newSetDecoder().Do(out), nil +} + +// Put creates or replaces a single value for a given key. +// +// err := bucket.Put([]byte("my_key"), []byte("my_value")) +// if err != nil { +// /* do something */ +// } +// +// If a bucket has a secondary index configured, you can also specify one or +// more secondary keys, like so: +// +// err := bucket.Put([]byte("my_key"), []byte("my_value"), +// WithSecondaryKey(0, []byte("my_alternative_key")), +// ) +// if err != nil { +// /* do something */ +// } +// +// Put is limited to ReplaceStrategy, use [Bucket.SetAdd] for Set or +// [Bucket.MapSet] and [Bucket.MapSetMulti]. +func (b *Bucket) Put(key, value []byte, opts ...SecondaryKeyOption) error { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + return b.active.put(key, value, opts...) +} + +// SetAdd adds one or more Set-Entries to a Set for the given key. SetAdd is +// entirely agnostic of existing entries, it acts as append-only. This also +// makes it agnostic of whether the key already exists or not. 
+// +// Example to add two entries to a set: +// +// err := bucket.SetAdd([]byte("my_key"), [][]byte{ +// []byte("one-set-element"), []byte("another-set-element"), +// }) +// if err != nil { +// /* do something */ +// } +// +// SetAdd is specific to the Set strategy. For Replace, use [Bucket.Put], for +// Map use either [Bucket.MapSet] or [Bucket.MapSetMulti]. +func (b *Bucket) SetAdd(key []byte, values [][]byte) error { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + return b.active.append(key, newSetEncoder().Do(values)) +} + +// SetDeleteSingle removes one Set element from the given key. Note that LSM +// stores are append only, thus internally this action appends a tombstone. The +// entry will not be removed until a compaction has run, and even then a +// compaction does not guarantee the removal of the data right away. This is +// because an entry could have been created in an older segment than those +// present in the compaction. This can be seen as an implementation detail, +// unless the caller expects to free disk space by calling this method. Such +// freeing is not guaranteed. +// +// SetDeleteSingle is specific to the Set Strategy. For Replace, you can use +// [Bucket.Delete] to delete the entire row, for Maps use [Bucket.MapDeleteKey] +// to delete a single map entry. 
+func (b *Bucket) SetDeleteSingle(key []byte, valueToDelete []byte) error { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + return b.active.append(key, []value{ + { + value: valueToDelete, + tombstone: true, + }, + }) +} + +// WasDeleted determines if an object used to exist in the LSM store +// +// There are 3 different locations that we need to check for the key +// in this order: active memtable, flushing memtable, and disk +// segment +func (b *Bucket) WasDeleted(key []byte) (bool, time.Time, error) { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + if !b.keepTombstones { + return false, time.Time{}, fmt.Errorf("Bucket requires option `keepTombstones` set to check deleted keys") + } + + _, err := b.active.get(key) + if err == nil { + return false, time.Time{}, nil + } + if errors.Is(err, lsmkv.Deleted) { + var errDeleted lsmkv.ErrDeleted + if errors.As(err, &errDeleted) { + return true, errDeleted.DeletionTime(), nil + } else { + return true, time.Time{}, nil + } + } + if !errors.Is(err, lsmkv.NotFound) { + return false, time.Time{}, fmt.Errorf("unsupported bucket error: %w", err) + } + + // can still check flushing and disk + + if b.flushing != nil { + _, err := b.flushing.get(key) + if err == nil { + return false, time.Time{}, nil + } + if errors.Is(err, lsmkv.Deleted) { + var errDeleted lsmkv.ErrDeleted + if errors.As(err, &errDeleted) { + return true, errDeleted.DeletionTime(), nil + } else { + return true, time.Time{}, nil + } + } + if !errors.Is(err, lsmkv.NotFound) { + return false, time.Time{}, fmt.Errorf("unsupported bucket error: %w", err) + } + + // can still check disk + } + + _, err = b.disk.getErrDeleted(key) + if err == nil { + return false, time.Time{}, nil + } + if errors.Is(err, lsmkv.Deleted) { + var errDeleted lsmkv.ErrDeleted + if errors.As(err, &errDeleted) { + return true, errDeleted.DeletionTime(), nil + } else { + return true, time.Time{}, nil + } + } + if !errors.Is(err, lsmkv.NotFound) { + return false, time.Time{}, 
fmt.Errorf("unsupported bucket error: %w", err) + } + + return false, time.Time{}, nil +} + +type MapListOptionConfig struct { + acceptDuplicates bool + legacyRequireManualSorting bool +} + +type MapListOption func(c *MapListOptionConfig) + +func MapListAcceptDuplicates() MapListOption { + return func(c *MapListOptionConfig) { + c.acceptDuplicates = true + } +} + +func MapListLegacySortingRequired() MapListOption { + return func(c *MapListOptionConfig) { + c.legacyRequireManualSorting = true + } +} + +// MapList returns all map entries for a given row key. The order of map pairs +// has no specific meaning. For efficient merge operations, pair entries are +// stored sorted on disk, however that is an implementation detail and not a +// caller-facing guarantee. +// +// MapList is specific to the Map strategy, for Sets use [Bucket.SetList], for +// Replace use [Bucket.Get]. +func (b *Bucket) MapList(ctx context.Context, key []byte, cfgs ...MapListOption) ([]MapPair, error) { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + c := MapListOptionConfig{} + for _, cfg := range cfgs { + cfg(&c) + } + + segments := [][]MapPair{} + // before := time.Now() + disk, segmentsDisk, release, err := b.disk.getCollectionAndSegments(key) + if err != nil && !errors.Is(err, lsmkv.NotFound) { + return nil, err + } + + defer release() + + allTombstones, err := b.loadAllTombstones(segmentsDisk) + if err != nil { + return nil, err + } + + for i := range disk { + if ctx.Err() != nil { + return nil, ctx.Err() + } + + propLengths := make(map[uint64]uint32) + if segmentsDisk[i].getStrategy() == segmentindex.StrategyInverted { + sgm := segmentsDisk[i].getSegment() + propLengths, err = sgm.GetPropertyLengths() + if err != nil { + return nil, err + } + } + + segmentDecoded := make([]MapPair, len(disk[i])) + for j, v := range disk[i] { + // Inverted segments have a slightly different internal format + // and separate property lengths that need to be read. 
+ if segmentsDisk[i].getStrategy() == segmentindex.StrategyInverted { + if err := segmentDecoded[j].FromBytesInverted(v.value, false); err != nil { + return nil, err + } + docId := binary.BigEndian.Uint64(segmentDecoded[j].Key[:8]) + // check if there are any tombstones between the i and len(disk) segments + for _, tombstones := range allTombstones[i+1:] { + if tombstones != nil && tombstones.Contains(docId) { + segmentDecoded[j].Tombstone = true + break + } + } + // put the property length in the value from the "external" property lengths + binary.LittleEndian.PutUint32(segmentDecoded[j].Value[4:], math.Float32bits(float32(propLengths[docId]))) + + } else { + if err := segmentDecoded[j].FromBytes(v.value, false); err != nil { + return nil, err + } + // Read "broken" tombstones with length 12 but a non-tombstone value + // Related to Issue #4125 + // TODO: Remove the extra check, as it may interfere future in-disk format changes + segmentDecoded[j].Tombstone = v.tombstone || len(v.value) == 12 + } + } + if len(segmentDecoded) > 0 { + segments = append(segments, segmentDecoded) + } + } + + // fmt.Printf("--map-list: get all disk segments took %s\n", time.Since(before)) + + // before = time.Now() + // fmt.Printf("--map-list: append all disk segments took %s\n", time.Since(before)) + + if b.flushing != nil { + v, err := b.flushing.getMap(key) + if err != nil && !errors.Is(err, lsmkv.NotFound) { + return nil, err + } + if len(v) > 0 { + segments = append(segments, v) + } + } + + // before = time.Now() + v, err := b.active.getMap(key) + if err != nil && !errors.Is(err, lsmkv.NotFound) { + return nil, err + } + if len(v) > 0 { + segments = append(segments, v) + } + // fmt.Printf("--map-list: get all active segments took %s\n", time.Since(before)) + + // before = time.Now() + // defer func() { + // fmt.Printf("--map-list: run decoder took %s\n", time.Since(before)) + // }() + + if c.legacyRequireManualSorting { + // Sort to support segments which were stored in an 
unsorted fashion + for i := range segments { + sort.Slice(segments[i], func(a, b int) bool { + return bytes.Compare(segments[i][a].Key, segments[i][b].Key) == -1 + }) + } + } + + return newSortedMapMerger().do(ctx, segments) +} + +func (b *Bucket) loadAllTombstones(segmentsDisk []Segment) ([]*sroar.Bitmap, error) { + hasTombstones := false + allTombstones := make([]*sroar.Bitmap, len(segmentsDisk)+2) + for i, segment := range segmentsDisk { + if segment.getStrategy() == segmentindex.StrategyInverted { + tombstones, err := segment.ReadOnlyTombstones() + if err != nil { + return nil, err + } + allTombstones[i] = tombstones + hasTombstones = true + } + } + if hasTombstones { + + if b.flushing != nil { + tombstones, err := b.flushing.ReadOnlyTombstones() + if err != nil { + return nil, err + } + allTombstones[len(segmentsDisk)] = tombstones + } + + tombstones, err := b.active.ReadOnlyTombstones() + if err != nil { + return nil, err + } + allTombstones[len(segmentsDisk)+1] = tombstones + } + return allTombstones, nil +} + +// MapSet writes one [MapPair] into the map for the given row key. It is +// agnostic of whether the row key already exists, as well as agnostic of +// whether the map key already exists. In both cases it will create the entry +// if it does not exist or override if it does. +// +// Example to add a new MapPair: +// +// pair := MapPair{Key: []byte("Jane"), Value: []byte("Backend")} +// err := bucket.MapSet([]byte("developers"), pair) +// if err != nil { +// /* do something */ +// } +// +// MapSet is specific to the Map Strategy, for Replace use [Bucket.Put], and for Set use [Bucket.SetAdd] instead. +func (b *Bucket) MapSet(rowKey []byte, kv MapPair) error { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + return b.active.appendMapSorted(rowKey, kv) +} + +// MapSetMulti is the same as [Bucket.MapSet], except that it takes in multiple +// [MapPair] objects at the same time. 
+func (b *Bucket) MapSetMulti(rowKey []byte, kvs []MapPair) error { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + for _, kv := range kvs { + if err := b.active.appendMapSorted(rowKey, kv); err != nil { + return err + } + } + + return nil +} + +// MapDeleteKey removes one key-value pair from the given map row. Note that +// LSM stores are append only, thus internally this action appends a tombstone. +// The entry will not be removed until a compaction has run, and even then a +// compaction does not guarantee the removal of the data right away. This is +// because an entry could have been created in an older segment than those +// present in the compaction. This can be seen as an implementation detail, +// unless the caller expects to free disk space by calling this method. Such +// freeing is not guaranteed. +// +// MapDeleteKey is specific to the Map Strategy. For Replace, you can use +// [Bucket.Delete] to delete the entire row, for Sets use [Bucket.SetDeleteSingle] to delete a single set element. +func (b *Bucket) MapDeleteKey(rowKey, mapKey []byte) error { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + pair := MapPair{ + Key: mapKey, + Tombstone: true, + } + + if b.active.strategy == StrategyInverted { + docID := binary.BigEndian.Uint64(mapKey) + if err := b.active.SetTombstone(docID); err != nil { + return err + } + } + + return b.active.appendMapSorted(rowKey, pair) +} + +// Delete removes the given row. Note that LSM stores are append only, thus +// internally this action appends a tombstone. The entry will not be removed +// until a compaction has run, and even then a compaction does not guarantee +// the removal of the data right away. This is because an entry could have been +// created in an older segment than those present in the compaction. This can +// be seen as an implementation detail, unless the caller expects to free disk +// space by calling this method. Such freeing is not guaranteed. 
+// +// Delete is specific to the Replace Strategy. For Maps, you can use +// [Bucket.MapDeleteKey] to delete a single key-value pair, for Sets use +// [Bucket.SetDeleteSingle] to delete a single set element. +func (b *Bucket) Delete(key []byte, opts ...SecondaryKeyOption) error { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + return b.active.setTombstone(key, opts...) +} + +func (b *Bucket) DeleteWith(key []byte, deletionTime time.Time, opts ...SecondaryKeyOption) error { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + if !b.keepTombstones { + return fmt.Errorf("bucket requires option `keepTombstones` set to delete keys at a given timestamp") + } + + return b.active.setTombstoneWith(key, deletionTime, opts...) +} + +// meant to be called from situations where a lock is already held, does not +// lock on its own +func (b *Bucket) setNewActiveMemtable() error { + path := filepath.Join(b.dir, fmt.Sprintf("segment-%d", time.Now().UnixNano())) + + cl, err := newLazyCommitLogger(path, b.strategy) + if err != nil { + return errors.Wrap(err, "init commit logger") + } + + mt, err := newMemtable(path, b.strategy, b.secondaryIndices, cl, + b.metrics, b.logger, b.enableChecksumValidation, b.bm25Config, b.writeSegmentInfoIntoFileName, b.allocChecker) + if err != nil { + return err + } + + b.active = mt + return nil +} + +func (b *Bucket) Count() int { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + if b.strategy != StrategyReplace { + panic("Count() called on strategy other than 'replace'") + } + + memtableCount := 0 + if b.flushing == nil { + // only consider active + memtableCount += b.memtableNetCount(b.active.countStats(), nil) + } else { + flushingCountStats := b.flushing.countStats() + activeCountStats := b.active.countStats() + deltaActive := b.memtableNetCount(activeCountStats, flushingCountStats) + deltaFlushing := b.memtableNetCount(flushingCountStats, nil) + + memtableCount = deltaActive + deltaFlushing + } + + diskCount := b.disk.count() + 
+ if b.monitorCount { + b.metrics.ObjectCount(memtableCount + diskCount) + } + return memtableCount + diskCount +} + +// CountAsync ignores the current memtable, that makes it async because it only +// reflects what has been already flushed. This in turn makes it very cheap to +// call, so it can be used for observability purposes where eventual +// consistency on the count is fine, but a large cost is not. +func (b *Bucket) CountAsync() int { + return b.disk.count() +} + +func (b *Bucket) memtableNetCount(stats *countStats, previousMemtable *countStats) int { + netCount := 0 + + // TODO: this uses regular get, given that this may be called quite commonly, + // we might consider building a pure Exists(), which skips reading the value + // and only checks for tombstones, etc. + for _, key := range stats.upsertKeys { + if !b.existsOnDiskAndPreviousMemtable(previousMemtable, key) { + netCount++ + } + } + + for _, key := range stats.tombstonedKeys { + if b.existsOnDiskAndPreviousMemtable(previousMemtable, key) { + netCount-- + } + } + + return netCount +} + +func (b *Bucket) existsOnDiskAndPreviousMemtable(previous *countStats, key []byte) bool { + v, _ := b.disk.get(key) // current implementation can't error + if v == nil { + // not on disk, but it could still be in the previous memtable + return previous.hasUpsert(key) + } + + // it exists on disk ,but it could still have been deleted in the previous memtable + return !previous.hasTombstone(key) +} + +func (b *Bucket) Shutdown(ctx context.Context) error { + defer GlobalBucketRegistry.Remove(b.GetDir()) + + if err := b.disk.shutdown(ctx); err != nil { + return err + } + + if err := b.flushCallbackCtrl.Unregister(ctx); err != nil { + return fmt.Errorf("long-running flush in progress: %w", ctx.Err()) + } + + b.flushLock.Lock() + if b.active.strategy == StrategyInverted { + b.active.averagePropLength, b.active.propLengthCount = b.disk.GetAveragePropertyLength() + } + if b.shouldReuseWAL() { + if err := 
b.active.flushWAL(); err != nil { + b.flushLock.Unlock() + return err + } + } else { + if _, err := b.active.flush(); err != nil { + b.flushLock.Unlock() + return err + } + } + b.flushLock.Unlock() + + if b.flushing == nil { + // active has flushing, no one else was currently flushing, it's safe to + // exit + return nil + } + + // it seems we still need to wait for someone to finish flushing + t := time.NewTicker(50 * time.Millisecond) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.C: + if b.flushing == nil { + return nil + } + } + } +} + +func (b *Bucket) shouldReuseWAL() bool { + return uint64(b.active.commitlog.size()) <= uint64(b.minWalThreshold) +} + +// flushAndSwitchIfThresholdsMet is part of flush callbacks of the bucket. +func (b *Bucket) flushAndSwitchIfThresholdsMet(shouldAbort cyclemanager.ShouldAbortCallback) bool { + b.flushLock.RLock() + commitLogSize := b.active.commitlog.size() + memtableTooLarge := b.active.Size() >= b.memtableThreshold + walTooLarge := uint64(commitLogSize) >= b.walThreshold + dirtyTooLong := b.active.DirtyDuration() >= b.flushDirtyAfter + shouldSwitch := memtableTooLarge || walTooLarge || dirtyTooLong + + // If true, the parent shard has indicated that it has + // entered an immutable state. During this time, the + // bucket should refrain from flushing until its shard + // indicates otherwise + if shouldSwitch && b.isReadOnly() { + if b.haltedFlushTimer.IntervalElapsed() { + b.logger.WithField("action", "lsm_memtable_flush"). + WithField("path", b.dir). 
+ Warn("flush halted due to shard READONLY status") + b.haltedFlushTimer.IncreaseInterval() + } + + b.flushLock.RUnlock() + return false + } + + if b.shouldReuseWAL() { + defer b.flushLock.RUnlock() + return b.getAndUpdateWritesSinceLastSync() + } + + b.flushLock.RUnlock() + if shouldSwitch { + b.haltedFlushTimer.Reset() + cycleLength := b.active.ActiveDuration() + if err := b.FlushAndSwitch(); err != nil { + b.logger.WithField("action", "lsm_memtable_flush"). + WithField("path", b.GetDir()). + WithError(err). + Errorf("flush and switch failed") + return false + } + + if b.memtableResizer != nil { + next, ok := b.memtableResizer.NextTarget(int(b.memtableThreshold), cycleLength) + if ok { + b.memtableThreshold = uint64(next) + } + } + return true + } + return false +} + +func (b *Bucket) getAndUpdateWritesSinceLastSync() bool { + b.active.Lock() + defer b.active.Unlock() + + hasWrites := b.active.writesSinceLastSync + if !hasWrites { + // had no work this iteration, cycle manager can back off + return false + } + + err := b.active.commitlog.flushBuffers() + if err != nil { + b.logger.WithField("action", "lsm_memtable_flush"). + WithField("path", b.dir). + WithError(err). + Errorf("flush and switch failed") + + return false + } + + err = b.active.commitlog.sync() + if err != nil { + b.logger.WithField("action", "lsm_memtable_flush"). + WithField("path", b.dir). + WithError(err). + Errorf("flush and switch failed") + + return false + } + b.active.writesSinceLastSync = false + // there was work in this iteration, cycle manager should not back off and revisit soon + return true +} + +// UpdateStatus is used by the parent shard to communicate to the bucket +// when the shard has been set to readonly, or when it is ready for +// writes. 
+func (b *Bucket) UpdateStatus(status storagestate.Status) { + b.statusLock.Lock() + defer b.statusLock.Unlock() + + b.status = status + b.disk.UpdateStatus(status) +} + +func (b *Bucket) isReadOnly() bool { + b.statusLock.Lock() + defer b.statusLock.Unlock() + + return b.status == storagestate.StatusReadOnly +} + +// FlushAndSwitch is the main way to flush a memtable, replace it with a new +// one, and make sure that the flushed segment gets added to the segment group. +// +// Flushing and adding a segment can take considerable time, which is why the +// whole process is designed to be non-blocking. +// +// To achieve a non-blocking flush, the process is split into four parts: +// +// 1. atomicallySwitchMemtable: A new memtable is created, the previous +// memtable is moved from "active" to "flushing". This switch is blocking +// (holds b.flushLock.Lock()), but extremely fast, as we essentially just +// switch a pointer. +// +// 2. flush: The previous memtable is flushed to disk. This may take +// considerable time as we are I/O-bound. This is done "in the +// background"meaning that it does not block any CRUD operations for the +// user. It only blocks the flush process itself, meaning only one flush per +// bucket can happen simultaneously. This is by design. +// +// 3. initAndPrecomputeNewSegment: (Newly added in +// https://github.com/weaviate/weaviate/pull/5943, early October 2024). After +// the previous flush step the segment can now be initialized. However, to +// make it usable for real life, we still need to compute metadata, such as +// bloom filters (all types) and net count additions (only Replace type). +// Bloom filters can be calculated in isolation and are therefore fairly +// trivial. Net count additions on the other hand are more complex, as they +// depend on all previous segments. Calculating net count additions can take +// a considerable amount of time, especially as the buckets grow larger. 
As a +// result, we need to provide two guarantees: (1) the calculation is +// non-blocking from a user's POV and (2) for the duration of the +// calculation, the segment group is considered stable, i.e. no other +// segments are added, removed, or merged. We can achieve this by holding a +// `b.disk.maintenanceLock.RLock()` which prevents modification of the +// segments array, but does not block user operation (which are themselves +// RLock-holders on that same Lock). +// +// 4. atomicallyAddDiskSegmentAndRemoveFlushing: The previous method returned +// a fully initialized segment that has not yet been added to the segment +// group. This last step is the counter part to the first step and again +// blocking, but fast. It adds the segment to the segment group  which at +// this point is just a simple array append. At the same time it removes the +// "flushing" memtable. It holds the `b.flushLock.Lock()` making this +// operation atomic, but blocking. +// +// FlushAndSwitch is typically called periodically and does not require manual +// calling, but there are some situations where this might be intended, such as +// in test scenarios or when a force flush is desired. +func (b *Bucket) FlushAndSwitch() error { + before := time.Now() + var err error + + bucketPath := b.GetDir() + + b.logger.WithField("action", "lsm_memtable_flush_start"). + WithField("path", bucketPath). + Trace("start flush and switch") + + switched, err := b.atomicallySwitchMemtable() + if err != nil { + b.logger.WithField("action", "lsm_memtable_flush_start"). + WithField("path", bucketPath). + Error(err) + return fmt.Errorf("flush and switch: %w", err) + } + if !switched { + b.logger.WithField("action", "lsm_memtable_flush_start"). + WithField("path", bucketPath). 
+ Trace("flush and switch not needed") + return nil + } + + if b.flushing.strategy == StrategyInverted { + b.flushing.averagePropLength, b.flushing.propLengthCount = b.disk.GetAveragePropertyLength() + } + segmentPath, err := b.flushing.flush() + if err != nil { + return fmt.Errorf("flush: %w", err) + } + + var tombstones *sroar.Bitmap + if b.strategy == StrategyInverted { + if tombstones, err = b.flushing.ReadOnlyTombstones(); err != nil { + return fmt.Errorf("get tombstones: %w", err) + } + } + + segment, err := b.initAndPrecomputeNewSegment(segmentPath) + if err != nil { + return fmt.Errorf("precompute metadata: %w", err) + } + + flushing := b.flushing + if err := b.atomicallyAddDiskSegmentAndRemoveFlushing(segment); err != nil { + return fmt.Errorf("add segment and remove flushing: %w", err) + } + + switch b.strategy { + case StrategyInverted: + if !tombstones.IsEmpty() { + if err = func() error { + b.disk.maintenanceLock.RLock() + defer b.disk.maintenanceLock.RUnlock() + // add flushing memtable tombstones to all segments + for _, seg := range b.disk.segments { + if _, err := seg.MergeTombstones(tombstones); err != nil { + return fmt.Errorf("merge tombstones: %w", err) + } + } + return nil + }(); err != nil { + return fmt.Errorf("add tombstones: %w", err) + } + } + + case StrategyRoaringSetRange: + if b.keepSegmentsInMemory { + if err := b.disk.roaringSetRangeSegmentInMemory.MergeMemtable(flushing.roaringSetRange); err != nil { + return fmt.Errorf("merge roaringsetrange memtable to segment-in-memory: %w", err) + } + } + } + + took := time.Since(before) + b.logger.WithField("action", "lsm_memtable_flush_complete"). + WithField("path", bucketPath). + Trace("finish flush and switch") + + b.logger.WithField("action", "lsm_memtable_flush_complete"). + WithField("path", bucketPath). + WithField("took", took). 
+ Debugf("flush and switch took %s\n", took) + + return nil +} + +func (b *Bucket) atomicallySwitchMemtable() (bool, error) { + b.flushLock.Lock() + defer b.flushLock.Unlock() + + if b.active.size == 0 { + return false, nil + } + + flushing := b.active + + err := b.setNewActiveMemtable() + if err != nil { + return false, fmt.Errorf("switch active memtable: %w", err) + } + b.flushing = flushing + + return true, nil +} + +func (b *Bucket) initAndPrecomputeNewSegment(segmentPath string) (*segment, error) { + // Note that this operation does not require the flush lock, i.e. it can + // happen in the background and we can accept new writes will this + // pre-compute is happening. + segment, err := b.disk.initAndPrecomputeNewSegment(segmentPath) + if err != nil { + return nil, err + } + + return segment, nil +} + +func (b *Bucket) atomicallyAddDiskSegmentAndRemoveFlushing(seg *segment) error { + b.flushLock.Lock() + defer b.flushLock.Unlock() + + if b.flushing.Size() == 0 { + b.flushing = nil + return nil + } + + if err := b.disk.addInitializedSegment(seg); err != nil { + return err + } + b.flushing = nil + + if b.strategy == StrategyReplace && b.monitorCount { + // having just flushed the memtable we now have the most up2date count which + // is a good place to update the metric + b.metrics.ObjectCount(b.disk.count()) + } + + return nil +} + +func (b *Bucket) Strategy() string { + return b.strategy +} + +func (b *Bucket) DesiredStrategy() string { + return b.desiredStrategy +} + +// the WAL uses a buffer and isn't written until the buffer size is crossed or +// this function explicitly called. This allows to avoid unnecessary disk +// writes in larger operations, such as batches. It is sufficient to call write +// on the WAL just once. This does not make a batch atomic, but it guarantees +// that the WAL is written before a successful response is returned to the +// user. 
+func (b *Bucket) WriteWAL() error { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + return b.active.writeWAL() +} + +func (b *Bucket) DocPointerWithScoreList(ctx context.Context, key []byte, propBoost float32, cfgs ...MapListOption) ([]terms.DocPointerWithScore, error) { + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + c := MapListOptionConfig{} + for _, cfg := range cfgs { + cfg(&c) + } + + segments := [][]terms.DocPointerWithScore{} + disk, segmentsDisk, release, err := b.disk.getCollectionAndSegments(key) + if err != nil && !errors.Is(err, lsmkv.NotFound) { + return nil, err + } + + defer release() + + allTombstones, err := b.loadAllTombstones(segmentsDisk) + if err != nil { + return nil, err + } + + for i := range disk { + if ctx.Err() != nil { + return nil, ctx.Err() + } + propLengths := make(map[uint64]uint32) + if segmentsDisk[i].getStrategy() == segmentindex.StrategyInverted { + sgm := segmentsDisk[i].getSegment() + + propLengths, err = sgm.GetPropertyLengths() + if err != nil { + return nil, err + } + } + + segmentDecoded := make([]terms.DocPointerWithScore, len(disk[i])) + for j, v := range disk[i] { + if segmentsDisk[i].getStrategy() == segmentindex.StrategyInverted { + docId := binary.BigEndian.Uint64(v.value[:8]) + propLen := propLengths[docId] + if err := segmentDecoded[j].FromBytesInverted(v.value, propBoost, float32(propLen)); err != nil { + return nil, err + } + // check if there are any tombstones between the i and len(disk) segments + for _, tombstones := range allTombstones[i+1:] { + if tombstones != nil && tombstones.Contains(docId) { + segmentDecoded[j].Frequency = 0 + break + } + } + } else { + if err := segmentDecoded[j].FromBytes(v.value, v.tombstone, propBoost); err != nil { + return nil, err + } + } + } + segments = append(segments, segmentDecoded) + } + + if b.flushing != nil { + mem, err := b.flushing.getMap(key) + if err != nil && !errors.Is(err, lsmkv.NotFound) { + return nil, err + } + docPointers := 
make([]terms.DocPointerWithScore, len(mem)) + for i, v := range mem { + if err := docPointers[i].FromKeyVal(v.Key, v.Value, v.Tombstone, propBoost); err != nil { + return nil, err + } + } + segments = append(segments, docPointers) + } + + mem, err := b.active.getMap(key) + if err != nil && !errors.Is(err, lsmkv.NotFound) { + return nil, err + } + docPointers := make([]terms.DocPointerWithScore, len(mem)) + for i, v := range mem { + if err := docPointers[i].FromKeyVal(v.Key, v.Value, v.Tombstone, propBoost); err != nil { + return nil, err + } + } + segments = append(segments, docPointers) + + if c.legacyRequireManualSorting { + // Sort to support segments which were stored in an unsorted fashion + for i := range segments { + sort.Slice(segments[i], func(a, b int) bool { + return segments[i][a].Id < segments[i][b].Id + }) + } + } + + return terms.NewSortedDocPointerWithScoreMerger().Do(ctx, segments) +} + +func (b *Bucket) CreateDiskTerm(N float64, filterDocIds helpers.AllowList, query []string, propName string, propertyBoost float32, duplicateTextBoosts []int, config schema.BM25Config, ctx context.Context) ([][]*SegmentBlockMax, map[string]uint64, func(), error) { + release := func() {} + + defer func() { + if !entcfg.Enabled(os.Getenv("DISABLE_RECOVERY_ON_PANIC")) { + if r := recover(); r != nil { + b.logger.Errorf("Recovered from panic in CreateDiskTerm: %v", r) + debug.PrintStack() + release() + } + } + }() + + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + averagePropLength, err := b.GetAveragePropertyLength() + if err != nil { + release() + return nil, nil, func() {}, err + } + + // The lock is necessary, as data is being read from the disk during blockmax wand search. + // BlockMax is ran outside this function, so, the lock is returned to the caller. + // Panics at this level are caught and the lock is released in the defer function. + // The lock is released after the blockmax search is done, and panics are also handled. 
+ segmentsDisk, release := b.disk.getAndLockSegments() + + output := make([][]*SegmentBlockMax, len(segmentsDisk)+2) + idfs := make([]float64, len(query)) + idfCounts := make(map[string]uint64, len(query)) + // flusing memtable + output[len(segmentsDisk)] = make([]*SegmentBlockMax, 0, len(query)) + + // active memtable + output[len(segmentsDisk)+1] = make([]*SegmentBlockMax, 0, len(query)) + + memTombstones := sroar.NewBitmap() + + for i, queryTerm := range query { + key := []byte(queryTerm) + n := uint64(0) + + active := NewSegmentBlockMaxDecoded(key, i, propertyBoost, filterDocIds, averagePropLength, config) + flushing := NewSegmentBlockMaxDecoded(key, i, propertyBoost, filterDocIds, averagePropLength, config) + + var activeTombstones *sroar.Bitmap + if b.active != nil { + memtable := b.active + n2, _ := fillTerm(memtable, key, active, filterDocIds) + if active.Count() > 0 { + output[len(segmentsDisk)+1] = append(output[len(segmentsDisk)+1], active) + } + n += n2 + + var err error + activeTombstones, err = b.active.ReadOnlyTombstones() + if err != nil { + release() + return nil, nil, func() {}, err + } + memTombstones.Or(activeTombstones) + + if !active.Exhausted() { + active.advanceOnTombstoneOrFilter() + } + } + + if b.flushing != nil { + memtable := b.flushing + n2, _ := fillTerm(memtable, key, flushing, filterDocIds) + if flushing.Count() > 0 { + output[len(segmentsDisk)] = append(output[len(segmentsDisk)], flushing) + } + n += n2 + + tombstones, err := b.flushing.ReadOnlyTombstones() + if err != nil { + release() + return nil, nil, func() {}, err + } + memTombstones.Or(tombstones) + + if !flushing.Exhausted() { + flushing.tombstones = activeTombstones + flushing.advanceOnTombstoneOrFilter() + } + + } + + for _, segment := range segmentsDisk { + sgm := segment.getSegment() + if segment.getStrategy() == segmentindex.StrategyInverted && sgm.hasKey(key) { + n += sgm.getDocCount(key) + } + } + + // we can only know the full n after we have checked all segments 
and all memtables + idfs[i] = math.Log(float64(1)+(N-float64(n)+0.5)/(float64(n)+0.5)) * float64(duplicateTextBoosts[i]) + + active.idf = idfs[i] + active.currentBlockImpact = float32(idfs[i]) + + flushing.idf = idfs[i] + flushing.currentBlockImpact = float32(idfs[i]) + + idfCounts[queryTerm] = n + } + + for j := len(segmentsDisk) - 1; j >= 0; j-- { + segment := segmentsDisk[j] + output[j] = make([]*SegmentBlockMax, 0, len(query)) + + allTombstones := memTombstones.Clone() + if j != len(segmentsDisk)-1 { + segTombstones, err := segmentsDisk[j+1].ReadOnlyTombstones() + if err != nil { + release() + return nil, nil, func() {}, err + } + allTombstones.Or(segTombstones) + } + + for i, key := range query { + term := NewSegmentBlockMax(segment.getSegment(), []byte(key), i, idfs[i], propertyBoost, allTombstones, filterDocIds, averagePropLength, config) + if term != nil { + output[j] = append(output[j], term) + } + } + } + return output, idfCounts, release, nil +} + +func fillTerm(memtable *Memtable, key []byte, blockmax *SegmentBlockMax, filterDocIds helpers.AllowList) (uint64, error) { + mapPairs, err := memtable.getMap(key) + if err != nil && !errors.Is(err, lsmkv.NotFound) { + return 0, err + } + if errors.Is(err, lsmkv.NotFound) { + return 0, nil + } + n, err := addDataToTerm(mapPairs, filterDocIds, blockmax) + if err != nil { + return 0, err + } + return n, nil +} + +func addDataToTerm(mem []MapPair, filterDocIds helpers.AllowList, term *SegmentBlockMax) (uint64, error) { + n := uint64(0) + term.blockDataDecoded = &terms.BlockDataDecoded{ + DocIds: make([]uint64, 0, len(mem)), + Tfs: make([]uint64, 0, len(mem)), + } + term.propLengths = make(map[uint64]uint32) + + for _, v := range mem { + if v.Tombstone { + continue + } + n++ + if len(v.Value) < 8 { + // b.logger.Warnf("Skipping pair in BM25: MapPair.Value should be 8 bytes long, but is %d.", len(v.Value)) + continue + } + d := terms.DocPointerWithScore{} + if err := d.FromKeyVal(v.Key, v.Value, false, 1.0); err != 
nil { + return 0, err + } + if filterDocIds != nil && !filterDocIds.Contains(d.Id) { + continue + } + + term.blockDataDecoded.DocIds = append(term.blockDataDecoded.DocIds, d.Id) + term.blockDataDecoded.Tfs = append(term.blockDataDecoded.Tfs, uint64(d.Frequency)) + term.propLengths[d.Id] = uint32(d.PropLength) + + } + if len(term.blockDataDecoded.DocIds) == 0 { + return n, nil + } + term.exhausted = false + term.blockEntries = make([]*terms.BlockEntry, 1) + term.blockEntries[0] = &terms.BlockEntry{ + MaxId: term.blockDataDecoded.DocIds[len(term.blockDataDecoded.DocIds)-1], + Offset: 0, + } + + term.currentBlockMaxId = term.blockDataDecoded.DocIds[len(term.blockDataDecoded.DocIds)-1] + term.docCount = uint64(len(term.blockDataDecoded.DocIds)) + term.blockDataSize = len(term.blockDataDecoded.DocIds) + term.idPointer = term.blockDataDecoded.DocIds[0] + return n, nil +} + +func (b *Bucket) GetAveragePropertyLength() (float64, error) { + if b.strategy != StrategyInverted { + return 0, fmt.Errorf("active memtable is not inverted") + } + + var err error + propLengthCount := uint64(0) + propLengthSum := uint64(0) + if b.flushing != nil { + propLengthSum, propLengthCount, err = b.flushing.GetPropLengths() + if err != nil { + return 0, err + } + } + // if the active memtable is inverted, we need to get the average property + if b.active != nil { + propLengthSum2, propLengthCount2, err := b.active.GetPropLengths() + if err != nil { + return 0, err + } + propLengthCount += propLengthCount2 + propLengthSum += propLengthSum2 + } + + // weighted average of m.averagePropLength and the average of the current flush + // averaged by propLengthCount and m.propLengthCount + segmentAveragePropLength, segmentPropCount := b.disk.GetAveragePropertyLength() + + if segmentPropCount != 0 { + propLengthSum += uint64(segmentAveragePropLength * float64(segmentPropCount)) + propLengthCount += segmentPropCount + } + if propLengthCount == 0 { + return 0, nil + } + return float64(propLengthSum) / 
float64(propLengthCount), nil +} + +// DiskSize returns the total size from the disk segment group (cold path) +func (b *Bucket) DiskSize() int64 { + if b.disk != nil { + return b.disk.Size() + } + return 0 +} + +// MetadataSize returns the total size of metadata files (.bloom and .cna) from segments in memory +func (b *Bucket) MetadataSize() int64 { + if b.disk == nil { + return 0 + } + return b.disk.MetadataSize() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_backup.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_backup.go new file mode 100644 index 0000000000000000000000000000000000000000..c98dfe0c3c57d6e3e6b59c55565dfab4f874708b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_backup.go @@ -0,0 +1,70 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "os" + "path" + "path/filepath" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/storagestate" +) + +// FlushMemtable flushes any active memtable and returns only once the memtable +// has been fully flushed and a stable state on disk has been reached. +// +// This is a preparatory stage for creating backups. +// +// Method should be run only if flushCycle is not running +// (was not started, is stopped, or noop impl is provided) +func (b *Bucket) FlushMemtable() error { + if b.isReadOnly() { + return errors.Wrap(storagestate.ErrStatusReadOnly, "flush memtable") + } + + return b.FlushAndSwitch() +} + +// ListFiles lists all files that currently exist in the Bucket. The files are only +// in a stable state if the memtable is empty, and if compactions are paused. 
If one +// of those conditions is not given, it errors +func (b *Bucket) ListFiles(ctx context.Context, basePath string) ([]string, error) { + bucketRoot := b.disk.dir + + entries, err := os.ReadDir(bucketRoot) + if err != nil { + return nil, errors.Errorf("failed to list files for bucket: %s", err) + } + + var files []string + for _, entry := range entries { + // Skip directories as they are used as scratch spaces (e.g. for compaction or flushing). + // All stable files are in the root of the bucket. + if entry.IsDir() { + continue + } + + ext := filepath.Ext(entry.Name()) + + // ignore .wal files because they are not immutable, + // ignore .tmp files because they are temporary files created during compaction or flushing + // and are not part of the stable state of the bucket + if ext == ".wal" || ext == ".tmp" { + continue + } + + files = append(files, path.Join(basePath, entry.Name())) + } + return files, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_backup_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_backup_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ae1f81f529005155be132d278a655e4b1e31e675 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_backup_test.go @@ -0,0 +1,106 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + "testing" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/storagestate" +) + +func Test_BucketBackup(t *testing.T) { + ctx := context.Background() + tests := bucketTests{ + { + name: "bucketBackup_FlushMemtable", + f: bucketBackup_FlushMemtable, + opts: []BucketOption{WithStrategy(StrategyReplace)}, + }, + { + name: "bucketBackup_ListFiles", + f: bucketBackup_ListFiles, + opts: []BucketOption{WithStrategy(StrategyReplace), WithCalcCountNetAdditions(true)}, + }, + } + tests.run(ctx, t) +} + +func bucketBackup_FlushMemtable(ctx context.Context, t *testing.T, opts []BucketOption) { + t.Run("assert that readonly bucket fails to flush", func(t *testing.T) { + dirName := t.TempDir() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, logrus.New(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + b.UpdateStatus(storagestate.StatusReadOnly) + + err = b.FlushMemtable() + require.NotNil(t, err) + expectedErr := errors.Wrap(storagestate.ErrStatusReadOnly, "flush memtable") + assert.EqualError(t, expectedErr, err.Error()) + + err = b.Shutdown(context.Background()) + require.Nil(t, err) + }) +} + +func bucketBackup_ListFiles(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, logrus.New(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.NoError(t, err) + + for i := 0; i < 10; i++ { + err := b.Put([]byte(fmt.Sprint(i)), []byte(fmt.Sprint(i))) + require.NoError(t, err) + } + + // flush memtable to generate .db files + err = b.FlushMemtable() + require.NoError(t, err) + + // create an arbitrary directory and file that is a leftover of some old process + leftoverDir := path.Join(dirName, "scratch_leftover") + require.NoError(t, os.MkdirAll(leftoverDir, 0o755)) + require.NoError(t, os.WriteFile(path.Join(leftoverDir, "partial_segment.db"), []byte("some data"), 0o644)) + + files, err := b.ListFiles(ctx, dirName) + assert.NoError(t, err) + assert.Len(t, files, 3) + + // make sure all these files are accessible to prove that the paths are correct + for _, file := range files { + _, err = os.Stat(file) + require.NoError(t, err) + } + + exts := make([]string, 3) + for i, file := range files { + exts[i] = filepath.Ext(file) + } + assert.Contains(t, exts, ".db") // the segment itself + assert.Contains(t, exts, ".bloom") // the segment's bloom filter + assert.Contains(t, exts, ".cna") // the segment's count net additions + + require.NoError(t, b.Shutdown(context.Background())) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_options.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_options.go new file mode 100644 index 0000000000000000000000000000000000000000..300642f66dc5076d7e809bea274175cb494a9d55 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_options.go @@ -0,0 +1,267 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "time" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +type BucketOption func(b *Bucket) error + +func WithStrategy(strategy string) BucketOption { + return func(b *Bucket) error { + if err := CheckExpectedStrategy(strategy); err != nil { + return err + } + + b.strategy = strategy + return nil + } +} + +func WithMemtableThreshold(threshold uint64) BucketOption { + return func(b *Bucket) error { + b.memtableThreshold = threshold + return nil + } +} + +func WithMinMMapSize(minMMapSize int64) BucketOption { + return func(b *Bucket) error { + b.minMMapSize = minMMapSize + return nil + } +} + +func WithMinWalThreshold(threshold int64) BucketOption { + return func(b *Bucket) error { + b.minWalThreshold = uint64(threshold) + return nil + } +} + +func WithWalThreshold(threshold uint64) BucketOption { + return func(b *Bucket) error { + b.walThreshold = threshold + return nil + } +} + +// WithLazySegmentLoading enables that segments are only initialized when they are actually used +// +// This option should be used: +// - For buckets that are NOT used in every request. For example, the object bucket is accessed for +// almost all operations anyway. +// - For implicit request only (== requests originating with auto-tenant activation). Explicit activation should +// always load all segments. 
+func WithLazySegmentLoading(lazyLoading bool) BucketOption { + return func(b *Bucket) error { + b.lazySegmentLoading = lazyLoading + return nil + } +} + +func WithDirtyThreshold(threshold time.Duration) BucketOption { + return func(b *Bucket) error { + b.flushDirtyAfter = threshold + return nil + } +} + +func WithSecondaryIndices(count uint16) BucketOption { + return func(b *Bucket) error { + b.secondaryIndices = count + return nil + } +} + +// WithWriteMetadata enables writing all metadata (primary+secondary bloom+ cna) in a single file instead of separate files +func WithWriteMetadata(writeMetadata bool) BucketOption { + return func(b *Bucket) error { + b.writeMetadata = writeMetadata + return nil + } +} + +func WithLegacyMapSorting() BucketOption { + return func(b *Bucket) error { + b.legacyMapSortingBeforeCompaction = true + return nil + } +} + +func WithPread(with bool) BucketOption { + return func(b *Bucket) error { + b.mmapContents = !with + return nil + } +} + +func WithDynamicMemtableSizing( + initialMB, maxMB, minActiveSeconds, maxActiveSeconds int, +) BucketOption { + return func(b *Bucket) error { + mb := 1024 * 1024 + cfg := memtableSizeAdvisorCfg{ + initial: initialMB * mb, + stepSize: 10 * mb, + maxSize: maxMB * mb, + minDuration: time.Duration(minActiveSeconds) * time.Second, + maxDuration: time.Duration(maxActiveSeconds) * time.Second, + } + b.memtableResizer = newMemtableSizeAdvisor(cfg) + return nil + } +} + +func WithAllocChecker(mm memwatch.AllocChecker) BucketOption { + return func(b *Bucket) error { + b.allocChecker = mm + return nil + } +} + +func WithWriteSegmentInfoIntoFileName(writeSegmentInfoIntoFileName bool) BucketOption { + return func(b *Bucket) error { + b.writeSegmentInfoIntoFileName = writeSegmentInfoIntoFileName + return nil + } +} + +type secondaryIndexKeys [][]byte + +type SecondaryKeyOption func(s secondaryIndexKeys) error + +func WithSecondaryKey(pos int, key []byte) SecondaryKeyOption { + return func(s secondaryIndexKeys) 
error { + if pos > len(s) { + return errors.Errorf("set secondary index %d on an index of length %d", + pos, len(s)) + } + + s[pos] = key + + return nil + } +} + +func WithMonitorCount() BucketOption { + return func(b *Bucket) error { + if b.strategy != StrategyReplace { + return errors.Errorf("count monitoring only supported on 'replace' buckets") + } + b.monitorCount = true + return nil + } +} + +func WithKeepTombstones(keepTombstones bool) BucketOption { + return func(b *Bucket) error { + b.keepTombstones = keepTombstones + return nil + } +} + +func WithUseBloomFilter(useBloomFilter bool) BucketOption { + return func(b *Bucket) error { + b.useBloomFilter = useBloomFilter + return nil + } +} + +func WithCalcCountNetAdditions(calcCountNetAdditions bool) BucketOption { + return func(b *Bucket) error { + b.calcCountNetAdditions = calcCountNetAdditions + return nil + } +} + +func WithMaxSegmentSize(maxSegmentSize int64) BucketOption { + return func(b *Bucket) error { + b.maxSegmentSize = maxSegmentSize + return nil + } +} + +func WithSegmentsCleanupInterval(interval time.Duration) BucketOption { + return func(b *Bucket) error { + b.segmentsCleanupInterval = interval + return nil + } +} + +func WithSegmentsChecksumValidationEnabled(enable bool) BucketOption { + return func(b *Bucket) error { + b.enableChecksumValidation = enable + return nil + } +} + +/* +Background for this option: + +We use the LSM store in two places: +Our existing key/value and inverted buckets +As part of the new brute-force based index (to be built this week). + +Brute-force index +This is a simple disk-index where we use a cursor to iterate over all objects. This is what we need the force-compaction for. The experimentation so far has shown that the cursor is much more performant on a single segment than it is on multiple segments. This is because with a single segment it’s essentially just one conitiguuous chunk of data on disk that we read through. 
But with multiple segments (and an unpredicatable order) it ends up being many tiny reads (inefficient). +Existing uses of the LSM store +For existing uses, e.g. the object store, we don’t want to force-compact. This is because they can grow massive. For example, you could have a 100GB segment, then a new write leads to a new segment that is just a few bytes. If we would force-compact those two we would write 100GB every time the user sends a few bytes to Weaviate. In this case, the existing tiered compaction strategy makes more sense. +Configurability of buckets +*/ +func WithForceCompaction(opt bool) BucketOption { + return func(b *Bucket) error { + b.forceCompaction = opt + return nil + } +} + +func WithDisableCompaction(disable bool) BucketOption { + return func(b *Bucket) error { + b.disableCompaction = disable + return nil + } +} + +func WithKeepLevelCompaction(keepLevelCompaction bool) BucketOption { + return func(b *Bucket) error { + b.keepLevelCompaction = keepLevelCompaction + return nil + } +} + +func WithKeepSegmentsInMemory(keep bool) BucketOption { + return func(b *Bucket) error { + b.keepSegmentsInMemory = keep + return nil + } +} + +func WithBitmapBufPool(bufPool roaringset.BitmapBufPool) BucketOption { + return func(b *Bucket) error { + b.bitmapBufPool = bufPool + return nil + } +} + +func WithBM25Config(bm25Config *models.BM25Config) BucketOption { + return func(b *Bucket) error { + b.bm25Config = bm25Config + return nil + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_pauses.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_pauses.go new file mode 100644 index 0000000000000000000000000000000000000000..d67acd2797f1f681d8aac11833683db91728a91c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_pauses.go @@ -0,0 +1,33 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || 
__/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +func (b *Bucket) doStartPauseTimer() { + label := b.GetDir() + if monitoring.GetMetrics().Group { + label = "n/a" + } + if metric, err := monitoring.GetMetrics().BucketPauseDurations.GetMetricWithLabelValues(label); err == nil { + b.pauseTimer = prometheus.NewTimer(metric) + } +} + +func (b *Bucket) doStopPauseTimer() { + if b.pauseTimer != nil { + b.pauseTimer.ObserveDuration() + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_recover_from_wal.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_recover_from_wal.go new file mode 100644 index 0000000000000000000000000000000000000000..4b4410cf0b1bf8ba5a7a4f02e5203e5ad0a207e4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_recover_from_wal.go @@ -0,0 +1,157 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bufio" + "context" + "io" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + + "github.com/weaviate/weaviate/entities/diskio" +) + +var logOnceWhenRecoveringFromWAL sync.Once + +func (b *Bucket) mayRecoverFromCommitLogs(ctx context.Context, sg *SegmentGroup, files map[string]int64) error { + beforeAll := time.Now() + defer b.metrics.TrackStartupBucketRecovery(beforeAll) + + recovered := false + + // the context is only ever checked once at the beginning, as there is no + // point in aborting an ongoing recovery. 
It makes more sense to let it + // complete and have the next recovery (this is called once per bucket) run + // into this error. This way in a crashloop we'd eventually recover each + // bucket until there is nothing left to recover and startup could complete + // in time + if err := ctx.Err(); err != nil { + return errors.Wrap(err, "recover commit log") + } + + var walFileNames []string + for file, size := range files { + if filepath.Ext(file) != ".wal" { + // skip, this could be disk segments, etc. + continue + } + + path := filepath.Join(b.dir, file) + + if size == 0 { + err := os.Remove(path) + if err != nil { + return errors.Wrap(err, "remove empty wal file") + } + continue + } + + walFileNames = append(walFileNames, file) + } + + if len(walFileNames) > 0 { + logOnceWhenRecoveringFromWAL.Do(func() { + b.logger.WithField("action", "lsm_recover_from_active_wal"). + WithField("path", b.dir). + Debug("active write-ahead-log found") + }) + } + + // recover from each log + for i, fname := range walFileNames { + walForActiveMemtable := i == len(walFileNames)-1 + + path := filepath.Join(b.dir, strings.TrimSuffix(fname, ".wal")) + + cl, err := newCommitLogger(path, b.strategy, files[fname]) + if err != nil { + return errors.Wrap(err, "init commit logger") + } + if !walForActiveMemtable { + defer cl.close() + } + + cl.pause() + defer cl.unpause() + + mt, err := newMemtable(path, b.strategy, b.secondaryIndices, + cl, b.metrics, b.logger, b.enableChecksumValidation, b.bm25Config, b.writeSegmentInfoIntoFileName, b.allocChecker) + if err != nil { + return err + } + + _, err = cl.file.Seek(0, io.SeekStart) + if err != nil { + return err + } + + meteredReader := diskio.NewMeteredReader(cl.file, b.metrics.TrackStartupReadWALDiskIO) + if err := newCommitLoggerParser(b.strategy, bufio.NewReaderSize(meteredReader, 32*1024), mt).Do(); err != nil { + b.logger.WithField("action", "lsm_recover_from_active_wal_corruption"). + WithField("path", filepath.Join(b.dir, fname)). 
+ Error(errors.Wrap(err, "write-ahead-log ended abruptly, some elements may not have been recovered")) + } + + if mt.strategy == StrategyInverted { + mt.averagePropLength, _ = sg.GetAveragePropertyLength() + } + if walForActiveMemtable { + _, err = cl.file.Seek(0, io.SeekEnd) + if err != nil { + return err + } + b.active = mt + } else { + segmentPath, err := mt.flush() + if err != nil { + return errors.Wrap(err, "flush memtable after WAL recovery") + } + + if mt.Size() == 0 { + continue + } + + if err := sg.add(segmentPath); err != nil { + return err + } + } + + if b.strategy == StrategyReplace && b.monitorCount { + // having just flushed the memtable we now have the most up2date count which + // is a good place to update the metric + b.metrics.ObjectCount(sg.count()) + } + + b.logger.WithField("action", "lsm_recover_from_active_wal_success"). + WithField("path", filepath.Join(b.dir, fname)). + Debug("successfully recovered from write-ahead-log") + + recovered = true + + } + + // force re-sort if any segment was added + if recovered { + sort.Slice(sg.segments, func(i, j int) bool { + return sg.segments[i].getPath() < sg.segments[j].getPath() + }) + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_roaring_set.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_roaring_set.go new file mode 100644 index 0000000000000000000000000000000000000000..3c4452344e57324b48bd6418a91b00795a868d4c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_roaring_set.go @@ -0,0 +1,104 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "errors" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +func (b *Bucket) RoaringSetAddOne(key []byte, value uint64) error { + if err := CheckStrategyRoaringSet(b.strategy); err != nil { + return err + } + + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + return b.active.roaringSetAddOne(key, value) +} + +func (b *Bucket) RoaringSetRemoveOne(key []byte, value uint64) error { + if err := CheckStrategyRoaringSet(b.strategy); err != nil { + return err + } + + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + return b.active.roaringSetRemoveOne(key, value) +} + +func (b *Bucket) RoaringSetAddList(key []byte, values []uint64) error { + if err := CheckStrategyRoaringSet(b.strategy); err != nil { + return err + } + + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + return b.active.roaringSetAddList(key, values) +} + +func (b *Bucket) RoaringSetAddBitmap(key []byte, bm *sroar.Bitmap) error { + if err := CheckStrategyRoaringSet(b.strategy); err != nil { + return err + } + + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + return b.active.roaringSetAddBitmap(key, bm) +} + +func (b *Bucket) RoaringSetGet(key []byte) (bm *sroar.Bitmap, release func(), err error) { + if err := CheckStrategyRoaringSet(b.strategy); err != nil { + return nil, noopRelease, err + } + + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + layers, release, err := b.disk.roaringSetGet(key) + if err != nil { + return nil, noopRelease, err + } + defer func() { + if err != nil { + release() + } + }() + + if b.flushing != nil { + flushing, err := b.flushing.roaringSetGet(key) + if err != nil { + if !errors.Is(err, lsmkv.NotFound) { + return nil, noopRelease, err + } + } else { + layers = append(layers, flushing) + } + } + + active, err := b.active.roaringSetGet(key) + if err != nil { + if !errors.Is(err, lsmkv.NotFound) { + return nil, noopRelease, err + } + } else { + layers = 
append(layers, active) + } + + return layers.Flatten(false), release, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_roaring_set_range.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_roaring_set_range.go new file mode 100644 index 0000000000000000000000000000000000000000..872fa0e81424abaf5a4bab7a9413a01d818f7844 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_roaring_set_range.go @@ -0,0 +1,76 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange" + "github.com/weaviate/weaviate/entities/concurrency" + "github.com/weaviate/weaviate/entities/filters" +) + +func (b *Bucket) RoaringSetRangeAdd(key uint64, values ...uint64) error { + if err := CheckStrategyRoaringSetRange(b.strategy); err != nil { + return err + } + + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + return b.active.roaringSetRangeAdd(key, values...) +} + +func (b *Bucket) RoaringSetRangeRemove(key uint64, values ...uint64) error { + if err := CheckStrategyRoaringSetRange(b.strategy); err != nil { + return err + } + + b.flushLock.RLock() + defer b.flushLock.RUnlock() + + return b.active.roaringSetRangeRemove(key, values...) 
+} + +type ReaderRoaringSetRange interface { + Read(ctx context.Context, value uint64, operator filters.Operator) (result *sroar.Bitmap, release func(), err error) + Close() +} + +func (b *Bucket) ReaderRoaringSetRange() ReaderRoaringSetRange { + MustBeExpectedStrategy(b.strategy, StrategyRoaringSetRange) + + b.flushLock.RLock() + + var release func() + var readers []roaringsetrange.InnerReader + if b.keepSegmentsInMemory { + reader, releaseInt := roaringsetrange.NewSegmentInMemoryReader(b.disk.roaringSetRangeSegmentInMemory, b.bitmapBufPool) + readers, release = []roaringsetrange.InnerReader{reader}, releaseInt + } else { + readers, release = b.disk.newRoaringSetRangeReaders() + } + + // we have a flush-RLock, so we have the guarantee that the flushing state + // will not change for the lifetime of the cursor, thus there can only be two + // states: either a flushing memtable currently exists - or it doesn't + if b.flushing != nil { + readers = append(readers, b.flushing.newRoaringSetRangeReader()) + } + readers = append(readers, b.active.newRoaringSetRangeReader()) + + return roaringsetrange.NewCombinedReader(readers, func() { + release() + b.flushLock.RUnlock() + }, concurrency.SROAR_MERGE, b.logger) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_test.go new file mode 100644 index 0000000000000000000000000000000000000000..27d9dff9a1f08b323979c13d2012173fcaaf57ee --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_test.go @@ -0,0 +1,660 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "slices" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +type bucketTest struct { + name string + f func(context.Context, *testing.T, []BucketOption) + opts []BucketOption +} + +type bucketTests []bucketTest + +func (tests bucketTests) run(ctx context.Context, t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Run("mmap", func(t *testing.T) { + test.f(ctx, t, test.opts) + }) + t.Run("pread", func(t *testing.T) { + test.f(ctx, t, append([]BucketOption{WithPread(true)}, test.opts...)) + }) + }) + } +} + +func TestBucket(t *testing.T) { + ctx := context.Background() + tests := bucketTests{ + { + name: "bucket_WasDeleted_KeepTombstones", + f: bucket_WasDeleted_KeepTombstones, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithKeepTombstones(true), + }, + }, + { + name: "bucket_WasDeleted_CleanupTombstones", + f: bucket_WasDeleted_CleanupTombstones, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + }, + }, + { + name: "bucketReadsIntoMemory", + f: bucketReadsIntoMemory, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithSecondaryIndices(1), + }, + }, + } + tests.run(ctx, t) +} + +func bucket_WasDeleted_KeepTombstones(ctx context.Context, t *testing.T, opts []BucketOption) { + tmpDir := t.TempDir() + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, tmpDir, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + t.Cleanup(func() { + require.Nil(t, b.Shutdown(context.Background())) + }) + + var ( + key = []byte("key") + val = []byte("value") + ) + + t.Run("insert object", func(t *testing.T) { + err = b.Put(key, val) + require.Nil(t, err) + }) + + t.Run("assert object was not deleted yet", func(t *testing.T) { + deleted, _, err := b.WasDeleted(key) + require.Nil(t, err) + assert.False(t, deleted) + }) + + deletionTime := time.Now() + + time.Sleep(3 * time.Millisecond) + + t.Run("delete object", func(t *testing.T) { + err = b.DeleteWith(key, deletionTime) + require.Nil(t, err) + }) + + time.Sleep(1 * time.Millisecond) + + t.Run("assert object was deleted", func(t *testing.T) { + deleted, ts, err := b.WasDeleted(key) + require.Nil(t, err) + assert.True(t, deleted) + require.WithinDuration(t, deletionTime, ts, 1*time.Millisecond) + }) + + t.Run("assert a nonexistent object is not detected as deleted", func(t *testing.T) { + deleted, _, err := b.WasDeleted([]byte("DNE")) + require.Nil(t, err) + assert.False(t, deleted) + }) +} + +func bucket_WasDeleted_CleanupTombstones(ctx context.Context, t *testing.T, opts []BucketOption) { + tmpDir := t.TempDir() + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, tmpDir, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, b.Shutdown(context.Background())) + }) + + var ( + key = []byte("key") + val = []byte("value") + ) + + t.Run("insert object", func(t *testing.T) { + err = b.Put(key, val) + require.Nil(t, err) + }) + + t.Run("fails on WasDeleted without keepTombstones set (before delete)", func(t *testing.T) { + deleted, _, err := b.WasDeleted(key) + require.ErrorContains(t, err, "keepTombstones") + require.False(t, deleted) + }) + + t.Run("delete object", func(t *testing.T) { + err = b.Delete(key) + require.Nil(t, err) + }) + + t.Run("fails on WasDeleted without keepTombstones set (after delete)", func(t *testing.T) { + deleted, _, err := b.WasDeleted(key) + require.ErrorContains(t, err, "keepTombstones") + require.False(t, deleted) + }) + + t.Run("fails on WasDeleted without keepTombstones set (non-existent key)", func(t *testing.T) { + deleted, _, err := b.WasDeleted([]byte("DNE")) + require.ErrorContains(t, err, "keepTombstones") + require.False(t, deleted) + }) +} + +func bucketReadsIntoMemory(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + require.Nil(t, b.Put([]byte("hello"), []byte("world"), + WithSecondaryKey(0, []byte("bonjour")))) + require.Nil(t, b.FlushMemtable()) + + files, err := os.ReadDir(b.GetDir()) + require.Nil(t, err) + + _, ok := findFileWithExt(files, ".bloom") + assert.True(t, ok) + + _, ok = findFileWithExt(files, "secondary.0.bloom") + assert.True(t, ok) + b.Shutdown(ctx) + + b2, err := NewBucketCreator().NewBucket(ctx, b.GetDir(), "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + defer b2.Shutdown(ctx) + + valuePrimary, err := b2.Get([]byte("hello")) + require.Nil(t, err) + valueSecondary := make([]byte, 5) + valueSecondary, _, err = b2.GetBySecondaryIntoMemory(0, []byte("bonjour"), valueSecondary) + require.Nil(t, err) + + assert.Equal(t, []byte("world"), valuePrimary) + assert.Equal(t, []byte("world"), valueSecondary) +} + +func TestBucket_MemtableCountWithFlushing(t *testing.T) { + b := Bucket{ + // by using an empty segment group for the disk portion, we can test the + // memtable portion in isolation + disk: &SegmentGroup{}, + } + + tests := []struct { + name string + current *countStats + previous *countStats + expectedNetActive int + expectedNetPrevious int + expectedNetTotal int + }{ + { + name: "only active, only additions", + current: &countStats{ + upsertKeys: [][]byte{[]byte("key-1")}, + }, + expectedNetActive: 1, + }, + { + name: "only active, both additions and deletions", + current: &countStats{ + upsertKeys: [][]byte{[]byte("key-1")}, + // no key with key-2 ever existed, so this does not alter the net count + tombstonedKeys: [][]byte{[]byte("key-2")}, + }, + expectedNetActive: 1, + }, + { + name: "an deletion that was previously added", + current: &countStats{ + tombstonedKeys: [][]byte{[]byte("key-a")}, + }, + previous: &countStats{ + upsertKeys: [][]byte{[]byte("key-a")}, + }, + expectedNetActive: -1, + expectedNetPrevious: 1, + expectedNetTotal: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actualActive := b.memtableNetCount(tt.current, tt.previous) + assert.Equal(t, tt.expectedNetActive, actualActive) + + if tt.previous != nil { + actualPrevious := b.memtableNetCount(tt.previous, nil) + assert.Equal(t, tt.expectedNetPrevious, actualPrevious) + + assert.Equal(t, tt.expectedNetTotal, actualPrevious+actualActive) + } + }) + } +} + +func TestBucketGetBySecondary(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + + logger, _ := 
test.NewNullLogger() + + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace), WithSecondaryIndices(1)) + require.Nil(t, err) + + err = b.Put([]byte("hello"), []byte("world"), WithSecondaryKey(0, []byte("bonjour"))) + require.Nil(t, err) + + value, err := b.Get([]byte("hello")) + require.Nil(t, err) + require.Equal(t, []byte("world"), value) + + _, err = b.GetBySecondary(0, []byte("bonjour")) + require.Nil(t, err) + require.Equal(t, []byte("world"), value) + + _, err = b.GetBySecondary(1, []byte("bonjour")) + require.Error(t, err) + + require.Nil(t, b.FlushMemtable()) + + value, err = b.Get([]byte("hello")) + require.Nil(t, err) + require.Equal(t, []byte("world"), value) + + _, err = b.GetBySecondary(0, []byte("bonjour")) + require.Nil(t, err) + require.Equal(t, []byte("world"), value) + + _, err = b.GetBySecondary(1, []byte("bonjour")) + require.Error(t, err) +} + +func TestBucketWalReload(t *testing.T) { + for _, strategy := range []string{StrategyReplace, StrategySetCollection, StrategyMapCollection, StrategyRoaringSet, StrategyRoaringSetRange} { + t.Run(strategy, func(t *testing.T) { + ctx := context.Background() + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + + secondaryIndicesCount := uint16(0) + if strategy == StrategyReplace { + secondaryIndicesCount = 1 + } + + // initial bucket, always create segment, even if it is just a single entry + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(strategy), WithSecondaryIndices(secondaryIndicesCount), WithMinWalThreshold(4096), + WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop())) + require.NoError(t, err) + + if strategy == StrategyReplace { + require.NoError(t, b.Put([]byte("hello1"), []byte("world1"), WithSecondaryKey(0, []byte("bonjour1")))) + } else if strategy 
== StrategySetCollection { + require.NoError(t, b.SetAdd([]byte("hello1"), [][]byte{[]byte("world1")})) + } else if strategy == StrategyRoaringSet { + require.NoError(t, b.RoaringSetAddOne([]byte("hello1"), uint64(1))) + } else if strategy == StrategyMapCollection { + require.NoError(t, b.MapSet([]byte("hello1"), MapPair{Key: []byte("hello1"), Value: []byte("world1")})) + } else if strategy == StrategyRoaringSetRange { + require.NoError(t, b.RoaringSetRangeAdd(uint64(1), uint64(1))) + } else { + require.Fail(t, "unknown strategy %s", strategy) + } + testBucketContent(t, strategy, b, 2) + + require.NoError(t, b.Shutdown(ctx)) + + entries, err := os.ReadDir(dirName) + require.NoError(t, err) + require.Len(t, entries, 1, "single wal file should be created") + + testBucketContent(t, strategy, b, 2) + + // start fresh with a new memtable, new entries will stay in wal until size is reached + b, err = NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(strategy), WithSecondaryIndices(secondaryIndicesCount), WithMinWalThreshold(4096), + WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop())) + require.NoError(t, err) + + if strategy == StrategyReplace { + require.NoError(t, b.Put([]byte("hello2"), []byte("world2"), WithSecondaryKey(0, []byte("bonjour2")))) + } else if strategy == StrategySetCollection { + require.NoError(t, b.SetAdd([]byte("hello2"), [][]byte{[]byte("world2")})) + } else if strategy == StrategyRoaringSet { + require.NoError(t, b.RoaringSetAddOne([]byte("hello2"), uint64(2))) + } else if strategy == StrategyMapCollection { + require.NoError(t, b.MapSet([]byte("hello2"), MapPair{Key: []byte("hello2"), Value: []byte("world2")})) + } else if strategy == StrategyRoaringSetRange { + require.NoError(t, b.RoaringSetRangeAdd(uint64(2), uint64(2))) + } + require.NoError(t, b.Shutdown(ctx)) + + entries, err = os.ReadDir(dirName) + require.NoError(t, err) + fileTypes := 
map[string]int{} + for _, entry := range entries { + fileTypes[filepath.Ext(entry.Name())] += 1 + } + require.Equal(t, 0, fileTypes[".db"], "no segment file") + require.Equal(t, 1, fileTypes[".wal"], "single wal file") + + // will load wal and reuse memtable + b, err = NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(strategy), WithSecondaryIndices(1), WithMinWalThreshold(4096), + WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop())) + require.NoError(t, err) + + testBucketContent(t, strategy, b, 3) + + if strategy == StrategyReplace { + require.NoError(t, b.Put([]byte("hello3"), []byte("world3"), WithSecondaryKey(0, []byte("bonjour3")))) + } else if strategy == StrategySetCollection { + require.NoError(t, b.SetAdd([]byte("hello3"), [][]byte{[]byte("world3")})) + } else if strategy == StrategyRoaringSet { + require.NoError(t, b.RoaringSetAddOne([]byte("hello3"), uint64(3))) + } else if strategy == StrategyMapCollection { + require.NoError(t, b.MapSet([]byte("hello3"), MapPair{Key: []byte("hello3"), Value: []byte("world3")})) + } else if strategy == StrategyRoaringSetRange { + require.NoError(t, b.RoaringSetRangeAdd(uint64(3), uint64(3))) + require.NoError(t, err) + } + require.NoError(t, b.Shutdown(ctx)) + + entries, err = os.ReadDir(dirName) + require.NoError(t, err) + clear(fileTypes) + for _, entry := range entries { + fileTypes[filepath.Ext(entry.Name())] += 1 + } + require.Equal(t, 0, fileTypes[".db"], "no segment file") + require.Equal(t, 1, fileTypes[".wal"], "single wal file") + + // now add a lot of entries to hit .wal file limit + b, err = NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(strategy), WithSecondaryIndices(secondaryIndicesCount), WithMinWalThreshold(4096), + WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop())) + require.NoError(t, err) + + 
testBucketContent(t, strategy, b, 4) + + for i := 4; i < 120; i++ { // larger than min .wal threshold + if strategy == StrategyReplace { + require.NoError(t, b.Put([]byte(fmt.Sprintf("hello%d", i)), []byte(fmt.Sprintf("world%d", i)), WithSecondaryKey(0, []byte(fmt.Sprintf("bonjour%d", i))))) + } else if strategy == StrategySetCollection { + require.NoError(t, b.SetAdd([]byte(fmt.Sprintf("hello%d", i)), [][]byte{[]byte(fmt.Sprintf("world%d", i))})) + } else if strategy == StrategyRoaringSet { + require.NoError(t, b.RoaringSetAddOne([]byte(fmt.Sprintf("hello%d", i)), uint64(i))) + } else if strategy == StrategyMapCollection { + require.NoError(t, b.MapSet([]byte(fmt.Sprintf("hello%d", i)), MapPair{Key: []byte(fmt.Sprintf("hello%d", i)), Value: []byte(fmt.Sprintf("world%d", i))})) + } else if strategy == StrategyRoaringSetRange { + require.NoError(t, b.RoaringSetRangeAdd(uint64(4), uint64(4))) + } + } + testBucketContent(t, strategy, b, 120) + + require.NoError(t, b.Shutdown(ctx)) + + entries, err = os.ReadDir(dirName) + require.NoError(t, err) + clear(fileTypes) + for _, entry := range entries { + fileTypes[filepath.Ext(entry.Name())] += 1 + } + require.Equal(t, 1, fileTypes[".db"], "no segment file") + require.Equal(t, 0, fileTypes[".wal"], "single wal file") + + b, err = NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(strategy), WithSecondaryIndices(secondaryIndicesCount), WithMinWalThreshold(4096), + WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop())) + require.NoError(t, err) + + testBucketContent(t, strategy, b, 120) + }) + } +} + +func testBucketContent(t *testing.T, strategy string, b *Bucket, maxObject int) { + t.Helper() + ctx := context.Background() + for i := 1; i < maxObject; i++ { + key := []byte(fmt.Sprintf("hello%d", i)) + val := []byte(fmt.Sprintf("world%d", i)) + if strategy == StrategyReplace { + get, err := b.Get(key) + require.NoError(t, err) + 
require.Equal(t, val, get) + + secondary, err := b.GetBySecondary(0, []byte(fmt.Sprintf("bonjour%d", i))) + require.NoError(t, err) + require.Equal(t, val, secondary) + } else if strategy == StrategySetCollection { + get, err := b.SetList(key) + require.NoError(t, err) + require.Equal(t, val, get[0]) + } else if strategy == StrategyRoaringSet { + get, release, err := b.RoaringSetGet(key) + require.NoError(t, err) + defer release() + require.True(t, get.Contains(uint64(i))) + } else if strategy == StrategyRoaringSetRange { + //_, err := b.Rang + //require.NoError(t,err) + } else if strategy == StrategyMapCollection { + get, err := b.MapList(ctx, key) + require.NoError(t, err) + require.Equal(t, val, get[0].Value) + } + } +} + +func TestBucketInfoInFileName(t *testing.T) { + ctx := context.Background() + + logger, _ := test.NewNullLogger() + + for _, segmentInfo := range []bool{true, false} { + t.Run(fmt.Sprintf("%t", segmentInfo), func(t *testing.T) { + dirName := t.TempDir() + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), WithWriteSegmentInfoIntoFileName(segmentInfo), + ) + require.NoError(t, err) + require.NoError(t, b.Put([]byte("hello1"), []byte("world1"), WithSecondaryKey(0, []byte("bonjour1")))) + require.NoError(t, b.FlushMemtable()) + dbFiles, _ := countDbAndWalFiles(t, dirName) + require.Equal(t, dbFiles, 1) + }) + } +} + +func TestBucketCompactionFileName(t *testing.T) { + ctx := context.Background() + logger, _ := test.NewNullLogger() + + tests := []struct { + firstSegment bool + secondSegment bool + compaction bool + }{ + {firstSegment: false, secondSegment: false, compaction: true}, + {firstSegment: false, secondSegment: true, compaction: true}, + {firstSegment: true, secondSegment: false, compaction: true}, + {firstSegment: true, secondSegment: true, compaction: true}, + {firstSegment: true, secondSegment: true, compaction: false}, + } + + for _, tt := 
range tests { + t.Run(fmt.Sprintf("firstSegment: %t", tt.firstSegment), func(t *testing.T) { + dirName := t.TempDir() + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), WithWriteSegmentInfoIntoFileName(tt.firstSegment), + ) + require.NoError(t, err) + require.NoError(t, b.Put([]byte("hello1"), []byte("world1"), WithSecondaryKey(0, []byte("bonjour1")))) + require.NoError(t, b.FlushMemtable()) + require.NoError(t, b.Shutdown(ctx)) + dbFiles, _ := countDbAndWalFiles(t, dirName) + require.Equal(t, dbFiles, 1) + oldNames := verifyFileInfo(t, dirName, nil, tt.firstSegment, 0) + + b, err = NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), WithWriteSegmentInfoIntoFileName(tt.secondSegment), + ) + require.NoError(t, err) + require.NoError(t, b.Put([]byte("hello2"), []byte("world2"), WithSecondaryKey(0, []byte("bonjour2")))) + require.NoError(t, b.FlushMemtable()) + require.NoError(t, b.Shutdown(ctx)) + + dbFiles, _ = countDbAndWalFiles(t, dirName) + require.Equal(t, dbFiles, 2) + oldNames = verifyFileInfo(t, dirName, oldNames, tt.secondSegment, 0) + + b, err = NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), WithWriteSegmentInfoIntoFileName(tt.compaction), + ) + require.NoError(t, err) + compact, err := b.disk.compactOnce() + require.NoError(t, err) + require.True(t, compact) + dbFiles, _ = countDbAndWalFiles(t, dirName) + require.Equal(t, dbFiles, 1) + verifyFileInfo(t, dirName, oldNames, tt.compaction, 1) + }) + } +} + +func countDbAndWalFiles(t *testing.T, path string) (int, int) { + t.Helper() + fileTypes := map[string]int{} + entries, err := os.ReadDir(path) + require.NoError(t, err) + for _, entry := range entries { + fileTypes[filepath.Ext(entry.Name())] += 1 + } + return fileTypes[".db"], 
fileTypes[".wal"] +} + +func verifyFileInfo(t *testing.T, path string, oldEntries []string, segmentInfo bool, level int) []string { + t.Helper() + entries, err := os.ReadDir(path) + require.NoError(t, err) + fileNames := []string{} + for _, entry := range entries { + fileNames = append(fileNames, entry.Name()) + if oldEntries != nil && slices.Contains(oldEntries, entry.Name()) { + continue + } + if filepath.Ext(entry.Name()) == ".db" { + if segmentInfo { + require.Contains(t, entry.Name(), fmt.Sprintf(".l%d.", level)) + require.Contains(t, entry.Name(), ".s0.") + } else { + require.NotRegexp(t, regexp.MustCompile(`\.l\d+\.`), entry.Name()) + require.NotContains(t, entry.Name(), ".s0.") + } + } + } + return fileNames +} + +func TestBucketRecovery(t *testing.T) { + logger, _ := test.NewNullLogger() + + ctx := context.Background() + dirName := t.TempDir() + tmpDir := t.TempDir() + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), WithWriteSegmentInfoIntoFileName(true), WithMinWalThreshold(4096), + ) + require.NoError(t, err) + require.NoError(t, b.Put([]byte("hello1"), []byte("world1"), WithSecondaryKey(0, []byte("bonjour1")))) + require.NoError(t, b.Shutdown(ctx)) + dbFiles, walFiles := countDbAndWalFiles(t, dirName) + require.Equal(t, dbFiles, 0) + require.Equal(t, walFiles, 1) + + // move .wal file somewhere else to recover later + var oldPath, tmpPath string + entries, err := os.ReadDir(dirName) + require.NoError(t, err) + for _, entry := range entries { + if filepath.Ext(entry.Name()) == ".wal" { + oldPath = dirName + "/" + entry.Name() + tmpPath = tmpDir + "/" + entry.Name() + require.NoError(t, os.Rename(oldPath, tmpPath)) + } + } + _, walFiles = countDbAndWalFiles(t, dirName) + require.Equal(t, walFiles, 0) + + b, err = NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), 
WithWriteSegmentInfoIntoFileName(true), WithMinWalThreshold(4096), + ) + require.NoError(t, err) + require.NoError(t, b.Put([]byte("hello2"), []byte("world2"), WithSecondaryKey(0, []byte("bonjour2")))) + require.NoError(t, b.Shutdown(ctx)) + dbFiles, walFiles = countDbAndWalFiles(t, dirName) + require.Equal(t, dbFiles, 0) + require.Equal(t, walFiles, 1) + + // move .wal file back so we can recover one + require.NoError(t, os.Rename(tmpPath, oldPath)) + + b, err = NewBucketCreator().NewBucket(ctx, dirName, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), WithWriteSegmentInfoIntoFileName(true), WithMinWalThreshold(4096), + ) + require.NoError(t, err) + get, err := b.Get([]byte("hello1")) + require.NoError(t, err) + require.Equal(t, []byte("world1"), get) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_threshold_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_threshold_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2632d3cf05a2eedc18f5ab8d4e3271f8fa26aae7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/bucket_threshold_test.go @@ -0,0 +1,392 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package lsmkv + +import ( + "context" + "crypto/rand" + "encoding/json" + "sync" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +var logger, _ = test.NewNullLogger() + +// This test ensures that the WAL threshold is being adhered to, and that a +// flush to segment followed by a switch to a new WAL is being performed +// once the threshold is reached +func TestWriteAheadLogThreshold_Replace(t *testing.T) { + dirName := t.TempDir() + + amount := 100 + keys := make([][]byte, amount) + values := make([][]byte, amount) + + walThreshold := uint64(4096) + tolerance := 4. + + flushCallbacks := cyclemanager.NewCallbackGroup("flush", nullLogger(), 1) + flushCycle := cyclemanager.NewManager(cyclemanager.MemtableFlushCycleTicker(), flushCallbacks.CycleCallback, logger) + flushCycle.Start() + + bucket, err := NewBucketCreator().NewBucket(testCtx(), dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), flushCallbacks, + WithStrategy(StrategyReplace), + WithMemtableThreshold(1024*1024*1024), + WithWalThreshold(walThreshold), + WithMinWalThreshold(0), // small enough to not affect this test + ) + require.Nil(t, err) + + // generate only a small amount of sequential values. this allows + // us to keep the memtable small (the net additions will be close + // to zero), and focus on testing the WAL threshold + t.Run("generate sequential data", func(t *testing.T) { + for i := range keys { + n, err := json.Marshal(i) + require.Nil(t, err) + + keys[i], values[i] = n, n + } + }) + + t.Run("check switchover during insertion", func(t *testing.T) { + // Importing data for over 10s with 1.6ms break between each object + // should result in ~100kB of commitlog data in total. 
+ // With couple of flush attempts happening during this 10s period, + // and with threshold set to 4kB, first .wal size should be much smaller than 100kb + // when commitlog switched to new .wal file. + ctxTimeout, cancelTimeout := context.WithTimeout(context.Background(), 10*time.Second) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + for { + for i := range keys { + if i%100 == 0 && ctxTimeout.Err() != nil { + wg.Done() + return + } + assert.Nil(t, bucket.Put(keys[i], values[i])) + time.Sleep(1600 * time.Microsecond) + } + } + }() + + var firstWalFile string + var firstWalSize int64 + out: + for { + time.Sleep(time.Millisecond) + if ctxTimeout.Err() != nil { + t.Fatalf("Import finished without flushing in the meantime. Size of first WAL file was (%d)", firstWalSize) + } + + bucket.flushLock.RLock() + walFile := bucket.active.commitlog.walPath() + walSize := bucket.active.commitlog.size() + bucket.flushLock.RUnlock() + + if firstWalFile == "" { + firstWalFile = walFile + } + + if firstWalFile == walFile { + firstWalSize = walSize + } else { + // new path found; flush must have occurred - stop import and exit loop + cancelTimeout() + break out + } + } + + wg.Wait() + if !isSizeWithinTolerance(t, uint64(firstWalSize), walThreshold, tolerance) { + t.Fatalf("WAL size (%d) was allowed to increase beyond threshold (%d) with tolerance of (%f)%%", + firstWalSize, walThreshold, tolerance*100) + } + }) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + require.Nil(t, bucket.Shutdown(ctx)) + require.Nil(t, flushCycle.StopAndWait(ctx)) +} + +// This test ensures that the Memtable threshold is being adhered to, and +// that a flush to segment followed by a switch to a new WAL is being +// performed once the threshold is reached +func TestMemtableThreshold_Replace(t *testing.T) { + dirName := t.TempDir() + + amount := 10000 + sizePerValue := 8 + + keys := make([][]byte, amount) + values := make([][]byte, amount) + + 
memtableThreshold := uint64(4096) + tolerance := 4. + + flushCallbacks := cyclemanager.NewCallbackGroup("flush", nullLogger(), 1) + flushCycle := cyclemanager.NewManager(cyclemanager.MemtableFlushCycleTicker(), flushCallbacks.CycleCallback, logger) + flushCycle.Start() + + bucket, err := NewBucketCreator().NewBucket(testCtx(), dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), flushCallbacks, + WithStrategy(StrategyReplace), + WithMemtableThreshold(memtableThreshold), + WithMinWalThreshold(0), + ) + require.Nil(t, err) + + t.Run("generate random data", func(t *testing.T) { + for i := range keys { + n, err := json.Marshal(i) + require.Nil(t, err) + + keys[i] = n + values[i] = make([]byte, sizePerValue) + rand.Read(values[i]) + } + }) + + t.Run("check switchover during insertion", func(t *testing.T) { + // Importing data for over 10s with 0.8ms break between each object + // should result in ~100kB of memtable data. + // With couple of flush attempts happening during this 10s period, + // and with threshold set to 4kB, first memtable size should be much smaller than 100kb + // when memtable flushed and replaced with new one + ctxTimeout, cancelTimeout := context.WithTimeout(context.Background(), 10*time.Second) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + for { + for i := range keys { + if i%100 == 0 && ctxTimeout.Err() != nil { + wg.Done() + return + } + assert.Nil(t, bucket.Put(keys[i], values[i])) + time.Sleep(800 * time.Microsecond) + } + } + }() + + var firstMemtablePath string + var firstMemtableSize uint64 + out: + for { + time.Sleep(time.Millisecond) + if ctxTimeout.Err() != nil { + t.Fatalf("Import finished without flushing in the meantime. 
Size of first memtable was (%d)", firstMemtableSize) + } + + bucket.flushLock.RLock() + activePath := bucket.active.path + activeSize := bucket.active.Size() + bucket.flushLock.RUnlock() + + if firstMemtablePath == "" { + firstMemtablePath = activePath + } + + if firstMemtablePath == activePath { + firstMemtableSize = activeSize + } else { + // new path found; flush must have occurred - stop import and exit loop + cancelTimeout() + break out + } + } + + wg.Wait() + if !isSizeWithinTolerance(t, uint64(firstMemtableSize), memtableThreshold, tolerance) { + t.Fatalf("Memtable size (%d) was allowed to increase beyond threshold (%d) with tolerance of (%f)%%", + firstMemtableSize, memtableThreshold, tolerance*100) + } + }) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + require.Nil(t, bucket.Shutdown(ctx)) + require.Nil(t, flushCycle.StopAndWait(ctx)) +} + +func isSizeWithinTolerance(t *testing.T, detectedSize uint64, threshold uint64, tolerance float64) bool { + return detectedSize > 0 && float64(detectedSize) <= float64(threshold)*(tolerance+1) +} + +func TestMemtableFlushesIfDirty(t *testing.T) { + t.Run("an empty memtable is not flushed", func(t *testing.T) { + dirName := t.TempDir() + + flushCallbacks := cyclemanager.NewCallbackGroup("flush", nullLogger(), 1) + flushCycle := cyclemanager.NewManager(cyclemanager.MemtableFlushCycleTicker(), flushCallbacks.CycleCallback, logger) + flushCycle.Start() + + bucket, err := NewBucketCreator().NewBucket(testCtx(), dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), flushCallbacks, + WithStrategy(StrategyReplace), + WithMemtableThreshold(1e12), // large enough to not affect this test + WithWalThreshold(1e12), // large enough to not affect this test + WithMinWalThreshold(0), // small enough to not affect this test + WithDirtyThreshold(10*time.Millisecond), + ) + require.Nil(t, err) + + t.Run("assert no segments exist initially", func(t *testing.T) { + segments, 
release := bucket.disk.getAndLockSegments() + defer release() + + assert.Equal(t, 0, len(segments)) + }) + + t.Run("wait until dirty threshold has passed", func(t *testing.T) { + // First flush attempt should occur after ~100ms after creating bucket. + // Buffer of 200ms guarantees, flush will be called during sleep period. + time.Sleep(200 * time.Millisecond) + }) + + t.Run("assert no segments exist even after passing the dirty threshold", func(t *testing.T) { + segments, release := bucket.disk.getAndLockSegments() + defer release() + + assert.Equal(t, 0, len(segments)) + }) + + t.Run("shutdown bucket", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + require.Nil(t, bucket.Shutdown(ctx)) + require.Nil(t, flushCycle.StopAndWait(ctx)) + }) + }) + + t.Run("a dirty memtable is flushed once dirty period has passed with single write", func(t *testing.T) { + dirName := t.TempDir() + + flushCallbacks := cyclemanager.NewCallbackGroup("flush", nullLogger(), 1) + flushCycle := cyclemanager.NewManager(cyclemanager.MemtableFlushCycleTicker(), flushCallbacks.CycleCallback, logger) + flushCycle.Start() + + bucket, err := NewBucketCreator().NewBucket(testCtx(), dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), flushCallbacks, + WithStrategy(StrategyReplace), + WithMemtableThreshold(1e12), // large enough to not affect this test + WithWalThreshold(1e12), // large enough to not affect this test + WithMinWalThreshold(0), // small enough to not affect this test + WithDirtyThreshold(50*time.Millisecond), + ) + require.Nil(t, err) + + t.Run("import something to make it dirty", func(t *testing.T) { + require.Nil(t, bucket.Put([]byte("some-key"), []byte("some-value"))) + }) + + t.Run("assert no segments exist initially", func(t *testing.T) { + segments, release := bucket.disk.getAndLockSegments() + defer release() + + assert.Equal(t, 0, len(segments)) + }) + + t.Run("wait until dirty threshold has 
passed", func(t *testing.T) { + // First flush attempt should occur after ~100ms after creating bucket. + // Buffer of 200ms guarantees, flush will be called during sleep period. + time.Sleep(200 * time.Millisecond) + }) + + t.Run("assert that a flush has occurred (and one segment exists)", func(t *testing.T) { + segments, release := bucket.disk.getAndLockSegments() + defer release() + + assert.Equal(t, 1, len(segments)) + }) + + t.Run("shutdown bucket", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + require.Nil(t, bucket.Shutdown(ctx)) + require.Nil(t, flushCycle.StopAndWait(ctx)) + }) + }) + + t.Run("a dirty memtable is flushed once dirty period has passed with ongoing writes", func(t *testing.T) { + dirName := t.TempDir() + + flushCallbacks := cyclemanager.NewCallbackGroup("flush", nullLogger(), 1) + flushCycle := cyclemanager.NewManager(cyclemanager.MemtableFlushCycleTicker(), flushCallbacks.CycleCallback, logger) + flushCycle.Start() + + bucket, err := NewBucketCreator().NewBucket(testCtx(), dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), flushCallbacks, + WithStrategy(StrategyReplace), + WithMemtableThreshold(1e12), // large enough to not affect this test + WithWalThreshold(1e12), // large enough to not affect this test + WithMinWalThreshold(0), // small enough to not affect this test + WithDirtyThreshold(50*time.Millisecond), + ) + require.Nil(t, err) + + t.Run("import something to make it dirty", func(t *testing.T) { + require.Nil(t, bucket.Put([]byte("some-key"), []byte("some-value"))) + }) + + t.Run("assert no segments exist initially", func(t *testing.T) { + segments, release := bucket.disk.getAndLockSegments() + defer release() + + assert.Equal(t, 0, len(segments)) + }) + + t.Run("keep importing crossing the dirty threshold", func(t *testing.T) { + rounds := 12 // at least 300ms + data := make([]byte, rounds*4) + _, err := rand.Read(data) + require.Nil(t, err) + + 
for i := 0; i < rounds; i++ { + key := data[(i * 4) : (i+1)*4] + bucket.Put(key, []byte("value")) + time.Sleep(25 * time.Millisecond) + } + }) + + t.Run("assert that flush has occurred in the meantime", func(t *testing.T) { + segments, release := bucket.disk.getAndLockSegments() + defer release() + + // at least 2 segments should be created already + assert.GreaterOrEqual(t, len(segments), 2) + }) + + t.Run("shutdown bucket", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + require.Nil(t, bucket.Shutdown(ctx)) + require.Nil(t, flushCycle.StopAndWait(ctx)) + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cleanup_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cleanup_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..43d7211e0aa450e50f4e60d871e2e064ec68bd0b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cleanup_integration_test.go @@ -0,0 +1,51 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "context" + "testing" + "time" +) + +func TestSegmentsCleanup(t *testing.T) { + ctx := testCtx() + tests := bucketIntegrationTests{ + { + name: "cleanupReplaceStrategy", + f: func(ctx context.Context, t *testing.T, opts []BucketOption) { + cleanupReplaceStrategy(ctx, t, opts) + }, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithSegmentsCleanupInterval(time.Second), + WithCalcCountNetAdditions(true), + }, + }, + { + name: "cleanupReplaceStrategy_WithSecondaryKeys", + f: func(ctx context.Context, t *testing.T, opts []BucketOption) { + cleanupReplaceStrategy_WithSecondaryKeys(ctx, t, opts) + }, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithSecondaryIndices(2), + WithSegmentsCleanupInterval(time.Second), + WithCalcCountNetAdditions(true), + }, + }, + } + tests.run(ctx, t) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cleanup_replace_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cleanup_replace_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3738d603d4504db2a6f3abd1cf056d5ddcc3fccb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cleanup_replace_integration_test.go @@ -0,0 +1,813 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +func cleanupReplaceStrategy(ctx context.Context, t *testing.T, opts []BucketOption) { + dir := t.TempDir() + + bucket, err := NewBucketCreator().NewBucket(ctx, dir, dir, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + defer bucket.Shutdown(context.Background()) + + type kvt struct { + pkey string + val string + tomb bool + } + + t.Run("create segments", func(t *testing.T) { + /* + SEG1 SEG2 SEG3 SEG4 SEG5 + ------------------------------------ + c101 + c102 u102 + c103 u103 + c104 u104 + c105 u105 + c106 d106 + c107 d107 + c108 d108 + c109 d109 + c110 u110 d110 + c111 u111 d111 + c112 u112 d112 + c113 d113 u113 + c114 d114 u114 + c115 d115 u115 + ------------------------------------ + c201 + c202 u202 + c203 u203 + c204 u204 + c205 d205 + c206 d206 + c207 d207 + c208 u208 d208 + c209 u209 d209 + c210 d210 u210 + c211 d211 u211 + ------------------------------------ + c301 + c302 u302 + c303 u303 + c304 d304 + c305 d305 + c306 u306 d306 + c307 d307 u307 + ------------------------------------ + c401 + c402 u402 + c403 d403 + ------------------------------------ + c501 + */ + + put := func(t *testing.T, pkey, value string) { + require.NoError(t, bucket.Put([]byte(pkey), []byte(value))) + } + delete := func(t *testing.T, pkey string) { + require.NoError(t, bucket.Delete([]byte(pkey))) + } + + t.Run("segment 1", func(t *testing.T) { + put(t, "key101_created1", "created") + put(t, "key102_updated2", "created") + put(t, "key103_updated3", "created") + put(t, "key104_updated4", "created") + put(t, "key105_updated5", "created") + put(t, 
"key106_deleted2", "created") + put(t, "key107_deleted3", "created") + put(t, "key108_deleted4", "created") + put(t, "key109_deleted5", "created") + put(t, "key110_updated2_deleted3", "created") + put(t, "key111_updated3_deleted4", "created") + put(t, "key112_updated4_deleted5", "created") + put(t, "key113_deleted2_updated3", "created") + put(t, "key114_deleted3_updated4", "created") + put(t, "key115_deleted4_updated5", "created") + + require.NoError(t, bucket.FlushAndSwitch()) + }) + + t.Run("segment 2", func(t *testing.T) { + put(t, "key201_created2", "created") + put(t, "key202_updated3", "created") + put(t, "key203_updated4", "created") + put(t, "key204_updated5", "created") + put(t, "key205_deleted3", "created") + put(t, "key206_deleted4", "created") + put(t, "key207_deleted5", "created") + put(t, "key208_updated3_deleted4", "created") + put(t, "key209_updated4_deleted5", "created") + put(t, "key210_deleted3_updated4", "created") + put(t, "key211_deleted4_updated5", "created") + + put(t, "key102_updated2", "updated") + put(t, "key110_updated2_deleted3", "updated") + + delete(t, "key106_deleted2") + delete(t, "key113_deleted2_updated3") + + require.NoError(t, bucket.FlushAndSwitch()) + }) + + t.Run("segment 3", func(t *testing.T) { + put(t, "key301_created3", "created") + put(t, "key302_updated4", "created") + put(t, "key303_updated5", "created") + put(t, "key304_deleted4", "created") + put(t, "key305_deleted5", "created") + put(t, "key306_updated4_deleted5", "created") + put(t, "key307_deleted4_updated5", "created") + + put(t, "key103_updated3", "updated") + put(t, "key111_updated3_deleted4", "updated") + put(t, "key113_deleted2_updated3", "updated") + put(t, "key202_updated3", "updated") + put(t, "key208_updated3_deleted4", "updated") + + delete(t, "key107_deleted3") + delete(t, "key110_updated2_deleted3") + delete(t, "key114_deleted3_updated4") + delete(t, "key205_deleted3") + delete(t, "key210_deleted3_updated4") + + require.NoError(t, 
bucket.FlushAndSwitch()) + }) + + t.Run("segment 4", func(t *testing.T) { + put(t, "key401_created4", "created") + put(t, "key402_updated5", "created") + put(t, "key403_deleted5", "created") + + put(t, "key104_updated4", "updated") + put(t, "key112_updated4_deleted5", "updated") + put(t, "key114_deleted3_updated4", "updated") + put(t, "key203_updated4", "updated") + put(t, "key209_updated4_deleted5", "updated") + put(t, "key210_deleted3_updated4", "updated") + put(t, "key302_updated4", "updated") + put(t, "key306_updated4_deleted5", "updated") + + delete(t, "key108_deleted4") + delete(t, "key111_updated3_deleted4") + delete(t, "key115_deleted4_updated5") + delete(t, "key206_deleted4") + delete(t, "key208_updated3_deleted4") + delete(t, "key211_deleted4_updated5") + delete(t, "key304_deleted4") + delete(t, "key307_deleted4_updated5") + + require.NoError(t, bucket.FlushAndSwitch()) + }) + + t.Run("segment 5", func(t *testing.T) { + put(t, "key501_created5", "created") + + put(t, "key105_updated5", "updated") + put(t, "key115_deleted4_updated5", "updated") + put(t, "key204_updated5", "updated") + put(t, "key211_deleted4_updated5", "updated") + put(t, "key303_updated5", "updated") + put(t, "key307_deleted4_updated5", "updated") + put(t, "key402_updated5", "updated") + + delete(t, "key109_deleted5") + delete(t, "key112_updated4_deleted5") + delete(t, "key207_deleted5") + delete(t, "key209_updated4_deleted5") + delete(t, "key305_deleted5") + delete(t, "key306_updated4_deleted5") + delete(t, "key403_deleted5") + + require.NoError(t, bucket.FlushAndSwitch()) + }) + }) + + t.Run("clean segments", func(t *testing.T) { + shouldAbort := func() bool { return false } + count := 5 // 5 segments total + + // all but last segments should be cleaned + for i := 0; i < count; i++ { + cleaned, err := bucket.disk.segmentCleaner.cleanupOnce(shouldAbort) + assert.NoError(t, err) + + if i != count-1 { + assert.True(t, cleaned) + } else { + assert.False(t, cleaned) + } + } + }) + + 
t.Run("verify segments' contents", func(t *testing.T) { + assertContents := func(t *testing.T, segIdx int, expected []*kvt) { + seg := bucket.disk.segments[segIdx] + cur := seg.newCursor() + + i := 0 + var k, v []byte + var err error + for k, v, err = cur.first(); k != nil && i < len(expected); k, v, err = cur.next() { + assert.Equal(t, []byte(expected[i].pkey), k) + if expected[i].tomb { + assert.ErrorIs(t, err, lsmkv.Deleted) + assert.Nil(t, v) + } else { + assert.NoError(t, err) + assert.Equal(t, []byte(expected[i].val), v) + } + i++ + } + assert.ErrorIs(t, err, lsmkv.NotFound, "cursor not finished") + assert.Equal(t, i, len(expected), "more entries expected") + } + + t.Run("segment 1", func(t *testing.T) { + assertContents(t, 0, []*kvt{ + {pkey: "key101_created1", val: "created"}, + }) + }) + + t.Run("segment 2", func(t *testing.T) { + assertContents(t, 1, []*kvt{ + {pkey: "key102_updated2", val: "updated"}, + {pkey: "key106_deleted2", tomb: true}, + {pkey: "key201_created2", val: "created"}, + }) + }) + + t.Run("segment 3", func(t *testing.T) { + assertContents(t, 2, []*kvt{ + {pkey: "key103_updated3", val: "updated"}, + {pkey: "key107_deleted3", tomb: true}, + {pkey: "key110_updated2_deleted3", tomb: true}, + {pkey: "key113_deleted2_updated3", val: "updated"}, + {pkey: "key202_updated3", val: "updated"}, + {pkey: "key205_deleted3", tomb: true}, + {pkey: "key301_created3", val: "created"}, + }) + }) + + t.Run("segment 4", func(t *testing.T) { + assertContents(t, 3, []*kvt{ + {pkey: "key104_updated4", val: "updated"}, + {pkey: "key108_deleted4", tomb: true}, + {pkey: "key111_updated3_deleted4", tomb: true}, + {pkey: "key114_deleted3_updated4", val: "updated"}, + {pkey: "key203_updated4", val: "updated"}, + {pkey: "key206_deleted4", tomb: true}, + {pkey: "key208_updated3_deleted4", tomb: true}, + {pkey: "key210_deleted3_updated4", val: "updated"}, + {pkey: "key302_updated4", val: "updated"}, + {pkey: "key304_deleted4", tomb: true}, + {pkey: "key401_created4", 
val: "created"}, + }) + }) + + t.Run("segment 5", func(t *testing.T) { + assertContents(t, 4, []*kvt{ + {pkey: "key105_updated5", val: "updated"}, + {pkey: "key109_deleted5", tomb: true}, + {pkey: "key112_updated4_deleted5", tomb: true}, + {pkey: "key115_deleted4_updated5", val: "updated"}, + {pkey: "key204_updated5", val: "updated"}, + {pkey: "key207_deleted5", tomb: true}, + {pkey: "key209_updated4_deleted5", tomb: true}, + {pkey: "key211_deleted4_updated5", val: "updated"}, + {pkey: "key303_updated5", val: "updated"}, + {pkey: "key305_deleted5", tomb: true}, + {pkey: "key306_updated4_deleted5", tomb: true}, + {pkey: "key307_deleted4_updated5", val: "updated"}, + {pkey: "key402_updated5", val: "updated"}, + {pkey: "key403_deleted5", tomb: true}, + {pkey: "key501_created5", val: "created"}, + }) + }) + }) + + t.Run("verify bucket's contents", func(t *testing.T) { + expected := []*kvt{ + {pkey: "key101_created1", val: "created"}, + {pkey: "key102_updated2", val: "updated"}, + {pkey: "key103_updated3", val: "updated"}, + {pkey: "key104_updated4", val: "updated"}, + {pkey: "key105_updated5", val: "updated"}, + {pkey: "key106_deleted2", tomb: true}, + {pkey: "key107_deleted3", tomb: true}, + {pkey: "key108_deleted4", tomb: true}, + {pkey: "key109_deleted5", tomb: true}, + {pkey: "key110_updated2_deleted3", tomb: true}, + {pkey: "key111_updated3_deleted4", tomb: true}, + {pkey: "key112_updated4_deleted5", tomb: true}, + {pkey: "key113_deleted2_updated3", val: "updated"}, + {pkey: "key114_deleted3_updated4", val: "updated"}, + {pkey: "key115_deleted4_updated5", val: "updated"}, + + {pkey: "key201_created2", val: "created"}, + {pkey: "key202_updated3", val: "updated"}, + {pkey: "key203_updated4", val: "updated"}, + {pkey: "key204_updated5", val: "updated"}, + {pkey: "key205_deleted3", tomb: true}, + {pkey: "key206_deleted4", tomb: true}, + {pkey: "key207_deleted5", tomb: true}, + {pkey: "key208_updated3_deleted4", tomb: true}, + {pkey: "key209_updated4_deleted5", tomb: 
true}, + {pkey: "key210_deleted3_updated4", val: "updated"}, + {pkey: "key211_deleted4_updated5", val: "updated"}, + + {pkey: "key301_created3", val: "created"}, + {pkey: "key302_updated4", val: "updated"}, + {pkey: "key303_updated5", val: "updated"}, + {pkey: "key304_deleted4", tomb: true}, + {pkey: "key305_deleted5", tomb: true}, + {pkey: "key306_updated4_deleted5", tomb: true}, + {pkey: "key307_deleted4_updated5", val: "updated"}, + + {pkey: "key401_created4", val: "created"}, + {pkey: "key402_updated5", val: "updated"}, + {pkey: "key403_deleted5", tomb: true}, + + {pkey: "key501_created5", val: "created"}, + } + expectedExising := []*kvt{} + for i := range expected { + if !expected[i].tomb { + expectedExising = append(expectedExising, expected[i]) + } + } + + t.Run("cursor", func(t *testing.T) { + c := bucket.Cursor() + defer c.Close() + + i := 0 + for k, v := c.First(); k != nil && i < len(expectedExising); k, v = c.Next() { + assert.Equal(t, []byte(expectedExising[i].pkey), k) + assert.Equal(t, []byte(expectedExising[i].val), v) + i++ + } + assert.Equal(t, i, len(expectedExising)) + }) + + t.Run("get", func(t *testing.T) { + for i := range expected { + val, err := bucket.Get([]byte(expected[i].pkey)) + + assert.NoError(t, err) + if expected[i].tomb { + assert.Nil(t, val) + } else { + assert.Equal(t, []byte(expected[i].val), val) + } + } + }) + + t.Run("net count", func(t *testing.T) { + assert.Equal(t, len(expectedExising), bucket.Count()) + }) + }) +} + +func cleanupReplaceStrategy_WithSecondaryKeys(ctx context.Context, t *testing.T, opts []BucketOption) { + dir := t.TempDir() + + bucket, err := NewBucketCreator().NewBucket(ctx, dir, dir, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + defer bucket.Shutdown(context.Background()) + + secondaryKey0 := func(primaryKey string) string { + return "secondary0-" + primaryKey + } + secondaryKey1 := func(primaryKey string) string { + return "secondary1-" + primaryKey + } + + type kvt struct { + pkey string + val string + tomb bool + } + + t.Run("create segments", func(t *testing.T) { + /* + SEG1 SEG2 SEG3 SEG4 SEG5 + ------------------------------------ + c101 + c102 u102 + c103 u103 + c104 u104 + c105 u105 + c106 d106 + c107 d107 + c108 d108 + c109 d109 + c110 u110 d110 + c111 u111 d111 + c112 u112 d112 + c113 d113 u113 + c114 d114 u114 + c115 d115 u115 + ------------------------------------ + c201 + c202 u202 + c203 u203 + c204 u204 + c205 d205 + c206 d206 + c207 d207 + c208 u208 d208 + c209 u209 d209 + c210 d210 u210 + c211 d211 u211 + ------------------------------------ + c301 + c302 u302 + c303 u303 + c304 d304 + c305 d305 + c306 u306 d306 + c307 d307 u307 + ------------------------------------ + c401 + c402 u402 + c403 d403 + ------------------------------------ + c501 + */ + + putWithSecondaryKeys := func(t *testing.T, pkey, value string) { + err := bucket.Put( + []byte(pkey), + []byte(value), + WithSecondaryKey(0, []byte(secondaryKey0(pkey))), + WithSecondaryKey(1, []byte(secondaryKey1(pkey))), + ) + require.NoError(t, err) + } + deleteWithSecondaryKeys := func(t *testing.T, pkey string) { + err := bucket.Delete( + []byte(pkey), + WithSecondaryKey(0, []byte(secondaryKey0(pkey))), + WithSecondaryKey(1, []byte(secondaryKey1(pkey))), + ) + require.NoError(t, err) + } + + t.Run("segment 1", func(t *testing.T) { + putWithSecondaryKeys(t, "key101_created1", "created") + putWithSecondaryKeys(t, "key102_updated2", "created") + putWithSecondaryKeys(t, "key103_updated3", "created") + putWithSecondaryKeys(t, "key104_updated4", "created") + putWithSecondaryKeys(t, "key105_updated5", "created") + putWithSecondaryKeys(t, "key106_deleted2", "created") + putWithSecondaryKeys(t, 
"key107_deleted3", "created") + putWithSecondaryKeys(t, "key108_deleted4", "created") + putWithSecondaryKeys(t, "key109_deleted5", "created") + putWithSecondaryKeys(t, "key110_updated2_deleted3", "created") + putWithSecondaryKeys(t, "key111_updated3_deleted4", "created") + putWithSecondaryKeys(t, "key112_updated4_deleted5", "created") + putWithSecondaryKeys(t, "key113_deleted2_updated3", "created") + putWithSecondaryKeys(t, "key114_deleted3_updated4", "created") + putWithSecondaryKeys(t, "key115_deleted4_updated5", "created") + + require.NoError(t, bucket.FlushAndSwitch()) + }) + + t.Run("segment 2", func(t *testing.T) { + putWithSecondaryKeys(t, "key201_created2", "created") + putWithSecondaryKeys(t, "key202_updated3", "created") + putWithSecondaryKeys(t, "key203_updated4", "created") + putWithSecondaryKeys(t, "key204_updated5", "created") + putWithSecondaryKeys(t, "key205_deleted3", "created") + putWithSecondaryKeys(t, "key206_deleted4", "created") + putWithSecondaryKeys(t, "key207_deleted5", "created") + putWithSecondaryKeys(t, "key208_updated3_deleted4", "created") + putWithSecondaryKeys(t, "key209_updated4_deleted5", "created") + putWithSecondaryKeys(t, "key210_deleted3_updated4", "created") + putWithSecondaryKeys(t, "key211_deleted4_updated5", "created") + + putWithSecondaryKeys(t, "key102_updated2", "updated") + putWithSecondaryKeys(t, "key110_updated2_deleted3", "updated") + + deleteWithSecondaryKeys(t, "key106_deleted2") + deleteWithSecondaryKeys(t, "key113_deleted2_updated3") + + require.NoError(t, bucket.FlushAndSwitch()) + }) + + t.Run("segment 3", func(t *testing.T) { + putWithSecondaryKeys(t, "key301_created3", "created") + putWithSecondaryKeys(t, "key302_updated4", "created") + putWithSecondaryKeys(t, "key303_updated5", "created") + putWithSecondaryKeys(t, "key304_deleted4", "created") + putWithSecondaryKeys(t, "key305_deleted5", "created") + putWithSecondaryKeys(t, "key306_updated4_deleted5", "created") + putWithSecondaryKeys(t, 
"key307_deleted4_updated5", "created") + + putWithSecondaryKeys(t, "key103_updated3", "updated") + putWithSecondaryKeys(t, "key111_updated3_deleted4", "updated") + putWithSecondaryKeys(t, "key113_deleted2_updated3", "updated") + putWithSecondaryKeys(t, "key202_updated3", "updated") + putWithSecondaryKeys(t, "key208_updated3_deleted4", "updated") + + deleteWithSecondaryKeys(t, "key107_deleted3") + deleteWithSecondaryKeys(t, "key110_updated2_deleted3") + deleteWithSecondaryKeys(t, "key114_deleted3_updated4") + deleteWithSecondaryKeys(t, "key205_deleted3") + deleteWithSecondaryKeys(t, "key210_deleted3_updated4") + + require.NoError(t, bucket.FlushAndSwitch()) + }) + + t.Run("segment 4", func(t *testing.T) { + putWithSecondaryKeys(t, "key401_created4", "created") + putWithSecondaryKeys(t, "key402_updated5", "created") + putWithSecondaryKeys(t, "key403_deleted5", "created") + + putWithSecondaryKeys(t, "key104_updated4", "updated") + putWithSecondaryKeys(t, "key112_updated4_deleted5", "updated") + putWithSecondaryKeys(t, "key114_deleted3_updated4", "updated") + putWithSecondaryKeys(t, "key203_updated4", "updated") + putWithSecondaryKeys(t, "key209_updated4_deleted5", "updated") + putWithSecondaryKeys(t, "key210_deleted3_updated4", "updated") + putWithSecondaryKeys(t, "key302_updated4", "updated") + putWithSecondaryKeys(t, "key306_updated4_deleted5", "updated") + + deleteWithSecondaryKeys(t, "key108_deleted4") + deleteWithSecondaryKeys(t, "key111_updated3_deleted4") + deleteWithSecondaryKeys(t, "key115_deleted4_updated5") + deleteWithSecondaryKeys(t, "key206_deleted4") + deleteWithSecondaryKeys(t, "key208_updated3_deleted4") + deleteWithSecondaryKeys(t, "key211_deleted4_updated5") + deleteWithSecondaryKeys(t, "key304_deleted4") + deleteWithSecondaryKeys(t, "key307_deleted4_updated5") + + require.NoError(t, bucket.FlushAndSwitch()) + }) + + t.Run("segment 5", func(t *testing.T) { + putWithSecondaryKeys(t, "key501_created5", "created") + + putWithSecondaryKeys(t, 
"key105_updated5", "updated") + putWithSecondaryKeys(t, "key115_deleted4_updated5", "updated") + putWithSecondaryKeys(t, "key204_updated5", "updated") + putWithSecondaryKeys(t, "key211_deleted4_updated5", "updated") + putWithSecondaryKeys(t, "key303_updated5", "updated") + putWithSecondaryKeys(t, "key307_deleted4_updated5", "updated") + putWithSecondaryKeys(t, "key402_updated5", "updated") + + deleteWithSecondaryKeys(t, "key109_deleted5") + deleteWithSecondaryKeys(t, "key112_updated4_deleted5") + deleteWithSecondaryKeys(t, "key207_deleted5") + deleteWithSecondaryKeys(t, "key209_updated4_deleted5") + deleteWithSecondaryKeys(t, "key305_deleted5") + deleteWithSecondaryKeys(t, "key306_updated4_deleted5") + deleteWithSecondaryKeys(t, "key403_deleted5") + + require.NoError(t, bucket.FlushAndSwitch()) + }) + }) + + t.Run("clean segments", func(t *testing.T) { + shouldAbort := func() bool { return false } + count := 5 // 5 segments total + + // all but last segments should be cleaned + for i := 0; i < count; i++ { + cleaned, err := bucket.disk.segmentCleaner.cleanupOnce(shouldAbort) + assert.NoError(t, err) + + if i != count-1 { + assert.True(t, cleaned) + } else { + assert.False(t, cleaned) + } + } + }) + + t.Run("verify segments' contents", func(t *testing.T) { + assertContents := func(t *testing.T, segIdx int, expected []*kvt) { + seg := bucket.disk.segments[segIdx] + cur := seg.newCursor() + + i := 0 + var n segmentReplaceNode + var err error + + for n, err = cur.firstWithAllKeys(); !errors.Is(err, lsmkv.NotFound) && i < len(expected); n, err = cur.nextWithAllKeys() { + assert.Equal(t, uint16(2), n.secondaryIndexCount) + assert.Equal(t, []byte(expected[i].pkey), n.primaryKey) + assert.Equal(t, []byte(secondaryKey0(expected[i].pkey)), []byte(n.secondaryKeys[0])) + assert.Equal(t, []byte(secondaryKey1(expected[i].pkey)), []byte(n.secondaryKeys[1])) + + if expected[i].tomb { + assert.ErrorIs(t, err, lsmkv.Deleted) + assert.Equal(t, []byte{}, n.value) + } else { + 
assert.NoError(t, err) + assert.Equal(t, []byte(expected[i].val), n.value) + } + i++ + } + assert.ErrorIs(t, err, lsmkv.NotFound, "cursor not finished") + assert.Equal(t, i, len(expected), "more entries expected") + } + + t.Run("segment 1", func(t *testing.T) { + assertContents(t, 0, []*kvt{ + {pkey: "key101_created1", val: "created"}, + }) + }) + + t.Run("segment 2", func(t *testing.T) { + assertContents(t, 1, []*kvt{ + {pkey: "key102_updated2", val: "updated"}, + {pkey: "key106_deleted2", tomb: true}, + {pkey: "key201_created2", val: "created"}, + }) + }) + + t.Run("segment 3", func(t *testing.T) { + assertContents(t, 2, []*kvt{ + {pkey: "key103_updated3", val: "updated"}, + {pkey: "key107_deleted3", tomb: true}, + {pkey: "key110_updated2_deleted3", tomb: true}, + {pkey: "key113_deleted2_updated3", val: "updated"}, + {pkey: "key202_updated3", val: "updated"}, + {pkey: "key205_deleted3", tomb: true}, + {pkey: "key301_created3", val: "created"}, + }) + }) + + t.Run("segment 4", func(t *testing.T) { + assertContents(t, 3, []*kvt{ + {pkey: "key104_updated4", val: "updated"}, + {pkey: "key108_deleted4", tomb: true}, + {pkey: "key111_updated3_deleted4", tomb: true}, + {pkey: "key114_deleted3_updated4", val: "updated"}, + {pkey: "key203_updated4", val: "updated"}, + {pkey: "key206_deleted4", tomb: true}, + {pkey: "key208_updated3_deleted4", tomb: true}, + {pkey: "key210_deleted3_updated4", val: "updated"}, + {pkey: "key302_updated4", val: "updated"}, + {pkey: "key304_deleted4", tomb: true}, + {pkey: "key401_created4", val: "created"}, + }) + }) + + t.Run("segment 5", func(t *testing.T) { + assertContents(t, 4, []*kvt{ + {pkey: "key105_updated5", val: "updated"}, + {pkey: "key109_deleted5", tomb: true}, + {pkey: "key112_updated4_deleted5", tomb: true}, + {pkey: "key115_deleted4_updated5", val: "updated"}, + {pkey: "key204_updated5", val: "updated"}, + {pkey: "key207_deleted5", tomb: true}, + {pkey: "key209_updated4_deleted5", tomb: true}, + {pkey: 
"key211_deleted4_updated5", val: "updated"}, + {pkey: "key303_updated5", val: "updated"}, + {pkey: "key305_deleted5", tomb: true}, + {pkey: "key306_updated4_deleted5", tomb: true}, + {pkey: "key307_deleted4_updated5", val: "updated"}, + {pkey: "key402_updated5", val: "updated"}, + {pkey: "key403_deleted5", tomb: true}, + {pkey: "key501_created5", val: "created"}, + }) + }) + }) + + t.Run("verify bucket's contents", func(t *testing.T) { + expected := []*kvt{ + {pkey: "key101_created1", val: "created"}, + {pkey: "key102_updated2", val: "updated"}, + {pkey: "key103_updated3", val: "updated"}, + {pkey: "key104_updated4", val: "updated"}, + {pkey: "key105_updated5", val: "updated"}, + {pkey: "key106_deleted2", tomb: true}, + {pkey: "key107_deleted3", tomb: true}, + {pkey: "key108_deleted4", tomb: true}, + {pkey: "key109_deleted5", tomb: true}, + {pkey: "key110_updated2_deleted3", tomb: true}, + {pkey: "key111_updated3_deleted4", tomb: true}, + {pkey: "key112_updated4_deleted5", tomb: true}, + {pkey: "key113_deleted2_updated3", val: "updated"}, + {pkey: "key114_deleted3_updated4", val: "updated"}, + {pkey: "key115_deleted4_updated5", val: "updated"}, + + {pkey: "key201_created2", val: "created"}, + {pkey: "key202_updated3", val: "updated"}, + {pkey: "key203_updated4", val: "updated"}, + {pkey: "key204_updated5", val: "updated"}, + {pkey: "key205_deleted3", tomb: true}, + {pkey: "key206_deleted4", tomb: true}, + {pkey: "key207_deleted5", tomb: true}, + {pkey: "key208_updated3_deleted4", tomb: true}, + {pkey: "key209_updated4_deleted5", tomb: true}, + {pkey: "key210_deleted3_updated4", val: "updated"}, + {pkey: "key211_deleted4_updated5", val: "updated"}, + + {pkey: "key301_created3", val: "created"}, + {pkey: "key302_updated4", val: "updated"}, + {pkey: "key303_updated5", val: "updated"}, + {pkey: "key304_deleted4", tomb: true}, + {pkey: "key305_deleted5", tomb: true}, + {pkey: "key306_updated4_deleted5", tomb: true}, + {pkey: "key307_deleted4_updated5", val: "updated"}, 
+ + {pkey: "key401_created4", val: "created"}, + {pkey: "key402_updated5", val: "updated"}, + {pkey: "key403_deleted5", tomb: true}, + + {pkey: "key501_created5", val: "created"}, + } + expectedExising := []*kvt{} + for i := range expected { + if !expected[i].tomb { + expectedExising = append(expectedExising, expected[i]) + } + } + + t.Run("cursor", func(t *testing.T) { + c := bucket.Cursor() + defer c.Close() + + i := 0 + for k, v := c.First(); k != nil && i < len(expectedExising); k, v = c.Next() { + assert.Equal(t, []byte(expectedExising[i].pkey), k) + assert.Equal(t, []byte(expectedExising[i].val), v) + i++ + } + assert.Equal(t, i, len(expectedExising)) + }) + + t.Run("get by primary", func(t *testing.T) { + for i := range expected { + val, err := bucket.Get([]byte(expected[i].pkey)) + + assert.NoError(t, err) + if expected[i].tomb { + assert.Nil(t, val) + } else { + assert.Equal(t, []byte(expected[i].val), val) + } + } + }) + + t.Run("get by secondary 1", func(t *testing.T) { + for i := range expected { + val, err := bucket.GetBySecondary(0, []byte(secondaryKey0(expected[i].pkey))) + + assert.NoError(t, err) + if expected[i].tomb { + assert.Nil(t, val) + } else { + assert.Equal(t, []byte(expected[i].val), val) + } + } + }) + + t.Run("get by secondary 2", func(t *testing.T) { + for i := range expected { + val, err := bucket.GetBySecondary(1, []byte(secondaryKey1(expected[i].pkey))) + + assert.NoError(t, err) + if expected[i].tomb { + assert.Nil(t, val) + } else { + assert.Equal(t, []byte(expected[i].val), val) + } + } + }) + + t.Run("net count", func(t *testing.T) { + assert.Equal(t, len(expectedExising), bucket.Count()) + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger.go new file mode 100644 index 0000000000000000000000000000000000000000..253c4d5d1cda2ed06aa29f63b3dcbe560ee9f8cf --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger.go @@ -0,0 +1,427 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "os" + "sync" + "sync/atomic" + + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/usecases/byteops" + "github.com/weaviate/weaviate/usecases/monitoring" + + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/usecases/integrity" +) + +type memtableCommitLogger interface { + writeEntry(commitType CommitType, nodeBytes []byte) error + put(node segmentReplaceNode) error + append(node segmentCollectionNode) error + add(node *roaringset.SegmentNodeList) error + walPath() string + size() int64 + flushBuffers() error + close() error + delete() error + sync() error +} + +var ( + _ memtableCommitLogger = (*lazyCommitLogger)(nil) + _ memtableCommitLogger = (*commitLogger)(nil) +) + +type lazyCommitLogger struct { + path string + strategy string + commitLogger *commitLogger + mux sync.Mutex +} + +func (cl *lazyCommitLogger) mayInitCommitLogger() error { + cl.mux.Lock() + defer cl.mux.Unlock() + + if cl.commitLogger != nil { + return nil + } + + // file does not exist yet + commitLogger, err := newCommitLogger(cl.path, cl.strategy, 0) + if err != nil { + return err + } + + cl.commitLogger = commitLogger + return nil +} + +func walPath(path string) string { + return path + ".wal" +} + +func (cl *lazyCommitLogger) walPath() string { + return walPath(cl.path) +} + +func (cl *lazyCommitLogger) writeEntry(commitType CommitType, nodeBytes []byte) error { + err := cl.mayInitCommitLogger() 
+ if err != nil { + return err + } + + return cl.commitLogger.writeEntry(commitType, nodeBytes) +} + +func (cl *lazyCommitLogger) put(node segmentReplaceNode) error { + err := cl.mayInitCommitLogger() + if err != nil { + return err + } + + return cl.commitLogger.put(node) +} + +func (cl *lazyCommitLogger) append(node segmentCollectionNode) error { + err := cl.mayInitCommitLogger() + if err != nil { + return err + } + + return cl.commitLogger.append(node) +} + +func (cl *lazyCommitLogger) add(node *roaringset.SegmentNodeList) error { + err := cl.mayInitCommitLogger() + if err != nil { + return err + } + + return cl.commitLogger.add(node) +} + +func (cl *lazyCommitLogger) size() int64 { + cl.mux.Lock() + defer cl.mux.Unlock() + + if cl.commitLogger == nil { + return 0 + } + + return cl.commitLogger.size() +} + +func (cl *lazyCommitLogger) flushBuffers() error { + cl.mux.Lock() + defer cl.mux.Unlock() + + if cl.commitLogger == nil { + return nil + } + + return cl.commitLogger.flushBuffers() +} + +func (cl *lazyCommitLogger) close() error { + cl.mux.Lock() + defer cl.mux.Unlock() + + if cl.commitLogger == nil { + return nil + } + + return cl.commitLogger.close() +} + +func (cl *lazyCommitLogger) delete() error { + cl.mux.Lock() + defer cl.mux.Unlock() + + if cl.commitLogger == nil { + return nil + } + + return cl.commitLogger.delete() +} + +func (cl *lazyCommitLogger) sync() error { + cl.mux.Lock() + defer cl.mux.Unlock() + + if cl.commitLogger == nil { + return nil + } + + return cl.commitLogger.sync() +} + +type commitLogger struct { + file *os.File + writer *bufio.Writer + n atomic.Int64 + path string + + checksumWriter integrity.ChecksumWriter + + bufNode *bytes.Buffer + tmpBuf []byte + + // e.g. 
when recovering from an existing log, we do not want to write into a + // new log again + paused bool +} + +// commit log entry data format +// --------------------------- +// | version == 0 (1byte) | +// | record (dynamic length) | +// --------------------------- + +// ------------------------------------------------------ +// | version == 1 (1byte) | +// | type (1byte) | +// | node length (4bytes) | +// | node (dynamic length) | +// | checksum (crc32 4bytes non-checksum fields so far) | +// ------------------------------------------------------ + +const CurrentCommitLogVersion uint8 = 1 + +type CommitType uint8 + +const ( + CommitTypeReplace CommitType = iota // replace strategy + + // collection strategy - this can handle all cases as updates and deletes are + // only appends in a collection strategy + CommitTypeCollection + CommitTypeRoaringSet + // new version of roaringset that stores data as a list of uint64 values, + // instead of a roaring bitmap + CommitTypeRoaringSetList +) + +func (ct CommitType) String() string { + switch ct { + case CommitTypeReplace: + return "replace" + case CommitTypeCollection: + return "collection" + case CommitTypeRoaringSet: + return "roaringset" + case CommitTypeRoaringSetList: + return "roaringsetlist" + default: + return "unknown" + } +} + +func (ct CommitType) Is(checkedCommitType CommitType) bool { + return ct == checkedCommitType +} + +func newLazyCommitLogger(path, strategy string) (*lazyCommitLogger, error) { + return &lazyCommitLogger{ + path: path, + strategy: strategy, + }, nil +} + +func newCommitLogger(path, strategy string, fileSize int64) (*commitLogger, error) { + out := &commitLogger{path: walPath(path)} + + out.n.Swap(fileSize) + + f, err := os.OpenFile(out.path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0o666) + if err != nil { + return nil, err + } + + observeWrite := monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "strategy": strategy, + "operation": "appendWAL", + }) + + out.file = f + + meteredF 
:= diskio.NewMeteredWriter(f, func(written int64) { + observeWrite.Observe(float64(written)) + }) + + out.writer = bufio.NewWriter(meteredF) + + if out.n.Load() == 0 { + out.checksumWriter = integrity.NewCRC32Writer(out.writer) + } else { + _, err = out.file.Seek(-crc32.Size, io.SeekEnd) + if err != nil { + return nil, err + } + + var checksum [crc32.Size]byte + _, err = io.ReadFull(out.file, checksum[:]) + if err != nil { + return nil, err + } + + seed := binary.BigEndian.Uint32(checksum[:]) + + out.checksumWriter = integrity.NewCRC32WriterWithSeed(out.writer, seed) + } + + out.bufNode = bytes.NewBuffer(nil) + out.tmpBuf = make([]byte, byteops.Uint8Len+byteops.Uint8Len+byteops.Uint32Len) + + return out, nil +} + +func (cl *commitLogger) walPath() string { + return cl.path +} + +func (cl *commitLogger) writeEntry(commitType CommitType, nodeBytes []byte) error { + // TODO: do we need a timestamp? if so, does it need to be a vector clock? + + rw := byteops.NewReadWriter(cl.tmpBuf) + rw.WriteByte(byte(commitType)) + rw.WriteByte(CurrentCommitLogVersion) + rw.WriteUint32(uint32(len(nodeBytes))) + + _, err := cl.checksumWriter.Write(rw.Buffer) + if err != nil { + return err + } + + _, err = cl.checksumWriter.Write(nodeBytes) + if err != nil { + return err + } + + // write record checksum directly on the writer + checksumSize, err := cl.writer.Write(cl.checksumWriter.Hash()) + if err != nil { + return err + } + + cl.n.Add(int64(1 + 1 + 4 + len(nodeBytes) + checksumSize)) + + return nil +} + +func (cl *commitLogger) put(node segmentReplaceNode) error { + if cl.paused { + return nil + } + + cl.bufNode.Reset() + + ki, err := node.KeyIndexAndWriteTo(cl.bufNode) + if err != nil { + return err + } + if len(cl.bufNode.Bytes()) != ki.ValueEnd-ki.ValueStart { + return fmt.Errorf("unexpected error, node size mismatch") + } + + return cl.writeEntry(CommitTypeReplace, cl.bufNode.Bytes()) +} + +func (cl *commitLogger) append(node segmentCollectionNode) error { + if cl.paused { + 
return nil + } + + cl.bufNode.Reset() + + ki, err := node.KeyIndexAndWriteTo(cl.bufNode) + if err != nil { + return err + } + if len(cl.bufNode.Bytes()) != ki.ValueEnd-ki.ValueStart { + return fmt.Errorf("unexpected error, node size mismatch") + } + + return cl.writeEntry(CommitTypeCollection, cl.bufNode.Bytes()) +} + +func (cl *commitLogger) add(node *roaringset.SegmentNodeList) error { + if cl.paused { + return nil + } + + cl.bufNode.Reset() + + ki, err := node.KeyIndexAndWriteTo(cl.bufNode, 0) + if err != nil { + return err + } + if len(cl.bufNode.Bytes()) != ki.ValueEnd-ki.ValueStart { + return fmt.Errorf("unexpected error, node size mismatch") + } + + return cl.writeEntry(CommitTypeRoaringSetList, cl.bufNode.Bytes()) +} + +// Size returns the amount of data that has been written since the commit +// logger was initialized. After a flush a new logger is initialized which +// automatically resets the logger. +func (cl *commitLogger) size() int64 { + return cl.n.Load() +} + +func (cl *commitLogger) sync() error { + return cl.file.Sync() +} + +func (cl *commitLogger) close() error { + if !cl.paused { + if err := cl.writer.Flush(); err != nil { + return err + } + + if err := cl.file.Sync(); err != nil { + return err + } + } + + return cl.file.Close() +} + +func (cl *commitLogger) pause() { + cl.paused = true +} + +func (cl *commitLogger) unpause() { + cl.paused = false +} + +func (cl *commitLogger) delete() error { + return os.Remove(cl.path) +} + +func (cl *commitLogger) flushBuffers() error { + err := cl.writer.Flush() + if err != nil { + return fmt.Errorf("flushing WAL %q: %w", cl.path, err) + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser.go new file mode 100644 index 0000000000000000000000000000000000000000..34d0c7d5686ac90ff546f8eff67dd5b8d2bca1ad --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "encoding/binary" + "io" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/usecases/integrity" +) + +type commitloggerParser struct { + strategy string + + reader io.Reader + checksumReader integrity.ChecksumReader + + bufNode *bytes.Buffer + + memtable *Memtable +} + +func newCommitLoggerParser(strategy string, reader io.Reader, memtable *Memtable, +) *commitloggerParser { + return &commitloggerParser{ + strategy: strategy, + reader: reader, + checksumReader: integrity.NewCRC32Reader(reader), + bufNode: bytes.NewBuffer(nil), + memtable: memtable, + } +} + +func (p *commitloggerParser) Do() error { + switch p.strategy { + case StrategyReplace: + return p.doReplace() + case StrategyMapCollection, StrategySetCollection, StrategyInverted: + return p.doCollection() + case StrategyRoaringSet: + return p.doRoaringSet() + case StrategyRoaringSetRange: + return p.doRoaringSetRange() + default: + return errors.Errorf("unknown strategy %s on commit log parse", p.strategy) + } +} + +func (p *commitloggerParser) doRecord() (r io.Reader, err error) { + var nodeLen uint32 + err = binary.Read(p.checksumReader, binary.LittleEndian, &nodeLen) + if err != nil { + return nil, errors.Wrap(err, "read commit node length") + } + + p.bufNode.Reset() + + io.CopyN(p.bufNode, p.checksumReader, int64(nodeLen)) + + // read checksum directly from the reader + var checksum [4]byte + _, err = io.ReadFull(p.reader, checksum[:]) + if err != nil { + return nil, errors.Wrap(err, "read commit checksum") + } + + // validate checksum + if !bytes.Equal(checksum[:], 
p.checksumReader.Hash()) { + return nil, errors.Wrap(ErrInvalidChecksum, "read commit entry") + } + + return p.bufNode, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser_collection.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser_collection.go new file mode 100644 index 0000000000000000000000000000000000000000..f28fda94fbf634dbf6f6e0ea9a4460097d46f7c4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser_collection.go @@ -0,0 +1,115 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/pkg/errors" +) + +func (p *commitloggerParser) doCollection() error { + for { + var commitType CommitType + + err := binary.Read(p.checksumReader, binary.LittleEndian, &commitType) + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return errors.Wrap(err, "read commit type") + } + + if !CommitTypeCollection.Is(commitType) { + return errors.Errorf("found a %s commit on a collection bucket", commitType.String()) + } + + var version uint8 + + err = binary.Read(p.checksumReader, binary.LittleEndian, &version) + if err != nil { + return errors.Wrap(err, "read commit version") + } + + switch version { + case 0: + { + err = p.parseCollectionNodeV0() + } + case 1: + { + err = p.parseCollectionNodeV1() + } + default: + { + return fmt.Errorf("unsupported commit version %d", version) + } + } + if err != nil { + return err + } + } + + return nil +} + +func (p *commitloggerParser) parseCollectionNodeV0() error { + return p.parseCollectionNode(p.reader) +} + +func (p *commitloggerParser) parseCollectionNodeV1() error { + reader, err := 
p.doRecord() + if err != nil { + return err + } + + return p.parseCollectionNode(reader) +} + +func (p *commitloggerParser) parseCollectionNode(reader io.Reader) error { + n, err := ParseCollectionNode(reader) + if err != nil { + return err + } + + // Commit log nodes are the same for MapCollection and Inverted strategies + if p.strategy == StrategyMapCollection || p.strategy == StrategyInverted { + return p.parseMapNode(n) + } + + return p.memtable.append(n.primaryKey, n.values) +} + +func (p *commitloggerParser) parseMapNode(n segmentCollectionNode) error { + for _, val := range n.values { + mp := MapPair{} + if err := mp.FromBytes(val.value, false); err != nil { + return err + } + mp.Tombstone = val.tombstone + + if p.memtable.strategy == StrategyInverted && val.tombstone { + docID := binary.BigEndian.Uint64(val.value) + if err := p.memtable.SetTombstone(docID); err != nil { + return err + } + } + + if err := p.memtable.appendMapSorted(n.primaryKey, mp); err != nil { + return err + } + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser_replace.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser_replace.go new file mode 100644 index 0000000000000000000000000000000000000000..966b501c5bde82bcf9182d221ffe06b3f269f986 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser_replace.go @@ -0,0 +1,123 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/pkg/errors" +) + +// doReplace parsers all entries into a cache for deduplication first and only +// imports unique entries into the actual memtable as a final step. 
+func (p *commitloggerParser) doReplace() error { + nodeCache := make(map[string]segmentReplaceNode) + + var errWhileParsing error + + for { + var commitType CommitType + + err := binary.Read(p.checksumReader, binary.LittleEndian, &commitType) + if errors.Is(err, io.EOF) { + break + } + if err != nil { + errWhileParsing = errors.Wrap(err, "read commit type") + break + } + if !CommitTypeReplace.Is(commitType) { + return errors.Errorf("found a %s commit on a replace bucket", commitType.String()) + } + + var version uint8 + + err = binary.Read(p.checksumReader, binary.LittleEndian, &version) + if err != nil { + errWhileParsing = errors.Wrap(err, "read commit version") + break + } + + switch version { + case 0: + { + err = p.doReplaceRecordV0(nodeCache) + } + case 1: + { + err = p.doReplaceRecordV1(nodeCache) + } + default: + { + return fmt.Errorf("unsupported commit version %d", version) + } + } + if err != nil { + errWhileParsing = err + break + } + } + + for _, node := range nodeCache { + var opts []SecondaryKeyOption + if p.memtable.secondaryIndices > 0 { + for i, secKey := range node.secondaryKeys { + opts = append(opts, WithSecondaryKey(i, secKey)) + } + } + if node.tombstone { + p.memtable.setTombstone(node.primaryKey, opts...) + } else { + p.memtable.put(node.primaryKey, node.value, opts...) + } + } + + return errWhileParsing +} + +func (p *commitloggerParser) doReplaceRecordV0(nodeCache map[string]segmentReplaceNode) error { + return p.parseReplaceNode(p.reader, nodeCache) +} + +func (p *commitloggerParser) doReplaceRecordV1(nodeCache map[string]segmentReplaceNode) error { + reader, err := p.doRecord() + if err != nil { + return err + } + + return p.parseReplaceNode(reader, nodeCache) +} + +// parseReplaceNode only parses into the deduplication cache, not into the +// final memtable yet. A second step is required to parse from the cache into +// the actual memtable. 
+func (p *commitloggerParser) parseReplaceNode(r io.Reader, nodeCache map[string]segmentReplaceNode) error { + n, err := ParseReplaceNode(r, p.memtable.secondaryIndices) + if err != nil { + return err + } + + if !n.tombstone { + nodeCache[string(n.primaryKey)] = n + } else { + if existing, ok := nodeCache[string(n.primaryKey)]; ok { + existing.tombstone = true + nodeCache[string(n.primaryKey)] = existing + } else { + nodeCache[string(n.primaryKey)] = n + } + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser_roaring_set.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser_roaring_set.go new file mode 100644 index 0000000000000000000000000000000000000000..8eb5d21fdf56a0444a9776bf980371ecdb9e5703 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser_roaring_set.go @@ -0,0 +1,141 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" +) + +func (p *commitloggerParser) doRoaringSet() error { + prs := &commitlogParserRoaringSet{ + parser: p, + consume: p.memtable.roaringSetAddRemoveSlices, + } + + return prs.parse() +} + +type commitlogParserRoaringSet struct { + parser *commitloggerParser + consume func(key []byte, additions, deletions []uint64) error +} + +func (prs *commitlogParserRoaringSet) parse() error { + for { + var commitType CommitType + + err := binary.Read(prs.parser.checksumReader, binary.LittleEndian, &commitType) + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return errors.Wrap(err, "read commit type") + } + + if !CommitTypeRoaringSet.Is(commitType) && !CommitTypeRoaringSetList.Is(commitType) { + return errors.Errorf("found a %s commit on a roaringset bucket", commitType.String()) + } + + var version uint8 + + err = binary.Read(prs.parser.checksumReader, binary.LittleEndian, &version) + if err != nil { + return errors.Wrap(err, "read commit version") + } + + switch version { + case 0: + { + err = prs.parseNodeV0() + } + case 1: + { + err = prs.parseNodeV1(commitType) + } + default: + { + return fmt.Errorf("unsupported commit version %d", version) + } + } + if err != nil { + return err + } + } + + return nil +} + +func (prs *commitlogParserRoaringSet) parseNodeV0() error { + return prs.parseNode(prs.parser.reader) +} + +func (prs *commitlogParserRoaringSet) parseNodeV1(commitType CommitType) error { + reader, err := prs.parser.doRecord() + if err != nil { + return err + } + if commitType == CommitTypeRoaringSet { + return prs.parseNode(reader) + } else { + return prs.parseNodeList(reader) + } +} + +func (prs *commitlogParserRoaringSet) parseNode(reader io.Reader) error { + lenBuf := make([]byte, 8) + if _, err := io.ReadFull(reader, lenBuf); err != nil { + return 
errors.Wrap(err, "read segment len") + } + segmentLen := binary.LittleEndian.Uint64(lenBuf) + + segBuf := make([]byte, segmentLen) + copy(segBuf, lenBuf) + if _, err := io.ReadFull(reader, segBuf[8:]); err != nil { + return errors.Wrap(err, "read segment contents") + } + + segment := roaringset.NewSegmentNodeFromBuffer(segBuf) + key := segment.PrimaryKey() + + if err := prs.consume(key, segment.Additions().ToArray(), segment.Deletions().ToArray()); err != nil { + return fmt.Errorf("consume segment additions/deletions: %w", err) + } + + return nil +} + +func (prs *commitlogParserRoaringSet) parseNodeList(reader io.Reader) error { + lenBuf := make([]byte, 8) + if _, err := io.ReadFull(reader, lenBuf); err != nil { + return errors.Wrap(err, "read segment len") + } + segmentLen := binary.LittleEndian.Uint64(lenBuf) + + segBuf := make([]byte, segmentLen) + copy(segBuf, lenBuf) + if _, err := io.ReadFull(reader, segBuf[8:]); err != nil { + return errors.Wrap(err, "read segment contents") + } + + segment := roaringset.NewSegmentNodeListFromBuffer(segBuf) + key := segment.PrimaryKey() + if err := prs.consume(key, segment.Additions(), segment.Deletions()); err != nil { + return errors.Wrap(err, "add/remove bitmaps") + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser_roaring_set_range.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser_roaring_set_range.go new file mode 100644 index 0000000000000000000000000000000000000000..7b52932cf040063c498233914e7660741e1467e3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_parser_roaring_set_range.go @@ -0,0 +1,33 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "fmt" +) + +func (p *commitloggerParser) doRoaringSetRange() error { + prs := &commitlogParserRoaringSet{ + parser: p, + consume: func(key []byte, additions, deletions []uint64) error { + if len(key) != 8 { + return fmt.Errorf("commitloggerParser: invalid value length %d, should be 8 bytes", len(key)) + } + + return p.memtable.roaringSetRangeAddRemove(binary.BigEndian.Uint64(key), + additions, deletions) + }, + } + + return prs.parse() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_test.go new file mode 100644 index 0000000000000000000000000000000000000000..84804ac55090958b61ce36fb86e498a9c3afa02d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/commitlogger_test.go @@ -0,0 +1,39 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" +) + +func BenchmarkCommitlogWriter(b *testing.B) { + for _, val := range []int{10, 100, 1000, 10000} { + b.Run(fmt.Sprintf("%d", val), func(b *testing.B) { + cl, err := newCommitLogger(b.TempDir(), "n/a", 0) + require.NoError(b, err) + + data := make([]byte, val) + for i := 0; i < len(data); i++ { + data[i] = byte(rand.Intn(100)) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := cl.writeEntry(CommitTypeReplace, data) + require.NoError(b, err) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_bench_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_bench_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0723a16305045edc8cc5f069c95a2b48a53d1e1b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_bench_test.go @@ -0,0 +1,108 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func BenchmarkCompaction(b *testing.B) { + logger, _ := test.NewNullLogger() + ctx := context.Background() + + valuesPerSegment := 10000 + + for _, strategy := range []string{StrategyMapCollection, StrategyReplace} { + for _, pread := range []bool{true, false} { + opts := []BucketOption{ + WithStrategy(strategy), + WithKeepTombstones(true), + WithPread(pread), + } + + b.Run(fmt.Sprintf("strategy %s with pread: %t", strategy, pread), func(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + // avoid page cache by having a unique dir per run and have the content of the segments unique + b.StopTimer() + tmpDir := b.TempDir() + + bu, err := NewBucketCreator().NewBucket(ctx, tmpDir, "", logger, nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.NoError(b, err) + + for j := 0; j < valuesPerSegment; j++ { + addData(b, bu, i, j, "key", "key2", strategy) + } + require.NoError(b, bu.FlushMemtable()) + + fileTypes := getFiles(b, tmpDir) + require.Equal(b, 1, fileTypes[".db"]) + + for j := 0; j < valuesPerSegment; j++ { + addData(b, bu, i, j, "key2", "key", strategy) + } + require.NoError(b, bu.FlushMemtable()) + + fileTypes = getFiles(b, tmpDir) + require.Equal(b, 2, fileTypes[".db"]) + b.StartTimer() + + once, err := bu.disk.compactOnce() + require.NoError(b, err) + require.True(b, once) + + require.NoError(b, bu.Shutdown(ctx)) + } + }) + } + } +} + +func addData(b testing.TB, bu *Bucket, i, j int, prefixInsert, prefixDelete, strategy string) { + if strategy == StrategyReplace { + require.NoError(b, bu.Put([]byte(fmt.Sprintf("%s%d_%d", prefixInsert, j, i)), []byte(fmt.Sprintf("value%d_%d", j, i)))) + } else if strategy == StrategySetCollection { + require.NoError(b, bu.SetAdd([]byte(fmt.Sprintf("%s%d_%d", prefixInsert, j, i)), [][]byte{[]byte(fmt.Sprintf("value%d_%d", j, i))})) + } else if strategy == StrategyRoaringSet { + require.NoError(b, bu.RoaringSetAddOne([]byte(fmt.Sprintf("%s%d_%d", prefixInsert, j, i)), uint64(i+j))) + } else if strategy == StrategyMapCollection { + require.NoError(b, bu.MapSet( + []byte(fmt.Sprintf("%s%d_%d", prefixInsert, j, i)), MapPair{Key: []byte(fmt.Sprintf("%sMP%d_%d", prefixInsert, j, i)), Value: []byte(fmt.Sprintf("value%d_%d", j, i))})) + if j%10 == 0 { + require.NoError(b, bu.MapDeleteKey([]byte(fmt.Sprintf("%s%d_%d", prefixDelete, j, i)), []byte(fmt.Sprintf("%sMP%d_%d", prefixDelete, j, i)))) + } + } else if strategy == StrategyRoaringSetRange { + require.NoError(b, bu.RoaringSetRangeAdd(uint64(i+j), uint64(i+j))) + } else { + require.Fail(b, "unknown strategy %s", strategy) + } +} + +func getFiles(t testing.TB, dirName string) map[string]int { + entries, err := os.ReadDir(dirName) + require.NoError(t, err) + fileTypes := map[string]int{} + for _, entry := range 
entries { + fileTypes[filepath.Ext(entry.Name())] += 1 + } + return fileTypes +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_integration2_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_integration2_test.go new file mode 100644 index 0000000000000000000000000000000000000000..35fff013ea542abf683868170b448b6dcc8ee757 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_integration2_test.go @@ -0,0 +1,269 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "fmt" + "math/rand" + "testing" + + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func TestCompactionReplaceStrategyStraggler(t *testing.T) { + opts := []BucketOption{WithStrategy(StrategyReplace), WithCalcCountNetAdditions(true)} + size := 200 + + type kv struct { + key []byte + value []byte + delete bool + } + + var segment1 []kv + var segment2 []kv + var segment3 []kv + var expected []kv + var bucket *Bucket + + dirName := t.TempDir() + + t.Run("create test data", func(t *testing.T) { + // The test data is split into 4 scenarios evenly: + // + // 1.) created in the first segment, never touched again + // 2.) created in the first segment, updated in the second + // 3.) created in the first segment, deleted in the second + // 4.) 
not present in the first segment, created in the second + for i := 0; i < size; i++ { + key := []byte(fmt.Sprintf("key-%3d", i)) + originalValue := []byte(fmt.Sprintf("value-%3d-original", i)) + + switch i % 4 { + case 0: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + value: originalValue, + }) + + // leave this element untouched in the second segment + expected = append(expected, kv{ + key: key, + value: originalValue, + }) + case 1: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + value: originalValue, + }) + + // update in the second segment + updatedValue := []byte(fmt.Sprintf("value-%3d-updated", i)) + segment2 = append(segment2, kv{ + key: key, + value: updatedValue, + }) + // update in the third segment + updatedValue = []byte(fmt.Sprintf("value-%3d-updated-twice", i)) + segment3 = append(segment3, kv{ + key: key, + value: updatedValue, + }) + + expected = append(expected, kv{ + key: key, + value: updatedValue, + }) + case 2: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + value: originalValue, + }) + + // delete in the third segment + segment3 = append(segment3, kv{ + key: key, + delete: true, + }) + + // do not add to expected at all + + case 3: + // do not add to segment 1 + + // only add to segment 3 (first entry) + segment3 = append(segment3, kv{ + key: key, + value: originalValue, + }) + + expected = append(expected, kv{ + key: key, + value: originalValue, + }) + } + } + }) + + t.Run("shuffle the import order for each segment", func(t *testing.T) { + // this is to make sure we don't accidentally rely on the import order + rand.Shuffle(len(segment1), func(i, j int) { + segment1[i], segment1[j] = segment1[j], segment1[i] + }) + rand.Shuffle(len(segment2), func(i, j int) { + segment2[i], segment2[j] = segment2[j], segment2[i] + }) + }) + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(context.TODO(), dirName, "", nullLogger2(), nil, + 
cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("import segment 1", func(t *testing.T) { + for _, pair := range segment1 { + if !pair.delete { + err := bucket.Put(pair.key, pair.value) + require.Nil(t, err) + } else { + err := bucket.Delete(pair.key) + require.Nil(t, err) + + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("import segment 2", func(t *testing.T) { + for _, pair := range segment2 { + if !pair.delete { + err := bucket.Put(pair.key, pair.value) + require.Nil(t, err) + } else { + err := bucket.Delete(pair.key) + require.Nil(t, err) + + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("import segment 3", func(t *testing.T) { + for _, pair := range segment3 { + if !pair.delete { + err := bucket.Put(pair.key, pair.value) + require.Nil(t, err) + } else { + err := bucket.Delete(pair.key) + require.Nil(t, err) + + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.Cursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + keyCopy := copyByteSlice2(k) + valueCopy := copyByteSlice2(v) + retrieved = append(retrieved, kv{ + key: keyCopy, + value: valueCopy, + }) + } + + assert.Equal(t, expected, retrieved) + }) + + t.Run("verify count control before compaction", func(*testing.T) { + assert.Equal(t, len(expected), bucket.Count()) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + } + 
require.Nil(t, err) + }) + + t.Run("verify control after compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.Cursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + keyCopy := copyByteSlice2(k) + valueCopy := copyByteSlice2(v) + retrieved = append(retrieved, kv{ + key: keyCopy, + value: valueCopy, + }) + } + + assert.Equal(t, expected, retrieved) + }) + + t.Run("verify control using individual get operations", + func(t *testing.T) { + for _, pair := range expected { + retrieved, err := bucket.Get(pair.key) + require.NoError(t, err) + + assert.Equal(t, pair.value, retrieved) + } + }) + + t.Run("verify count after compaction", func(*testing.T) { + assert.Equal(t, len(expected), bucket.Count()) + }) +} + +func nullLogger2() logrus.FieldLogger { + log, _ := test.NewNullLogger() + return log +} + +func copyByteSlice2(src []byte) []byte { + dst := make([]byte, len(src)) + copy(dst, src) + return dst +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..490c5f5ee916a33d8d1e1d6a9a530e5e7f1c334a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_integration_test.go @@ -0,0 +1,607 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "testing" + + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" +) + +func testCtx() context.Context { + return context.Background() +} + +type bucketIntegrationTest struct { + name string + f func(context.Context, *testing.T, []BucketOption) + opts []BucketOption +} + +type bucketIntegrationTests []bucketIntegrationTest + +func (tests bucketIntegrationTests) run(ctx context.Context, t *testing.T) { + for _, test := range tests { + t.Run(fmt.Sprintf("%s_no_checksum", test.name), func(t *testing.T) { + test.opts = append(test.opts, WithSegmentsChecksumValidationEnabled(false)) + t.Run("mmap", func(t *testing.T) { + test.f(ctx, t, test.opts) + }) + t.Run("pread", func(t *testing.T) { + test.f(ctx, t, append([]BucketOption{WithPread(true)}, test.opts...)) + }) + }) + + t.Run(fmt.Sprintf("%s_checksum", test.name), func(t *testing.T) { + test.opts = append(test.opts, WithSegmentsChecksumValidationEnabled(true)) + t.Run("mmap", func(t *testing.T) { + test.f(ctx, t, test.opts) + }) + }) + } +} + +func TestCompaction(t *testing.T) { + ctx := testCtx() + tests := bucketIntegrationTests{ + { + name: "compactionReplaceStrategy", + f: func(ctx context.Context, t *testing.T, opts []BucketOption) { + compactionReplaceStrategy(ctx, t, opts, 12116, 12116) + }, + opts: []BucketOption{ + WithStrategy(StrategyReplace), WithCalcCountNetAdditions(true), + }, + }, + { + name: "compactionReplaceStrategy_KeepTombstones", + f: func(ctx context.Context, t *testing.T, opts []BucketOption) { + compactionReplaceStrategy(ctx, t, opts, 15266, 15266) + }, + opts: []BucketOption{ + 
WithStrategy(StrategyReplace), + WithKeepTombstones(true), + WithCalcCountNetAdditions(true), + }, + }, + { + name: "compactionReplaceStrategy_WithSecondaryKeys", + f: compactionReplaceStrategy_WithSecondaryKeys, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithSecondaryIndices(1), + }, + }, + { + name: "compactionReplaceStrategy_WithSecondaryKeys_KeepTombstones", + f: compactionReplaceStrategy_WithSecondaryKeys, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithSecondaryIndices(1), + WithKeepTombstones(true), + }, + }, + { + name: "compactionReplaceStrategy_RemoveUnnecessaryDeletes", + f: compactionReplaceStrategy_RemoveUnnecessaryDeletes, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + }, + }, + { + name: "compactionReplaceStrategy_RemoveUnnecessaryDeletes_KeepTombstones", + f: compactionReplaceStrategy_RemoveUnnecessaryDeletes, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithKeepTombstones(true), + }, + }, + { + name: "compactionReplaceStrategy_RemoveUnnecessaryUpdates", + f: compactionReplaceStrategy_RemoveUnnecessaryUpdates, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + }, + }, + { + name: "compactionReplaceStrategy_RemoveUnnecessaryUpdates_KeepTombstones", + f: compactionReplaceStrategy_RemoveUnnecessaryUpdates, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithKeepTombstones(true), + }, + }, + { + name: "compactionReplaceStrategy_FrequentPutDeleteOperations", + f: compactionReplaceStrategy_FrequentPutDeleteOperations, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + }, + }, + { + name: "compactionReplaceStrategy_FrequentPutDeleteOperations_KeepTombstones", + f: compactionReplaceStrategy_FrequentPutDeleteOperations, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithKeepTombstones(true), + }, + }, + { + name: "compactionReplaceStrategy_FrequentPutDeleteOperations_WithSecondaryKeys", + f: compactionReplaceStrategy_FrequentPutDeleteOperations_WithSecondaryKeys, 
+ opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithSecondaryIndices(1), + }, + }, + { + name: "compactionReplaceStrategy_FrequentPutDeleteOperations_WithSecondaryKeys_KeepTombstones", + f: compactionReplaceStrategy_FrequentPutDeleteOperations_WithSecondaryKeys, + opts: []BucketOption{ + WithStrategy(StrategyReplace), + WithSecondaryIndices(1), + WithKeepTombstones(true), + }, + }, + + { + name: "compactionSetStrategy", + f: func(ctx context.Context, t *testing.T, opts []BucketOption) { + compactionSetStrategy(ctx, t, opts, 6836, 6836) + }, + opts: []BucketOption{ + WithStrategy(StrategySetCollection), + }, + }, + { + name: "compactionSetStrategy_KeepTombstones", + f: func(ctx context.Context, t *testing.T, opts []BucketOption) { + compactionSetStrategy(ctx, t, opts, 9756, 9756) + }, + opts: []BucketOption{ + WithStrategy(StrategySetCollection), + WithKeepTombstones(true), + }, + }, + { + name: "compactionSetStrategy_RemoveUnnecessary", + f: compactionSetStrategy_RemoveUnnecessary, + opts: []BucketOption{ + WithStrategy(StrategySetCollection), + }, + }, + { + name: "compactionSetStrategy_RemoveUnnecessary_KeepTombstones", + f: compactionSetStrategy_RemoveUnnecessary, + opts: []BucketOption{ + WithStrategy(StrategySetCollection), + WithKeepTombstones(true), + }, + }, + { + name: "compactionSetStrategy_FrequentPutDeleteOperations", + f: compactionSetStrategy_FrequentPutDeleteOperations, + opts: []BucketOption{ + WithStrategy(StrategySetCollection), + }, + }, + { + name: "compactionSetStrategy_FrequentPutDeleteOperations_KeepTombstones", + f: compactionSetStrategy_FrequentPutDeleteOperations, + opts: []BucketOption{ + WithStrategy(StrategySetCollection), + WithKeepTombstones(true), + }, + }, + + { + name: "compactionMapStrategy", + f: func(ctx context.Context, t *testing.T, opts []BucketOption) { + compactionMapStrategy(ctx, t, opts, 10676, 10676) + }, + opts: []BucketOption{ + WithStrategy(StrategyMapCollection), + }, + }, + { + name: 
"compactionMapStrategy_KeepTombstones", + f: func(ctx context.Context, t *testing.T, opts []BucketOption) { + compactionMapStrategy(ctx, t, opts, 13416, 13416) + }, + opts: []BucketOption{ + WithStrategy(StrategyMapCollection), + WithKeepTombstones(true), + }, + }, + { + name: "compactionMapStrategy_RemoveUnnecessary", + f: compactionMapStrategy_RemoveUnnecessary, + opts: []BucketOption{ + WithStrategy(StrategyMapCollection), + }, + }, + { + name: "compactionMapStrategy_HugeEntries", + f: compactionMapStrategy_HugeEntries, + opts: []BucketOption{ + WithStrategy(StrategyMapCollection), + }, + }, + { + name: "compactionMapStrategy_RemoveUnnecessary_KeepTombstones", + f: compactionMapStrategy_RemoveUnnecessary, + opts: []BucketOption{ + WithStrategy(StrategyMapCollection), + WithKeepTombstones(true), + }, + }, + { + name: "compactionMapStrategy_FrequentPutDeleteOperations", + f: compactionMapStrategy_FrequentPutDeleteOperations, + opts: []BucketOption{ + WithStrategy(StrategyMapCollection), + }, + }, + { + name: "compactionMapStrategy_FrequentPutDeleteOperations_KeepTombstones", + f: compactionMapStrategy_FrequentPutDeleteOperations, + opts: []BucketOption{ + WithStrategy(StrategyMapCollection), + WithKeepTombstones(true), + }, + }, + + // RoaringSet + { + name: "compactionRoaringSetStrategy_Random", + f: compactionRoaringSetStrategy_Random, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSet), + WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()), + }, + }, + { + name: "compactionRoaringSetStrategy_Random_KeepTombstones", + f: compactionRoaringSetStrategy_Random, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSet), + WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()), + WithKeepTombstones(true), + }, + }, + { + name: "compactionRoaringSetStrategy", + f: func(ctx context.Context, t *testing.T, opts []BucketOption) { + compactionRoaringSetStrategy(ctx, t, opts, 19168, 19168) + }, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSet), + 
WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()), + }, + }, + { + name: "compactionRoaringSetStrategy_KeepTombstones", + f: func(ctx context.Context, t *testing.T, opts []BucketOption) { + compactionRoaringSetStrategy(ctx, t, opts, 29792, 29792) + }, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSet), + WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()), + WithKeepTombstones(true), + }, + }, + { + name: "compactionRoaringSetStrategy_RemoveUnnecessary", + f: compactionRoaringSetStrategy_RemoveUnnecessary, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSet), + WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()), + }, + }, + { + name: "compactionRoaringSetStrategy_RemoveUnnecessary_KeepTombstones", + f: compactionRoaringSetStrategy_RemoveUnnecessary, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSet), + WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()), + WithKeepTombstones(true), + }, + }, + { + name: "compactionRoaringSetStrategy_FrequentPutDeleteOperations", + f: compactionRoaringSetStrategy_FrequentPutDeleteOperations, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSet), + WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()), + }, + }, + { + name: "compactionRoaringSetStrategy_FrequentPutDeleteOperations_KeepTombstones", + f: compactionRoaringSetStrategy_FrequentPutDeleteOperations, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSet), + WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()), + WithKeepTombstones(true), + }, + }, + + // RoaringSetRange + { + name: "compactionRoaringSetRangeStrategy_Random", + f: compactionRoaringSetRangeStrategy_Random, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSetRange), + }, + }, + { + name: "compactionRoaringSetRangeStrategy_Random_KeepTombstones", + f: compactionRoaringSetRangeStrategy_Random, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSetRange), + WithKeepTombstones(true), + }, + }, + { + name: "compactionRoaringSetRangeStrategy", + f: func(ctx 
context.Context, t *testing.T, opts []BucketOption) { + compactionRoaringSetRangeStrategy(ctx, t, opts, 1824, 1824) + }, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSetRange), + }, + }, + { + name: "compactionRoaringSetRangeStrategy_KeepTombstones", + f: func(ctx context.Context, t *testing.T, opts []BucketOption) { + compactionRoaringSetRangeStrategy(ctx, t, opts, 2384, 2384) + }, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSetRange), + WithKeepTombstones(true), + }, + }, + { + name: "compactionRoaringSetRangeStrategy_RemoveUnnecessary", + f: compactionRoaringSetRangeStrategy_RemoveUnnecessary, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSetRange), + }, + }, + { + name: "compactionRoaringSetRangeStrategy_RemoveUnnecessary_KeepTombstones", + f: compactionRoaringSetRangeStrategy_RemoveUnnecessary, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSetRange), + WithKeepTombstones(true), + }, + }, + { + name: "compactionRoaringSetRangeStrategy_FrequentPutDeleteOperations", + f: compactionRoaringSetRangeStrategy_FrequentPutDeleteOperations, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSetRange), + }, + }, + { + name: "compactionRoaringSetRangeStrategy_FrequentPutDeleteOperations_KeepTombstones", + f: compactionRoaringSetRangeStrategy_FrequentPutDeleteOperations, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSetRange), + WithKeepTombstones(true), + }, + }, + { + name: "compactionRoaringSetRangeStrategy_BugfixOverwrittenBuffer", + f: compactionRoaringSetRangeStrategy_BugfixOverwrittenBuffer, + opts: []BucketOption{ + WithStrategy(StrategyRoaringSetRange), + }, + }, + // Inverted + { + name: "compactionInvertedStrategy", + f: func(ctx context.Context, t *testing.T, opts []BucketOption) { + compactionInvertedStrategy(ctx, t, opts, 8627, 8627) + }, + opts: []BucketOption{ + WithStrategy(StrategyInverted), + }, + }, + { + name: "compactionInvertedStrategy_KeepTombstones", + f: func(ctx context.Context, t *testing.T, opts 
[]BucketOption) { + compactionInvertedStrategy(ctx, t, opts, 8931, 8931) + }, + opts: []BucketOption{ + WithStrategy(StrategyInverted), + WithKeepTombstones(true), + }, + }, + { + name: "compactionInvertedStrategy_RemoveUnnecessary", + f: compactionInvertedStrategy_RemoveUnnecessary, + opts: []BucketOption{ + WithStrategy(StrategyInverted), + }, + }, + { + name: "compactionInvertedStrategy_RemoveUnnecessary_KeepTombstones", + f: compactionInvertedStrategy_RemoveUnnecessary, + opts: []BucketOption{ + WithStrategy(StrategyInverted), + WithKeepTombstones(true), + }, + }, + { + name: "compactionInvertedStrategy_FrequentPutDeleteOperations", + f: compactionInvertedStrategy_FrequentPutDeleteOperations, + opts: []BucketOption{ + WithStrategy(StrategyInverted), + }, + }, + { + name: "compactionInvertedStrategy_FrequentPutDeleteOperations_KeepTombstones", + f: compactionInvertedStrategy_FrequentPutDeleteOperations, + opts: []BucketOption{ + WithStrategy(StrategyInverted), + WithKeepTombstones(true), + }, + }, + } + tests.run(ctx, t) +} + +func nullLogger() logrus.FieldLogger { + log, _ := test.NewNullLogger() + return log +} + +func copyByteSlice(src []byte) []byte { + dst := make([]byte, len(src)) + copy(dst, src) + return dst +} + +func assertSingleSegmentOfSize(t *testing.T, bucket *Bucket, expectedMinSize, expectedMaxSize int64) { + files, err := bucket.ListFiles(context.Background(), bucket.dir) + require.NoError(t, err) + + dbFiles := make([]string, 0, len(files)) + for _, f := range files { + if filepath.Ext(f) == ".db" { + dbFiles = append(dbFiles, f) + } + } + require.Len(t, dbFiles, 1) + + if bucket.enableChecksumValidation { + expectedMinSize += segmentindex.ChecksumSize + expectedMaxSize += segmentindex.ChecksumSize + } + + fi, err := os.Stat(dbFiles[0]) + require.NoError(t, err) + assert.LessOrEqual(t, expectedMinSize, fi.Size()) + assert.GreaterOrEqual(t, expectedMaxSize, fi.Size()) + + assertChecksum(t, bucket) +} + +// This test is to ensure that the 
ValidateChecksum function works correctly for all segment types by: +// - running the tests that create and compact multiple segments, validating the checksums on all intermediate segments +// - checking if the size of the segment is correct (including the checksum size +// - doing some checksum corruptions to ensure that the ValidateChecksum function detects them correctly for all segment types in all possible scenarios +func assertChecksum(t *testing.T, bucket *Bucket) { + require.Len(t, bucket.disk.segments, 1) + segment := bucket.disk.segments[0].getSegment() + + if !bucket.enableChecksumValidation { + return + } + + file, err := os.Open(segment.path) + require.NoError(t, err) + contents, err := io.ReadAll(file) + require.NoError(t, err) + + header, err := segmentindex.ParseHeader(contents[:segmentindex.HeaderSize]) + require.NoError(t, err) + // corrupt header checksum + headerSize := int64(segmentindex.HeaderSize) + if segment.strategy == segmentindex.StrategyInverted { + headerSize += int64(segmentindex.HeaderInvertedSize) + } + positionsToCorrupt := []uint64{ + 1, // corrupt header + uint64(headerSize), // corrupt keys + uint64(header.IndexStart), // corrupt index + uint64(segment.Size() - 1), // corrupt checksum + } + + if segment.strategy == segmentindex.StrategyInverted { + positionsToCorrupt = append(positionsToCorrupt, uint64(headerSize-4)) // corrupt inverted header + } + + expectedErr := fmt.Errorf("invalid checksum") + // corrupt the segment at the specified positions and check that the checksum validation fails + for _, pos := range positionsToCorrupt { + brokenSegment := createBrokenSegment(t, segment, int(pos), segment.size) + file, err := os.Open(brokenSegment) + require.NoError(t, err) + segmentFile := segmentindex.NewSegmentFile(segmentindex.WithReader(file)) + err = segmentFile.ValidateChecksum(segment.size, headerSize) + require.Error(t, err) + assert.Equal(t, expectedErr.Error(), err.Error()) + } + + // special case, trim the checksum from 
the file + brokenSegment := createBrokenSegment(t, segment, int(segment.size-1), segment.size-segmentindex.ChecksumSize) + file, err = os.Open(brokenSegment) + require.NoError(t, err) + segmentFile := segmentindex.NewSegmentFile(segmentindex.WithReader(file)) + err = segmentFile.ValidateChecksum(segment.size-segmentindex.ChecksumSize, headerSize) + require.Error(t, err) + assert.Equal(t, expectedErr.Error(), err.Error()) +} + +func createBrokenSegment(t *testing.T, segment *segment, byteToCurrupt int, trimTo int64) string { + // read all bytes from the segment + buffer := make([]byte, segment.Size()) + + segment.copyNode(buffer, nodeOffset{ + start: 0, + end: uint64(segment.Size()), + }) + + // corrupt a byte by inverting it + buffer[byteToCurrupt] = ^buffer[byteToCurrupt] + // write the corrupted bytes back to the segment + file, err := os.OpenFile(segment.path+".broken", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644) + require.NoError(t, err) + _, err = file.Write(buffer[:trimTo]) + require.NoError(t, err) + err = file.Close() + require.NoError(t, err) + // return the path to the broken segment + return segment.path + ".broken" +} + +func assertSecondSegmentOfSize(t *testing.T, bucket *Bucket, expectedMinSize, expectedMaxSize int64) { + files, err := bucket.ListFiles(context.Background(), bucket.dir) + require.NoError(t, err) + + dbFiles := make([]string, 0, len(files)) + for _, f := range files { + if filepath.Ext(f) == ".db" { + dbFiles = append(dbFiles, f) + } + } + require.Len(t, dbFiles, 2) + + fi, err := os.Stat(dbFiles[1]) + require.NoError(t, err) + assert.LessOrEqual(t, expectedMinSize, fi.Size()) + assert.GreaterOrEqual(t, expectedMaxSize+segmentindex.ChecksumSize, fi.Size()) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_map_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_map_integration_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..8159440fc6d8206cf074ec822f7bbd1b6eebc4bd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_map_integration_test.go @@ -0,0 +1,679 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package lsmkv + +import ( + "bytes" + "context" + "fmt" + "math/rand" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func compactionMapStrategy(ctx context.Context, t *testing.T, opts []BucketOption, + expectedMinSize, expectedMaxSize int64, +) { + size := 100 + + type kv struct { + key []byte + values []MapPair + } + + // this segment is not part of the merge, but might still play a role in + // overall results. For example if one of the later segments has a tombstone + // for it + var previous1 []kv + var previous2 []kv + + var segment1 []kv + var segment2 []kv + var expected []kv + var bucket *Bucket + + dirName := t.TempDir() + + t.Run("create test data", func(t *testing.T) { + // The test data is split into 4 scenarios evenly: + // + // 0.) created in the first segment, never touched again + // 1.) created in the first segment, appended to it in the second + // 2.) created in the first segment, first element updated in the second + // 3.) created in the first segment, second element updated in the second + // 4.) created in the first segment, first element deleted in the second + // 5.) created in the first segment, second element deleted in the second + // 6.) not present in the first segment, created in the second + // 7.) present in an unrelated previous segment, deleted in the first + // 8.) 
present in an unrelated previous segment, deleted in the second + // 9.) present in an unrelated previous segment, never touched again + for i := 0; i < size; i++ { + rowKey := []byte(fmt.Sprintf("row-%03d", i)) + + pair1 := MapPair{ + Key: []byte(fmt.Sprintf("value-%03d-01", i)), + Value: []byte(fmt.Sprintf("value-%03d-01-original", i)), + } + pair2 := MapPair{ + Key: []byte(fmt.Sprintf("value-%03d-02", i)), + Value: []byte(fmt.Sprintf("value-%03d-02-original", i)), + } + pairs := []MapPair{pair1, pair2} + + switch i % 10 { + case 0: + // add to segment 1 + segment1 = append(segment1, kv{ + key: rowKey, + values: pairs[:1], + }) + + // leave this element untouched in the second segment + expected = append(expected, kv{ + key: rowKey, + values: pairs[:1], + }) + case 1: + // add to segment 1 + segment1 = append(segment1, kv{ + key: rowKey, + values: pairs[:1], + }) + + // add extra pair in the second segment + segment2 = append(segment2, kv{ + key: rowKey, + values: pairs[1:2], + }) + + expected = append(expected, kv{ + key: rowKey, + values: pairs, + }) + case 2: + // add both to segment 1 + segment1 = append(segment1, kv{ + key: rowKey, + values: pairs, + }) + + // update first key in the second segment + updated := pair1 + updated.Value = []byte("updated") + + segment2 = append(segment2, kv{ + key: rowKey, + values: []MapPair{updated}, + }) + + expected = append(expected, kv{ + key: rowKey, + values: []MapPair{pair2, updated}, + }) + + case 3: + // add both to segment 1 + segment1 = append(segment1, kv{ + key: rowKey, + values: pairs, + }) + + // update first key in the second segment + updated := pair2 + updated.Value = []byte("updated") + + segment2 = append(segment2, kv{ + key: rowKey, + values: []MapPair{updated}, + }) + + expected = append(expected, kv{ + key: rowKey, + values: []MapPair{pair1, updated}, + }) + + case 4: + // add both to segment 1 + segment1 = append(segment1, kv{ + key: rowKey, + values: pairs, + }) + + // delete first key in the second 
segment + updated := pair1 + updated.Value = nil + updated.Tombstone = true + + segment2 = append(segment2, kv{ + key: rowKey, + values: []MapPair{updated}, + }) + + expected = append(expected, kv{ + key: rowKey, + values: []MapPair{pair2}, + }) + + case 5: + // add both to segment 1 + segment1 = append(segment1, kv{ + key: rowKey, + values: pairs, + }) + + // delete second key in the second segment + updated := pair2 + updated.Value = nil + updated.Tombstone = true + + segment2 = append(segment2, kv{ + key: rowKey, + values: []MapPair{updated}, + }) + + expected = append(expected, kv{ + key: rowKey, + values: []MapPair{pair1}, + }) + + case 6: + // do not add to segment 2 + + // only add to segment 2 (first entry) + segment2 = append(segment2, kv{ + key: rowKey, + values: pairs, + }) + + expected = append(expected, kv{ + key: rowKey, + values: pairs, + }) + + case 7: + // only part of a previous segment, which is not part of the merge + previous1 = append(previous1, kv{ + key: rowKey, + values: pairs[:1], + }) + previous2 = append(previous2, kv{ + key: rowKey, + values: pairs[1:], + }) + + // delete in segment 1 + deleted1 := pair1 + deleted1.Value = nil + deleted1.Tombstone = true + + deleted2 := pair2 + deleted2.Value = nil + deleted2.Tombstone = true + + segment1 = append(segment1, kv{ + key: rowKey, + values: []MapPair{deleted1}, + }) + segment1 = append(segment1, kv{ + key: rowKey, + values: []MapPair{deleted2}, + }) + + // should not have any values in expected at all, not even a key + + case 8: + // only part of a previous segment, which is not part of the merge + previous1 = append(previous1, kv{ + key: rowKey, + values: pairs[:1], + }) + previous2 = append(previous2, kv{ + key: rowKey, + values: pairs[1:], + }) + + // delete in segment 1 + deleted1 := pair1 + deleted1.Value = nil + deleted1.Tombstone = true + + deleted2 := pair2 + deleted2.Value = nil + deleted2.Tombstone = true + + segment2 = append(segment2, kv{ + key: rowKey, + values: 
[]MapPair{deleted1}, + }) + segment2 = append(segment2, kv{ + key: rowKey, + values: []MapPair{deleted2}, + }) + + // should not have any values in expected at all, not even a key + + case 9: + // only part of a previous segment + previous1 = append(previous1, kv{ + key: rowKey, + values: pairs[:1], + }) + previous2 = append(previous2, kv{ + key: rowKey, + values: pairs[1:], + }) + + expected = append(expected, kv{ + key: rowKey, + values: pairs, + }) + } + } + }) + + t.Run("shuffle the import order for each segment", func(t *testing.T) { + // this is to make sure we don't accidentally rely on the import order + rand.Shuffle(len(segment1), func(i, j int) { + segment1[i], segment1[j] = segment1[j], segment1[i] + }) + rand.Shuffle(len(segment2), func(i, j int) { + segment2[i], segment2[j] = segment2[j], segment2[i] + }) + }) + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("import and flush previous segments", func(t *testing.T) { + for _, kvs := range previous1 { + for _, pair := range kvs.values { + err := bucket.MapSet(kvs.key, pair) + require.Nil(t, err) + } + } + + require.Nil(t, bucket.FlushAndSwitch()) + + for _, kvs := range previous2 { + for _, pair := range kvs.values { + err := bucket.MapSet(kvs.key, pair) + require.Nil(t, err) + } + } + + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("import segment 1", func(t *testing.T) { + for _, kvs := range segment1 { + for _, pair := range kvs.values { + err := bucket.MapSet(kvs.key, pair) + require.Nil(t, err) + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("import segment 2", func(t *testing.T) { + for _, kvs := range segment2 { + for _, pair := range kvs.values { + err := bucket.MapSet(kvs.key, pair) + require.Nil(t, err) + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("within control make sure map keys are sorted", func(t *testing.T) { + for i := range expected { + sort.Slice(expected[i].values, func(a, b int) bool { + return bytes.Compare(expected[i].values[a].Key, expected[i].values[b].Key) < 0 + }) + } + }) + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + + for k, v := c.First(ctx); k != nil; k, v = c.Next(ctx) { + retrieved = append(retrieved, kv{ + key: k, + values: v, + }) + } + + assert.Equal(t, expected, retrieved) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + i := 0 + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + if i == 1 { + // segment1 and segment2 merged 
+ // none of them is root segment, so tombstones + // will not be removed regardless of keepTombstones setting + assertSecondSegmentOfSize(t, bucket, 11876, 11876) + } + i++ + } + require.Nil(t, err) + }) + + t.Run("verify control after compaction using a cursor", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + + for k, v := c.First(ctx); k != nil; k, v = c.Next(ctx) { + retrieved = append(retrieved, kv{ + key: k, + values: v, + }) + } + + assert.Equal(t, expected, retrieved) + assertSingleSegmentOfSize(t, bucket, expectedMinSize, expectedMaxSize) + }) + + t.Run("verify control using individual get (MapList) operations", + func(t *testing.T) { + // Previously the only verification was done using the cursor. That + // guaranteed that all pairs are present in the payload, but it did not + // guarantee the integrity of the index (DiskTree) which is used to access + // _individual_ keys. Corrupting this index is exactly what happened in + // https://github.com/weaviate/weaviate/issues/3517 + for _, pair := range expected { + retrieved, err := bucket.MapList(ctx, pair.key) + require.NoError(t, err) + + assert.Equal(t, pair.values, retrieved) + } + }) +} + +func compactionMapStrategy_HugeEntries(ctx context.Context, t *testing.T, opts []BucketOption) { + dirName := t.TempDir() + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + key := []byte("my-key") + + // test with very big values to make sure that the read cache can handle it + byteKey := make([]byte, 10000) + byteVal := make([]byte, 10000) + + for i := 0; i < len(byteKey); i++ { + byteKey[i] = byteKey[i] + 1 + byteVal[i] = byteVal[i] + 1 + } + + // first segment + require.NoError(t, b.MapSet(key, MapPair{Key: byteKey, Value: byteVal})) + require.NoError(t, b.FlushMemtable()) + + // second segment + require.NoError(t, b.MapSet(key, MapPair{Key: byteKey, Value: byteVal})) + require.NoError(t, b.FlushMemtable()) + + // compact and check that value is the same + once, err := b.disk.compactOnce() + require.NoError(t, err) + require.True(t, once) + + list, err := b.MapList(ctx, key) + require.NoError(t, err) + require.Len(t, list, 1) + + require.Equal(t, byteKey, list[0].Key) + require.Equal(t, byteVal, list[0].Value) +} + +func compactionMapStrategy_RemoveUnnecessary(ctx context.Context, t *testing.T, opts []BucketOption) { + // in this test each segment reverses the action of the previous segment so + // that in the end a lot of information is present in the individual segments + // which is no longer needed. We then verify that after all compaction this + // information is gone, thus freeing up disk space + size := 100 + + type kv struct { + key []byte + values []MapPair + } + + key := []byte("my-key") + + var bucket *Bucket + dirName := t.TempDir() + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("write segments", func(t *testing.T) { + for i := 0; i < size; i++ { + if i != 0 { + // we can only update an existing value if this isn't the first write + pair := MapPair{ + Key: []byte(fmt.Sprintf("value-%05d", i-1)), + Value: []byte(fmt.Sprintf("updated in round %d", i)), + } + err := bucket.MapSet(key, pair) + require.Nil(t, err) + } + + if i > 1 { + // we can only delete two back an existing value if this isn't the + // first or second write + pair := MapPair{ + Key: []byte(fmt.Sprintf("value-%05d", i-2)), + Tombstone: true, + } + err := bucket.MapSet(key, pair) + require.Nil(t, err) + } + + pair := MapPair{ + Key: []byte(fmt.Sprintf("value-%05d", i)), + Value: []byte("original value"), + } + err := bucket.MapSet(key, pair) + require.Nil(t, err) + + require.Nil(t, bucket.FlushAndSwitch()) + } + }) + + expected := []kv{ + { + key: key, + values: []MapPair{ + { + Key: []byte(fmt.Sprintf("value-%05d", size-2)), + Value: []byte(fmt.Sprintf("updated in round %d", size-1)), + }, + { + Key: []byte(fmt.Sprintf("value-%05d", size-1)), + Value: []byte("original value"), + }, + }, + }, + } + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + + for k, v := c.First(ctx); k != nil; k, v = c.Next(ctx) { + retrieved = append(retrieved, kv{ + key: k, + values: v, + }) + } + + assert.Equal(t, expected, retrieved) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + } + require.Nil(t, err) + }) + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + + for k, v := c.First(ctx); k != nil; k, v = 
c.Next(ctx) { + retrieved = append(retrieved, kv{ + key: k, + values: v, + }) + } + + assert.Equal(t, expected, retrieved) + }) + + t.Run("verify control using individual get (MapList) operations", + func(t *testing.T) { + // Previously the only verification was done using the cursor. That + // guaranteed that all pairs are present in the payload, but it did not + // guarantee the integrity of the index (DiskTree) which is used to access + // _individual_ keys. Corrupting this index is exactly what happened in + // https://github.com/weaviate/weaviate/issues/3517 + for _, pair := range expected { + retrieved, err := bucket.MapList(ctx, pair.key) + require.NoError(t, err) + + assert.Equal(t, pair.values, retrieved) + } + }) +} + +func compactionMapStrategy_FrequentPutDeleteOperations(ctx context.Context, t *testing.T, opts []BucketOption) { + // In this test we are testing that the compaction works well for map collection + maxSize := 10 + + key := []byte("my-key") + mapKey := []byte("value-1") + + for size := 4; size < maxSize; size++ { + t.Run(fmt.Sprintf("compact %v segments", size), func(t *testing.T) { + var bucket *Bucket + dirName := t.TempDir() + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("write segments", func(t *testing.T) { + for i := 0; i < size; i++ { + value := []byte(fmt.Sprintf("updated in round %d", i)) + pair := MapPair{Key: mapKey, Value: value} + + err := bucket.MapSet(key, pair) + require.Nil(t, err) + + if size == 5 || size == 6 { + // delete all + err = bucket.MapDeleteKey(key, mapKey) + require.Nil(t, err) + } else if i != size-1 { + // don't delete at the end + err := bucket.MapDeleteKey(key, mapKey) + require.Nil(t, err) + } + + require.Nil(t, bucket.FlushAndSwitch()) + } + }) + + t.Run("check entries before compaction", func(t *testing.T) { + res, err := bucket.MapList(ctx, key) + assert.Nil(t, err) + if size == 5 || size == 6 { + assert.Empty(t, res) + } else { + assert.Len(t, res, 1) + assert.Equal(t, false, res[0].Tombstone) + } + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + } + require.Nil(t, err) + }) + + t.Run("check entries after compaction", func(t *testing.T) { + res, err := bucket.MapList(ctx, key) + assert.Nil(t, err) + if size == 5 || size == 6 { + assert.Empty(t, res) + } else { + assert.Len(t, res, 1) + assert.Equal(t, false, res[0].Tombstone) + } + }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_replace_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_replace_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b6ee2e0040c61dfa8c47eae9c62ea76f4085d5c5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_replace_integration_test.go @@ -0,0 +1,756 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / 
|/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package lsmkv + +import ( + "context" + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func compactionReplaceStrategy(ctx context.Context, t *testing.T, opts []BucketOption, + expectedMinSize, expectedMaxSize int64, +) { + size := 200 + + type kv struct { + key []byte + value []byte + delete bool + } + + var segment1 []kv + var segment2 []kv + var expected []kv + var bucket *Bucket + + dirName := t.TempDir() + + t.Run("create test data", func(t *testing.T) { + // The test data is split into 4 scenarios evenly: + // + // 1.) created in the first segment, never touched again + // 2.) created in the first segment, updated in the second + // 3.) created in the first segment, deleted in the second + // 4.) 
not present in the first segment, created in the second + for i := 0; i < size; i++ { + key := []byte(fmt.Sprintf("key-%3d", i)) + originalValue := []byte(fmt.Sprintf("value-%3d-original", i)) + + switch i % 4 { + case 0: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + value: originalValue, + }) + + // leave this element untouched in the second segment + expected = append(expected, kv{ + key: key, + value: originalValue, + }) + case 1: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + value: originalValue, + }) + + // update in the second segment + updatedValue := []byte(fmt.Sprintf("value-%3d-updated", i)) + segment2 = append(segment2, kv{ + key: key, + value: updatedValue, + }) + + expected = append(expected, kv{ + key: key, + value: updatedValue, + }) + case 2: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + value: originalValue, + }) + + // delete in the second segment + segment2 = append(segment2, kv{ + key: key, + delete: true, + }) + + // do not add to expected at all + + case 3: + // do not add to segment 1 + + // only add to segment 2 (first entry) + segment2 = append(segment2, kv{ + key: key, + value: originalValue, + }) + + expected = append(expected, kv{ + key: key, + value: originalValue, + }) + } + } + }) + + t.Run("shuffle the import order for each segment", func(t *testing.T) { + // this is to make sure we don't accidentally rely on the import order + rand.Shuffle(len(segment1), func(i, j int) { + segment1[i], segment1[j] = segment1[j], segment1[i] + }) + rand.Shuffle(len(segment2), func(i, j int) { + segment2[i], segment2[j] = segment2[j], segment2[i] + }) + }) + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("import segment 1", func(t *testing.T) { + for _, pair := range segment1 { + if !pair.delete { + err := bucket.Put(pair.key, pair.value) + require.Nil(t, err) + } else { + err := bucket.Delete(pair.key) + require.Nil(t, err) + + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("import segment 2", func(t *testing.T) { + for _, pair := range segment2 { + if !pair.delete { + err := bucket.Put(pair.key, pair.value) + require.Nil(t, err) + } else { + err := bucket.Delete(pair.key) + require.Nil(t, err) + + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.Cursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + keyCopy := copyByteSlice(k) + valueCopy := copyByteSlice(v) + retrieved = append(retrieved, kv{ + key: keyCopy, + value: valueCopy, + }) + } + + assert.Equal(t, expected, retrieved) + }) + + t.Run("verify count control before compaction", func(*testing.T) { + assert.Equal(t, len(expected), bucket.Count()) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + } + require.Nil(t, err) + }) + + t.Run("verify control after compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.Cursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + keyCopy := copyByteSlice(k) + valueCopy := copyByteSlice(v) + retrieved = append(retrieved, kv{ + key: keyCopy, + value: valueCopy, + }) + } + + assert.Equal(t, expected, retrieved) + assertSingleSegmentOfSize(t, bucket, 
expectedMinSize, expectedMaxSize) + }) + + t.Run("verify control using individual get operations", + func(t *testing.T) { + for _, pair := range expected { + retrieved, err := bucket.Get(pair.key) + require.NoError(t, err) + + assert.Equal(t, pair.value, retrieved) + } + }) + + t.Run("verify count after compaction", func(*testing.T) { + assert.Equal(t, len(expected), bucket.Count()) + }) +} + +func compactionReplaceStrategy_WithSecondaryKeys(ctx context.Context, t *testing.T, opts []BucketOption) { + size := 4 + + type kv struct { + key []byte + value []byte + secondaryKeys [][]byte + delete bool + } + + var segment1 []kv + var segment2 []kv + var expected []kv + var expectedNotPresent []kv + var bucket *Bucket + + dirName := t.TempDir() + + t.Run("create test data", func(t *testing.T) { + // The test data is split into 4 scenarios evenly: + // + // 1.) created in the first segment, never touched again + // 2.) created in the first segment, updated in the second + // 3.) created in the first segment, deleted in the second + // 4.) 
not present in the first segment, created in the second + for i := 0; i < size; i++ { + key := []byte(fmt.Sprintf("key-%02d", i)) + secondaryKey := []byte(fmt.Sprintf("secondary-key-%02d", i)) + originalValue := []byte(fmt.Sprintf("value-%2d-original", i)) + + switch i % 4 { + case 0: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + secondaryKeys: [][]byte{secondaryKey}, + value: originalValue, + }) + + // leave this element untouched in the second segment + expected = append(expected, kv{ + key: secondaryKey, + value: originalValue, + }) + case 1: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + secondaryKeys: [][]byte{secondaryKey}, + value: originalValue, + }) + + // update in the second segment + updatedValue := []byte(fmt.Sprintf("value-%2d-updated", i)) + segment2 = append(segment2, kv{ + key: key, + secondaryKeys: [][]byte{secondaryKey}, + value: updatedValue, + }) + + expected = append(expected, kv{ + key: secondaryKey, + value: updatedValue, + }) + case 2: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + secondaryKeys: [][]byte{secondaryKey}, + value: originalValue, + }) + + // delete in the second segment + segment2 = append(segment2, kv{ + key: key, + secondaryKeys: [][]byte{secondaryKey}, + delete: true, + }) + + expectedNotPresent = append(expectedNotPresent, kv{ + key: secondaryKey, + }) + + case 3: + // do not add to segment 1 + + // only add to segment 2 (first entry) + segment2 = append(segment2, kv{ + key: key, + secondaryKeys: [][]byte{secondaryKey}, + value: originalValue, + }) + + expected = append(expected, kv{ + key: secondaryKey, + value: originalValue, + }) + } + } + }) + + t.Run("shuffle the import order for each segment", func(t *testing.T) { + // this is to make sure we don't accidentally rely on the import order + rand.Shuffle(len(segment1), func(i, j int) { + segment1[i], segment1[j] = segment1[j], segment1[i] + }) + rand.Shuffle(len(segment2), func(i, j int) { + 
segment2[i], segment2[j] = segment2[j], segment2[i] + }) + }) + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("import segment 1", func(t *testing.T) { + for _, pair := range segment1 { + if !pair.delete { + err := bucket.Put(pair.key, pair.value, + WithSecondaryKey(0, pair.secondaryKeys[0])) + require.Nil(t, err) + } else { + err := bucket.Delete(pair.key, + WithSecondaryKey(0, pair.secondaryKeys[0])) + require.Nil(t, err) + + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("import segment 2", func(t *testing.T) { + for _, pair := range segment2 { + if !pair.delete { + err := bucket.Put(pair.key, pair.value, + WithSecondaryKey(0, pair.secondaryKeys[0])) + require.Nil(t, err) + } else { + err := bucket.Delete(pair.key, + WithSecondaryKey(0, pair.secondaryKeys[0])) + require.Nil(t, err) + + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("verify control before compaction", func(t *testing.T) { + t.Run("verify the ones that should exist", func(t *testing.T) { + for _, pair := range expected { + res, err := bucket.GetBySecondary(0, pair.key) + require.Nil(t, err) + + assert.Equal(t, pair.value, res) + } + }) + + t.Run("verify the ones that should NOT exist", func(t *testing.T) { + for _, pair := range expectedNotPresent { + res, err := bucket.GetBySecondary(0, pair.key) + require.Nil(t, err) + assert.Nil(t, res) + } + }) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = 
bucket.disk.compactOnce() { + } + require.Nil(t, err) + }) + + t.Run("verify control after compaction", func(t *testing.T) { + t.Run("verify the ones that should exist", func(t *testing.T) { + for _, pair := range expected { + res, err := bucket.GetBySecondary(0, pair.key) + require.Nil(t, err) + + assert.Equal(t, pair.value, res) + } + }) + + t.Run("verify the ones that should NOT exist", func(t *testing.T) { + for _, pair := range expectedNotPresent { + res, err := bucket.GetBySecondary(0, pair.key) + require.Nil(t, err) + assert.Nil(t, res) + } + }) + }) +} + +func compactionReplaceStrategy_RemoveUnnecessaryDeletes(ctx context.Context, t *testing.T, opts []BucketOption) { + // in this test each segment reverses the action of the previous segment so + // that in the end a lot of information is present in the individual segments + // which is no longer needed. We then verify that after all compaction this + // information is gone, thus freeing up disk space + size := 100 + + type kv struct { + key []byte + value []byte + } + + key := []byte("my-key") + + var bucket *Bucket + dirName := t.TempDir() + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("write segments", func(t *testing.T) { + for i := 0; i < size; i++ { + if i != 0 { + // we can only update an existing value if this isn't the first write + err := bucket.Delete(key) + require.Nil(t, err) + } + + err := bucket.Put(key, []byte(fmt.Sprintf("set in round %d", i))) + require.Nil(t, err) + + require.Nil(t, bucket.FlushAndSwitch()) + } + }) + + expected := []kv{ + { + key: key, + value: []byte(fmt.Sprintf("set in round %d", size-1)), + }, + } + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.Cursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + retrieved = append(retrieved, kv{ + key: k, + value: v, + }) + } + + assert.Equal(t, expected, retrieved) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + } + require.Nil(t, err) + }) + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.Cursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + retrieved = append(retrieved, kv{ + key: k, + value: v, + }) + } + + assert.Equal(t, expected, retrieved) + }) +} + +func compactionReplaceStrategy_RemoveUnnecessaryUpdates(ctx context.Context, t *testing.T, opts []BucketOption) { + // in this test each segment reverses the action of the previous segment so + // that in the end a lot of information is present in the individual segments + // which is no longer needed. 
We then verify that after all compaction this + // information is gone, thus freeing up disk space + size := 100 + + type kv struct { + key []byte + value []byte + } + + key := []byte("my-key") + + var bucket *Bucket + dirName := t.TempDir() + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("write segments", func(t *testing.T) { + for i := 0; i < size; i++ { + err := bucket.Put(key, []byte(fmt.Sprintf("set in round %d", i))) + require.Nil(t, err) + + require.Nil(t, bucket.FlushAndSwitch()) + } + }) + + expected := []kv{ + { + key: key, + value: []byte(fmt.Sprintf("set in round %d", size-1)), + }, + } + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.Cursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + retrieved = append(retrieved, kv{ + key: k, + value: v, + }) + } + + assert.Equal(t, expected, retrieved) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + } + require.Nil(t, err) + }) + + t.Run("verify control after compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.Cursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + retrieved = append(retrieved, kv{ + key: k, + value: v, + }) + } + + assert.Equal(t, expected, retrieved) + }) +} + +func compactionReplaceStrategy_FrequentPutDeleteOperations(ctx context.Context, t *testing.T, opts []BucketOption) { + // In this test we are testing that the compaction doesn't make the object to disappear + // We are 
creating even number of segments in which first we create an object + // then we in the next segment with delete it and we do this operation in loop + // we make sure that the last operation done in the last segment is create object operation + // In this situation after the compaction the object has to exist + size := 100 + + key := []byte("my-key") + + var bucket *Bucket + dirName := t.TempDir() + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("write segments, leave the last segment with value", func(t *testing.T) { + for i := 0; i < size; i++ { + err := bucket.Put(key, []byte(fmt.Sprintf("set in round %d", i))) + require.Nil(t, err) + + if i != size-1 { + // don't delete from the last segment + err := bucket.Delete(key) + require.Nil(t, err) + } + + require.Nil(t, bucket.FlushAndSwitch()) + } + }) + + t.Run("verify that the object exists before compaction", func(t *testing.T) { + res, err := bucket.Get(key) + assert.Nil(t, err) + assert.NotNil(t, res) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + } + require.Nil(t, err) + }) + + t.Run("verify that the object still exists after compaction", func(t *testing.T) { + res, err := bucket.Get(key) + assert.Nil(t, err) + assert.NotNil(t, res) + }) +} + +func compactionReplaceStrategy_FrequentPutDeleteOperations_WithSecondaryKeys(ctx context.Context, t *testing.T, opts []BucketOption) { + // In this test we are testing that the compaction doesn't make the object to disappear + // We are creating even number of segments in which first 
we create an object + // then we in the next segment with delete it and we do this operation in loop + // we make sure that the last operation done in the last segment is create object operation + // We are doing this for 4 to 10 segments scenarios, without the fix for firstWithAllKeys + // cursor method that now sets the nextOffset properly, we got discrepancies + // after compaction on 4 and 8 segments scenario. + maxSize := 10 + + for size := 4; size < maxSize; size++ { + t.Run(fmt.Sprintf("compact %v segments", size), func(t *testing.T) { + var bucket *Bucket + + key := []byte("key-original") + keySecondary := []byte(fmt.Sprintf("secondary-key-%02d", size-1)) + + dirName := t.TempDir() + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("write segments, leave the last segment with value", func(t *testing.T) { + for i := 0; i < size; i++ { + secondaryKey := []byte(fmt.Sprintf("secondary-key-%02d", i)) + originalValue := []byte(fmt.Sprintf("value-%2d-original", i)) + + err := bucket.Put(key, originalValue, WithSecondaryKey(0, secondaryKey)) + require.Nil(t, err) + + if i != size-1 { + // don't delete from the last segment + err := bucket.Delete(key, WithSecondaryKey(0, secondaryKey)) + require.Nil(t, err) + } + + require.Nil(t, bucket.FlushAndSwitch()) + } + }) + + t.Run("verify that the object exists before compaction", func(t *testing.T) { + res, err := bucket.GetBySecondary(0, keySecondary) + assert.Nil(t, err) + assert.NotNil(t, res) + res, err = bucket.Get(key) + assert.Nil(t, err) + assert.NotNil(t, res) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = 
bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + } + require.Nil(t, err) + }) + + t.Run("verify that the object still exists after compaction", func(t *testing.T) { + res, err := bucket.GetBySecondary(0, keySecondary) + assert.Nil(t, err) + assert.NotNil(t, res) + res, err = bucket.Get(key) + assert.Nil(t, err) + assert.NotNil(t, res) + }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_roaring_set_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_roaring_set_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ae1fde623ab5563725aa3d3d82f84701e8e0765e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_roaring_set_integration_test.go @@ -0,0 +1,639 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "context" + "encoding/binary" + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func compactionRoaringSetStrategy_Random(ctx context.Context, t *testing.T, opts []BucketOption) { + maxID := uint64(100) + maxElement := uint64(1e6) + iterations := uint64(100_000) + + deleteRatio := 0.2 // 20% of all operations will be deletes, 80% additions + flushChance := 0.001 // on average one flush per 1000 iterations + + r := getRandomSeed() + + instr := generateRandomInstructions(r, maxID, maxElement, iterations, deleteRatio) + control := controlFromInstructions(instr, maxID) + + b, err := NewBucketCreator().NewBucket(ctx, t.TempDir(), "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + defer b.Shutdown(testCtx()) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + compactions := 0 + for _, inst := range instr { + key := make([]byte, 8) + binary.LittleEndian.PutUint64(key, inst.key) + if inst.addition { + b.RoaringSetAddOne(key, inst.element) + } else { + b.RoaringSetRemoveOne(key, inst.element) + } + + if r.Float64() < flushChance { + require.Nil(t, b.FlushAndSwitch()) + + var compacted bool + var err error + for compacted, err = b.disk.compactOnce(); err == nil && compacted; compacted, err = b.disk.compactOnce() { + compactions++ + } + require.Nil(t, err) + } + + } + + // this is a sanity check to make sure the test setup actually does what we + // want. With the current setup, we expect on average to have ~100 + // compactions. It would be extremely unexpected to have fewer than 25. 
+ assert.Greater(t, compactions, 25) + + verifyBucketAgainstControl(t, b, control) +} + +func verifyBucketAgainstControl(t *testing.T, b *Bucket, control []*sroar.Bitmap) { + // This test was built before the bucket had cursors, so we are retrieving + // each key individually, rather than cursing over the entire bucket. + // However, this is also good for isolation purposes, this test tests + // compactions, not cursors. + + for i, controlBM := range control { + key := make([]byte, 8) + binary.LittleEndian.PutUint64(key, uint64(i)) + + func() { + actual, release, err := b.RoaringSetGet(key) + require.NoError(t, err) + defer release() + + assert.Equal(t, controlBM.ToArray(), actual.ToArray()) + }() + } +} + +type roaringSetInstruction struct { + // is a []byte in reality, but makes the test setup easier if we pretent + // its an int + key uint64 + element uint64 + + // true=addition, false=deletion + addition bool +} + +func generateRandomInstructions(r *rand.Rand, maxID, maxElement, iterations uint64, + deleteRatio float64, +) []roaringSetInstruction { + instr := make([]roaringSetInstruction, iterations) + + for i := range instr { + instr[i].key = uint64(r.Intn(int(maxID))) + instr[i].element = uint64(r.Intn(int(maxElement))) + + if r.Float64() > deleteRatio { + instr[i].addition = true + } else { + instr[i].addition = false + } + } + + return instr +} + +func controlFromInstructions(instr []roaringSetInstruction, maxID uint64) []*sroar.Bitmap { + out := make([]*sroar.Bitmap, maxID) + for i := range out { + out[i] = sroar.NewBitmap() + } + + for _, inst := range instr { + if inst.addition { + out[inst.key].Set(inst.element) + } else { + out[inst.key].Remove(inst.element) + } + } + + return out +} + +func compactionRoaringSetStrategy(ctx context.Context, t *testing.T, opts []BucketOption, + expectedMinSize, expectedMaxSize int64, +) { + size := 100 + + type kv struct { + key []byte + additions []uint64 + deletions []uint64 + } + // this segment is not part of the 
merge, but might still play a role in + // overall results. For example if one of the later segments has a tombstone + // for it + var previous1 []kv + var previous2 []kv + + var segment1 []kv + var segment2 []kv + var expected []kv + var bucket *Bucket + + dirName := t.TempDir() + + t.Run("create test data", func(t *testing.T) { + // The test data is split into 4 scenarios evenly: + // + // 0.) created in the first segment, never touched again + // 1.) created in the first segment, appended to it in the second + // 2.) created in the first segment, first element deleted in the second + // 3.) created in the first segment, second element deleted in the second + // 4.) not present in the first segment, created in the second + // 5.) present in an unrelated previous segment, deleted in the first + // 6.) present in an unrelated previous segment, deleted in the second + // 7.) present in an unrelated previous segment, never touched again + for i := 0; i < size; i++ { + key := []byte(fmt.Sprintf("key-%02d", i)) + value1 := uint64(i) + 1 + value2 := uint64(i) + 2 + values := []uint64{value1, value2} + + switch i % 8 { + case 0: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + additions: values[:1], + }) + + // leave this element untouched in the second segment + expected = append(expected, kv{ + key: key, + additions: values[:1], + }) + + case 1: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + additions: values[:1], + }) + + // update in the second segment + segment2 = append(segment2, kv{ + key: key, + additions: values[1:], + }) + + expected = append(expected, kv{ + key: key, + additions: values, + }) + + case 2: + // add both to segment 1, delete the first + segment1 = append(segment1, kv{ + key: key, + additions: values, + }) + + // delete first element in the second segment + segment2 = append(segment2, kv{ + key: key, + deletions: values[:1], + }) + + // only the 2nd element should be left in the expected + expected = 
append(expected, kv{ + key: key, + additions: values[1:], + }) + + case 3: + // add both to segment 1, delete the second + segment1 = append(segment1, kv{ + key: key, + additions: values, + }) + + // delete second element in the second segment + segment2 = append(segment2, kv{ + key: key, + deletions: values[1:], + }) + + // only the 1st element should be left in the expected + expected = append(expected, kv{ + key: key, + additions: values[:1], + }) + + case 4: + // do not add to segment 1 + + // only add to segment 2 (first entry) + segment2 = append(segment2, kv{ + key: key, + additions: values, + }) + + expected = append(expected, kv{ + key: key, + additions: values, + }) + + case 5: + // only part of a previous segment, which is not part of the merge + previous1 = append(previous1, kv{ + key: key, + additions: values[:1], + }) + previous2 = append(previous2, kv{ + key: key, + additions: values[1:], + }) + + // delete in segment 1 + segment1 = append(segment1, kv{ + key: key, + deletions: values, + }) + + // should not have any values in expected at all, not even a key + + case 6: + // only part of a previous segment, which is not part of the merge + previous1 = append(previous1, kv{ + key: key, + additions: values[:1], + }) + previous2 = append(previous2, kv{ + key: key, + additions: values[1:], + }) + + // delete in segment 2 + segment2 = append(segment2, kv{ + key: key, + deletions: values, + }) + + // should not have any values in expected at all, not even a key + + case 7: + // part of a previous segment + previous1 = append(previous1, kv{ + key: key, + additions: values[:1], + }) + previous2 = append(previous2, kv{ + key: key, + additions: values[1:], + }) + + expected = append(expected, kv{ + key: key, + additions: values, + }) + } + } + }) + + t.Run("shuffle the import order for each segment", func(t *testing.T) { + // this is to make sure we don't accidentally rely on the import order + rand.Shuffle(len(segment1), func(i, j int) { + segment1[i], 
segment1[j] = segment1[j], segment1[i] + }) + rand.Shuffle(len(segment2), func(i, j int) { + segment2[i], segment2[j] = segment2[j], segment2[i] + }) + }) + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("import and flush previous segments", func(t *testing.T) { + for _, kv := range previous1 { + err := bucket.RoaringSetAddList(kv.key, kv.additions) + require.NoError(t, err) + } + + require.NoError(t, bucket.FlushAndSwitch()) + + for _, kv := range previous2 { + err := bucket.RoaringSetAddList(kv.key, kv.additions) + require.NoError(t, err) + } + + require.NoError(t, bucket.FlushAndSwitch()) + }) + + t.Run("import segment 1", func(t *testing.T) { + for _, kv := range segment1 { + if len(kv.additions) > 0 { + err := bucket.RoaringSetAddList(kv.key, kv.additions) + require.NoError(t, err) + } + for i := range kv.deletions { + err := bucket.RoaringSetRemoveOne(kv.key, kv.deletions[i]) + require.NoError(t, err) + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.NoError(t, bucket.FlushAndSwitch()) + }) + + t.Run("import segment 2", func(t *testing.T) { + for _, kv := range segment2 { + if len(kv.additions) > 0 { + err := bucket.RoaringSetAddList(kv.key, kv.additions) + require.NoError(t, err) + } + for i := range kv.deletions { + err := bucket.RoaringSetRemoveOne(kv.key, kv.deletions[i]) + require.NoError(t, err) + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.NoError(t, bucket.FlushAndSwitch()) + }) + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.CursorRoaringSet() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + retrieved = 
append(retrieved, kv{ + key: k, + additions: v.ToArray(), + }) + } + + assert.Equal(t, expected, retrieved) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + i := 0 + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + if i == 1 { + // segment1 and segment2 merged + // none of them is root segment, so tombstones + // will not be removed regardless of keepTombstones setting + assertSecondSegmentOfSize(t, bucket, 26768, 26768) + } + i++ + } + require.Nil(t, err) + }) + + t.Run("verify control after compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.CursorRoaringSet() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + retrieved = append(retrieved, kv{ + key: k, + additions: v.ToArray(), + }) + } + + assert.Equal(t, expected, retrieved) + assertSingleSegmentOfSize(t, bucket, expectedMinSize, expectedMaxSize) + }) +} + +func compactionRoaringSetStrategy_RemoveUnnecessary(ctx context.Context, t *testing.T, opts []BucketOption) { + // in this test each segment reverses the action of the previous segment so + // that in the end a lot of information is present in the individual segments + // which is no longer needed. We then verify that after all compaction this + // information is gone, thus freeing up disk space + size := 100 + + type kv struct { + key []byte + values []uint64 + } + + key := []byte("my-key") + + var bucket *Bucket + dirName := t.TempDir() + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("write segments", func(t *testing.T) { + for i := 0; i < size; i++ { + if i != 0 { + // we can only delete an existing value if this isn't the first write + err := bucket.RoaringSetRemoveOne(key, uint64(i)-1) + require.NoError(t, err) + } + + err := bucket.RoaringSetAddOne(key, uint64(i)) + require.NoError(t, err) + + require.NoError(t, bucket.FlushAndSwitch()) + } + }) + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + expected := []kv{ + { + key: key, + values: []uint64{uint64(size) - 1}, + }, + } + + c := bucket.CursorRoaringSet() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + retrieved = append(retrieved, kv{ + key: k, + values: v.ToArray(), + }) + } + + assert.Equal(t, expected, retrieved) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + } + require.Nil(t, err) + }) + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + expected := []kv{ + { + key: key, + values: []uint64{uint64(size) - 1}, + }, + } + + c := bucket.CursorRoaringSet() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + retrieved = append(retrieved, kv{ + key: k, + values: v.ToArray(), + }) + } + + assert.Equal(t, expected, retrieved) + }) +} + +func compactionRoaringSetStrategy_FrequentPutDeleteOperations(ctx context.Context, t *testing.T, opts []BucketOption) { + // In this test we are testing that the compaction works well for set collection + maxSize := 10 + + for size := 4; size < maxSize; size++ { + t.Run(fmt.Sprintf("compact %v segments", size), func(t *testing.T) { + var bucket *Bucket + + key := []byte("key-original") + value1 := uint64(1) + 
value2 := uint64(2) + values := []uint64{value1, value2} + + dirName := t.TempDir() + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("import and flush segments", func(t *testing.T) { + for i := 0; i < size; i++ { + err := bucket.RoaringSetAddList(key, values) + require.Nil(t, err) + + if size == 5 { + // delete all + err := bucket.RoaringSetRemoveOne(key, values[0]) + require.Nil(t, err) + err = bucket.RoaringSetRemoveOne(key, values[1]) + require.Nil(t, err) + } else if size == 6 { + // delete only one value + err := bucket.RoaringSetRemoveOne(key, values[0]) + require.Nil(t, err) + } else if i != size-1 { + // don't delete from the last segment + err := bucket.RoaringSetRemoveOne(key, values[0]) + require.Nil(t, err) + err = bucket.RoaringSetRemoveOne(key, values[1]) + require.Nil(t, err) + } + + require.Nil(t, bucket.FlushAndSwitch()) + } + }) + + t.Run("verify that objects exist before compaction", func(t *testing.T) { + res, release, err := bucket.RoaringSetGet(key) + require.NoError(t, err) + defer release() + + switch size { + case 5: + assert.Equal(t, 0, res.GetCardinality()) + case 6: + assert.Equal(t, 1, res.GetCardinality()) + default: + assert.Equal(t, 2, res.GetCardinality()) + } + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + } + require.Nil(t, err) + }) + + t.Run("verify that objects exist after compaction", func(t *testing.T) { + res, release, err := bucket.RoaringSetGet(key) + require.NoError(t, err) + defer release() + + switch size { + case 5: + assert.Equal(t, 0, 
res.GetCardinality()) + case 6: + assert.Equal(t, 1, res.GetCardinality()) + default: + assert.Equal(t, 2, res.GetCardinality()) + } + }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_roaring_set_range_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_roaring_set_range_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4e5fb374e962e269a7810d0d4473002ca2cdc438 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_roaring_set_range_integration_test.go @@ -0,0 +1,711 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "context" + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/filters" +) + +func compactionRoaringSetRangeStrategy_Random(ctx context.Context, t *testing.T, opts []BucketOption) { + maxKey := uint64(100) + maxValue := uint64(1_000_000) + iterations := uint64(100_000) + + deleteRatio := 0.2 // 20% of all operations will be deletes, 80% additions + flushChance := 0.001 // on average one flush per 1000 iterations + + r := getRandomSeed() + + instr := generateRandomRangeInstructions(r, maxKey, maxValue, iterations, deleteRatio) + control := controlFromRangeInstructions(instr, maxKey) + + b, err := NewBucketCreator().NewBucket(ctx, t.TempDir(), "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + defer b.Shutdown(testCtx()) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + compactions := 0 + for _, inst := range instr { + if inst.addition { + b.RoaringSetRangeAdd(inst.key, inst.value) + } else { + b.RoaringSetRangeRemove(inst.key, inst.value) + } + + if r.Float64() < flushChance { + require.Nil(t, b.FlushAndSwitch()) + } + } + + var compacted bool + for compacted, err = b.disk.compactOnce(); err == nil && compacted; compacted, err = b.disk.compactOnce() { + compactions++ + } + require.Nil(t, err) + + // this is a sanity check to make sure the test setup actually does what we + // want. With the current setup, we expect on average to have ~100 + // compactions. It would be extremely unexpected to have fewer than 25. + assert.Greater(t, compactions, 25) + + verifyBucketRangeAgainstControl(t, b, control) +} + +func verifyBucketRangeAgainstControl(t *testing.T, b *Bucket, control []*sroar.Bitmap) { + reader := b.ReaderRoaringSetRange() + defer reader.Close() + + for i, controlBM := range control { + func() { + actual, release, err := reader.Read(context.Background(), uint64(i), filters.OperatorEqual) + require.NoError(t, err) + defer release() + + require.Equal(t, controlBM.ToArray(), actual.ToArray(), "i = %d", i) + }() + } +} + +type roaringSetRangeInstruction struct { + key uint64 + value uint64 + + // true=addition, false=deletion + addition bool +} + +func generateRandomRangeInstructions(r *rand.Rand, maxKey, maxValue, iterations uint64, + deleteRatio float64, +) []roaringSetRangeInstruction { + instr := make([]roaringSetRangeInstruction, iterations) + + for i := range instr { + instr[i].key = uint64(r.Intn(int(maxKey))) // value + instr[i].value = uint64(r.Intn(int(maxValue))) // docId + instr[i].addition = r.Float64() > deleteRatio + } + + return instr +} + +func controlFromRangeInstructions(instr []roaringSetRangeInstruction, maxKey uint64) []*sroar.Bitmap { + unique := 
make(map[uint64]uint64) + for _, inst := range instr { + if inst.addition { + unique[inst.value] = inst.key + } else { + delete(unique, inst.value) + } + } + + out := make([]*sroar.Bitmap, maxKey) + for i := range out { + out[i] = sroar.NewBitmap() + } + for value, key := range unique { + out[key].Set(value) + } + + return out +} + +func compactionRoaringSetRangeStrategy(ctx context.Context, t *testing.T, opts []BucketOption, + expectedMinSize, expectedMaxSize int64, +) { + maxKey := uint64(100) + + type kv struct { + key uint64 + additions []uint64 + deletions []uint64 + } + // this segment is not part of the merge, but might still play a role in + // overall results. For example if one of the later segments has a tombstone + // for it + var previous1 []kv + var previous2 []kv + + var segment1 []kv + var segment2 []kv + var expected []kv + var bucket *Bucket + + dirName := t.TempDir() + + t.Run("create test data", func(t *testing.T) { + // The test data is split into 8 scenarios evenly: + // + // 0.) created in the first segment, never touched again + // 1.) created in the first segment, appended to it in the second + // 2.) created in the first segment, first element deleted in the second + // 3.) created in the first segment, second element deleted in the second + // 4.) not present in the first segment, created in the second + // 5.) present in an unrelated previous segment, deleted in the first + // 6.) present in an unrelated previous segment, deleted in the second + // 7.) 
present in an unrelated previous segment, never touched again + for k := uint64(0); k < maxKey; k++ { + key := k + values := []uint64{k + 10_000, k + 20_000} + + switch k % 8 { + case 0: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + additions: values[:1], + }) + + // leave this element untouched in the second segment + expected = append(expected, kv{ + key: key, + additions: values[:1], + }) + + case 1: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + additions: values[:1], + }) + + // update in the second segment + segment2 = append(segment2, kv{ + key: key, + additions: values[1:], + }) + + expected = append(expected, kv{ + key: key, + additions: values, + }) + + case 2: + // add both to segment 1, delete the first + segment1 = append(segment1, kv{ + key: key, + additions: values, + }) + + // delete first element in the second segment + segment2 = append(segment2, kv{ + key: key, + deletions: values[:1], + }) + + // only the 2nd element should be left in the expected + expected = append(expected, kv{ + key: key, + additions: values[1:], + }) + + case 3: + // add both to segment 1, delete the second + segment1 = append(segment1, kv{ + key: key, + additions: values, + }) + + // delete second element in the second segment + segment2 = append(segment2, kv{ + key: key, + deletions: values[1:], + }) + + // only the 1st element should be left in the expected + expected = append(expected, kv{ + key: key, + additions: values[:1], + }) + + case 4: + // do not add to segment 1 + + // only add to segment 2 (first entry) + segment2 = append(segment2, kv{ + key: key, + additions: values, + }) + + expected = append(expected, kv{ + key: key, + additions: values, + }) + + case 5: + // only part of a previous segment, which is not part of the merge + previous1 = append(previous1, kv{ + key: key, + additions: values[:1], + }) + previous2 = append(previous2, kv{ + key: key, + additions: values[1:], + }) + + // delete in segment 1 + 
segment1 = append(segment1, kv{ + key: key, + deletions: values, + }) + + // should not have any values in expected at all, not even a key + + case 6: + // only part of a previous segment, which is not part of the merge + previous1 = append(previous1, kv{ + key: key, + additions: values[:1], + }) + previous2 = append(previous2, kv{ + key: key, + additions: values[1:], + }) + + // delete in segment 2 + segment2 = append(segment2, kv{ + key: key, + deletions: values, + }) + + // should not have any values in expected at all, not even a key + + case 7: + // part of a previous segment + previous1 = append(previous1, kv{ + key: key, + additions: values[:1], + }) + previous2 = append(previous2, kv{ + key: key, + additions: values[1:], + }) + + expected = append(expected, kv{ + key: key, + additions: values, + }) + } + } + }) + + t.Run("shuffle the import order for each segment", func(t *testing.T) { + // this is to make sure we don't accidentally rely on the import order + rand.Shuffle(len(segment1), func(i, j int) { + segment1[i], segment1[j] = segment1[j], segment1[i] + }) + rand.Shuffle(len(segment2), func(i, j int) { + segment2[i], segment2[j] = segment2[j], segment2[i] + }) + }) + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("import and flush previous segments", func(t *testing.T) { + for _, kv := range previous1 { + err := bucket.RoaringSetRangeAdd(kv.key, kv.additions...) + require.NoError(t, err) + } + + require.NoError(t, bucket.FlushAndSwitch()) + + for _, kv := range previous2 { + err := bucket.RoaringSetRangeAdd(kv.key, kv.additions...) 
+ require.NoError(t, err) + } + + require.NoError(t, bucket.FlushAndSwitch()) + }) + + t.Run("import segment 1", func(t *testing.T) { + for _, kv := range segment1 { + if len(kv.additions) > 0 { + err := bucket.RoaringSetRangeAdd(kv.key, kv.additions...) + require.NoError(t, err) + } + if len(kv.deletions) > 0 { + err := bucket.RoaringSetRangeRemove(kv.key, kv.deletions...) + require.NoError(t, err) + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.NoError(t, bucket.FlushAndSwitch()) + }) + + t.Run("import segment 2", func(t *testing.T) { + for _, kv := range segment2 { + if len(kv.additions) > 0 { + err := bucket.RoaringSetRangeAdd(kv.key, kv.additions...) + require.NoError(t, err) + } + if len(kv.deletions) > 0 { + err := bucket.RoaringSetRangeRemove(kv.key, kv.deletions...) + require.NoError(t, err) + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.NoError(t, bucket.FlushAndSwitch()) + }) + + t.Run("verify control before compaction", func(t *testing.T) { + reader := bucket.ReaderRoaringSetRange() + defer reader.Close() + + var retrieved []kv + for k := uint64(0); k < maxKey; k++ { + func() { + bm, release, err := reader.Read(context.Background(), k, filters.OperatorEqual) + require.NoError(t, err) + defer release() + + if !bm.IsEmpty() { + retrieved = append(retrieved, kv{ + key: k, + additions: bm.ToArray(), + }) + } + }() + } + + assert.Equal(t, expected, retrieved) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + i := 0 + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + if i == 1 { + // segment1 and segment2 merged + // none of them is root segment, so tombstones + // will not be removed regardless of keepTombstones setting + assertSecondSegmentOfSize(t, bucket, 2256, 2256) + } + i++ + } + require.Nil(t, err) + }) + + t.Run("verify control after compaction", func(t *testing.T) { + reader 
:= bucket.ReaderRoaringSetRange() + defer reader.Close() + + var retrieved []kv + for k := uint64(0); k < maxKey; k++ { + func() { + bm, release, err := reader.Read(context.Background(), k, filters.OperatorEqual) + require.NoError(t, err) + defer release() + + if !bm.IsEmpty() { + retrieved = append(retrieved, kv{ + key: k, + additions: bm.ToArray(), + }) + } + }() + } + + assert.Equal(t, expected, retrieved) + assertSingleSegmentOfSize(t, bucket, expectedMinSize, expectedMaxSize) + }) +} + +func compactionRoaringSetRangeStrategy_RemoveUnnecessary(ctx context.Context, t *testing.T, opts []BucketOption) { + // in this test each segment reverses the action of the previous segment so + // that in the end a lot of information is present in the individual segments + // which is no longer needed. We then verify that after all compaction this + // information is gone, thus freeing up disk space + iterations := uint64(100) + key := uint64(1) + + var bucket *Bucket + dirName := t.TempDir() + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("write segments", func(t *testing.T) { + for v := uint64(0); v < iterations; v++ { + if v != 0 { + // we can only delete an existing value if this isn't the first write + err := bucket.RoaringSetRangeRemove(key, v-1) + require.NoError(t, err) + } + + err := bucket.RoaringSetRangeAdd(key, v) + require.NoError(t, err) + + require.NoError(t, bucket.FlushAndSwitch()) + } + }) + + t.Run("verify control before compaction", func(t *testing.T) { + reader := bucket.ReaderRoaringSetRange() + defer reader.Close() + + bm, release, err := reader.Read(context.Background(), key, filters.OperatorEqual) + require.NoError(t, err) + defer release() + + assert.Equal(t, []uint64{iterations - 1}, bm.ToArray()) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + } + require.Nil(t, err) + }) + + t.Run("verify control before compaction", func(t *testing.T) { + reader := bucket.ReaderRoaringSetRange() + defer reader.Close() + + bm, release, err := reader.Read(context.Background(), key, filters.OperatorEqual) + require.NoError(t, err) + defer release() + + assert.Equal(t, []uint64{iterations - 1}, bm.ToArray()) + }) +} + +func compactionRoaringSetRangeStrategy_FrequentPutDeleteOperations(ctx context.Context, t *testing.T, opts []BucketOption) { + // In this test we are testing that the compaction works well for set collection + maxSegments := 10 + + for segments := 4; segments < maxSegments; segments++ { + t.Run(fmt.Sprintf("compact %v segments", segments), func(t *testing.T) { + var bucket *Bucket + + key := uint64(1) + values := []uint64{1, 2} + + dirName := t.TempDir() + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, 
"", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("import and flush segments", func(t *testing.T) { + for segment := 0; segment < segments; segment++ { + err := bucket.RoaringSetRangeAdd(key, values...) + require.Nil(t, err) + + if segments == 5 { + // delete all + err := bucket.RoaringSetRangeRemove(key, values[0]) + require.Nil(t, err) + err = bucket.RoaringSetRangeRemove(key, values[1]) + require.Nil(t, err) + } else if segments == 6 { + // delete only one value + err := bucket.RoaringSetRangeRemove(key, values[0]) + require.Nil(t, err) + } else if segment != segments-1 { + // don't delete from the last segment + err := bucket.RoaringSetRangeRemove(key, values[0]) + require.Nil(t, err) + err = bucket.RoaringSetRangeRemove(key, values[1]) + require.Nil(t, err) + } + + require.Nil(t, bucket.FlushAndSwitch()) + } + }) + + t.Run("verify that objects exist before compaction", func(t *testing.T) { + reader := bucket.ReaderRoaringSetRange() + defer reader.Close() + + bm, release, err := reader.Read(context.Background(), key, filters.OperatorEqual) + require.NoError(t, err) + defer release() + + switch segments { + case 5: + assert.Equal(t, 0, bm.GetCardinality()) + case 6: + assert.Equal(t, 1, bm.GetCardinality()) + default: + assert.Equal(t, 2, bm.GetCardinality()) + } + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + } + require.Nil(t, err) + }) + + t.Run("verify that objects exist after compaction", func(t *testing.T) { + reader := bucket.ReaderRoaringSetRange() + defer reader.Close() + + bm, release, err := reader.Read(context.Background(), key, filters.OperatorEqual) + 
require.NoError(t, err) + defer release() + + switch segments { + case 5: + assert.Equal(t, 0, bm.GetCardinality()) + case 6: + assert.Equal(t, 1, bm.GetCardinality()) + default: + assert.Equal(t, 2, bm.GetCardinality()) + } + }) + }) + } +} + +// Regression test to verify overwritten buffer in pread segment cursor. +// Since pread segment cursor reuses buffer to read data for single key, advancing to next +// key overwrites data loaded for previous key, therefore it is important to make sure data is utilized +// before moving to next key. As deletions are stored only in key 0 but merged with other keys' additions +// during compaction, compactor was improved to clone deletions before advancing from key 0. +// +// Data in this test come from compactionRoaringSetRangeStrategy_Random test for which test failed. +func compactionRoaringSetRangeStrategy_BugfixOverwrittenBuffer(ctx context.Context, t *testing.T, opts []BucketOption) { + b, err := NewBucketCreator().NewBucket(ctx, t.TempDir(), "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + defer b.Shutdown(testCtx()) + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + maxKey := uint64(100) + additions := []map[uint64]uint64{ + {582: 31, 2457: 1, 7014: 84, 10074: 19, 15857: 21, 17389: 59, 20641: 39, 22654: 31, 26615: 68, 29628: 77, 34629: 88, 36259: 41, 38356: 40, 41907: 12, 47389: 9, 54234: 67, 57477: 48, 63860: 52, 65936: 73, 66936: 83, 68719: 93, 69983: 73, 73312: 87, 79075: 87, 79750: 80, 84822: 62, 89404: 34, 92677: 8, 92920: 99, 114337: 93, 120795: 3, 137241: 65, 137566: 57, 141451: 12, 141590: 73, 141737: 1, 146854: 11, 157621: 7, 158380: 48, 168107: 22, 174002: 34, 174061: 12, 175938: 32, 176461: 28, 180894: 27, 185953: 99, 188827: 7, 189017: 33, 190976: 65, 193880: 9, 195549: 44, 201959: 39, 215286: 1, 227231: 70, 227853: 19, 233336: 60, 245655: 97, 247745: 2, 250395: 53, 255416: 85, 259850: 7, 261589: 67, 267553: 18, 276859: 5, 281001: 36, 281342: 89, 286924: 69, 289336: 97, 304796: 89, 308377: 83, 315617: 44, 319441: 65, 334869: 89, 349908: 40, 353155: 69, 354711: 27, 358443: 54, 364522: 57, 372484: 83, 372883: 5, 377380: 75, 378862: 63, 381456: 4, 382128: 5, 398907: 36, 403740: 68, 412506: 69, 418014: 67, 418588: 36, 419184: 30, 419861: 47, 420078: 57, 421345: 44, 422573: 2, 428339: 23, 429818: 37, 438506: 67, 443207: 71, 445249: 61, 448140: 47, 450461: 23, 451021: 40, 465803: 60, 468608: 34, 471398: 21, 471871: 57, 472561: 35, 477304: 57, 482443: 9, 482743: 1, 490800: 62, 494687: 64, 495412: 60, 504717: 40, 506351: 15, 506701: 96, 507051: 14, 508263: 35, 514357: 1, 514572: 9, 519761: 13, 521475: 1, 523911: 67, 523958: 24, 525325: 44, 532566: 71, 534416: 42, 537652: 48, 538026: 21, 538831: 69, 545807: 3, 546217: 22, 553210: 66, 556123: 94, 557398: 38, 565813: 87, 568596: 70, 570487: 26, 573210: 9, 580573: 21, 587063: 11, 589574: 79, 594780: 25, 595905: 52, 596992: 20, 597223: 72, 599964: 80, 621697: 25, 622681: 78, 626797: 61, 636896: 75, 638507: 67, 644633: 11, 645102: 
76, 645249: 57, 647232: 50, 648061: 32, 655584: 16, 662134: 92, 662870: 51, 664021: 70, 664947: 53, 673987: 56, 674080: 14, 676228: 4, 677240: 84, 684016: 6, 684662: 63, 692925: 18, 693473: 90, 695394: 1, 701424: 29, 711145: 80, 719341: 54, 725949: 99, 739548: 15, 742095: 12, 744237: 4, 745522: 38, 748390: 2, 752683: 48, 764642: 8, 764744: 42, 778885: 78, 783438: 72, 783725: 32, 786208: 90, 793675: 74, 804884: 18, 808337: 69, 808850: 59, 811400: 18, 812434: 64, 813132: 74, 813638: 25, 818764: 63, 819782: 14, 819912: 79, 820983: 7, 821591: 89, 826149: 68, 831816: 1, 832127: 62, 833326: 71, 833399: 15, 834476: 5, 836818: 20, 842899: 76, 843875: 99, 847120: 90, 847236: 38, 855452: 73, 864728: 9, 865576: 16, 867235: 90, 874232: 86, 875925: 66, 882191: 62, 905422: 92, 906540: 17, 912249: 88, 912491: 31, 943672: 17, 949369: 64, 950691: 15, 953102: 33, 963659: 11, 970330: 62, 970821: 19, 975728: 60, 979035: 44, 981544: 67, 987386: 98, 989328: 36, 995279: 89}, + {556: 42, 1929: 87, 2090: 78, 2522: 5, 3908: 29, 10292: 63, 11934: 29, 12165: 48, 12614: 6, 12741: 44, 16143: 39, 16189: 16, 16859: 8, 18436: 40, 18735: 94, 18884: 25, 19843: 64, 21477: 53, 21927: 60, 24427: 16, 25665: 37, 26464: 21, 28448: 7, 29261: 9, 30539: 63, 32052: 42, 34871: 3, 35661: 8, 39015: 98, 39828: 75, 40404: 44, 41893: 73, 44348: 55, 46066: 98, 49015: 95, 49939: 24, 50607: 82, 51143: 4, 51556: 59, 51725: 6, 52673: 25, 54255: 57, 55139: 89, 55731: 5, 57361: 5, 57587: 84, 57666: 40, 59288: 49, 60448: 2, 61140: 92, 61621: 19, 62666: 29, 64023: 7, 67178: 80, 67560: 7, 69056: 17, 69847: 66, 70354: 53, 73765: 84, 74839: 93, 76108: 88, 76641: 4, 79954: 58, 80684: 61, 81779: 12, 82604: 0, 83696: 63, 84844: 60, 85213: 3, 89178: 53, 94228: 81, 94325: 57, 96947: 90, 98172: 69, 100058: 2, 101090: 69, 103769: 46, 105506: 92, 106996: 27, 107041: 70, 107440: 97, 108664: 59, 111724: 66, 112341: 88, 113543: 43, 115509: 61, 115562: 82, 120568: 17, 120913: 59, 122549: 32, 123819: 65, 124945: 39, 125770: 78, 129760: 91, 
131788: 80, 132257: 70, 132843: 55, 133124: 10, 133645: 37, 134313: 34, 135084: 66, 136805: 96, 137340: 86, 138452: 93, 139825: 42, 139901: 52, 141056: 47, 141946: 95, 143782: 82, 144813: 12, 144988: 33, 146215: 26, 147526: 44, 148114: 14, 150797: 36, 151344: 29, 151752: 14, 152123: 12, 152366: 39, 153551: 69, 154340: 83, 160247: 10, 160538: 34, 161321: 48, 162746: 81, 163083: 8, 166246: 35, 166427: 73, 167370: 77, 170519: 51, 170977: 74, 170989: 77, 171601: 17, 172584: 35, 175825: 31, 176517: 36, 176543: 44, 178539: 73, 179579: 18, 179885: 19, 180301: 65, 180827: 37, 186639: 25, 188285: 46, 189236: 83, 190679: 83, 190872: 21, 191361: 53, 192654: 58, 195186: 56, 196043: 80, 196083: 18, 196157: 37, 196286: 2, 196539: 13, 197904: 45, 198578: 93, 198799: 15, 199253: 5, 204045: 65, 205000: 11, 205091: 84, 205859: 53, 207201: 81, 207407: 37, 209812: 46, 210970: 61, 211680: 33, 212009: 92, 213579: 86, 214882: 49, 217456: 2, 217693: 24, 221590: 77, 223766: 49, 224892: 61, 225984: 73, 230083: 97, 233015: 88, 235641: 6, 235984: 27, 238950: 29, 239328: 4, 240803: 43, 240904: 90, 241069: 79, 243321: 72, 245740: 96, 248387: 50, 248906: 11, 248917: 25, 249462: 47, 250268: 64, 254342: 74, 254975: 29, 257002: 69, 258317: 60, 260037: 83, 260775: 77, 261360: 22, 261458: 82, 266419: 63, 268315: 54, 274845: 47, 275153: 70, 275268: 48, 277489: 45, 277729: 61, 277798: 76, 277917: 40, 279463: 26, 279501: 56, 281123: 32, 282810: 99, 282832: 83, 283319: 2, 288491: 14, 289713: 51, 292441: 50, 292994: 6, 297789: 31, 299480: 47, 300270: 26, 303220: 60, 303998: 2, 304959: 6, 306655: 85, 307353: 63, 307496: 56, 307674: 83, 309754: 77, 309795: 0, 309938: 42, 311884: 86, 320379: 2, 320570: 40, 320608: 40, 326719: 40, 327601: 0, 328231: 53, 329456: 70, 330403: 71, 334787: 10, 336316: 58, 336766: 57, 336837: 80, 339109: 89, 339135: 77, 339840: 28, 340616: 18, 342396: 26, 343451: 17, 346573: 24, 346798: 77, 347182: 53, 349340: 73, 351484: 71, 351501: 84, 352038: 38, 352675: 80, 352964: 70, 353980: 
67, 355200: 79, 355434: 90, 355629: 62, 355789: 83, 356394: 17, 356486: 59, 356783: 27, 356856: 34, 358588: 83, 358888: 76, 359701: 81, 360053: 85, 360206: 86, 360522: 80, 361274: 56, 363302: 99, 365867: 22, 366103: 12, 366779: 22, 368496: 84, 369262: 92, 371188: 61, 372863: 55, 373853: 96, 373975: 38, 377399: 0, 379796: 60, 381519: 87, 382434: 41, 384635: 79, 385074: 4, 385361: 12, 386388: 22, 386558: 70, 388000: 10, 390373: 90, 391106: 18, 391555: 78, 394091: 26, 394175: 13, 394240: 65, 396231: 75, 396596: 57, 398463: 72, 398517: 87, 398910: 59, 399363: 10, 402421: 99, 406169: 89, 411309: 74, 412308: 91, 412937: 16, 413814: 68, 414324: 81, 414521: 70, 414819: 4, 416955: 95, 420400: 64, 421770: 18, 422533: 87, 424069: 11, 424078: 26, 426124: 10, 428478: 35, 429913: 35, 429944: 27, 429955: 28, 430255: 63, 432974: 94, 434675: 33, 435184: 93, 437892: 61, 437932: 97, 439446: 94, 441344: 13, 441617: 74, 442721: 93, 442820: 81, 443200: 23, 443571: 1, 445216: 39, 448677: 30, 451938: 20, 452299: 80, 453183: 76, 453591: 43, 453988: 81, 454836: 90, 454908: 35, 456242: 48, 456702: 55, 457738: 32, 460827: 68, 463895: 36, 464650: 27, 464853: 39, 464994: 48, 468394: 20, 469508: 4, 470363: 37, 473019: 90, 474784: 21, 475502: 72, 476345: 91, 477441: 26, 478416: 63, 478591: 91, 479592: 28, 480764: 47, 482385: 16, 483784: 53, 484443: 0, 486715: 13, 487869: 76, 487918: 71, 490913: 22, 490935: 24, 493289: 93, 494013: 55, 496235: 32, 496337: 96, 496619: 7, 496800: 47, 498644: 70, 501955: 1, 502641: 97, 503108: 91, 504655: 32, 505345: 9, 505644: 24, 507678: 73, 507931: 23, 508455: 25, 509439: 83, 509810: 42, 511646: 3, 512856: 3, 514656: 37, 514689: 96, 514820: 15, 514853: 80, 515196: 94, 515891: 49, 517463: 10, 518513: 13, 520455: 22, 521635: 56, 524886: 70, 524980: 96, 525267: 24, 525907: 96, 526435: 84, 530003: 36, 530590: 28, 533627: 77, 533996: 86, 534005: 73, 534024: 83, 535260: 52, 536384: 96, 536762: 26, 539783: 19, 544941: 20, 546042: 57, 548084: 4, 548495: 31, 549317: 68, 
549824: 20, 550005: 54, 550551: 73, 551805: 69, 552193: 23, 553261: 0, 557853: 97, 558651: 88, 561647: 34, 563626: 46, 566205: 85, 566623: 37, 566812: 36, 567389: 18, 567623: 4, 573626: 73, 573821: 13, 573967: 96, 575271: 94, 576665: 40, 579965: 45, 580222: 76, 581892: 4, 584918: 73, 586066: 20, 586119: 72, 590645: 18, 597250: 81, 597667: 3, 598059: 55, 599858: 68, 600572: 46, 603053: 4, 604060: 25, 606866: 42, 606929: 53, 609214: 26, 609675: 98, 609981: 11, 610055: 96, 610063: 73, 610426: 84, 610625: 13, 611569: 14, 613895: 19, 614065: 49, 614872: 42, 618084: 71, 618347: 34, 619259: 45, 619374: 24, 619914: 79, 620108: 11, 622534: 38, 624525: 24, 624802: 72, 625326: 16, 626628: 68, 627339: 12, 627393: 6, 628378: 74, 628923: 92, 630023: 68, 632141: 50, 633421: 39, 634273: 45, 634661: 46, 634790: 22, 635817: 27, 636409: 66, 636431: 30, 637743: 59, 640650: 77, 640887: 79, 641218: 86, 642080: 90, 643865: 15, 644272: 43, 645666: 84, 646106: 78, 646978: 70, 647541: 37, 650879: 38, 651263: 51, 651581: 60, 653623: 89, 654276: 2, 654769: 93, 657039: 35, 657181: 43, 657750: 56, 658645: 48, 659503: 41, 659750: 73, 660420: 68, 661926: 7, 664747: 17, 665133: 0, 668942: 94, 670854: 73, 673813: 46, 674536: 20, 677534: 56, 678539: 62, 680304: 69, 680423: 34, 681499: 55, 682809: 5, 682984: 22, 683188: 97, 683755: 8, 686379: 86, 686414: 30, 688609: 32, 689193: 88, 689834: 16, 689910: 79, 692825: 76, 693283: 66, 694118: 59, 695067: 76, 695685: 82, 697129: 62, 697890: 59, 699737: 99, 699830: 65, 702619: 54, 702712: 20, 706647: 3, 706800: 32, 709276: 97, 712284: 9, 715200: 32, 716270: 36, 717118: 31, 717786: 72, 718712: 58, 720005: 2, 720965: 57, 721770: 64, 721835: 89, 724545: 92, 725604: 43, 727008: 25, 732183: 77, 734947: 41, 737066: 76, 737321: 51, 737342: 5, 738840: 32, 738904: 32, 740760: 13, 743124: 17, 746716: 39, 747367: 68, 751529: 47, 752170: 60, 752757: 56, 753792: 7, 759656: 10, 760489: 70, 760944: 34, 761419: 75, 761570: 94, 761572: 34, 762682: 91, 762701: 38, 763172: 95, 
763884: 21, 766708: 52, 768802: 6, 770483: 66, 771175: 9, 771285: 70, 772790: 70, 774912: 17, 775198: 58, 775273: 92, 775724: 23, 775907: 68, 776262: 85, 777096: 88, 777200: 90, 778932: 59, 779043: 44, 779753: 65, 782147: 99, 784200: 58, 784745: 73, 785135: 46, 785654: 23, 786223: 70, 787467: 33, 788198: 61, 789681: 81, 789814: 43, 790083: 43, 790257: 15, 790506: 98, 792019: 40, 795960: 71, 796760: 98, 797281: 96, 797982: 30, 798702: 85, 801428: 58, 802913: 93, 803042: 50, 803358: 24, 804312: 96, 805815: 94, 806164: 83, 807203: 64, 808838: 79, 809153: 7, 810941: 46, 811237: 7, 813198: 31, 815993: 83, 816779: 23, 816853: 51, 817044: 27, 817454: 86, 820601: 12, 822009: 3, 822103: 40, 824528: 84, 825128: 35, 825726: 77, 825967: 50, 828432: 36, 828662: 83, 828964: 1, 830985: 46, 834175: 15, 834773: 84, 834922: 62, 836598: 99, 843548: 46, 845534: 76, 845743: 1, 845963: 17, 847407: 52, 847431: 13, 848451: 34, 855235: 61, 857547: 9, 858901: 23, 867204: 6, 870558: 20, 871497: 68, 872621: 15, 875148: 67, 875763: 96, 878731: 61, 880861: 94, 881185: 8, 881535: 61, 883257: 66, 883297: 55, 883853: 72, 885307: 53, 886034: 70, 886703: 3, 887159: 71, 887346: 78, 887840: 44, 890576: 50, 892247: 48, 892335: 94, 892706: 41, 893667: 40, 895266: 64, 895452: 78, 895537: 69, 896929: 61, 897351: 27, 898093: 65, 898281: 67, 899680: 6, 900056: 53, 900630: 45, 903669: 26, 903907: 74, 909758: 47, 909775: 26, 914586: 1, 914671: 58, 916915: 90, 919245: 22, 922157: 63, 924937: 93, 925563: 28, 925762: 71, 925915: 22, 926626: 70, 928713: 73, 931411: 32, 932040: 19, 932433: 16, 932712: 51, 934707: 62, 936601: 44, 937207: 35, 937413: 44, 939328: 41, 940271: 31, 941832: 51, 943928: 2, 949561: 33, 949643: 84, 953454: 12, 953577: 48, 953790: 64, 956412: 42, 956736: 7, 957733: 96, 959630: 34, 959884: 93, 961126: 14, 961426: 62, 961479: 60, 962153: 77, 962250: 60, 964030: 98, 966876: 45, 967357: 65, 968048: 49, 968175: 42, 968878: 60, 969289: 38, 969878: 4, 970570: 13, 971187: 71, 971607: 1, 974056: 59, 
974098: 63, 974834: 34, 975286: 59, 976309: 23, 977038: 77, 977510: 48, 978883: 55, 978885: 67, 981089: 95, 981695: 22, 982690: 95, 983348: 14, 983849: 37, 986240: 53, 986292: 4, 988121: 15, 988620: 92, 992366: 10, 992907: 95, 993168: 82, 993443: 62, 995180: 46, 995716: 70, 995960: 83, 997045: 88}, + {1335: 43, 1788: 48, 2902: 22, 3121: 5, 5555: 68, 7573: 18, 8590: 46, 12273: 7, 15262: 89, 16503: 77, 18155: 86, 23792: 91, 24338: 26, 24474: 29, 24821: 45, 27005: 57, 31420: 19, 31432: 23, 31748: 98, 36122: 24, 37255: 33, 37815: 61, 40727: 64, 42468: 18, 43098: 85, 43313: 82, 44320: 17, 45427: 19, 45504: 52, 45728: 61, 47022: 18, 48752: 33, 48847: 79, 48899: 63, 50000: 89, 50124: 12, 51314: 4, 54592: 33, 54997: 9, 55230: 91, 56247: 67, 57871: 66, 58854: 92, 67347: 38, 69640: 84, 70338: 11, 74616: 27, 75077: 6, 76111: 86, 79943: 38, 82466: 0, 84717: 42, 86107: 62, 86128: 86, 86532: 82, 86691: 8, 87906: 52, 91304: 9, 91583: 40, 93301: 61, 94802: 51, 96196: 2, 97022: 8, 97529: 18, 97835: 18, 98664: 48, 99251: 83, 99622: 63, 99667: 30, 101886: 5, 102154: 70, 102447: 57, 102457: 41, 105717: 27, 108022: 65, 108590: 15, 108843: 3, 111299: 17, 111972: 60, 117197: 27, 117775: 38, 118122: 28, 118269: 75, 119924: 59, 120127: 19, 120946: 56, 126087: 60, 127003: 12, 127860: 80, 130694: 1, 132668: 30, 136445: 49, 136496: 52, 138794: 86, 138885: 30, 139949: 15, 142343: 26, 142632: 51, 143202: 20, 145783: 22, 145844: 88, 146361: 79, 147895: 1, 148036: 46, 151005: 12, 155150: 9, 155431: 59, 156272: 13, 156601: 81, 156829: 46, 157825: 0, 157984: 71, 159374: 71, 160628: 55, 162502: 85, 162887: 38, 164551: 8, 166207: 38, 168933: 91, 170410: 24, 171358: 27, 172496: 65, 172695: 78, 174463: 52, 176039: 96, 176404: 83, 177358: 9, 177617: 64, 179004: 34, 179026: 4, 179238: 40, 179591: 32, 181144: 37, 181998: 53, 182376: 91, 185037: 92, 189408: 76, 189628: 33, 190375: 62, 191723: 23, 192271: 73, 193891: 16, 195157: 3, 195751: 26, 196388: 16, 199254: 66, 199440: 84, 201299: 76, 202814: 6, 
204600: 69, 204931: 85, 204959: 80, 211379: 43, 211887: 28, 214305: 56, 216531: 48, 218311: 75, 218514: 85, 218660: 89, 219998: 20, 220557: 95, 220975: 24, 222505: 18, 224150: 67, 226389: 3, 226505: 0, 227573: 48, 228064: 40, 228310: 94, 231401: 28, 232316: 95, 234559: 54, 235156: 86, 235663: 77, 236333: 53, 239202: 69, 240667: 96, 240952: 0, 241029: 77, 244757: 59, 245082: 7, 247385: 86, 248016: 19, 248711: 36, 249511: 93, 252101: 8, 252484: 89, 253359: 12, 254488: 15, 257169: 47, 258316: 64, 263957: 55, 266490: 35, 266632: 1, 266758: 82, 267162: 11, 268088: 51, 269762: 18, 271116: 28, 271370: 60, 272648: 98, 273473: 12, 274646: 30, 276271: 15, 276540: 27, 276566: 68, 278953: 89, 280221: 51, 284383: 87, 284798: 76, 284866: 53, 285757: 94, 289898: 69, 292499: 16, 292565: 18, 297086: 52, 298832: 40, 300792: 43, 303096: 43, 303507: 4, 303626: 26, 304590: 57, 304949: 90, 306243: 10, 309270: 42, 316948: 6, 317599: 68, 317848: 70, 319518: 52, 321335: 67, 321521: 6, 321628: 55, 323959: 6, 325914: 25, 328502: 76, 330179: 49, 330433: 25, 331781: 25, 333674: 34, 334604: 83, 334607: 3, 336816: 50, 336901: 32, 337185: 36, 338826: 40, 339846: 3, 340445: 19, 340854: 86, 343974: 74, 350352: 80, 352253: 54, 355420: 10, 359841: 15, 360783: 83, 361411: 44, 362986: 91, 363821: 86, 363979: 37, 364253: 42, 365116: 55, 368895: 62, 369664: 59, 372578: 84, 374149: 17, 374391: 56, 374548: 96, 375111: 39, 376373: 55, 376649: 90, 377158: 71, 380732: 99, 382042: 66, 383503: 49, 383584: 15, 385740: 81, 386617: 25, 392482: 0, 395323: 83, 395784: 10, 398704: 11, 399405: 49, 400361: 11, 400729: 61, 401457: 42, 402526: 49, 403907: 97, 404296: 40, 404468: 8, 405951: 99, 408673: 55, 409633: 86, 411599: 39, 412059: 22, 420008: 66, 420447: 29, 425070: 47, 426710: 80, 430261: 11, 434850: 53, 437932: 28, 438663: 53, 439924: 42, 440331: 44, 444243: 72, 445251: 69, 446112: 40, 450807: 3, 451087: 45, 452421: 7, 452794: 66, 453257: 39, 464397: 34, 468127: 11, 468377: 90, 470576: 78, 472098: 69, 472322: 23, 
477969: 14, 481103: 41, 481162: 97, 483536: 60, 483948: 5, 486591: 11, 487667: 75, 487812: 70, 489352: 18, 492833: 77, 493079: 7, 495390: 19, 496804: 0, 497330: 6, 500895: 52, 502910: 16, 503234: 70, 508892: 25, 509573: 99, 513672: 59, 516400: 55, 516736: 29, 518060: 56, 519956: 34, 521125: 11, 521702: 7, 523686: 11, 523752: 94, 524276: 28, 525010: 13, 526641: 25, 526673: 53, 527213: 67, 528283: 38, 528452: 35, 528742: 29, 530479: 17, 530558: 58, 532609: 92, 532857: 37, 533066: 73, 533676: 96, 533957: 3, 534800: 90, 535733: 37, 536705: 99, 537917: 38, 539178: 4, 540411: 41, 540749: 37, 542948: 82, 543979: 2, 548263: 83, 549059: 47, 550243: 80, 553885: 38, 556855: 50, 559903: 51, 560219: 49, 563869: 4, 564474: 46, 566533: 71, 566944: 60, 569802: 5, 570322: 1, 571633: 40, 572228: 61, 575163: 58, 576038: 92, 576278: 98, 577167: 60, 578011: 81, 578502: 56, 582462: 11, 584828: 79, 587601: 57, 587654: 65, 590225: 86, 591917: 97, 592104: 16, 593365: 83, 593852: 25, 599264: 24, 601191: 96, 603568: 39, 603619: 50, 604353: 60, 605673: 90, 606623: 68, 608910: 89, 611803: 5, 611841: 10, 612709: 66, 613636: 86, 614548: 41, 617031: 63, 617377: 6, 618936: 23, 619222: 33, 619376: 93, 619671: 38, 622241: 40, 624293: 42, 624356: 16, 627015: 42, 629556: 37, 629657: 73, 633350: 30, 634510: 97, 636133: 77, 637604: 15, 638669: 20, 639172: 35, 640322: 89, 640835: 17, 642314: 18, 642696: 57, 645423: 38, 646223: 78, 652957: 77, 655881: 71, 659020: 45, 659302: 83, 659761: 25, 662031: 40, 662863: 46, 663429: 86, 664241: 70, 664780: 64, 666337: 64, 668289: 86, 668895: 29, 670001: 24, 671217: 90, 671355: 29, 673385: 98, 674603: 72, 676435: 2, 677841: 72, 677913: 72, 678569: 15, 680514: 43, 680769: 91, 681942: 81, 682494: 16, 683743: 7, 685387: 65, 686556: 31, 687054: 25, 688169: 27, 690262: 92, 690270: 15, 693683: 74, 697817: 37, 701470: 73, 703049: 21, 708738: 29, 708965: 9, 709248: 66, 709337: 36, 709752: 29, 709766: 8, 709937: 89, 711943: 44, 712424: 29, 713056: 62, 714227: 79, 714847: 74, 
717015: 61, 719310: 71, 723390: 7, 724823: 28, 725034: 41, 725801: 77, 727132: 40, 729486: 54, 732716: 45, 734666: 82, 735382: 7, 738272: 81, 742499: 85, 742519: 63, 742532: 98, 742918: 51, 745078: 90, 748399: 78, 752190: 64, 752399: 65, 757528: 94, 761499: 66, 763597: 56, 765666: 25, 766026: 48, 766449: 48, 766511: 7, 767723: 30, 769249: 6, 769599: 69, 770515: 18, 773412: 45, 773492: 9, 776185: 48, 779400: 27, 779971: 85, 781041: 41, 781176: 87, 784893: 26, 786955: 15, 790706: 12, 790821: 90, 791190: 16, 792032: 53, 794093: 0, 794609: 19, 796803: 90, 796819: 27, 797566: 35, 798148: 82, 799325: 13, 800404: 82, 806263: 73, 806677: 45, 808900: 75, 813668: 32, 814028: 83, 814466: 39, 816348: 63, 817702: 59, 817767: 16, 819169: 70, 820302: 57, 822724: 70, 824106: 27, 826680: 5, 830378: 51, 832365: 82, 837031: 16, 837479: 32, 838343: 83, 838894: 46, 841495: 6, 843805: 65, 844590: 11, 845704: 9, 845728: 14, 846595: 71, 847364: 86, 852700: 2, 854374: 56, 857609: 67, 859942: 88, 861390: 6, 865190: 2, 865642: 7, 866702: 42, 867147: 28, 868031: 93, 868465: 39, 868731: 82, 869485: 84, 872238: 18, 872663: 82, 873005: 32, 874555: 73, 875160: 96, 875293: 80, 876187: 54, 876961: 43, 879457: 53, 880223: 49, 880860: 67, 881327: 16, 883751: 3, 885097: 16, 886785: 26, 889284: 47, 889525: 68, 889860: 36, 891545: 46, 893905: 7, 894557: 19, 895924: 43, 896582: 56, 897299: 96, 901024: 43, 905196: 53, 907443: 54, 910630: 38, 912748: 75, 915067: 32, 917506: 28, 919668: 53, 920575: 35, 922142: 27, 924297: 88, 926503: 57, 928497: 92, 929160: 75, 932110: 49, 932114: 10, 942928: 15, 943552: 69, 945335: 32, 945558: 91, 945594: 74, 948319: 37, 949689: 4, 952132: 29, 952541: 10, 952809: 34, 954961: 76, 955500: 45, 956852: 53, 957192: 96, 958658: 41, 961082: 58, 961302: 65, 963013: 30, 963180: 9, 965592: 64, 965886: 56, 966073: 63, 968916: 27, 970289: 29, 971684: 59, 974027: 10, 976220: 50, 978734: 14, 980687: 66, 981069: 63, 981159: 75, 986089: 78, 986252: 29, 986384: 8, 987231: 99, 988360: 81, 
988400: 99, 990370: 67, 990613: 81, 990965: 59, 997768: 95}, + {5692: 36, 5791: 32, 6158: 42, 6834: 53, 7277: 72, 13631: 65, 16436: 43, 17918: 34, 18441: 32, 19055: 36, 23336: 58, 24416: 25, 27233: 56, 29382: 37, 30556: 84, 32950: 26, 36909: 67, 36932: 24, 39564: 42, 42490: 39, 42525: 8, 43510: 35, 44632: 45, 44754: 98, 44828: 16, 45772: 4, 46188: 79, 46234: 59, 47280: 8, 47430: 44, 47527: 95, 48226: 20, 49005: 32, 49483: 26, 51845: 65, 52623: 26, 57331: 11, 61393: 38, 61789: 70, 64944: 45, 67152: 86, 67542: 53, 67599: 85, 69172: 27, 72267: 56, 76035: 64, 79632: 11, 81610: 28, 81758: 72, 83003: 28, 85266: 48, 86211: 83, 89029: 31, 89344: 86, 90228: 74, 94951: 36, 97763: 75, 103999: 19, 106558: 38, 106691: 58, 109813: 11, 111249: 12, 112107: 56, 113014: 65, 114506: 79, 115164: 36, 115373: 78, 117398: 41, 117448: 40, 118800: 79, 121603: 7, 125773: 1, 127533: 35, 129980: 58, 130058: 44, 130237: 0, 131241: 72, 131588: 46, 134651: 40, 134753: 42, 135290: 24, 136027: 56, 138574: 19, 143121: 71, 144101: 41, 147081: 0, 147765: 96, 153275: 44, 153576: 36, 155007: 7, 155105: 63, 156572: 26, 159431: 97, 159571: 50, 160124: 64, 160410: 35, 160941: 1, 162087: 32, 163482: 85, 164899: 47, 166442: 76, 172472: 51, 176974: 11, 177727: 17, 178481: 43, 180859: 99, 182562: 7, 184875: 89, 185935: 96, 187389: 76, 187842: 72, 190795: 28, 191950: 67, 192333: 80, 194245: 81, 197614: 42, 204852: 47, 207556: 56, 210492: 76, 211006: 91, 213217: 78, 213824: 83, 217166: 92, 221277: 63, 223144: 13, 224054: 13, 226583: 33, 227297: 12, 238363: 99, 238846: 28, 242153: 3, 242828: 78, 243936: 84, 244243: 63, 245115: 54, 246895: 77, 247151: 32, 250964: 30, 252353: 15, 252537: 19, 254894: 75, 257230: 49, 259170: 30, 259359: 73, 262588: 74, 264626: 60, 269627: 94, 272175: 26, 272640: 55, 274690: 36, 277993: 62, 286173: 98, 290549: 78, 291417: 82, 291594: 69, 294141: 53, 296733: 19, 297120: 7, 301292: 26, 301338: 98, 301637: 99, 304756: 30, 306590: 15, 307188: 8, 308459: 46, 316103: 50, 317017: 62, 
317180: 30, 317559: 45, 317627: 55, 317903: 79, 320478: 51, 325723: 38, 327678: 67, 327693: 55, 328915: 86, 330690: 5, 335674: 41, 337980: 54, 338780: 15, 340558: 49, 346730: 36, 347382: 86, 350146: 88, 350337: 33, 351406: 56, 351552: 12, 357584: 12, 358238: 18, 358414: 41, 361887: 75, 362885: 72, 367002: 96, 367910: 80, 370394: 27, 370677: 98, 379234: 70, 383227: 89, 384698: 84, 390968: 41, 393968: 29, 398056: 30, 398607: 83, 400916: 61, 401314: 33, 405639: 40, 409000: 92, 409230: 64, 410873: 88, 417293: 22, 421253: 92, 421818: 73, 422884: 14, 424169: 17, 429980: 46, 433669: 98, 438692: 51, 439583: 48, 439697: 45, 441368: 99, 443093: 0, 443617: 53, 445237: 51, 445543: 93, 445838: 51, 448226: 54, 450659: 61, 450661: 24, 451909: 74, 456541: 71, 456763: 96, 457675: 9, 459230: 60, 460730: 74, 467425: 40, 471515: 94, 472543: 42, 473059: 68, 476002: 67, 476897: 87, 476944: 16, 477845: 76, 478153: 12, 481174: 66, 483918: 19, 486170: 29, 490044: 20, 491002: 38, 492244: 18, 492386: 87, 492505: 17, 493107: 49, 493125: 91, 496834: 91, 498345: 1, 499312: 67, 500302: 4, 500750: 53, 501343: 93, 502827: 0, 503614: 52, 504261: 8, 509365: 25, 512530: 81, 513242: 81, 514131: 26, 520223: 27, 520693: 34, 526780: 82, 528171: 9, 528706: 43, 530726: 0, 532076: 46, 532599: 61, 533867: 81, 535704: 91, 536458: 3, 536610: 76, 536789: 86, 537732: 63, 539100: 38, 539609: 31, 540648: 93, 542566: 18, 543765: 44, 546096: 89, 546409: 59, 547891: 73, 549590: 16, 550403: 24, 551191: 41, 553952: 85, 556178: 56, 556808: 95, 558712: 70, 558847: 17, 560726: 43, 565495: 73, 566573: 29, 569040: 10, 569665: 37, 569720: 42, 574102: 77, 577239: 71, 577422: 97, 578615: 99, 579292: 32, 579487: 43, 581671: 48, 582005: 25, 588052: 15, 591513: 22, 593964: 20, 600793: 23, 601711: 84, 603132: 35, 604442: 87, 604866: 58, 606611: 17, 607466: 30, 610596: 27, 612174: 37, 612486: 8, 614891: 6, 617504: 2, 624549: 78, 624957: 48, 627526: 73, 630133: 42, 631582: 76, 631950: 18, 634821: 40, 635020: 54, 635556: 32, 637862: 
68, 639467: 50, 639755: 48, 639832: 46, 642616: 55, 644929: 29, 646006: 1, 647235: 90, 649760: 23, 651204: 93, 651991: 75, 652144: 88, 656345: 18, 659135: 94, 659293: 78, 659808: 50, 660870: 59, 661709: 24, 666124: 45, 668285: 62, 669425: 85, 671678: 74, 672842: 99, 673638: 12, 673803: 58, 674626: 56, 674763: 76, 676487: 13, 679847: 80, 682187: 32, 682690: 23, 682724: 36, 685148: 85, 685253: 38, 688048: 19, 689841: 62, 690439: 56, 694808: 68, 694936: 17, 695041: 29, 699706: 26, 700462: 60, 702025: 44, 702920: 15, 703331: 50, 706135: 78, 706156: 92, 706408: 71, 707425: 23, 707682: 35, 709528: 21, 711539: 79, 721337: 90, 723375: 34, 726355: 51, 728727: 96, 729653: 5, 730596: 77, 732511: 34, 733045: 30, 733566: 77, 734559: 80, 734636: 39, 735716: 12, 740950: 60, 747058: 57, 747279: 37, 748052: 89, 749952: 24, 754624: 18, 755567: 96, 758607: 83, 759355: 4, 760156: 24, 772596: 89, 774248: 89, 774327: 64, 774425: 62, 776457: 34, 776820: 69, 777257: 2, 786216: 36, 787272: 65, 787604: 34, 787723: 15, 787875: 25, 788116: 69, 791311: 9, 791858: 67, 794785: 58, 794894: 52, 795433: 71, 795589: 60, 796973: 62, 797684: 45, 800026: 41, 806507: 48, 807703: 23, 808095: 53, 808829: 67, 810389: 8, 816672: 74, 818621: 18, 819703: 0, 819868: 8, 824262: 51, 824702: 16, 826754: 94, 827834: 13, 829540: 30, 831103: 35, 833992: 70, 834286: 71, 837843: 97, 838991: 10, 844327: 1, 844604: 64, 848227: 83, 848747: 95, 852952: 87, 859333: 86, 861237: 75, 864199: 62, 864809: 81, 868631: 22, 870725: 13, 871645: 15, 875347: 11, 877680: 8, 878559: 24, 882713: 42, 884222: 81, 885633: 64, 886877: 49, 889165: 42, 892664: 95, 897796: 9, 899063: 67, 900057: 98, 900237: 51, 904604: 6, 906474: 55, 906498: 22, 912934: 85, 914630: 55, 915477: 65, 917797: 29, 918175: 7, 919674: 48, 923127: 68, 925155: 75, 928021: 32, 930689: 10, 932380: 76, 932405: 8, 935055: 61, 935187: 42, 937757: 91, 939189: 56, 939594: 84, 940607: 80, 945688: 21, 947403: 71, 947440: 72, 949002: 70, 949584: 40, 951834: 35, 955944: 9, 
958498: 9, 958723: 19, 960331: 24, 961372: 77, 962008: 57, 963839: 2, 965831: 67, 966409: 49, 968484: 26, 970620: 69, 971168: 26, 971906: 55, 972506: 50, 972668: 81, 974723: 2, 975615: 95, 978133: 0, 978301: 65, 978559: 5, 979530: 38, 984208: 57, 985454: 5, 986156: 34, 986834: 24, 989027: 50, 989468: 27, 989569: 39, 990023: 82, 991282: 2, 992612: 48, 994317: 50, 994925: 5, 995029: 76, 998131: 21, 998625: 52, 998675: 21, 999918: 99}, + {2033: 3, 2553: 4, 2596: 84, 2617: 81, 4022: 99, 4632: 82, 5997: 44, 7201: 4, 7971: 7, 8770: 66, 8987: 80, 9179: 44, 11276: 99, 12357: 66, 12779: 40, 12860: 33, 13714: 80, 14105: 31, 14225: 45, 14251: 53, 14744: 67, 17232: 55, 18538: 17, 19650: 57, 19781: 24, 19816: 1, 20147: 33, 20540: 94, 25093: 26, 26394: 40, 27645: 53, 27990: 66, 28000: 12, 28215: 72, 29048: 40, 29209: 93, 30022: 35, 31844: 11, 31893: 39, 33471: 13, 33517: 30, 33612: 13, 33896: 92, 34246: 36, 35922: 8, 37841: 58, 39153: 78, 39258: 75, 41510: 94, 41750: 67, 42272: 72, 43389: 15, 44549: 50, 44632: 64, 44643: 78, 46140: 34, 46151: 82, 49758: 65, 50618: 87, 50680: 41, 50943: 96, 51031: 6, 51344: 48, 51373: 25, 52078: 49, 52697: 10, 53967: 78, 55206: 22, 55367: 52, 56602: 68, 56965: 80, 57402: 78, 57501: 26, 58162: 89, 58971: 52, 59624: 60, 59643: 71, 60431: 83, 60532: 92, 62365: 18, 65199: 18, 66070: 1, 68359: 26, 68753: 28, 69376: 80, 70401: 69, 70654: 96, 70850: 62, 71915: 24, 72285: 87, 72877: 2, 73108: 40, 73617: 59, 74039: 58, 76935: 34, 78328: 58, 78405: 94, 78656: 54, 79017: 7, 80196: 55, 80571: 63, 81270: 6, 82734: 86, 82854: 27, 84775: 71, 85501: 10, 86277: 10, 86361: 47, 86916: 11, 88152: 22, 89618: 23, 90195: 45, 90419: 58, 92833: 34, 95256: 20, 95669: 48, 96128: 13, 96964: 74, 97025: 17, 97094: 84, 97278: 16, 99508: 87, 100996: 89, 101372: 27, 102862: 55, 102995: 91, 106658: 22, 108449: 99, 109185: 91, 109858: 51, 110322: 30, 111124: 10, 112271: 27, 115064: 94, 116592: 4, 117489: 31, 118204: 32, 119469: 51, 120387: 96, 120467: 61, 121542: 81, 121670: 65, 
123148: 80, 123322: 22, 123849: 2, 124075: 92, 124609: 9, 124929: 96, 125637: 48, 125933: 15, 126151: 19, 126531: 8, 126626: 75, 126998: 46, 128984: 48, 129277: 66, 129897: 18, 132865: 90, 133085: 23, 135356: 55, 135671: 41, 136647: 52, 137856: 51, 138360: 11, 138480: 77, 141856: 6, 142152: 45, 142669: 22, 143582: 15, 143802: 81, 144018: 58, 144203: 76, 145235: 79, 145639: 59, 145982: 0, 147151: 62, 147726: 19, 149051: 24, 152150: 16, 152700: 58, 153467: 98, 153959: 12, 154563: 77, 154996: 55, 155399: 85, 156605: 67, 156626: 48, 157097: 21, 158741: 28, 159123: 6, 159220: 96, 160326: 73, 163035: 90, 164205: 64, 165114: 97, 166166: 50, 166483: 21, 166877: 72, 168903: 5, 171316: 24, 172227: 63, 172716: 79, 174229: 10, 174231: 19, 175072: 33, 176899: 73, 179190: 17, 180457: 27, 180896: 6, 184415: 16, 184825: 34, 184855: 41, 185051: 15, 185381: 13, 185630: 49, 186352: 58, 186485: 35, 187557: 49, 188539: 43, 188634: 43, 190140: 31, 191909: 54, 193643: 98, 194401: 94, 194760: 47, 195006: 3, 195337: 81, 196854: 70, 197332: 11, 197438: 67, 197701: 88, 198846: 86, 200296: 72, 203107: 79, 203738: 49, 205063: 58, 206265: 55, 208619: 10, 208640: 76, 208673: 1, 209079: 8, 209944: 17, 211321: 65, 211722: 74, 212137: 46, 212247: 30, 212781: 46, 213575: 99, 213758: 85, 214648: 7, 216157: 64, 218238: 77, 218277: 73, 218731: 98, 219108: 43, 219290: 14, 219577: 39, 220589: 91, 222306: 35, 224405: 89, 225989: 19, 226438: 44, 227049: 2, 227241: 86, 227287: 53, 227298: 45, 227751: 21, 227850: 46, 228022: 39, 228067: 98, 228665: 43, 229984: 59, 230450: 73, 230681: 15, 230991: 2, 231535: 6, 231880: 77, 232827: 95, 232933: 12, 234603: 7, 234732: 5, 234904: 82, 235318: 78, 235972: 44, 236561: 88, 237759: 73, 238094: 64, 238162: 92, 239016: 45, 239066: 96, 241361: 86, 241999: 23, 242083: 23, 243559: 4, 243922: 8, 245748: 19, 246485: 85, 247399: 64, 247527: 10, 250702: 55, 252926: 98, 253091: 45, 254212: 84, 258566: 98, 258599: 23, 259079: 12, 260358: 67, 261004: 26, 262222: 85, 264914: 92, 
265194: 47, 265581: 79, 266630: 21, 266799: 49, 267753: 49, 269429: 17, 269475: 19, 271572: 43, 271940: 38, 277723: 92, 277736: 33, 279190: 47, 280206: 82, 281080: 46, 281375: 64, 281647: 85, 282380: 10, 282438: 5, 282745: 88, 284149: 27, 284233: 13, 284327: 72, 285883: 96, 286057: 39, 286224: 17, 286563: 24, 288024: 94, 290494: 41, 291174: 70, 292385: 91, 292681: 59, 294003: 97, 294744: 45, 295196: 5, 296264: 85, 296698: 15, 296700: 55, 297262: 95, 297824: 13, 298604: 57, 298735: 51, 301325: 91, 301744: 70, 301811: 59, 307659: 62, 308899: 20, 309955: 82, 310603: 38, 310988: 58, 312405: 36, 314278: 79, 315722: 93, 315784: 88, 316279: 28, 317147: 26, 317286: 32, 319155: 57, 323039: 14, 323209: 96, 325695: 25, 326795: 65, 328295: 85, 328557: 7, 329043: 97, 329180: 0, 329504: 29, 329600: 51, 330069: 86, 331479: 15, 334363: 35, 338150: 83, 338278: 96, 339221: 73, 339267: 59, 339677: 2, 340637: 33, 341186: 63, 342045: 41, 342142: 18, 344219: 38, 344723: 14, 346446: 84, 347894: 70, 348364: 49, 349073: 66, 349703: 41, 351389: 33, 352803: 72, 352860: 56, 353026: 93, 354102: 52, 354547: 64, 355772: 60, 357243: 10, 358019: 78, 358550: 79, 359508: 97, 360316: 57, 360583: 72, 361330: 60, 361800: 79, 362339: 33, 362935: 32, 363307: 28, 364290: 27, 364711: 24, 367110: 18, 368431: 5, 370214: 7, 370341: 86, 371463: 48, 372050: 71, 372393: 71, 374062: 76, 374759: 99, 375955: 41, 376419: 73, 376517: 94, 378504: 29, 378622: 27, 378931: 98, 379003: 42, 379021: 13, 381257: 34, 381958: 65, 382021: 9, 382473: 51, 382605: 35, 384040: 1, 384363: 45, 385690: 27, 387244: 90, 387309: 27, 387823: 82, 388590: 2, 388822: 13, 388947: 12, 390026: 46, 390948: 67, 391214: 37, 391647: 32, 393110: 15, 393235: 18, 393249: 63, 394537: 4, 395611: 71, 397165: 37, 399424: 6, 399580: 97, 399620: 1, 400551: 12, 401626: 1, 402517: 69, 403968: 12, 404271: 27, 406345: 12, 408294: 0, 410175: 45, 412471: 13, 412802: 69, 412846: 45, 413755: 70, 415162: 72, 415396: 31, 416916: 55, 417836: 90, 417949: 74, 418892: 
42, 419678: 50, 419731: 22, 420070: 50, 420214: 54, 420815: 44, 420955: 17, 421009: 32, 421308: 57, 421389: 65, 422050: 40, 422090: 25, 422354: 13, 422439: 87, 427587: 78, 427675: 1, 427698: 87, 428397: 51, 428638: 70, 429081: 71, 429382: 89, 429534: 57, 431829: 73, 432348: 89, 432360: 84, 432976: 66, 433854: 11, 434872: 40, 435962: 41, 436351: 68, 436499: 85, 436613: 95, 437449: 45, 438862: 97, 438906: 78, 439522: 59, 439624: 74, 439937: 57, 440371: 4, 441895: 72, 442381: 86, 442916: 87, 444679: 82, 444907: 91, 445265: 1, 445595: 74, 446736: 74, 448512: 34, 449082: 37, 449473: 7, 451164: 66, 452559: 99, 453676: 35, 455258: 90, 455388: 44, 457018: 74, 458746: 91, 459221: 12, 459536: 10, 463317: 69, 464085: 51, 466128: 46, 466762: 10, 467079: 12, 468680: 23, 469734: 59, 469741: 82, 470784: 41, 472784: 3, 473214: 80, 474603: 7, 474768: 66, 475790: 3, 476176: 76, 477111: 19, 479519: 64, 479745: 70, 480644: 83, 481450: 85, 481520: 40, 482221: 84, 483865: 68, 483971: 22, 484913: 91, 485270: 45, 485352: 45, 485650: 55, 486291: 33, 486924: 34, 487933: 88, 488106: 10, 488389: 40, 488401: 57, 489244: 78, 489741: 73, 490196: 51, 490212: 40, 490302: 88, 490564: 20, 492358: 44, 494366: 73, 497452: 1, 498337: 27, 498346: 58, 499565: 5, 500821: 28, 502460: 51, 504901: 55, 505042: 63, 506304: 65, 507760: 12, 508056: 84, 508240: 58, 509210: 49, 509732: 97, 509858: 42, 511633: 91, 512532: 15, 512726: 20, 515052: 63, 516656: 49, 519929: 75, 521186: 57, 521667: 88, 523820: 92, 523934: 75, 524119: 23, 525796: 55, 525902: 82, 527671: 18, 528050: 42, 530186: 53, 530957: 6, 531115: 96, 531652: 97, 531661: 39, 533281: 11, 535519: 41, 535765: 32, 535964: 83, 536687: 32, 537599: 30, 537753: 58, 537770: 40, 537771: 11, 538757: 90, 539376: 94, 541909: 79, 542715: 33, 543322: 44, 543972: 69, 544025: 11, 544029: 35, 544952: 16, 544998: 42, 545920: 62, 548335: 55, 549384: 25, 550379: 59, 550446: 64, 550764: 38, 551121: 59, 551411: 54, 551807: 23, 552378: 94, 553015: 52, 553461: 80, 553480: 39, 
555277: 72, 556327: 57, 556408: 40, 556601: 46, 556854: 98, 559061: 68, 560827: 73, 560845: 11, 561289: 44, 562144: 57, 562872: 77, 563527: 52, 564509: 3, 565061: 71, 565588: 50, 565798: 68, 567185: 35, 567792: 38, 569200: 95, 571160: 84, 574908: 56, 574921: 68, 574962: 87, 576115: 38, 576815: 4, 578196: 99, 580343: 11, 582533: 6, 583069: 2, 584246: 31, 585759: 96, 586173: 37, 586692: 69, 587706: 0, 588035: 18, 589762: 92, 591854: 60, 593246: 31, 594093: 10, 595350: 60, 595663: 36, 597283: 35, 598453: 73, 599322: 44, 599546: 91, 599935: 7, 600511: 6, 601635: 37, 601799: 79, 603257: 33, 604497: 23, 604532: 72, 605212: 78, 605524: 6, 610876: 12, 611144: 73, 612197: 95, 612704: 53, 612893: 3, 613296: 3, 614144: 12, 618938: 35, 619194: 95, 620704: 83, 621725: 24, 621866: 72, 622746: 8, 623630: 21, 623938: 67, 624383: 10, 624442: 48, 624814: 9, 625449: 75, 625906: 57, 626452: 74, 628867: 44, 630091: 8, 630628: 86, 631730: 8, 632458: 46, 632731: 12, 633546: 76, 633590: 38, 636159: 87, 636193: 47, 636219: 72, 636572: 23, 636758: 74, 636892: 0, 637044: 27, 640214: 37, 640395: 26, 640551: 64, 642150: 56, 642834: 66, 643932: 5, 644686: 36, 645644: 42, 645908: 64, 646185: 75, 646472: 33, 647620: 19, 647814: 59, 648214: 20, 651049: 29, 651947: 6, 653294: 75, 653383: 29, 654050: 19, 657298: 12, 659957: 89, 660808: 29, 660928: 87, 661793: 99, 662216: 68, 662600: 82, 663113: 8, 665859: 46, 668283: 16, 668518: 19, 668582: 17, 670184: 35, 670669: 45, 671394: 38, 671749: 21, 671997: 58, 673590: 48, 673877: 44, 674308: 42, 675925: 33, 675934: 91, 677921: 31, 678751: 20, 678769: 88, 679661: 87, 680358: 8, 682552: 4, 683698: 0, 683991: 53, 684166: 69, 689358: 30, 689474: 16, 690403: 76, 690545: 65, 690984: 79, 692733: 86, 692923: 93, 693202: 19, 693291: 18, 693574: 95, 694080: 96, 694154: 64, 694757: 90, 695987: 14, 697875: 80, 698212: 98, 698919: 70, 699088: 51, 699400: 10, 699450: 59, 701321: 53, 701385: 5, 702341: 93, 702759: 35, 703058: 90, 704690: 1, 706799: 83, 707461: 80, 
707782: 96, 709394: 65, 709674: 68, 710134: 39, 711321: 2, 711617: 48, 711697: 54, 711865: 27, 712280: 95, 712674: 93, 714395: 49, 715752: 65, 716594: 77, 718628: 6, 719717: 92, 719836: 98, 722326: 90, 727429: 77, 727486: 39, 727995: 17, 728725: 81, 730320: 97, 732662: 31, 732987: 54, 734077: 45, 734579: 17, 735287: 23, 735582: 54, 736172: 35, 736333: 21, 738337: 38, 740944: 78, 740955: 20, 742041: 67, 744781: 56, 747104: 45, 747629: 22, 747992: 29, 748677: 75, 748973: 0, 749012: 38, 749334: 70, 749404: 57, 749878: 87, 751771: 80, 753201: 76, 754298: 35, 756605: 81, 756641: 51, 757131: 11, 757454: 47, 757620: 69, 758402: 39, 758782: 35, 759085: 50, 759161: 34, 759562: 43, 761286: 62, 761908: 16, 762165: 77, 766751: 74, 767125: 9, 767654: 32, 768455: 99, 769620: 54, 769622: 87, 769895: 29, 769998: 22, 770716: 63, 771903: 93, 773959: 45, 775912: 60, 777448: 77, 778798: 70, 780896: 63, 781794: 43, 784133: 10, 785835: 17, 785951: 99, 786015: 91, 786774: 93, 787556: 39, 790828: 29, 791374: 94, 791638: 17, 793837: 77, 797673: 18, 797807: 77, 798273: 71, 798470: 92, 799642: 94, 800687: 30, 800972: 16, 801580: 62, 802470: 22, 803485: 99, 803527: 8, 807959: 77, 808066: 80, 808158: 8, 808318: 94, 811566: 19, 812314: 7, 813275: 25, 814647: 68, 815622: 79, 816386: 13, 817345: 31, 818114: 74, 818152: 21, 819731: 39, 819890: 97, 820113: 48, 820378: 98, 821034: 27, 821469: 34, 821727: 31, 821990: 46, 826765: 50, 827085: 16, 827387: 98, 829335: 62, 829776: 94, 830440: 38, 830510: 0, 835764: 94, 835904: 79, 837256: 6, 838071: 41, 838079: 46, 839644: 35, 839955: 11, 839970: 34, 840469: 66, 840678: 71, 840984: 73, 841770: 10, 842703: 13, 843215: 48, 845241: 99, 845720: 11, 846107: 9, 846255: 46, 846976: 28, 847685: 39, 849303: 96, 850295: 45, 851493: 28, 853398: 36, 853515: 35, 853887: 52, 855241: 51, 855487: 10, 856775: 89, 857068: 19, 858067: 71, 858851: 93, 859671: 8, 860218: 53, 861415: 62, 861450: 17, 862042: 50, 862302: 59, 863567: 22, 865658: 28, 867472: 6, 867601: 7, 868704: 
83, 870271: 20, 871057: 0, 872726: 3, 873153: 91, 873787: 45, 875682: 40, 876394: 44, 878497: 85, 878579: 53, 878917: 36, 879259: 15, 879940: 48, 880396: 56, 886129: 0, 888904: 73, 889248: 28, 889997: 78, 892443: 87, 892894: 55, 893982: 74, 894195: 96, 895431: 8, 895727: 96, 895735: 62, 895920: 52, 898136: 23, 899851: 95, 900925: 12, 902396: 34, 904724: 48, 905590: 37, 907013: 29, 908843: 48, 909146: 12, 910456: 60, 911306: 6, 911398: 21, 915909: 0, 916743: 75, 917104: 74, 920164: 19, 920898: 25, 922146: 28, 922661: 71, 923060: 74, 923239: 81, 923639: 83, 924040: 59, 924702: 44, 926614: 16, 929311: 11, 930004: 53, 931459: 33, 931963: 18, 932798: 95, 933776: 72, 934793: 46, 936611: 46, 936672: 39, 936697: 92, 936883: 2, 938703: 16, 938983: 34, 939308: 18, 940777: 49, 942629: 44, 942998: 46, 943374: 60, 945307: 67, 945786: 63, 945984: 51, 946908: 30, 946973: 77, 947360: 8, 947698: 95, 947963: 32, 948269: 50, 948277: 82, 948336: 71, 948383: 97, 949580: 81, 950817: 67, 953986: 38, 954249: 9, 954415: 10, 954717: 19, 955029: 46, 955032: 95, 956264: 26, 956430: 11, 957404: 84, 958966: 7, 959004: 76, 959564: 79, 960542: 8, 961129: 67, 961620: 63, 962991: 25, 963001: 64, 963666: 1, 964929: 36, 965353: 74, 966547: 17, 968411: 29, 969960: 39, 970823: 12, 971050: 46, 973315: 62, 973528: 69, 974091: 64, 974279: 7, 974955: 49, 976428: 15, 977616: 66, 983977: 86, 984204: 72, 984439: 97, 984756: 54, 985182: 88, 986313: 99, 987209: 22, 989771: 32, 990139: 1, 991082: 43, 991264: 50, 993516: 82, 993858: 86, 994768: 21, 996406: 61, 996635: 49, 997225: 86, 999298: 11, 999821: 65}, + {3945: 65, 6587: 51, 7370: 2, 7700: 23, 7821: 39, 9337: 27, 13175: 57, 17375: 64, 22124: 39, 23294: 51, 25044: 37, 29220: 82, 33292: 75, 34810: 34, 36381: 39, 43120: 25, 43146: 76, 45066: 15, 48005: 66, 52733: 59, 53645: 96, 57492: 11, 57841: 82, 58406: 33, 58643: 76, 65165: 0, 65695: 48, 68192: 18, 70547: 57, 70977: 64, 72071: 51, 80134: 53, 80203: 1, 86323: 56, 86983: 42, 88293: 13, 90566: 5, 90746: 15, 
91939: 13, 94620: 58, 95169: 42, 97302: 88, 97924: 66, 98281: 15, 100112: 71, 100506: 27, 102572: 12, 102688: 57, 102762: 41, 105041: 52, 110000: 86, 110626: 32, 114594: 34, 118174: 22, 119027: 51, 121789: 90, 122935: 23, 138784: 57, 139868: 94, 143428: 20, 147504: 30, 149329: 32, 153043: 38, 159597: 72, 166218: 49, 182985: 80, 184103: 66, 187374: 79, 190993: 24, 192570: 46, 197977: 82, 202705: 9, 204343: 87, 205864: 52, 206320: 91, 208402: 0, 209497: 60, 210209: 36, 211342: 99, 217546: 98, 223191: 40, 225549: 19, 228316: 67, 229132: 58, 231797: 10, 232095: 4, 233635: 56, 239339: 55, 246572: 5, 258277: 10, 263345: 53, 265899: 27, 272556: 85, 272687: 44, 273068: 2, 273727: 29, 276583: 39, 277248: 89, 277355: 86, 281640: 34, 285073: 93, 288066: 3, 301611: 63, 305099: 74, 307891: 77, 308616: 33, 314104: 15, 314464: 98, 317752: 57, 323406: 17, 324067: 50, 328547: 9, 330531: 92, 333527: 51, 335557: 16, 335637: 83, 340179: 23, 342232: 1, 346632: 84, 348963: 8, 349842: 98, 350970: 28, 355388: 93, 356031: 93, 357412: 2, 361335: 57, 361365: 24, 362473: 88, 362536: 86, 368831: 8, 369157: 2, 369854: 15, 374862: 84, 378383: 27, 380553: 14, 381838: 55, 382207: 84, 382309: 39, 383567: 13, 386141: 68, 391384: 34, 392404: 30, 393763: 7, 396467: 95, 401084: 39, 401312: 83, 406280: 92, 406353: 76, 407378: 64, 410876: 86, 411083: 61, 418229: 30, 422701: 67, 424307: 47, 428125: 85, 439168: 40, 442653: 20, 447677: 40, 453225: 23, 453564: 49, 456040: 51, 461723: 53, 465904: 53, 468100: 0, 469470: 47, 472075: 87, 474177: 10, 476675: 3, 481653: 29, 482070: 67, 482923: 9, 486889: 23, 489147: 92, 491258: 52, 495780: 38, 496119: 41, 496681: 97, 499295: 23, 501420: 97, 503220: 45, 505962: 73, 507419: 16, 510476: 89, 511330: 81, 513390: 63, 515508: 37, 517870: 4, 526612: 61, 529104: 71, 531081: 75, 536267: 36, 536291: 19, 538835: 39, 542698: 70, 544528: 81, 545799: 56, 551892: 80, 552121: 79, 554313: 12, 555754: 92, 565859: 15, 570099: 63, 571249: 78, 574156: 16, 575369: 76, 579318: 93, 
582263: 1, 585518: 36, 585575: 83, 591339: 8, 591637: 20, 594239: 45, 594414: 26, 596943: 21, 597431: 54, 599002: 94, 600261: 90, 601109: 36, 603651: 38, 604498: 72, 604579: 0, 605048: 71, 608864: 77, 611510: 1, 614541: 22, 619335: 1, 620060: 8, 622683: 15, 625527: 60, 626004: 87, 626845: 54, 635726: 22, 639661: 13, 641287: 87, 644052: 91, 647145: 59, 648623: 87, 649390: 60, 649956: 97, 650156: 40, 651989: 44, 655724: 47, 666143: 79, 669110: 5, 671274: 18, 671530: 53, 672848: 78, 673295: 82, 673873: 89, 675010: 99, 681355: 25, 694231: 89, 696800: 51, 697627: 38, 697935: 42, 699204: 5, 699669: 78, 701847: 88, 701888: 35, 712135: 67, 713051: 27, 714032: 32, 716614: 58, 717199: 11, 722130: 87, 722172: 3, 725329: 56, 727355: 99, 730024: 69, 730409: 22, 735799: 81, 739335: 67, 741856: 86, 742378: 39, 743830: 91, 744609: 95, 746859: 74, 749754: 30, 750608: 17, 757619: 55, 759588: 26, 761640: 60, 762324: 58, 764017: 31, 767890: 50, 768895: 8, 772917: 93, 775528: 20, 778908: 96, 781148: 73, 790529: 14, 793090: 44, 793308: 38, 803564: 97, 811782: 14, 812016: 40, 812733: 81, 815668: 2, 818658: 35, 824115: 20, 832470: 83, 835259: 14, 835961: 28, 839229: 17, 839436: 44, 839530: 63, 839708: 48, 840122: 36, 843028: 92, 844050: 90, 844975: 82, 845995: 25, 848886: 18, 848951: 33, 850932: 97, 851105: 80, 851360: 37, 854049: 29, 856196: 71, 858700: 53, 861714: 24, 871229: 27, 874253: 8, 875515: 56, 879199: 0, 883684: 58, 885940: 54, 888507: 33, 890730: 11, 891307: 95, 891502: 55, 901943: 57, 902885: 51, 907538: 30, 909424: 13, 910885: 76, 913035: 4, 916384: 5, 916410: 78, 922463: 30, 923685: 12, 924012: 23, 924165: 94, 925071: 83, 925452: 91, 930741: 65, 934368: 62, 936301: 3, 936364: 86, 940995: 97, 941267: 52, 942942: 90, 944015: 30, 953073: 86, 964229: 15, 964758: 37, 967530: 68, 975105: 15, 978769: 49, 981272: 44, 982415: 0, 985883: 53, 988354: 81, 990979: 11, 991475: 40}, + {519: 67, 1875: 0, 2257: 63, 3246: 49, 3770: 61, 3773: 63, 3892: 48, 4026: 0, 4261: 13, 5489: 37, 7337: 
54, 7838: 47, 7873: 8, 8505: 18, 9008: 45, 10211: 83, 10613: 24, 11187: 11, 11196: 33, 11825: 58, 11927: 60, 13209: 25, 13301: 2, 13510: 97, 14764: 41, 16246: 49, 16549: 3, 17146: 62, 17716: 77, 21137: 26, 21389: 58, 21423: 72, 21803: 35, 22433: 67, 22649: 60, 22665: 25, 23761: 85, 24171: 65, 25731: 92, 26216: 43, 27369: 59, 27941: 12, 28062: 99, 28640: 17, 30444: 27, 30755: 82, 31123: 44, 32408: 21, 32461: 89, 33294: 97, 34048: 64, 34952: 14, 35835: 77, 38739: 58, 38849: 21, 38877: 8, 39569: 31, 39657: 22, 41005: 68, 41094: 45, 43488: 90, 43760: 28, 44073: 68, 44099: 80, 44379: 64, 44508: 68, 44511: 64, 44917: 6, 46303: 0, 47827: 82, 48325: 37, 49201: 24, 49361: 79, 50880: 24, 51437: 91, 52712: 51, 52723: 32, 52828: 14, 52845: 57, 53634: 7, 53903: 59, 55105: 54, 55169: 12, 55934: 11, 56400: 39, 57351: 73, 57452: 24, 58327: 17, 58388: 13, 58495: 81, 58669: 56, 59647: 44, 60045: 3, 60743: 91, 60870: 10, 60876: 18, 61230: 4, 62352: 45, 63196: 69, 63264: 99, 63382: 10, 63543: 87, 63668: 19, 67396: 69, 67559: 9, 69366: 18, 69486: 5, 70535: 48, 71166: 42, 71781: 29, 73375: 13, 73870: 98, 73996: 16, 76279: 77, 76320: 8, 76572: 79, 76661: 25, 77975: 38, 79655: 33, 80372: 29, 80867: 72, 80951: 83, 83556: 81, 84602: 7, 85008: 47, 85520: 90, 85624: 73, 87229: 0, 88457: 91, 89401: 29, 92340: 91, 92753: 29, 93167: 65, 93356: 59, 95017: 99, 95442: 85, 97141: 66, 97641: 37, 98684: 15, 98778: 81, 98805: 93, 99722: 32, 99942: 41, 100031: 93, 102323: 38, 102548: 63, 102908: 85, 106273: 7, 107431: 49, 108312: 60, 108338: 65, 110071: 7, 110374: 16, 111615: 69, 112046: 32, 112393: 45, 115167: 62, 115283: 15, 115707: 93, 116062: 41, 116808: 13, 117386: 66, 117441: 66, 118465: 85, 119873: 85, 120977: 4, 121176: 57, 123319: 35, 123578: 13, 124723: 64, 125272: 72, 130734: 6, 131239: 73, 131514: 22, 132057: 4, 132679: 94, 132680: 4, 134265: 37, 135522: 87, 138286: 35, 141196: 6, 141771: 72, 141896: 0, 142100: 28, 142319: 74, 144170: 32, 148285: 74, 148605: 8, 148967: 94, 150173: 33, 
151324: 2, 152096: 28, 152613: 41, 152834: 36, 153209: 23, 153768: 87, 155335: 20, 156522: 35, 158066: 37, 158576: 46, 160136: 46, 160158: 35, 160598: 82, 160894: 63, 162785: 97, 163061: 32, 166001: 27, 166254: 57, 166913: 74, 167823: 42, 169047: 52, 169194: 97, 169648: 52, 170412: 26, 172186: 89, 173991: 47, 174291: 68, 175151: 56, 175775: 61, 176274: 55, 177262: 95, 177359: 75, 179595: 96, 180357: 22, 181316: 41, 182054: 87, 183370: 55, 183386: 85, 184206: 28, 184346: 64, 184543: 45, 185244: 59, 185551: 84, 187619: 30, 189035: 21, 190318: 72, 191741: 10, 193654: 51, 194384: 39, 194842: 57, 196114: 67, 196451: 1, 196832: 89, 197191: 98, 197767: 56, 197889: 91, 199187: 15, 200954: 9, 201017: 33, 201510: 16, 202081: 62, 202459: 46, 203395: 42, 203442: 91, 203799: 95, 204808: 30, 206606: 28, 206664: 91, 207051: 3, 207265: 47, 208430: 41, 208457: 48, 211842: 43, 211860: 35, 212137: 75, 212574: 76, 213499: 79, 213597: 58, 215162: 38, 215467: 56, 215468: 25, 215520: 23, 217821: 31, 217920: 13, 218264: 60, 218323: 19, 218851: 85, 219470: 6, 219555: 1, 220375: 74, 222708: 92, 223013: 10, 224527: 48, 225121: 93, 226810: 21, 227746: 11, 228224: 73, 229419: 74, 229970: 25, 230898: 69, 231547: 27, 233281: 36, 233407: 66, 235232: 21, 235591: 24, 236842: 11, 237016: 93, 238850: 50, 239188: 63, 240959: 23, 241901: 81, 242259: 82, 242731: 17, 243797: 63, 244328: 71, 244444: 13, 246815: 47, 247046: 59, 251273: 38, 253241: 5, 253812: 98, 254007: 86, 254329: 21, 254558: 91, 254836: 78, 255904: 0, 256352: 84, 257847: 96, 259544: 88, 259730: 75, 260460: 24, 260734: 5, 260841: 30, 261291: 64, 262835: 29, 263129: 91, 263135: 62, 264519: 32, 265334: 11, 265964: 36, 267300: 14, 268299: 35, 268374: 69, 268378: 88, 269351: 44, 271285: 53, 272279: 38, 272312: 79, 272505: 58, 272802: 40, 274192: 58, 274976: 94, 275180: 35, 276230: 16, 277116: 86, 278461: 86, 278801: 58, 280557: 65, 281864: 49, 281897: 1, 284100: 27, 285043: 29, 285540: 10, 285603: 85, 285692: 75, 289577: 68, 289951: 27, 
290206: 86, 290549: 92, 290794: 23, 292503: 48, 292534: 90, 292649: 54, 292834: 54, 293777: 51, 294479: 50, 295572: 88, 296910: 2, 297076: 75, 297607: 45, 298270: 40, 298369: 80, 299788: 85, 301595: 56, 302137: 62, 305017: 33, 305950: 64, 307050: 27, 309253: 87, 310201: 75, 310643: 51, 311338: 74, 311381: 34, 311393: 11, 312626: 23, 313446: 30, 315593: 29, 315840: 42, 315955: 57, 316623: 82, 317237: 63, 318486: 74, 318935: 11, 320798: 95, 320998: 39, 321500: 5, 321551: 4, 322131: 47, 322307: 80, 322378: 67, 324193: 16, 324528: 14, 324673: 28, 325470: 59, 325822: 57, 326041: 43, 326482: 3, 326944: 12, 327668: 53, 327670: 85, 327997: 62, 330062: 77, 331719: 3, 332239: 76, 332612: 29, 332637: 75, 333856: 30, 335154: 53, 336933: 5, 337048: 18, 338057: 61, 339028: 89, 339126: 89, 339236: 42, 339596: 51, 341602: 65, 342033: 21, 342357: 80, 342888: 66, 345712: 98, 346281: 64, 346570: 92, 348939: 21, 349966: 67, 350583: 63, 350779: 41, 351105: 16, 351193: 85, 352617: 37, 354108: 39, 354796: 62, 356457: 5, 356577: 61, 356915: 49, 357989: 8, 358100: 13, 358578: 86, 359316: 47, 360279: 9, 361356: 60, 362020: 24, 362160: 69, 362182: 81, 362353: 80, 365512: 84, 366869: 51, 368374: 94, 368694: 50, 369496: 17, 372742: 59, 373991: 9, 374548: 76, 374749: 64, 376269: 66, 378966: 69, 381412: 38, 381857: 45, 382394: 91, 383379: 0, 385967: 15, 386463: 55, 388003: 50, 388011: 97, 388156: 41, 388278: 62, 389413: 32, 391630: 70, 391727: 23, 392952: 97, 393129: 71, 393172: 70, 394574: 67, 396081: 53, 396953: 59, 397246: 41, 397304: 29, 398137: 92, 398373: 13, 399561: 70, 400361: 95, 400779: 76, 400900: 22, 402959: 45, 403034: 62, 403041: 23, 403245: 75, 404021: 24, 404278: 35, 404931: 84, 405267: 74, 405890: 21, 407132: 60, 407287: 67, 407486: 67, 407654: 91, 410245: 38, 411039: 7, 412097: 5, 412502: 33, 412688: 42, 413053: 74, 413993: 24, 415189: 98, 416075: 48, 417370: 12, 417553: 68, 418793: 68, 419302: 45, 419534: 59, 419635: 4, 419696: 16, 419735: 75, 421484: 70, 421533: 8, 421593: 
68, 421605: 56, 422536: 39, 423147: 4, 423273: 20, 424640: 89, 425073: 68, 425748: 17, 425964: 99, 426844: 88, 430039: 53, 430111: 45, 431260: 13, 434235: 99, 434733: 15, 434755: 79, 435020: 97, 435490: 5, 436703: 13, 437143: 37, 437632: 85, 438116: 14, 439272: 76, 439848: 14, 440258: 25, 441121: 42, 441449: 62, 441834: 57, 442882: 17, 443134: 53, 446132: 11, 446979: 77, 447596: 14, 449099: 29, 450373: 81, 450511: 91, 450748: 16, 453386: 61, 453669: 82, 454338: 67, 455994: 67, 456374: 75, 456406: 72, 456664: 86, 456899: 7, 457678: 7, 458898: 18, 460968: 93, 461585: 70, 462348: 67, 462546: 18, 464225: 81, 464373: 45, 464421: 28, 464575: 10, 464922: 71, 465343: 6, 465777: 26, 466454: 31, 466486: 70, 467027: 82, 467585: 83, 467836: 31, 468744: 30, 468986: 84, 469018: 67, 470492: 89, 471112: 58, 471827: 19, 472170: 54, 474023: 10, 477903: 47, 480626: 41, 481202: 11, 481989: 88, 482826: 8, 483881: 15, 484175: 40, 484752: 28, 486260: 89, 488230: 74, 489081: 14, 489917: 8, 490068: 92, 490214: 52, 490328: 26, 490492: 47, 491333: 17, 492037: 29, 493145: 57, 493761: 95, 494154: 96, 494523: 67, 494821: 42, 499507: 95, 500700: 91, 501135: 77, 502330: 53, 502932: 0, 503410: 69, 505339: 70, 505395: 78, 505589: 47, 505829: 28, 507302: 17, 507454: 56, 508708: 28, 509445: 54, 509846: 66, 510248: 45, 510345: 36, 510572: 89, 512413: 28, 513039: 13, 516216: 97, 516233: 61, 516326: 16, 516724: 21, 516761: 89, 517149: 57, 517934: 7, 519423: 40, 519482: 60, 519911: 9, 520355: 16, 520668: 77, 522617: 79, 522908: 22, 523335: 37, 523786: 43, 526742: 34, 527536: 17, 528212: 70, 528319: 56, 529005: 6, 530893: 86, 530988: 0, 531013: 12, 531066: 9, 531632: 18, 532689: 69, 532806: 51, 533569: 80, 534605: 40, 534924: 16, 535152: 92, 535950: 3, 538389: 84, 539089: 54, 540366: 35, 541344: 95, 541471: 64, 541627: 59, 542197: 8, 545243: 18, 545591: 98, 546257: 57, 546646: 13, 547936: 20, 549036: 76, 550823: 29, 552155: 23, 553641: 45, 554501: 24, 554567: 56, 555194: 52, 555459: 59, 555911: 65, 
557036: 63, 557618: 56, 557758: 70, 559420: 89, 561008: 91, 561452: 29, 563198: 80, 563336: 59, 563740: 92, 564130: 6, 564169: 32, 565522: 38, 565910: 5, 567262: 48, 567512: 79, 567676: 14, 568613: 79, 568708: 23, 569162: 16, 569839: 6, 571310: 4, 572283: 77, 573318: 16, 573472: 65, 574367: 68, 575459: 6, 576399: 4, 576845: 98, 578636: 11, 578639: 62, 580246: 48, 580462: 26, 581996: 77, 582174: 61, 583523: 61, 584354: 49, 587620: 19, 587688: 57, 589181: 10, 589723: 64, 589975: 2, 594555: 82, 595189: 38, 595224: 24, 596667: 76, 596753: 56, 596895: 14, 597770: 76, 599944: 66, 601345: 76, 602023: 71, 602315: 44, 603406: 81, 606255: 29, 606602: 43, 608658: 49, 610076: 32, 610747: 73, 611414: 51, 611834: 10, 612301: 42, 612352: 14, 612873: 32, 614161: 60, 614299: 21, 614656: 84, 616584: 10, 617872: 21, 618317: 89, 619326: 33, 619333: 24, 619396: 81, 619605: 33, 620714: 67, 621789: 78, 622409: 20, 623177: 32, 623193: 96, 623422: 63, 623605: 12, 624038: 97, 624832: 43, 624978: 4, 626804: 86, 628203: 20, 629225: 99, 629498: 72, 630169: 81, 631781: 51, 634228: 60, 634371: 26, 635602: 41, 636119: 67, 636562: 26, 637582: 40, 638412: 64, 638455: 19, 639345: 49, 639440: 82, 639933: 79, 640142: 58, 640529: 57, 641093: 44, 641239: 35, 641955: 55, 642772: 8, 642782: 66, 644135: 28, 644181: 46, 645450: 34, 646268: 77, 647198: 18, 647597: 90, 649414: 4, 649627: 78, 649881: 25, 651972: 66, 652786: 8, 653519: 78, 654669: 67, 655046: 52, 655101: 97, 656293: 62, 657529: 71, 658261: 13, 658638: 92, 659718: 86, 659974: 92, 662852: 38, 663837: 87, 663897: 87, 665097: 98, 666556: 68, 667869: 8, 671682: 80, 672130: 51, 672487: 56, 673662: 41, 673916: 35, 674828: 78, 675614: 50, 676502: 15, 678654: 23, 679041: 45, 680176: 52, 680651: 99, 680904: 31, 681123: 56, 681409: 36, 681982: 69, 682220: 41, 682734: 92, 682808: 66, 683541: 3, 684703: 34, 685853: 43, 688217: 93, 688664: 36, 689163: 31, 689384: 85, 690692: 19, 691668: 47, 692755: 65, 693139: 41, 693264: 17, 693366: 27, 694338: 94, 694785: 
61, 695041: 58, 696759: 33, 697584: 82, 697708: 29, 698863: 43, 699099: 70, 699109: 81, 699201: 63, 699928: 25, 700105: 3, 700425: 82, 700689: 99, 701684: 95, 702396: 95, 702831: 66, 703673: 80, 704276: 1, 705404: 95, 709337: 5, 711208: 37, 711758: 45, 714525: 77, 715499: 57, 716304: 39, 716494: 91, 716774: 85, 717835: 33, 719079: 10, 719540: 30, 720951: 96, 721314: 28, 722218: 77, 723686: 77, 724112: 84, 724777: 62, 725241: 23, 725290: 55, 725741: 90, 726325: 15, 726829: 15, 727810: 51, 729546: 84, 730825: 29, 730966: 5, 731949: 72, 732562: 22, 732770: 93, 732878: 73, 733658: 35, 733980: 66, 734372: 97, 734623: 54, 735046: 35, 736578: 80, 737473: 64, 737537: 42, 737799: 0, 738770: 23, 739554: 42, 739823: 91, 741553: 74, 742877: 53, 743068: 0, 743180: 28, 743616: 34, 743977: 76, 744167: 26, 745194: 75, 745730: 52, 746048: 39, 746468: 9, 747215: 9, 747512: 91, 748707: 75, 748733: 96, 750313: 81, 751199: 48, 751425: 76, 752293: 56, 753933: 42, 757191: 7, 757198: 55, 757250: 37, 757608: 56, 757743: 70, 758266: 89, 758908: 76, 759178: 92, 760310: 67, 760356: 78, 760676: 46, 761155: 42, 761258: 51, 761947: 48, 762765: 6, 762896: 27, 764061: 95, 764863: 24, 764874: 91, 765842: 71, 767268: 95, 768188: 38, 769420: 18, 770298: 68, 772858: 96, 773030: 49, 773722: 37, 774748: 37, 775016: 99, 776155: 52, 776482: 30, 777149: 35, 778068: 24, 778394: 18, 779017: 3, 779767: 8, 782402: 69, 782982: 30, 783015: 50, 783190: 45, 783515: 24, 784613: 33, 785142: 14, 785942: 1, 786581: 25, 789013: 73, 790733: 92, 791028: 88, 793792: 97, 795835: 35, 796023: 91, 796557: 23, 797062: 22, 798368: 29, 799922: 19, 800652: 7, 800932: 65, 801364: 85, 801793: 50, 803244: 25, 803262: 26, 804484: 71, 804791: 90, 804889: 9, 805156: 35, 808558: 47, 809399: 93, 810560: 83, 811781: 35, 812213: 68, 813322: 20, 814386: 84, 814916: 47, 814981: 33, 815136: 72, 815845: 75, 816459: 84, 817457: 44, 817873: 59, 818608: 10, 819806: 88, 820027: 65, 820238: 45, 820337: 90, 820618: 17, 822494: 17, 823083: 64, 
823150: 53, 823474: 0, 823624: 21, 823724: 68, 824164: 90, 825098: 92, 825656: 71, 825901: 44, 826118: 58, 826154: 68, 826919: 61, 828446: 72, 829119: 40, 834045: 45, 835706: 50, 836149: 68, 836188: 12, 836750: 62, 837104: 29, 837860: 76, 838458: 47, 839786: 55, 839972: 72, 841224: 61, 842648: 80, 843126: 68, 843433: 12, 844734: 64, 848521: 92, 848722: 13, 849458: 23, 850620: 21, 854392: 65, 854801: 7, 855932: 96, 856047: 77, 856269: 71, 858514: 81, 859065: 85, 859479: 46, 860578: 68, 861499: 68, 863310: 66, 863362: 34, 863461: 65, 864647: 34, 865683: 60, 865939: 25, 866937: 68, 867110: 98, 867766: 64, 867778: 13, 870161: 31, 870825: 41, 871352: 24, 871574: 30, 872575: 39, 872759: 1, 873158: 77, 874114: 29, 875697: 66, 876085: 50, 876376: 15, 876593: 33, 876633: 72, 876895: 32, 878373: 51, 879176: 23, 880457: 52, 880887: 12, 881027: 41, 882158: 81, 883385: 68, 884168: 24, 884189: 16, 884501: 21, 891255: 27, 892501: 97, 892944: 40, 893042: 51, 893903: 60, 896266: 55, 896361: 94, 896450: 95, 896495: 2, 896774: 29, 897398: 33, 898193: 41, 898820: 56, 899746: 76, 900025: 18, 900421: 15, 901295: 76, 901592: 95, 902227: 31, 902552: 11, 903323: 66, 903588: 51, 904286: 28, 905041: 78, 905044: 84, 905190: 21, 905513: 24, 906285: 58, 907209: 42, 907685: 25, 908311: 28, 908584: 95, 908939: 33, 909927: 44, 910198: 1, 911093: 54, 911536: 47, 912579: 5, 913264: 53, 913304: 22, 914605: 60, 914906: 59, 915069: 64, 916604: 2, 917270: 32, 917711: 54, 917981: 19, 918497: 39, 918577: 52, 920653: 75, 921860: 65, 922002: 81, 922030: 21, 922076: 20, 923455: 40, 923544: 94, 924122: 43, 924290: 63, 924338: 86, 924653: 1, 924714: 34, 926292: 46, 927924: 31, 928191: 86, 928725: 14, 931574: 33, 933043: 54, 933193: 66, 934600: 50, 934883: 5, 936354: 15, 936753: 79, 938462: 56, 938571: 59, 940540: 77, 940986: 17, 941000: 33, 942663: 55, 945161: 16, 950066: 28, 950836: 40, 952999: 87, 953027: 0, 953057: 33, 954014: 10, 954113: 47, 954581: 66, 954781: 71, 956347: 66, 956456: 31, 957029: 60, 
959340: 30, 959738: 64, 959906: 88, 960761: 89, 961371: 78, 961372: 90, 961521: 61, 963510: 4, 965090: 16, 966196: 3, 966795: 39, 966811: 9, 967150: 8, 967620: 24, 967688: 45, 967952: 26, 968049: 29, 968545: 68, 968864: 68, 969776: 87, 970155: 86, 970987: 87, 971993: 92, 973445: 85, 973669: 21, 974070: 4, 974957: 29, 976781: 12, 978861: 14, 980579: 61, 980745: 53, 981386: 6, 981490: 5, 982540: 38, 982995: 51, 983214: 11, 983391: 53, 983466: 57, 983526: 54, 983531: 51, 984568: 53, 985179: 32, 985275: 20, 986659: 90, 988178: 39, 988495: 90, 990769: 30, 991667: 33, 991803: 15, 992541: 74, 993414: 10, 993617: 52, 993709: 9, 993794: 61, 994380: 57, 995043: 52, 995818: 79, 997145: 66, 997589: 61, 998346: 38, 999093: 86, 999132: 53, 999185: 83}, + {4229: 12, 6206: 18, 8635: 50, 9423: 40, 12358: 76, 16675: 56, 17542: 22, 20719: 97, 30818: 38, 42267: 88, 46510: 37, 47907: 60, 49098: 14, 53513: 30, 56007: 52, 58823: 9, 59690: 30, 66198: 11, 71995: 14, 79780: 99, 90060: 74, 91921: 18, 92086: 85, 93281: 76, 95138: 48, 98660: 44, 102567: 40, 103382: 54, 104234: 42, 104840: 30, 106330: 50, 106743: 54, 107772: 65, 114144: 49, 121077: 5, 121811: 84, 125995: 46, 130522: 84, 133101: 66, 133437: 2, 134095: 52, 140035: 49, 142433: 71, 143614: 80, 149240: 18, 166070: 82, 169362: 26, 172872: 76, 176818: 15, 177342: 26, 184897: 75, 193011: 74, 200518: 33, 201179: 5, 202361: 80, 207862: 36, 210160: 62, 212905: 3, 215011: 59, 215039: 26, 222794: 73, 225907: 53, 227925: 64, 231672: 81, 234381: 20, 237409: 23, 244708: 65, 250601: 70, 259430: 43, 261967: 63, 262369: 9, 265863: 66, 265998: 4, 268787: 5, 270216: 26, 270941: 63, 277835: 19, 284279: 13, 285323: 28, 286657: 41, 291833: 85, 296465: 7, 300420: 86, 314054: 43, 317790: 48, 318857: 98, 321623: 99, 327795: 15, 329243: 74, 333093: 18, 340684: 63, 344679: 91, 347510: 81, 348667: 13, 349472: 46, 349510: 33, 350960: 81, 352696: 39, 360183: 68, 360299: 16, 360732: 72, 364372: 70, 366146: 18, 368004: 54, 372954: 77, 374731: 36, 375405: 94, 
382815: 47, 384012: 26, 389380: 38, 394762: 1, 395636: 97, 402573: 0, 403190: 48, 403908: 30, 405297: 17, 405844: 28, 406997: 75, 410408: 11, 412529: 5, 412921: 14, 415926: 37, 425567: 18, 427297: 28, 434853: 32, 448379: 96, 450795: 29, 456537: 95, 465784: 11, 467790: 74, 472421: 46, 480025: 74, 486206: 6, 490316: 53, 490889: 46, 499107: 99, 511051: 48, 514571: 82, 519877: 85, 521826: 82, 529286: 25, 529957: 73, 532833: 88, 534620: 60, 535834: 70, 537719: 50, 540404: 29, 543912: 73, 545381: 28, 546191: 25, 548339: 19, 557334: 90, 558598: 2, 559296: 26, 562539: 68, 579671: 58, 579800: 89, 583589: 83, 584741: 96, 590846: 4, 596052: 55, 606858: 82, 612954: 89, 614066: 16, 618890: 52, 618905: 34, 626006: 75, 627467: 28, 628986: 46, 630264: 64, 634275: 46, 639711: 62, 641345: 48, 645152: 97, 648322: 30, 652284: 54, 654445: 73, 656540: 98, 656689: 48, 658826: 84, 660427: 65, 665482: 11, 666259: 42, 667226: 59, 667896: 59, 670676: 17, 671471: 22, 674506: 3, 684574: 90, 687098: 39, 689153: 49, 689844: 92, 689940: 17, 690995: 84, 703010: 5, 706981: 45, 710281: 68, 717127: 7, 721158: 93, 723880: 5, 724587: 15, 738271: 72, 739947: 85, 742089: 85, 743022: 96, 747240: 51, 748171: 31, 748635: 63, 751687: 43, 751795: 57, 758700: 39, 759639: 98, 762843: 29, 766325: 51, 767685: 58, 779133: 7, 784948: 17, 787086: 56, 789910: 43, 791720: 58, 793295: 18, 794509: 48, 797648: 80, 802627: 21, 805190: 57, 814941: 6, 822306: 92, 826851: 19, 830610: 92, 830657: 77, 833943: 89, 836011: 7, 840596: 9, 845283: 28, 845843: 57, 851450: 54, 851941: 26, 865096: 79, 868696: 14, 870160: 61, 872677: 3, 872859: 33, 873340: 18, 875747: 51, 876401: 14, 886316: 15, 889998: 96, 890751: 14, 895769: 12, 896078: 84, 896250: 54, 900011: 53, 904209: 14, 912246: 23, 912704: 17, 922288: 69, 923661: 4, 927412: 83, 927709: 9, 930036: 29, 933384: 83, 936954: 1, 941459: 46, 944872: 54, 949616: 29, 951335: 70, 951955: 7, 963551: 10, 966015: 80, 967680: 61, 971223: 63, 971258: 9, 971550: 31, 972189: 77, 974358: 96, 
977817: 66, 978426: 99, 983330: 8, 984897: 71, 987329: 79, 988411: 63, 991951: 35, 992909: 33}, + {103: 49, 3083: 77, 3817: 3, 11680: 54, 14133: 26, 15020: 83, 16925: 18, 17751: 64, 18749: 44, 22143: 54, 23281: 48, 24137: 86, 24908: 48, 29536: 82, 29695: 81, 30093: 55, 33387: 62, 36534: 25, 39913: 45, 42889: 74, 44035: 43, 44205: 65, 44642: 59, 44778: 73, 45216: 58, 46813: 25, 48181: 47, 49942: 94, 50202: 83, 51578: 43, 52055: 35, 52116: 46, 52940: 45, 53476: 7, 53593: 7, 53627: 90, 54475: 58, 58337: 84, 63548: 29, 63986: 59, 65935: 26, 68260: 29, 69076: 92, 69268: 97, 71096: 52, 73583: 62, 74926: 16, 75442: 76, 75539: 37, 76551: 24, 76592: 8, 76623: 69, 77114: 9, 78468: 22, 78998: 65, 79871: 46, 79919: 16, 82534: 78, 83744: 4, 85206: 25, 86979: 44, 87764: 39, 88181: 87, 89812: 70, 89986: 84, 90815: 66, 92773: 2, 92904: 78, 93296: 85, 95712: 77, 97002: 41, 97684: 46, 97757: 18, 97883: 17, 98469: 28, 99226: 79, 99935: 46, 103056: 23, 103103: 67, 104083: 74, 104568: 66, 105241: 86, 105925: 90, 107152: 2, 109166: 43, 109458: 73, 111426: 86, 112979: 70, 113783: 9, 114519: 72, 114598: 2, 114630: 1, 114726: 96, 116513: 44, 116731: 0, 118847: 75, 121069: 41, 121432: 16, 121494: 17, 121753: 16, 121806: 9, 122134: 88, 123172: 89, 125289: 32, 126383: 7, 130477: 18, 133323: 43, 133554: 85, 139522: 8, 139589: 73, 141433: 55, 141703: 89, 143681: 21, 144353: 74, 145218: 52, 145230: 30, 145416: 64, 145624: 43, 145970: 46, 147573: 56, 147987: 21, 148837: 59, 149502: 4, 152878: 30, 154941: 33, 155799: 25, 157396: 30, 158160: 55, 158614: 82, 159702: 85, 161012: 66, 163404: 18, 164011: 82, 166284: 2, 167170: 12, 170113: 1, 170761: 69, 172046: 28, 172675: 86, 173458: 55, 174960: 15, 175064: 56, 176549: 13, 176614: 79, 176780: 52, 177008: 52, 180739: 35, 181298: 3, 185879: 15, 191912: 89, 193990: 67, 196049: 36, 196289: 7, 197449: 99, 198221: 52, 198978: 61, 199194: 19, 199501: 82, 201032: 99, 203648: 90, 205158: 45, 205539: 43, 205701: 7, 205827: 64, 207848: 86, 209613: 23, 210655: 3, 
211120: 8, 212164: 30, 213341: 26, 215087: 55, 218082: 61, 218851: 61, 220377: 51, 221914: 22, 226339: 9, 227314: 95, 228952: 28, 231167: 34, 233158: 40, 235212: 62, 237866: 36, 239355: 99, 240219: 68, 240563: 33, 240654: 41, 241250: 19, 242498: 2, 243545: 92, 246318: 54, 246476: 59, 246829: 36, 247457: 12, 248229: 42, 248801: 18, 249743: 63, 252436: 82, 254721: 68, 254910: 97, 256832: 3, 257473: 27, 259208: 73, 261544: 34, 262077: 93, 262653: 36, 262659: 62, 263182: 14, 264634: 85, 265605: 48, 271492: 49, 273526: 0, 274786: 89, 275930: 87, 279940: 23, 280843: 46, 281871: 88, 282106: 3, 282385: 71, 282711: 75, 283000: 0, 283032: 70, 283117: 67, 285782: 79, 289567: 56, 289906: 55, 291475: 69, 291687: 83, 291702: 90, 292281: 25, 293921: 54, 294512: 37, 296070: 88, 296206: 47, 298818: 27, 299963: 88, 300082: 44, 300630: 66, 300715: 38, 301765: 83, 303073: 82, 303889: 68, 304297: 33, 304532: 57, 305604: 95, 305965: 37, 307369: 97, 307754: 76, 308543: 9, 309017: 49, 311394: 65, 313100: 37, 314111: 91, 314222: 20, 315489: 6, 315739: 26, 316814: 17, 318821: 31, 323201: 73, 323752: 35, 324034: 50, 325129: 82, 326245: 31, 327530: 25, 331200: 98, 331837: 85, 332672: 28, 332912: 34, 333255: 11, 333388: 62, 336717: 79, 337696: 23, 337968: 67, 338500: 76, 339206: 12, 339513: 35, 340891: 56, 342188: 37, 342961: 23, 344578: 99, 344621: 83, 346942: 84, 347372: 62, 350083: 97, 350979: 60, 351769: 18, 353998: 93, 356297: 95, 356561: 25, 358687: 58, 360055: 59, 361446: 51, 361495: 0, 361841: 70, 364134: 33, 365030: 89, 365554: 79, 365879: 9, 366344: 73, 366569: 59, 366598: 60, 370884: 85, 371396: 32, 373207: 37, 375696: 51, 377580: 47, 377987: 70, 378151: 82, 381262: 20, 382745: 65, 383824: 31, 384231: 64, 384682: 38, 384905: 75, 385307: 9, 386122: 13, 386493: 12, 389613: 77, 389973: 72, 390899: 50, 391942: 67, 392371: 3, 393368: 33, 393556: 3, 393721: 53, 394015: 8, 394899: 21, 394917: 79, 397334: 85, 397396: 8, 400518: 16, 401180: 83, 403302: 34, 404284: 87, 404570: 47, 408574: 18, 
409881: 98, 410229: 99, 410591: 49, 412570: 28, 412876: 76, 415329: 39, 415355: 40, 419413: 22, 421373: 63, 421385: 80, 426365: 43, 426930: 64, 428810: 96, 428857: 1, 429601: 11, 430882: 14, 432705: 17, 433570: 48, 438664: 42, 439683: 17, 442139: 98, 442698: 11, 443321: 67, 443624: 91, 445152: 8, 445358: 48, 445678: 90, 447971: 45, 448424: 75, 448443: 0, 449706: 97, 453888: 14, 454806: 60, 454990: 85, 455657: 61, 456523: 90, 457383: 69, 457732: 34, 457932: 90, 458599: 64, 458607: 61, 459047: 56, 459157: 21, 459457: 50, 460410: 51, 460487: 9, 461123: 24, 462881: 56, 463299: 88, 465605: 87, 465682: 9, 465998: 93, 467397: 80, 468276: 42, 469463: 38, 472735: 19, 473915: 49, 474178: 46, 474540: 33, 477150: 90, 480929: 44, 481678: 66, 481842: 82, 482966: 7, 484397: 56, 485258: 78, 485892: 53, 487211: 10, 488533: 73, 489956: 60, 490519: 22, 492482: 99, 492736: 90, 494020: 73, 494539: 3, 497567: 49, 498075: 77, 498645: 98, 499651: 92, 500484: 50, 500746: 95, 500955: 92, 501219: 61, 501319: 14, 501715: 64, 502654: 17, 503231: 77, 504064: 52, 504146: 99, 506497: 60, 508862: 20, 510462: 37, 511414: 4, 512183: 26, 512358: 66, 512982: 15, 513617: 69, 514384: 81, 514530: 40, 516251: 39, 516839: 27, 521897: 53, 526088: 98, 526644: 90, 529686: 47, 529835: 81, 530062: 8, 530263: 19, 531302: 2, 532154: 60, 532465: 36, 532481: 54, 534161: 47, 534363: 42, 534411: 69, 536970: 80, 537596: 63, 538819: 98, 540704: 67, 543783: 11, 544362: 29, 545639: 34, 545724: 46, 548103: 4, 549335: 78, 549845: 50, 551756: 29, 552075: 19, 552784: 49, 554356: 60, 554544: 69, 555105: 65, 555753: 35, 557014: 92, 557260: 52, 557959: 88, 559061: 72, 560474: 44, 563779: 64, 564044: 82, 565105: 43, 566453: 1, 568061: 82, 570151: 53, 571912: 83, 572735: 62, 572964: 94, 572998: 51, 574700: 0, 575077: 14, 575926: 72, 578003: 29, 579182: 74, 580783: 57, 581215: 96, 581264: 53, 582157: 25, 583274: 72, 583739: 95, 585264: 17, 586767: 94, 589301: 63, 591028: 62, 592555: 44, 593011: 8, 593254: 97, 594467: 54, 595143: 
67, 597030: 20, 599662: 83, 601542: 55, 601634: 26, 601861: 74, 603111: 42, 606431: 53, 606812: 53, 607075: 22, 607284: 64, 608567: 92, 608899: 73, 609688: 44, 610139: 61, 611468: 85, 612981: 53, 612990: 4, 614312: 69, 614881: 10, 615038: 18, 616538: 66, 623444: 93, 623578: 46, 625201: 96, 625525: 81, 626405: 52, 627816: 65, 629806: 85, 629826: 52, 629905: 27, 630002: 62, 630076: 17, 632380: 80, 633353: 21, 634745: 67, 635525: 50, 635889: 57, 636013: 12, 636039: 48, 636084: 32, 637659: 93, 638485: 0, 641080: 21, 642251: 46, 645288: 77, 647067: 50, 648424: 89, 649299: 50, 649734: 93, 649856: 41, 653106: 38, 653510: 73, 654345: 64, 656448: 16, 656695: 69, 658362: 27, 658434: 51, 658692: 57, 659144: 98, 660292: 63, 660365: 6, 660399: 84, 660693: 4, 661582: 86, 663184: 47, 664950: 64, 665033: 3, 667212: 28, 668284: 70, 669784: 20, 670447: 15, 670493: 8, 674148: 39, 674450: 61, 675495: 69, 676404: 34, 676497: 89, 677150: 32, 677286: 80, 677874: 27, 681112: 28, 681298: 80, 681645: 60, 682876: 65, 682932: 65, 685135: 11, 685461: 56, 686352: 12, 687204: 25, 689718: 88, 689850: 3, 690324: 3, 690993: 47, 692002: 26, 695104: 52, 695228: 19, 695931: 72, 696084: 1, 696181: 78, 698237: 34, 698800: 27, 699011: 59, 701397: 44, 702537: 16, 703426: 89, 705464: 88, 705646: 15, 706324: 87, 707486: 53, 707507: 81, 708450: 87, 709155: 63, 709305: 4, 709923: 7, 710333: 12, 710819: 95, 711288: 79, 711435: 69, 712724: 14, 713514: 74, 717343: 27, 717635: 17, 718463: 69, 720213: 17, 722861: 76, 724056: 98, 724507: 33, 725246: 42, 725686: 48, 726051: 41, 727012: 15, 727250: 51, 731580: 32, 731874: 88, 733042: 61, 735597: 2, 736012: 40, 738519: 16, 739296: 96, 742115: 96, 745432: 64, 746096: 61, 748812: 78, 749822: 8, 751224: 76, 751708: 80, 752571: 56, 755503: 80, 757475: 81, 757648: 28, 757769: 44, 757968: 38, 759579: 83, 759762: 46, 760083: 49, 760348: 19, 760682: 95, 760715: 44, 762011: 70, 766219: 1, 767837: 7, 769003: 69, 769551: 86, 769790: 29, 771974: 11, 772056: 56, 772655: 23, 
772788: 15, 773240: 3, 773441: 43, 774199: 1, 774498: 93, 776195: 7, 776485: 67, 776762: 62, 782013: 70, 782127: 11, 783510: 5, 783558: 90, 783562: 78, 783981: 59, 784214: 49, 785029: 73, 786462: 28, 787187: 46, 789123: 48, 790931: 82, 791654: 36, 792116: 99, 794380: 63, 795592: 18, 796357: 43, 796392: 27, 796766: 3, 797105: 72, 797574: 88, 798231: 81, 799360: 90, 801420: 35, 802004: 12, 802160: 18, 807010: 39, 807209: 33, 807571: 78, 808706: 34, 809319: 9, 809329: 55, 809609: 75, 811930: 26, 813054: 63, 814343: 94, 814707: 89, 815461: 39, 817306: 11, 817463: 1, 818559: 89, 820522: 16, 822949: 97, 822989: 92, 823170: 81, 825346: 9, 825427: 45, 829731: 78, 832215: 32, 833926: 96, 834127: 20, 835735: 36, 835915: 78, 836190: 8, 836754: 20, 836964: 62, 837387: 97, 842440: 14, 842793: 98, 844278: 81, 846626: 42, 847137: 26, 847451: 21, 847686: 9, 852136: 65, 852281: 36, 852614: 8, 854377: 77, 855482: 96, 857372: 8, 858274: 11, 859052: 3, 859262: 74, 859981: 67, 862070: 4, 863931: 10, 864539: 46, 864917: 39, 865643: 39, 865822: 46, 868272: 38, 869764: 70, 871544: 64, 873508: 48, 874683: 68, 876043: 50, 877150: 92, 879732: 31, 880929: 5, 881041: 71, 882236: 31, 882774: 42, 883861: 40, 885877: 80, 886113: 52, 887170: 14, 888096: 54, 888182: 75, 888322: 52, 888586: 39, 890386: 10, 890559: 9, 892535: 71, 894707: 59, 894843: 0, 895344: 44, 895515: 35, 896948: 92, 897796: 0, 899162: 44, 900856: 85, 901357: 4, 902363: 49, 905049: 37, 905052: 90, 905121: 54, 905189: 61, 906532: 43, 907469: 13, 908134: 13, 908768: 99, 910454: 13, 911001: 48, 911751: 30, 914223: 58, 915199: 80, 917777: 57, 917964: 8, 919198: 68, 922004: 15, 922367: 24, 922990: 31, 924756: 63, 924799: 46, 927563: 81, 928623: 24, 932090: 95, 932457: 51, 933748: 63, 934414: 84, 935264: 42, 936266: 91, 939121: 97, 939673: 72, 942751: 69, 943675: 93, 943968: 94, 945144: 70, 946940: 87, 948056: 68, 948125: 82, 948201: 92, 949042: 25, 950695: 80, 957932: 84, 958693: 27, 961440: 6, 962188: 37, 964513: 12, 966015: 94, 
966213: 14, 966278: 81, 966446: 70, 966469: 14, 968284: 19, 969698: 43, 970712: 71, 971635: 44, 973613: 7, 976882: 29, 977558: 75, 978294: 80, 978383: 37, 979509: 88, 980710: 69, 980775: 0, 980822: 68, 982732: 89, 983159: 74, 983648: 6, 984225: 98, 984656: 9, 986209: 50, 986220: 70, 986466: 28, 987101: 55, 988122: 36, 989173: 94, 989966: 81, 991119: 68, 992898: 26, 993000: 78, 993120: 80, 993926: 78, 995517: 32, 996678: 95, 998944: 81, 999673: 75}, + {104: 93, 2826: 54, 3631: 11, 7704: 34, 12367: 12, 12513: 18, 23041: 18, 24057: 61, 24216: 27, 24943: 10, 27275: 65, 27879: 73, 28034: 63, 30170: 71, 30335: 26, 32421: 81, 32484: 85, 32846: 6, 33023: 2, 34921: 32, 35026: 36, 36088: 85, 36155: 42, 38219: 25, 38833: 44, 38992: 24, 40970: 47, 41312: 39, 41558: 5, 43308: 92, 43747: 38, 43850: 63, 44652: 62, 44714: 95, 44782: 66, 46377: 47, 48248: 21, 48286: 77, 50098: 40, 50321: 21, 52879: 29, 52986: 52, 56076: 13, 57466: 91, 57928: 35, 60117: 84, 60836: 79, 62872: 89, 65046: 89, 65132: 22, 65684: 11, 69645: 59, 69770: 64, 74550: 8, 78107: 0, 78135: 85, 78246: 60, 78424: 47, 82844: 82, 84166: 8, 85662: 65, 87554: 52, 89737: 26, 90189: 28, 90884: 18, 91824: 7, 91828: 63, 92032: 78, 95314: 60, 96274: 20, 97473: 85, 97600: 56, 98398: 9, 99438: 5, 100424: 13, 100438: 53, 107409: 39, 108193: 78, 108388: 80, 109066: 71, 111143: 18, 117828: 34, 124225: 69, 125877: 66, 129800: 43, 131298: 9, 131564: 55, 132562: 38, 132893: 82, 133243: 15, 134597: 51, 136111: 3, 136979: 0, 139415: 73, 139432: 57, 140118: 13, 140703: 72, 143901: 40, 143934: 39, 144081: 57, 144362: 86, 144866: 33, 145620: 45, 145790: 85, 148535: 69, 148801: 76, 148909: 58, 151180: 65, 151535: 82, 151659: 88, 154963: 61, 155962: 65, 156172: 61, 157064: 36, 158222: 99, 160644: 22, 160728: 51, 160831: 63, 163678: 59, 165372: 47, 168352: 83, 169144: 1, 171703: 4, 172211: 3, 172290: 52, 182220: 22, 182655: 87, 183971: 77, 186200: 14, 187796: 22, 188891: 2, 190231: 68, 191456: 36, 191871: 44, 192210: 24, 192664: 17, 
194871: 74, 199159: 84, 202029: 76, 204165: 15, 206540: 33, 207198: 83, 209157: 3, 210933: 17, 211471: 91, 212751: 26, 214642: 23, 214672: 91, 214907: 79, 218575: 20, 219766: 57, 220428: 61, 220770: 15, 221064: 46, 222343: 97, 223672: 94, 225672: 37, 227264: 90, 227862: 85, 231654: 19, 232936: 38, 233519: 72, 233829: 4, 234629: 16, 235099: 46, 236451: 85, 237967: 80, 238751: 47, 238803: 75, 240491: 80, 241517: 82, 242327: 61, 247789: 15, 249900: 48, 252776: 80, 253257: 39, 253621: 33, 253744: 3, 256320: 53, 256404: 96, 257100: 56, 257882: 69, 258649: 82, 260877: 79, 261159: 87, 261175: 62, 262444: 65, 265527: 30, 266562: 56, 267706: 49, 272360: 73, 273155: 19, 273527: 5, 274722: 54, 275927: 41, 276006: 90, 276035: 14, 277041: 6, 277198: 62, 277435: 49, 282335: 16, 283611: 11, 283705: 3, 284132: 2, 285653: 77, 286067: 25, 289994: 58, 293118: 36, 295213: 32, 295276: 77, 299466: 16, 300986: 35, 301743: 27, 303354: 81, 305357: 19, 307592: 26, 308394: 41, 310203: 81, 313655: 29, 313725: 65, 314962: 93, 315135: 21, 315549: 8, 317702: 27, 318422: 69, 319548: 96, 321507: 77, 323339: 10, 323674: 18, 323754: 82, 324702: 27, 325100: 64, 325676: 85, 326379: 41, 329239: 80, 330725: 7, 331202: 8, 331414: 4, 333520: 24, 334511: 78, 335607: 37, 338063: 89, 338473: 37, 339454: 1, 339895: 30, 340711: 30, 343502: 24, 344778: 64, 345000: 52, 349750: 69, 349788: 22, 350173: 27, 351762: 14, 352137: 89, 353055: 69, 355067: 94, 355310: 86, 356668: 93, 357074: 37, 360194: 76, 360357: 88, 360443: 33, 360893: 22, 360997: 33, 363537: 92, 363737: 11, 364446: 21, 364703: 21, 364877: 74, 365316: 69, 366954: 79, 368789: 84, 368923: 30, 372683: 79, 372708: 21, 373198: 24, 373360: 46, 373939: 6, 377209: 14, 379860: 71, 381090: 34, 383102: 46, 384019: 83, 384600: 0, 387051: 79, 387724: 58, 387773: 43, 389410: 36, 391388: 38, 395601: 32, 396513: 71, 398585: 96, 399641: 16, 400226: 88, 401891: 55, 403747: 56, 404384: 90, 405884: 42, 406058: 53, 406063: 87, 406344: 35, 406462: 60, 406635: 96, 408058: 
14, 411285: 87, 412712: 47, 414209: 20, 416090: 58, 417000: 32, 417392: 70, 417822: 39, 418565: 7, 419166: 40, 419371: 35, 421652: 52, 423376: 10, 423646: 58, 423831: 49, 424934: 7, 426394: 13, 428561: 4, 429278: 93, 429477: 87, 429778: 93, 431601: 9, 434780: 78, 435540: 59, 436603: 44, 436622: 57, 437855: 76, 443032: 79, 443617: 87, 443630: 34, 445532: 17, 447799: 87, 449343: 61, 456054: 99, 456609: 70, 456661: 70, 456727: 27, 456861: 65, 457181: 53, 457858: 56, 458771: 90, 460529: 5, 462884: 5, 467318: 61, 467454: 1, 468960: 93, 469475: 76, 470708: 25, 471906: 27, 472655: 62, 475096: 46, 477671: 22, 478692: 49, 479031: 36, 481755: 87, 484926: 83, 485820: 43, 486253: 61, 486731: 57, 486908: 92, 490991: 23, 491784: 17, 493124: 42, 496868: 88, 497266: 95, 497375: 11, 498040: 7, 499579: 79, 500497: 46, 502438: 17, 503415: 21, 507065: 41, 507953: 94, 511984: 84, 513439: 98, 517170: 41, 519371: 57, 520144: 14, 522876: 33, 523823: 73, 524317: 6, 524601: 91, 525485: 42, 527425: 14, 528540: 48, 529481: 11, 529815: 10, 530099: 94, 531328: 80, 532866: 81, 535400: 73, 536033: 48, 539799: 75, 541117: 84, 542554: 98, 543900: 18, 544220: 84, 544741: 27, 544793: 16, 545372: 94, 547229: 73, 549770: 22, 551063: 59, 553056: 85, 554469: 14, 554858: 29, 556429: 99, 557382: 43, 557928: 20, 558528: 10, 562122: 54, 562695: 24, 564534: 83, 564736: 33, 564784: 93, 565299: 0, 567121: 43, 567171: 95, 571399: 34, 571799: 49, 572382: 14, 574109: 23, 574184: 67, 574188: 33, 575234: 68, 576722: 77, 579351: 25, 581925: 63, 585773: 31, 587248: 88, 587641: 33, 588817: 33, 589837: 15, 591872: 29, 592301: 97, 593294: 11, 596138: 4, 598426: 93, 598939: 34, 599677: 5, 600256: 15, 600587: 16, 600603: 86, 601183: 21, 602690: 50, 605561: 18, 605584: 56, 607079: 61, 608789: 35, 609388: 13, 610718: 70, 614264: 53, 615172: 72, 615794: 37, 622331: 83, 622907: 45, 624060: 42, 625602: 90, 626057: 40, 626392: 34, 627030: 25, 628628: 22, 630110: 51, 630155: 92, 639485: 80, 640191: 28, 640745: 90, 641873: 51, 
644961: 61, 646811: 61, 647914: 19, 648351: 17, 649081: 56, 650669: 26, 653242: 23, 654286: 35, 654348: 43, 656598: 88, 657473: 34, 660097: 87, 662188: 45, 662734: 67, 663292: 9, 664443: 19, 666713: 42, 667174: 64, 667431: 97, 668750: 28, 669219: 45, 669411: 2, 675492: 64, 677045: 9, 677665: 46, 677782: 8, 679390: 54, 681627: 54, 683656: 0, 686214: 37, 686373: 96, 687728: 68, 688464: 13, 688579: 51, 689222: 14, 691801: 42, 694145: 84, 694148: 61, 695066: 91, 695397: 95, 696479: 4, 697028: 73, 697352: 4, 697498: 17, 697912: 45, 698589: 63, 699746: 91, 700757: 75, 701252: 68, 701352: 99, 701764: 60, 704095: 47, 704956: 50, 706967: 58, 707167: 84, 707745: 75, 711861: 78, 712103: 84, 712395: 96, 712423: 43, 714530: 69, 715765: 79, 719517: 89, 721161: 70, 721391: 35, 722005: 29, 723684: 74, 724907: 14, 724995: 65, 726695: 5, 727756: 85, 728649: 25, 728782: 35, 729164: 0, 729196: 44, 731409: 10, 732665: 17, 733689: 67, 734338: 15, 734532: 56, 737976: 10, 738420: 27, 739803: 3, 740440: 97, 741909: 83, 742516: 60, 743000: 47, 745293: 54, 746294: 20, 748150: 19, 748402: 71, 750683: 28, 752984: 45, 755731: 34, 756632: 74, 756676: 82, 757953: 0, 762306: 34, 762561: 37, 762837: 26, 763080: 40, 763342: 54, 768276: 24, 769696: 84, 771040: 32, 772851: 52, 774483: 33, 775233: 25, 780129: 1, 780305: 25, 785015: 23, 787121: 2, 787531: 44, 787574: 2, 787657: 55, 787991: 70, 790158: 47, 790247: 29, 791641: 63, 794199: 97, 797421: 25, 797960: 46, 799704: 4, 799920: 42, 800396: 39, 801280: 36, 802421: 32, 805092: 17, 805576: 40, 806799: 13, 811563: 38, 812621: 67, 812888: 58, 816149: 24, 816931: 47, 818452: 13, 821387: 58, 822976: 40, 828679: 17, 834254: 39, 835090: 41, 835951: 4, 836173: 2, 836680: 14, 838158: 64, 838561: 52, 840792: 95, 841037: 60, 841057: 48, 842049: 7, 843617: 83, 844401: 52, 844760: 15, 844882: 82, 847858: 59, 848163: 72, 848787: 54, 849921: 80, 854512: 69, 855501: 89, 859521: 10, 860991: 2, 864694: 98, 865393: 21, 867804: 77, 867948: 97, 869059: 39, 869785: 58, 
871541: 21, 872701: 88, 873297: 22, 877351: 40, 880464: 68, 882821: 96, 882996: 80, 887066: 34, 891660: 79, 895934: 78, 897747: 23, 898360: 97, 900316: 47, 902358: 27, 902437: 86, 903947: 68, 904936: 44, 906636: 41, 913986: 18, 914003: 31, 917383: 60, 919996: 24, 921602: 50, 922515: 39, 922586: 73, 924845: 99, 925509: 84, 926343: 76, 927182: 31, 927647: 96, 928755: 56, 930592: 77, 933736: 89, 933800: 15, 934284: 57, 935283: 87, 936070: 0, 936390: 83, 939136: 54, 942111: 92, 947293: 11, 947958: 23, 948698: 64, 951145: 67, 951867: 60, 951948: 58, 952282: 28, 954353: 85, 954684: 95, 959580: 98, 961752: 98, 965734: 5, 965755: 8, 966623: 44, 966777: 40, 968564: 56, 968989: 66, 969414: 84, 970077: 64, 970856: 77, 971791: 54, 972685: 80, 974642: 17, 975566: 58, 977047: 89, 977528: 2, 980054: 44, 982401: 38, 983626: 15, 987473: 74, 987734: 82, 988234: 57, 990838: 41, 993594: 58, 997452: 67, 997606: 96, 997751: 24, 998028: 91, 998522: 26}, + {5464: 9, 5466: 59, 10744: 86, 33789: 40, 40590: 63, 43748: 32, 45781: 31, 52143: 90, 53769: 77, 57337: 69, 64309: 20, 65910: 43, 73802: 56, 75645: 15, 79312: 93, 86296: 15, 98368: 66, 102766: 57, 107912: 59, 108635: 49, 110037: 73, 111177: 79, 122212: 47, 122577: 58, 132114: 52, 132232: 89, 136762: 77, 136789: 20, 144056: 17, 145000: 90, 151342: 49, 153973: 37, 164224: 96, 168626: 65, 173550: 80, 198630: 82, 200014: 19, 203048: 23, 203307: 63, 216591: 15, 221654: 77, 223182: 77, 228226: 50, 236397: 29, 237458: 17, 243300: 78, 243431: 9, 247475: 95, 265210: 30, 268400: 91, 276692: 69, 282066: 73, 284148: 89, 284904: 62, 285096: 27, 290710: 29, 295596: 10, 301811: 75, 303835: 89, 314781: 54, 316929: 40, 319724: 99, 321840: 58, 322439: 17, 336339: 45, 337269: 87, 345548: 50, 346041: 17, 350667: 26, 351727: 17, 353544: 42, 353822: 86, 355212: 69, 358785: 46, 370178: 1, 373634: 99, 381539: 93, 381733: 44, 384350: 15, 404770: 45, 406667: 34, 422187: 99, 427887: 16, 431568: 63, 439595: 83, 441495: 37, 445802: 35, 447818: 84, 452104: 7, 
460747: 3, 473002: 9, 478921: 21, 482838: 16, 484879: 78, 487511: 48, 491848: 69, 498758: 33, 507332: 66, 507349: 50, 509188: 41, 510335: 95, 518977: 71, 524882: 46, 535200: 37, 541886: 88, 551429: 92, 552855: 93, 571353: 42, 571864: 71, 581200: 90, 586057: 77, 590847: 68, 598739: 0, 601610: 86, 609683: 74, 610139: 39, 618485: 4, 622121: 78, 638448: 60, 641846: 32, 644069: 29, 646643: 89, 649411: 38, 654548: 57, 660140: 37, 673395: 47, 697102: 72, 703581: 57, 704641: 95, 710989: 44, 721950: 2, 722650: 79, 733593: 85, 734462: 10, 734496: 65, 736375: 4, 737828: 85, 742891: 78, 748616: 10, 764220: 81, 767025: 90, 767918: 8, 773479: 23, 774990: 53, 777275: 50, 779054: 52, 782652: 37, 803761: 62, 816359: 46, 819933: 19, 839291: 17, 841075: 19, 848796: 93, 850774: 41, 851172: 53, 852455: 2, 855539: 53, 863706: 27, 865030: 58, 878005: 14, 884603: 62, 885344: 59, 887533: 45, 914275: 18, 922800: 48, 930393: 72, 935449: 94, 945993: 96, 948610: 41, 951164: 8, 964601: 91, 965264: 90, 967766: 49, 980057: 6, 989996: 33, 997735: 71}, + {250: 58, 2092: 83, 3940: 9, 7107: 61, 32285: 55, 42581: 1, 57607: 6, 62266: 16, 67052: 19, 77481: 62, 106045: 22, 113890: 5, 125438: 50, 143514: 32, 153795: 96, 154264: 23, 167487: 79, 169202: 18, 176888: 73, 178940: 16, 183970: 3, 186632: 31, 187112: 69, 199048: 31, 213101: 8, 223991: 61, 230544: 4, 238191: 72, 244427: 29, 253141: 41, 268256: 45, 274831: 14, 288758: 47, 289008: 24, 289387: 93, 304443: 27, 305341: 84, 307329: 67, 309102: 71, 316821: 14, 317610: 43, 321476: 39, 322274: 51, 324293: 61, 339922: 70, 354637: 29, 358274: 8, 365213: 18, 379290: 84, 392343: 30, 410604: 9, 419994: 80, 456353: 68, 475758: 84, 487407: 53, 500684: 16, 507992: 13, 509796: 74, 515128: 86, 534158: 61, 547640: 93, 556347: 25, 557018: 9, 557474: 16, 558278: 69, 560026: 30, 563514: 45, 591949: 41, 597566: 11, 607950: 11, 626681: 11, 640826: 22, 655216: 20, 660711: 17, 669985: 39, 696273: 35, 700605: 75, 728751: 79, 734298: 0, 742666: 45, 749679: 19, 756679: 91, 
759332: 72, 762785: 75, 765996: 78, 780281: 15, 796088: 35, 806484: 84, 808197: 1, 812348: 90, 839001: 59, 866034: 44, 872543: 36, 879447: 14, 882504: 5, 886442: 94, 911425: 25, 935290: 77, 936060: 2, 957042: 28, 960506: 30, 961658: 97, 965351: 91, 979931: 13, 981213: 87, 989867: 75, 999341: 54}, + {598: 30, 1065: 56, 1260: 60, 1528: 42, 2568: 92, 3492: 84, 4907: 6, 5500: 25, 7881: 78, 8310: 26, 8860: 38, 9607: 66, 11542: 14, 11663: 40, 12332: 44, 12967: 69, 13598: 88, 16440: 62, 17023: 76, 18344: 34, 19958: 92, 21063: 45, 21865: 99, 22676: 95, 22831: 63, 23008: 2, 23126: 75, 24046: 32, 24416: 65, 26503: 89, 27011: 69, 28250: 64, 28535: 22, 29235: 80, 29317: 35, 30892: 46, 30929: 56, 31340: 97, 32405: 1, 32442: 97, 33080: 28, 37294: 31, 37360: 61, 39479: 94, 39961: 81, 40247: 87, 42230: 47, 42879: 46, 43886: 62, 44062: 66, 47884: 19, 47933: 48, 47965: 62, 49805: 7, 50131: 91, 54419: 5, 54849: 12, 55331: 74, 55669: 2, 58779: 32, 59713: 39, 59919: 19, 60441: 48, 62360: 71, 62430: 78, 64449: 64, 67446: 13, 71246: 46, 71571: 84, 72235: 30, 72862: 14, 73058: 91, 73426: 8, 73468: 63, 73638: 7, 75351: 11, 76679: 1, 77650: 4, 78053: 80, 78792: 94, 81534: 40, 81929: 6, 81972: 14, 83395: 82, 83682: 90, 84395: 89, 85489: 83, 86324: 79, 86396: 57, 90540: 61, 93393: 39, 94528: 90, 95998: 3, 97010: 22, 99350: 36, 103735: 38, 105939: 99, 108035: 84, 108204: 62, 109272: 80, 109552: 13, 110726: 21, 112876: 22, 113583: 15, 115370: 28, 115707: 2, 116004: 92, 119682: 52, 119814: 52, 119836: 65, 120060: 10, 121363: 25, 121952: 7, 122676: 65, 124357: 65, 124555: 60, 124964: 85, 125459: 97, 126161: 23, 128112: 84, 128529: 69, 129625: 65, 130594: 68, 130723: 33, 131046: 18, 133200: 89, 133380: 93, 134304: 54, 135053: 84, 135822: 9, 135920: 96, 135984: 41, 137195: 5, 139756: 36, 144124: 47, 145241: 91, 147100: 0, 149204: 81, 154456: 53, 155920: 84, 155939: 20, 156362: 92, 157193: 68, 157254: 38, 157651: 24, 159114: 48, 159372: 71, 159903: 49, 161065: 18, 161938: 20, 163562: 16, 164235: 68, 
164639: 37, 165206: 13, 165361: 25, 166247: 11, 167366: 78, 168631: 3, 168636: 23, 168944: 48, 169686: 5, 172412: 7, 173044: 95, 173803: 95, 175243: 30, 175852: 66, 176558: 50, 176757: 14, 177388: 37, 177496: 57, 177578: 23, 178173: 29, 178432: 72, 179703: 32, 179737: 74, 181424: 10, 182332: 46, 184407: 94, 184790: 10, 185441: 92, 185661: 19, 187823: 90, 187833: 77, 190428: 43, 191140: 48, 192971: 70, 193540: 4, 193938: 2, 194420: 17, 195309: 94, 196049: 81, 196706: 10, 196956: 21, 197828: 42, 198371: 55, 199335: 92, 200870: 68, 202002: 38, 202187: 31, 202627: 10, 204296: 68, 205244: 32, 205476: 10, 205477: 93, 205823: 80, 206740: 55, 207139: 76, 207820: 37, 209834: 93, 210211: 83, 210938: 20, 215015: 91, 215305: 94, 215799: 34, 219994: 64, 221518: 52, 222281: 58, 223786: 1, 223875: 59, 224280: 29, 224744: 36, 227033: 31, 228806: 96, 229287: 55, 229527: 86, 229532: 15, 229971: 33, 230880: 9, 231223: 51, 232700: 12, 233447: 63, 237221: 32, 238055: 0, 239057: 75, 241881: 31, 243643: 21, 245080: 73, 245296: 78, 245627: 96, 245688: 28, 246240: 75, 248101: 49, 248696: 43, 249193: 96, 249339: 87, 252326: 82, 254218: 46, 254396: 87, 255569: 98, 255700: 97, 256014: 46, 256725: 96, 256891: 29, 257521: 14, 260766: 54, 260920: 64, 264355: 31, 264531: 56, 271097: 0, 275861: 1, 275899: 63, 275910: 44, 277184: 78, 277996: 94, 279929: 56, 279996: 44, 281399: 90, 282976: 48, 286796: 74, 289727: 64, 289909: 82, 290484: 15, 293486: 35, 293574: 45, 295304: 87, 295592: 71, 296079: 25, 298124: 73, 298683: 85, 298979: 7, 299644: 9, 299922: 10, 305100: 13, 305384: 66, 307657: 63, 310597: 54, 313366: 59, 314081: 34, 314465: 41, 315321: 9, 318991: 43, 319024: 65, 319508: 38, 319625: 26, 322611: 24, 322968: 43, 323122: 36, 323277: 36, 324539: 86, 327582: 72, 328477: 85, 330500: 44, 331024: 66, 332620: 66, 333257: 42, 333371: 71, 333611: 66, 336706: 48, 337010: 12, 338142: 78, 339253: 11, 342399: 75, 344618: 46, 345166: 71, 345174: 25, 347418: 58, 349423: 15, 349742: 62, 350038: 65, 351975: 
30, 352047: 61, 352145: 80, 352598: 9, 353540: 65, 354574: 33, 356152: 21, 356204: 86, 357131: 18, 357720: 35, 359467: 47, 364289: 35, 364671: 58, 365910: 71, 367658: 63, 368307: 12, 368495: 55, 368670: 64, 370310: 68, 372814: 49, 374251: 80, 374321: 25, 375905: 92, 377468: 71, 377606: 55, 379007: 16, 380769: 76, 381148: 8, 382434: 88, 384851: 63, 384872: 24, 385269: 48, 385311: 90, 386620: 68, 387104: 31, 388609: 89, 388730: 87, 390076: 56, 390921: 13, 394675: 30, 395010: 94, 395290: 7, 395379: 26, 396213: 40, 396565: 41, 396681: 60, 396914: 30, 398869: 97, 400142: 54, 402125: 92, 402312: 66, 403700: 76, 404744: 23, 409204: 14, 409720: 91, 411157: 45, 419221: 96, 420847: 4, 421226: 49, 421331: 31, 421447: 73, 422699: 0, 424724: 84, 425529: 94, 426169: 81, 426711: 40, 429397: 5, 430871: 19, 434853: 23, 436437: 54, 436794: 57, 438325: 76, 438651: 75, 438696: 27, 440074: 32, 441110: 76, 442836: 19, 444264: 87, 448259: 26, 450246: 56, 450522: 75, 450871: 16, 451062: 18, 453327: 32, 456632: 74, 456974: 3, 457704: 96, 460905: 75, 463184: 71, 467842: 62, 468183: 25, 469544: 60, 470578: 43, 470664: 84, 471762: 13, 473319: 66, 473320: 29, 474081: 86, 477782: 30, 480220: 90, 480882: 5, 482489: 75, 486770: 98, 488554: 18, 488876: 38, 489473: 3, 491169: 45, 491810: 77, 491996: 10, 494645: 19, 495248: 40, 498022: 37, 498314: 3, 498742: 54, 499922: 91, 500130: 49, 501648: 44, 501883: 47, 502305: 77, 504125: 74, 505197: 51, 505910: 86, 507140: 35, 507790: 92, 507880: 93, 508357: 6, 509260: 48, 513255: 75, 514737: 19, 515806: 75, 517751: 22, 518219: 97, 519738: 7, 520853: 71, 520957: 36, 522035: 85, 523573: 77, 525078: 22, 527266: 70, 529194: 37, 530153: 27, 530771: 91, 532873: 12, 533181: 6, 534719: 41, 535627: 74, 535755: 59, 537949: 27, 541438: 81, 544094: 76, 544874: 36, 545366: 75, 545533: 76, 547846: 34, 548733: 84, 549367: 70, 549621: 86, 550352: 70, 552037: 26, 552876: 93, 554502: 14, 554798: 34, 554925: 39, 554983: 65, 556751: 61, 557611: 9, 557861: 40, 561941: 97, 
561957: 61, 562895: 1, 565198: 66, 565326: 96, 565624: 17, 565889: 9, 566401: 30, 567563: 38, 567832: 4, 570220: 94, 570490: 35, 570736: 94, 570938: 24, 570982: 59, 571458: 24, 574516: 42, 578171: 29, 578619: 37, 579836: 31, 579983: 17, 581397: 15, 587659: 21, 587808: 44, 588589: 44, 589344: 95, 590032: 6, 594803: 53, 595217: 53, 596037: 57, 597372: 76, 597702: 88, 600239: 96, 601144: 66, 601611: 28, 602010: 65, 603106: 42, 603343: 33, 603879: 38, 604111: 78, 604510: 49, 605058: 6, 605713: 63, 605897: 57, 607922: 42, 611871: 76, 612412: 5, 612434: 20, 613322: 47, 613877: 27, 614292: 67, 616909: 4, 617750: 61, 618961: 52, 619737: 81, 619989: 71, 620473: 11, 620867: 62, 621870: 21, 622282: 54, 622662: 36, 625035: 73, 625779: 91, 627625: 27, 630444: 19, 630531: 27, 631927: 86, 632735: 81, 633705: 71, 633916: 3, 635463: 95, 635958: 95, 639717: 63, 645122: 13, 647962: 47, 649399: 51, 654583: 3, 655008: 21, 655222: 34, 655449: 42, 655595: 15, 657494: 58, 657638: 61, 660040: 45, 660166: 68, 661502: 5, 662615: 46, 662892: 84, 663487: 44, 663677: 12, 663840: 99, 664080: 97, 664266: 43, 664847: 74, 666728: 88, 666943: 18, 667684: 84, 669888: 34, 670917: 77, 671630: 92, 672762: 14, 673143: 10, 675022: 42, 675483: 64, 676154: 90, 676293: 23, 677035: 49, 677271: 30, 677689: 64, 678680: 5, 679682: 63, 680309: 54, 681992: 57, 685308: 68, 685992: 17, 688465: 74, 688724: 70, 690377: 52, 691076: 30, 691629: 27, 692122: 54, 692378: 83, 695828: 36, 696881: 2, 697002: 57, 699732: 48, 701181: 57, 701581: 51, 701584: 99, 701976: 27, 703467: 42, 705454: 82, 705870: 1, 707460: 22, 710095: 17, 710292: 15, 713012: 85, 713646: 55, 715366: 1, 716782: 72, 717224: 75, 717351: 69, 717670: 68, 720428: 94, 721118: 29, 721380: 98, 722030: 10, 722680: 42, 723690: 63, 723944: 54, 724781: 59, 725396: 13, 725712: 38, 725762: 96, 729442: 81, 731528: 15, 734001: 87, 734511: 16, 735447: 13, 735796: 38, 735862: 15, 737799: 93, 737878: 74, 738358: 68, 738810: 90, 739323: 80, 740880: 82, 741170: 81, 741197: 
87, 741302: 7, 741607: 36, 741828: 59, 741929: 9, 742704: 50, 742789: 63, 743610: 63, 743613: 44, 745203: 6, 746988: 71, 746993: 84, 749003: 29, 750214: 14, 750228: 10, 750509: 47, 751160: 20, 755554: 30, 758176: 25, 758312: 8, 762047: 69, 762859: 54, 764339: 1, 765735: 60, 765920: 15, 766245: 14, 766492: 16, 766890: 73, 768066: 78, 769194: 88, 770806: 60, 774686: 3, 775493: 91, 775523: 11, 775928: 41, 777740: 98, 779042: 93, 779328: 15, 779996: 6, 780416: 52, 780866: 40, 781025: 16, 783483: 36, 783886: 75, 784203: 7, 784802: 93, 785296: 75, 785456: 45, 786484: 2, 786913: 58, 787111: 77, 787888: 56, 788831: 20, 789045: 54, 789660: 2, 791949: 57, 793190: 35, 793547: 97, 794103: 53, 794380: 25, 795421: 55, 797797: 32, 798305: 54, 798759: 4, 800283: 38, 801441: 82, 803000: 57, 803118: 84, 806603: 82, 807107: 63, 807407: 93, 809316: 30, 810005: 92, 814695: 90, 815390: 97, 817198: 78, 817370: 76, 818081: 6, 818272: 93, 820821: 88, 821408: 18, 821819: 78, 822189: 54, 823437: 86, 823730: 5, 824300: 30, 825043: 88, 826902: 5, 828710: 27, 829968: 48, 830199: 97, 831060: 70, 832456: 85, 833810: 16, 834008: 60, 834789: 54, 834821: 34, 835678: 21, 836942: 74, 838116: 69, 838187: 55, 838436: 49, 839333: 20, 840284: 46, 840494: 86, 841076: 50, 841586: 30, 843683: 53, 845396: 0, 846623: 26, 847088: 79, 847700: 72, 850570: 87, 850587: 32, 851580: 94, 852222: 76, 854323: 14, 854681: 28, 855302: 38, 856226: 64, 856888: 95, 857103: 3, 860591: 91, 860688: 82, 861383: 8, 861760: 11, 862928: 66, 863434: 29, 863696: 1, 864991: 93, 865889: 27, 866449: 39, 866890: 27, 867175: 31, 867329: 59, 872112: 38, 872277: 29, 872303: 40, 873381: 16, 874653: 47, 874975: 14, 875135: 0, 875415: 12, 876116: 83, 879364: 67, 879947: 86, 880820: 20, 881637: 70, 882377: 77, 882799: 23, 883313: 96, 884282: 79, 884501: 94, 884571: 40, 885608: 25, 888712: 42, 888794: 75, 888856: 70, 891943: 29, 892177: 41, 893242: 69, 894805: 78, 894822: 44, 897054: 35, 897192: 29, 901220: 78, 903448: 18, 904689: 21, 906708: 
93, 907563: 62, 908042: 86, 909498: 2, 911010: 75, 911911: 64, 911997: 31, 912461: 59, 912488: 56, 912611: 91, 913277: 5, 914371: 41, 914471: 18, 916415: 58, 917079: 18, 918399: 9, 918450: 84, 919066: 93, 919595: 52, 921655: 42, 921673: 78, 921799: 28, 921890: 81, 922106: 8, 922310: 44, 923117: 12, 925494: 30, 927415: 38, 928358: 76, 928411: 35, 930233: 46, 931197: 87, 933015: 76, 934080: 57, 938310: 32, 938531: 67, 940712: 4, 941423: 33, 942391: 69, 945401: 21, 948004: 59, 949535: 95, 949881: 66, 951741: 7, 953246: 88, 954537: 59, 954612: 73, 955475: 43, 956007: 23, 956372: 37, 956819: 40, 958748: 84, 960924: 83, 962564: 98, 963201: 19, 963773: 20, 964219: 75, 964450: 52, 965019: 17, 965331: 77, 966723: 56, 967569: 75, 967725: 77, 968651: 63, 970250: 48, 970448: 60, 970804: 38, 970936: 34, 971039: 92, 973104: 89, 974201: 56, 975345: 14, 975603: 7, 975758: 13, 976440: 1, 977842: 17, 979602: 61, 980588: 88, 980738: 0, 983484: 70, 983586: 72, 985771: 31, 987256: 51, 987445: 49, 987713: 77, 987741: 8, 987971: 15, 989633: 39, 990015: 72, 990607: 24, 992791: 84, 994434: 24, 995823: 25, 996014: 97, 997730: 44, 998157: 14, 998370: 97}, + {147: 7, 2199: 23, 5106: 28, 5975: 90, 10160: 3, 10338: 6, 10574: 65, 13843: 45, 15303: 8, 15808: 25, 20949: 41, 21606: 51, 22107: 99, 23361: 87, 23972: 29, 24696: 53, 25908: 66, 26243: 52, 27025: 73, 28059: 1, 28840: 13, 32185: 1, 32405: 14, 33163: 87, 35330: 32, 35910: 23, 35995: 13, 36098: 82, 39606: 94, 40928: 18, 41592: 54, 41858: 78, 41961: 81, 42070: 28, 43433: 3, 44211: 38, 45015: 83, 45639: 60, 45777: 31, 45845: 94, 46292: 20, 46499: 58, 49818: 29, 50866: 68, 54336: 40, 56454: 37, 56662: 8, 57926: 66, 58945: 99, 59261: 92, 60386: 40, 60450: 92, 60908: 5, 61464: 58, 61771: 82, 63625: 99, 65172: 30, 71489: 15, 73061: 54, 73437: 17, 75413: 44, 77616: 60, 77830: 31, 78670: 63, 80254: 67, 81086: 77, 82766: 68, 84205: 39, 84773: 6, 85723: 2, 87523: 89, 88130: 22, 90189: 78, 90868: 27, 90976: 34, 91394: 60, 91494: 61, 91755: 57, 96371: 
47, 96515: 99, 96763: 50, 97242: 79, 97918: 2, 98012: 35, 98202: 57, 99166: 53, 99216: 15, 100051: 10, 100631: 42, 100760: 71, 106731: 39, 107402: 37, 110373: 4, 110404: 96, 111412: 26, 112475: 2, 113689: 18, 114913: 72, 117522: 67, 119733: 67, 122959: 91, 123184: 93, 123492: 32, 124413: 60, 126355: 46, 126730: 42, 128215: 15, 128399: 2, 131232: 83, 131975: 86, 132384: 24, 133247: 70, 136144: 77, 137511: 55, 138987: 42, 145080: 30, 146185: 28, 147563: 61, 148759: 37, 149448: 47, 149643: 96, 155581: 83, 156669: 41, 157317: 45, 159943: 39, 161949: 88, 163625: 69, 163752: 59, 167366: 58, 167920: 77, 168465: 44, 169754: 78, 173970: 6, 174289: 81, 177043: 65, 178234: 68, 179300: 50, 179572: 47, 180042: 20, 181934: 25, 182043: 46, 184327: 28, 184541: 99, 185282: 63, 186074: 61, 186086: 98, 187178: 21, 188054: 59, 189177: 4, 189282: 64, 190944: 29, 192102: 38, 193876: 74, 194952: 91, 198934: 30, 203128: 96, 206900: 22, 210341: 16, 211327: 53, 212374: 53, 213589: 41, 213734: 74, 213812: 55, 215160: 90, 215713: 61, 215904: 36, 217190: 30, 220448: 70, 222290: 40, 222593: 11, 227252: 88, 228460: 78, 230625: 47, 231274: 85, 235816: 10, 236547: 64, 236799: 41, 239172: 75, 242665: 65, 244026: 81, 246243: 52, 247373: 62, 248664: 37, 248725: 70, 250119: 42, 250161: 36, 250395: 29, 251155: 74, 251204: 62, 254199: 15, 258472: 38, 259023: 98, 260863: 23, 260899: 22, 261482: 84, 269301: 17, 270972: 98, 271116: 19, 274363: 35, 275727: 94, 277276: 8, 277841: 95, 278565: 73, 279263: 25, 279369: 58, 281218: 18, 281858: 17, 284650: 60, 285168: 15, 285406: 39, 285700: 6, 286594: 95, 287719: 48, 287790: 38, 288074: 57, 288143: 99, 289472: 24, 290156: 26, 294949: 85, 298674: 29, 299078: 63, 301069: 31, 301227: 44, 301346: 1, 301701: 27, 301742: 66, 308613: 60, 308812: 34, 309107: 0, 309355: 48, 309474: 17, 311773: 22, 313844: 16, 314028: 80, 315870: 66, 319041: 32, 319650: 97, 321637: 95, 322372: 91, 324264: 68, 325019: 69, 330427: 4, 331966: 71, 333983: 21, 334988: 94, 336800: 17, 338496: 
21, 339158: 73, 340143: 39, 341535: 26, 341559: 58, 342785: 61, 342865: 71, 342892: 75, 342993: 81, 344724: 7, 352915: 67, 356352: 63, 357189: 5, 361159: 94, 361439: 32, 361713: 85, 363802: 28, 365622: 6, 366020: 17, 366725: 44, 366929: 6, 367617: 44, 368254: 89, 373027: 92, 374201: 63, 374717: 8, 376957: 94, 379787: 74, 382410: 52, 385523: 23, 386109: 5, 386193: 62, 386905: 51, 387198: 74, 388500: 44, 389319: 57, 389883: 72, 390598: 38, 391376: 40, 391995: 8, 396975: 3, 398774: 80, 399497: 64, 399558: 17, 401604: 5, 401837: 3, 402405: 15, 403027: 82, 404133: 46, 406794: 23, 407163: 39, 408866: 1, 412062: 69, 412652: 13, 414219: 46, 416879: 92, 417493: 42, 418124: 63, 418910: 97, 422670: 71, 425469: 29, 425474: 85, 425856: 8, 426358: 13, 427145: 73, 428226: 71, 428283: 30, 428802: 25, 428991: 30, 430009: 55, 430867: 86, 433908: 3, 434681: 72, 435966: 91, 437315: 4, 439341: 79, 439458: 3, 439928: 92, 440001: 53, 440160: 98, 441703: 50, 442080: 3, 443419: 47, 443680: 33, 448640: 5, 450256: 69, 450658: 70, 452707: 55, 453768: 7, 454197: 93, 455422: 27, 456826: 95, 460039: 87, 460965: 70, 463509: 31, 465993: 63, 466633: 42, 466748: 39, 467663: 3, 468090: 88, 470406: 11, 470669: 25, 470695: 8, 471307: 62, 472775: 89, 473701: 97, 474013: 69, 475940: 29, 476967: 29, 477428: 60, 480143: 96, 480466: 90, 480516: 35, 481617: 86, 483613: 41, 486491: 49, 487065: 46, 488016: 24, 489835: 26, 490142: 72, 490402: 43, 490430: 75, 492119: 51, 493601: 64, 493900: 30, 495035: 11, 497126: 20, 499492: 57, 500198: 19, 500254: 21, 503180: 92, 503990: 80, 504130: 20, 504279: 22, 505181: 48, 507199: 28, 508238: 86, 508627: 65, 512221: 78, 512683: 78, 513576: 43, 515095: 96, 518028: 88, 518050: 14, 519387: 62, 522303: 98, 523025: 57, 524021: 34, 524093: 57, 527315: 2, 527822: 80, 527961: 44, 528976: 32, 531261: 79, 533556: 12, 534236: 78, 535209: 87, 537209: 51, 539147: 17, 540099: 68, 542667: 48, 543582: 40, 545682: 68, 545945: 20, 546728: 91, 549778: 17, 551243: 63, 551391: 0, 551946: 21, 
552155: 99, 553428: 63, 554228: 84, 556468: 2, 558047: 79, 559680: 94, 562681: 62, 562938: 62, 563905: 62, 565023: 69, 569740: 78, 570634: 40, 571115: 64, 572553: 54, 572708: 47, 572749: 42, 572800: 56, 572885: 82, 573419: 23, 573599: 0, 575322: 81, 575517: 54, 576405: 8, 577124: 80, 582835: 84, 582852: 9, 585973: 28, 586336: 88, 587483: 8, 588553: 54, 588651: 90, 589731: 46, 591683: 87, 592164: 53, 594119: 31, 594604: 61, 595447: 55, 595897: 2, 599253: 85, 600817: 13, 602311: 50, 602548: 11, 604452: 31, 607313: 33, 607338: 86, 608152: 89, 608713: 64, 609461: 10, 610700: 58, 611191: 12, 612698: 25, 613328: 68, 614158: 59, 615815: 82, 617213: 50, 617548: 70, 617641: 97, 619496: 41, 620655: 69, 621160: 36, 622472: 40, 622924: 64, 626102: 95, 627040: 30, 627516: 82, 627857: 58, 631046: 15, 633085: 33, 633332: 30, 636285: 86, 636408: 88, 637370: 98, 638860: 60, 640035: 55, 642782: 0, 644346: 49, 644692: 34, 644961: 53, 645050: 84, 648080: 67, 648475: 43, 651306: 8, 654470: 59, 658345: 45, 659582: 95, 662231: 43, 662416: 79, 662601: 7, 671461: 33, 673217: 8, 674555: 28, 675840: 96, 676254: 53, 676884: 6, 677169: 40, 677592: 62, 682641: 70, 682726: 19, 684534: 87, 685025: 78, 686211: 96, 686824: 22, 687060: 28, 688144: 2, 690643: 62, 694800: 72, 695675: 63, 696377: 33, 704158: 51, 705011: 18, 706374: 72, 707760: 65, 709135: 60, 709888: 52, 710416: 16, 711383: 57, 713508: 47, 713706: 38, 714608: 79, 715038: 98, 715256: 48, 715537: 12, 716057: 92, 717981: 78, 720315: 68, 722991: 67, 724041: 90, 726089: 88, 726136: 95, 726195: 98, 727765: 20, 728574: 51, 730391: 9, 731518: 79, 732078: 36, 732811: 31, 739849: 22, 741364: 67, 742465: 51, 742552: 82, 745257: 47, 748011: 45, 754881: 57, 755209: 39, 756435: 94, 756610: 87, 757418: 75, 758243: 14, 759799: 54, 760249: 42, 761456: 19, 761867: 6, 766247: 35, 769750: 64, 775914: 72, 776428: 55, 781635: 23, 781920: 85, 782900: 66, 784438: 43, 786641: 93, 788649: 83, 788938: 97, 789582: 84, 789905: 1, 790749: 57, 791122: 78, 791398: 
31, 791403: 22, 792402: 8, 792755: 78, 794490: 28, 796349: 29, 796368: 18, 796627: 8, 797027: 15, 802737: 81, 804078: 1, 808922: 79, 809196: 88, 810047: 80, 811425: 26, 813117: 64, 813903: 51, 814260: 53, 815074: 30, 816280: 41, 817274: 30, 820980: 58, 822346: 50, 823264: 38, 824041: 24, 824984: 56, 826239: 36, 826594: 54, 829789: 46, 830743: 41, 831805: 47, 833644: 1, 835202: 92, 836256: 58, 838153: 60, 838417: 13, 839633: 16, 841885: 73, 842344: 13, 843381: 47, 843886: 50, 845842: 37, 849030: 99, 849364: 66, 850885: 98, 854109: 75, 854114: 74, 855706: 78, 855719: 85, 857076: 62, 857322: 13, 863695: 64, 864206: 39, 868262: 40, 868309: 41, 869825: 45, 872481: 69, 873379: 78, 873781: 46, 874554: 81, 874930: 23, 875671: 75, 875802: 42, 878328: 22, 879488: 49, 879768: 2, 880312: 52, 880980: 53, 881885: 45, 885588: 72, 886922: 86, 886984: 57, 890303: 1, 890532: 35, 897089: 95, 897311: 89, 898681: 39, 898685: 17, 899685: 98, 900162: 31, 901218: 17, 901273: 79, 901311: 43, 901349: 57, 903128: 23, 905048: 67, 908057: 22, 908082: 99, 908436: 28, 908855: 33, 909444: 44, 909850: 10, 909980: 10, 911917: 11, 912479: 21, 912820: 56, 915204: 19, 916895: 19, 918139: 82, 921020: 69, 921028: 80, 923045: 78, 924218: 3, 925977: 81, 928706: 7, 932400: 53, 932675: 81, 934307: 50, 935870: 36, 936141: 39, 936335: 37, 937232: 99, 941758: 2, 942923: 28, 943465: 73, 944891: 98, 946555: 41, 949704: 24, 951474: 90, 951883: 92, 956507: 17, 956636: 89, 957469: 72, 957698: 82, 958091: 65, 958326: 68, 959118: 89, 959936: 28, 960276: 67, 962231: 89, 965850: 92, 966868: 80, 967721: 35, 968072: 22, 969135: 17, 970511: 72, 972960: 47, 975325: 48, 975604: 99, 975936: 96, 976987: 22, 980336: 28, 982406: 9, 984859: 27, 985111: 62, 985331: 99, 985945: 84, 986854: 92, 991951: 96, 994664: 38, 996100: 51, 996346: 59, 998167: 25, 998521: 27, 999206: 28, 999316: 9}, + {5368: 50, 18858: 50, 44410: 5, 59448: 39, 61358: 22, 61370: 31, 65112: 86, 74203: 89, 88000: 35, 91294: 14, 103377: 67, 114684: 64, 119815: 
57, 137806: 93, 154170: 22, 156379: 78, 157804: 31, 161594: 91, 165892: 27, 173721: 39, 180360: 2, 199883: 21, 200162: 99, 201070: 50, 206870: 21, 236470: 44, 245668: 72, 256166: 1, 259372: 43, 269422: 3, 270728: 11, 270804: 81, 278837: 25, 283122: 85, 291451: 43, 307581: 86, 318715: 92, 321801: 87, 325557: 38, 327781: 2, 330765: 12, 367133: 49, 371153: 40, 379166: 88, 381214: 58, 392617: 87, 402077: 61, 407347: 68, 409697: 37, 415735: 3, 423765: 15, 456704: 97, 480124: 76, 491952: 77, 493505: 32, 523047: 4, 533350: 30, 540554: 99, 547830: 38, 548716: 8, 557527: 50, 564089: 25, 575755: 62, 591138: 51, 600218: 9, 648927: 2, 652127: 70, 655248: 50, 676320: 13, 682604: 47, 702287: 77, 702521: 70, 711850: 24, 732558: 47, 737515: 10, 740894: 52, 741030: 90, 741348: 97, 745002: 29, 760333: 73, 766074: 20, 779497: 66, 781565: 65, 791890: 53, 792179: 22, 799343: 67, 800464: 37, 802681: 0, 802685: 12, 805020: 59, 843787: 80, 851189: 19, 859197: 16, 867297: 50, 888002: 20, 890154: 47, 923002: 55, 924676: 13, 924766: 31, 931838: 12, 932148: 48, 933340: 16, 938085: 20, 959415: 36, 965995: 59, 967455: 61, 988119: 35, 997438: 0}, + {4002: 79, 19027: 22, 19652: 43, 21028: 30, 24069: 16, 27627: 37, 31043: 94, 38909: 27, 44445: 94, 44460: 19, 56587: 33, 59184: 20, 88128: 32, 89060: 29, 94969: 46, 103911: 79, 104775: 16, 109983: 75, 117347: 4, 125925: 70, 131876: 94, 137229: 39, 141330: 7, 153587: 97, 167310: 76, 173141: 78, 189296: 80, 214122: 31, 216278: 87, 231657: 57, 232115: 3, 234465: 1, 241733: 37, 253791: 97, 257558: 63, 260492: 17, 260930: 56, 267578: 35, 268255: 74, 268984: 26, 273215: 20, 287322: 82, 293754: 68, 315163: 22, 317005: 67, 321915: 90, 332211: 52, 337803: 68, 353408: 8, 354855: 21, 355831: 59, 360337: 27, 388584: 89, 401863: 71, 410265: 41, 410546: 76, 412377: 0, 418865: 15, 419085: 51, 425228: 85, 427575: 98, 440043: 28, 445282: 33, 449724: 42, 453916: 72, 454447: 62, 455435: 38, 460222: 91, 481890: 78, 492917: 14, 494637: 27, 498853: 78, 500600: 23, 505477: 
78, 508134: 98, 509187: 73, 510080: 62, 510154: 26, 511664: 88, 525386: 69, 536028: 12, 539357: 81, 539621: 79, 541386: 15, 547578: 66, 547716: 8, 561166: 19, 566125: 71, 567745: 96, 572777: 63, 607149: 89, 607476: 60, 614829: 83, 620610: 18, 622562: 81, 629701: 67, 657826: 83, 661820: 93, 673443: 19, 686853: 95, 687024: 50, 700163: 34, 706137: 39, 710250: 16, 718780: 91, 721516: 17, 735123: 11, 736049: 30, 736146: 98, 743001: 57, 744528: 2, 745598: 70, 746546: 78, 750283: 11, 757689: 26, 765284: 38, 766763: 69, 767250: 63, 773339: 15, 777359: 3, 798062: 47, 824954: 27, 835039: 87, 838451: 54, 838780: 6, 841469: 22, 844898: 95, 847144: 21, 848184: 29, 855815: 80, 858906: 91, 861828: 67, 871641: 50, 882119: 42, 885610: 26, 887043: 80, 889087: 41, 889165: 46, 890248: 5, 890932: 63, 894805: 11, 913009: 85, 930628: 62, 932437: 12, 936520: 15, 971626: 10, 972110: 58, 982964: 80, 989070: 58}, + {653: 28, 1280: 83, 1713: 73, 3230: 15, 3464: 66, 3811: 65, 4083: 54, 4731: 63, 4752: 76, 5484: 20, 6108: 74, 6497: 23, 6516: 34, 7307: 40, 8841: 96, 9029: 82, 9286: 61, 9419: 54, 10067: 18, 10475: 58, 11699: 49, 11857: 95, 12559: 81, 12713: 1, 13193: 60, 13726: 39, 14863: 14, 14890: 58, 15020: 82, 15427: 78, 15697: 88, 15910: 71, 16113: 51, 16318: 32, 17141: 38, 17617: 23, 18685: 97, 18789: 23, 19032: 21, 19159: 73, 19373: 92, 19971: 66, 20464: 25, 20557: 67, 20618: 81, 20665: 81, 21255: 24, 22369: 49, 22412: 53, 22837: 64, 23597: 48, 25567: 46, 26078: 83, 26099: 91, 26164: 54, 26228: 73, 26248: 12, 27643: 29, 27840: 5, 28576: 67, 28883: 56, 28915: 77, 28947: 55, 29183: 94, 30202: 12, 30385: 99, 30927: 32, 31064: 77, 31860: 62, 32183: 6, 34224: 10, 34987: 80, 35083: 57, 35732: 44, 37061: 89, 37092: 36, 37354: 76, 38216: 53, 38445: 82, 38505: 7, 38565: 39, 39895: 18, 40135: 23, 41308: 15, 41520: 43, 42557: 6, 43115: 45, 43191: 25, 43222: 23, 44571: 93, 44704: 31, 45362: 54, 46245: 3, 46613: 83, 47077: 81, 47160: 69, 47431: 76, 47773: 36, 48951: 48, 49329: 35, 49355: 12, 49865: 26, 
50630: 32, 50659: 3, 51406: 89, 51487: 15, 52040: 84, 52798: 64, 53136: 32, 54093: 53, 54670: 96, 54766: 21, 55284: 58, 57241: 0, 57956: 80, 58160: 54, 60577: 23, 62220: 70, 62478: 52, 63948: 46, 64693: 40, 64908: 25, 65279: 88, 65327: 61, 65404: 25, 65807: 70, 65982: 76, 66349: 64, 66391: 19, 66742: 96, 67617: 76, 67977: 78, 68017: 58, 68533: 86, 68910: 94, 69322: 15, 70175: 83, 70449: 78, 71090: 53, 71356: 98, 71895: 71, 72041: 0, 72869: 87, 73443: 14, 74009: 70, 75007: 15, 75337: 2, 75397: 14, 75818: 45, 75987: 28, 76712: 20, 76840: 50, 77158: 12, 77177: 35, 77682: 13, 77977: 40, 78423: 64, 78597: 36, 78941: 45, 80172: 2, 80346: 16, 80654: 33, 80711: 46, 80916: 98, 81233: 61, 81340: 40, 82450: 25, 82753: 96, 82904: 19, 84529: 45, 84646: 8, 85464: 32, 85691: 89, 85803: 86, 85831: 61, 85954: 92, 86578: 88, 86663: 8, 86947: 21, 87330: 24, 90040: 25, 90123: 11, 90181: 81, 90216: 38, 90794: 64, 91459: 17, 92783: 57, 93740: 8, 93993: 8, 94481: 54, 95577: 99, 96827: 33, 97044: 68, 97168: 89, 97727: 53, 99110: 90, 101277: 44, 101308: 95, 101582: 48, 101854: 20, 102556: 88, 102873: 48, 103050: 74, 103076: 72, 103320: 66, 104331: 9, 105242: 68, 105795: 20, 106583: 10, 107103: 66, 107122: 7, 108334: 37, 108966: 5, 109654: 63, 109919: 94, 109996: 37, 110149: 99, 110713: 23, 110921: 48, 111572: 75, 112580: 67, 112836: 74, 113272: 51, 113926: 31, 114648: 1, 115120: 68, 115409: 5, 116776: 56, 116859: 98, 116934: 20, 117022: 58, 117224: 14, 117886: 5, 119597: 51, 119938: 85, 119968: 20, 120546: 67, 120614: 19, 120846: 0, 120852: 11, 120901: 80, 121610: 99, 121616: 99, 122238: 25, 122259: 62, 122808: 88, 122925: 17, 124028: 4, 124406: 24, 124674: 84, 125860: 14, 126030: 74, 126194: 70, 126372: 38, 127721: 84, 127967: 39, 128969: 47, 130002: 20, 130400: 45, 130964: 51, 131505: 48, 131516: 18, 131589: 84, 132403: 28, 132530: 89, 132817: 65, 133184: 85, 133395: 75, 133793: 46, 134027: 18, 134100: 92, 134512: 62, 134735: 67, 135242: 65, 136137: 92, 137036: 17, 137141: 97, 137639: 
13, 137917: 1, 138214: 43, 139786: 86, 140689: 25, 140929: 78, 141709: 31, 142110: 9, 142316: 16, 142750: 14, 142787: 52, 142969: 98, 143612: 2, 143934: 46, 144022: 74, 144774: 95, 144886: 53, 148922: 86, 149198: 34, 149471: 62, 149693: 85, 149892: 6, 149972: 78, 150147: 2, 150666: 65, 151095: 75, 151627: 0, 152111: 14, 152238: 58, 152348: 7, 152674: 92, 153250: 69, 153464: 53, 153579: 32, 154068: 50, 155343: 51, 156667: 79, 156751: 23, 157639: 66, 159738: 91, 160257: 4, 161090: 65, 161513: 36, 161976: 92, 161981: 20, 164175: 92, 164433: 52, 164505: 31, 164605: 71, 164685: 7, 164778: 55, 164865: 33, 165509: 41, 165618: 45, 165969: 39, 166566: 81, 167219: 7, 167478: 74, 168550: 11, 168589: 39, 169315: 73, 169548: 33, 169617: 45, 170511: 52, 170715: 83, 170850: 37, 171409: 86, 171465: 43, 171661: 83, 172447: 49, 172719: 59, 173057: 59, 173079: 81, 173796: 5, 174031: 52, 174045: 60, 175234: 28, 175821: 44, 176286: 81, 176413: 36, 177906: 8, 177909: 34, 178130: 37, 178378: 91, 178460: 77, 178783: 52, 179873: 22, 180090: 6, 180354: 43, 180380: 73, 180466: 95, 180820: 48, 181302: 66, 181706: 13, 181890: 42, 182312: 3, 182604: 99, 183513: 76, 184167: 44, 185076: 86, 185696: 89, 186674: 11, 187718: 48, 187809: 95, 187890: 5, 187909: 10, 189036: 79, 189178: 13, 189864: 40, 190083: 93, 190424: 62, 190488: 26, 190893: 9, 192263: 39, 192415: 44, 192941: 62, 194293: 39, 194443: 70, 194948: 21, 194976: 79, 195624: 93, 196338: 53, 196432: 81, 196954: 74, 197115: 3, 197135: 96, 197398: 36, 197426: 93, 198089: 84, 198215: 28, 199906: 23, 200092: 87, 200843: 43, 201178: 57, 201329: 8, 201583: 61, 201854: 30, 202025: 62, 202085: 53, 202123: 4, 202188: 88, 202256: 62, 202347: 49, 202526: 86, 202593: 58, 202632: 72, 203592: 68, 203705: 11, 203849: 8, 204296: 45, 205019: 40, 205422: 79, 205490: 13, 207062: 11, 207229: 36, 208095: 40, 208419: 95, 208746: 96, 209432: 50, 209948: 9, 210309: 53, 210334: 49, 210462: 14, 210540: 54, 210938: 37, 211077: 27, 211800: 82, 212013: 26, 212281: 27, 
213145: 21, 213518: 70, 214176: 8, 214206: 56, 214587: 72, 216090: 68, 216292: 11, 216320: 75, 216956: 18, 217049: 83, 219247: 81, 219388: 72, 219595: 56, 219783: 96, 220140: 71, 220924: 10, 222295: 96, 222316: 51, 222360: 28, 222953: 30, 223387: 17, 223698: 2, 224368: 37, 227243: 72, 227542: 38, 227553: 53, 227705: 80, 227796: 99, 228224: 71, 228870: 37, 229450: 26, 230715: 64, 231774: 81, 231997: 60, 232564: 54, 232791: 67, 233542: 77, 233545: 62, 234606: 33, 236046: 78, 236145: 10, 236212: 48, 236430: 20, 236501: 32, 236637: 34, 236906: 25, 236992: 72, 237018: 37, 237380: 6, 238451: 31, 238769: 39, 238818: 48, 238968: 20, 239921: 78, 240204: 59, 240788: 16, 241045: 66, 241428: 93, 241764: 84, 241899: 54, 242690: 62, 242930: 39, 244816: 36, 245106: 69, 245238: 50, 245397: 12, 245948: 2, 246043: 17, 246164: 34, 246509: 23, 247016: 16, 248475: 46, 249393: 89, 249559: 24, 249654: 2, 249801: 1, 250006: 53, 251111: 67, 251566: 99, 252014: 59, 252413: 72, 252582: 37, 252773: 71, 252815: 21, 253104: 83, 253166: 5, 253541: 65, 253954: 12, 254040: 17, 254989: 80, 255152: 44, 255486: 1, 256334: 43, 258181: 91, 259442: 38, 259464: 12, 259873: 84, 260280: 38, 261102: 42, 261383: 53, 261440: 38, 262424: 22, 262640: 2, 263107: 78, 263161: 86, 263421: 98, 264132: 65, 264328: 68, 264647: 35, 264728: 67, 265539: 20, 266123: 44, 267481: 97, 267661: 69, 268210: 8, 268424: 84, 268444: 26, 269349: 47, 269654: 3, 270043: 34, 270559: 26, 271324: 94, 271449: 86, 272250: 12, 272267: 38, 272906: 85, 273717: 1, 273760: 10, 274380: 23, 274923: 43, 275418: 82, 275504: 21, 276330: 37, 278083: 43, 279333: 38, 280823: 56, 281233: 25, 281902: 15, 281995: 41, 282055: 2, 282205: 25, 283166: 24, 285745: 22, 286257: 96, 286546: 49, 287169: 74, 287635: 39, 288220: 29, 288343: 0, 289532: 5, 290349: 5, 290634: 23, 291685: 46, 292613: 43, 292704: 70, 292886: 3, 293880: 94, 294127: 50, 294262: 75, 294597: 54, 295915: 11, 296551: 35, 296983: 69, 298523: 55, 298731: 5, 298800: 5, 298880: 0, 299382: 3, 
299397: 32, 299515: 30, 301261: 79, 301610: 4, 302121: 8, 303078: 50, 303238: 53, 303452: 38, 303487: 82, 303576: 11, 304769: 34, 306373: 17, 307384: 96, 307582: 40, 307938: 78, 308050: 50, 308127: 1, 309093: 61, 309703: 35, 310022: 17, 311273: 66, 311823: 87, 311869: 66, 311972: 10, 313967: 75, 314184: 85, 314650: 20, 314982: 33, 315203: 75, 315703: 93, 316325: 93, 316339: 65, 316700: 31, 317104: 23, 317301: 4, 318045: 47, 318315: 80, 318974: 25, 319691: 15, 319760: 35, 320669: 86, 320733: 30, 322590: 58, 322912: 95, 324895: 38, 325716: 95, 325779: 25, 326168: 74, 326395: 37, 326996: 70, 327146: 96, 329264: 98, 329396: 37, 329525: 80, 329538: 32, 329740: 99, 329782: 64, 330552: 34, 330617: 20, 332497: 95, 333337: 59, 333727: 36, 334150: 90, 335266: 68, 335363: 68, 335560: 99, 336351: 43, 336622: 71, 336931: 2, 338285: 58, 338653: 50, 338876: 41, 339008: 64, 339775: 0, 340458: 12, 340568: 24, 341583: 64, 342522: 84, 343295: 26, 343339: 86, 343424: 97, 343647: 8, 346186: 48, 346730: 31, 346770: 12, 346821: 66, 347236: 48, 347314: 30, 348071: 93, 348477: 2, 349772: 80, 349913: 70, 350464: 21, 350597: 13, 350677: 27, 351334: 41, 351984: 42, 352699: 85, 354381: 56, 354407: 60, 356894: 16, 357508: 45, 357578: 34, 358431: 37, 359346: 47, 360011: 21, 360535: 0, 360580: 91, 360632: 89, 361406: 40, 362400: 15, 363544: 69, 363714: 89, 364215: 50, 364450: 60, 365839: 31, 366189: 49, 366539: 86, 366611: 51, 369666: 2, 369678: 97, 369790: 91, 370256: 8, 370309: 87, 371338: 3, 373097: 47, 373171: 48, 373615: 33, 374292: 67, 374757: 19, 375075: 8, 375542: 21, 376107: 57, 376126: 2, 376272: 35, 376587: 60, 378186: 91, 378294: 49, 379198: 65, 380921: 74, 381645: 45, 382159: 8, 383454: 57, 383614: 61, 383615: 68, 384746: 94, 385457: 77, 385677: 81, 385790: 64, 385846: 79, 386204: 89, 386943: 57, 387316: 63, 388027: 49, 388030: 59, 388226: 45, 388326: 0, 388570: 76, 388839: 98, 388970: 85, 389808: 55, 390222: 96, 391197: 40, 391756: 49, 392118: 0, 392893: 48, 393887: 83, 394027: 96, 
395414: 31, 395631: 96, 395821: 77, 396000: 38, 396143: 34, 398647: 95, 398931: 28, 399429: 57, 399577: 86, 399578: 47, 400470: 18, 401226: 42, 401489: 87, 401789: 89, 402948: 96, 402996: 42, 403514: 76, 404059: 72, 404487: 77, 404701: 81, 405334: 4, 408425: 91, 408965: 73, 409236: 55, 409339: 68, 409948: 49, 410817: 17, 411078: 90, 412566: 36, 412586: 83, 412649: 3, 412670: 80, 412773: 28, 412786: 56, 414054: 27, 414331: 48, 414436: 7, 414855: 1, 415456: 40, 415891: 19, 416153: 44, 417057: 66, 417506: 86, 418002: 33, 418005: 10, 418335: 14, 418918: 2, 419619: 81, 421095: 76, 421372: 63, 422424: 84, 422915: 6, 423195: 99, 423812: 70, 423950: 89, 424086: 47, 424130: 92, 425370: 66, 425434: 37, 426043: 5, 426168: 92, 426469: 10, 426705: 56, 428360: 11, 428453: 54, 428523: 96, 429329: 95, 430095: 19, 430585: 10, 430655: 42, 430913: 37, 432610: 44, 433878: 9, 434200: 59, 434639: 98, 434776: 33, 436026: 24, 436502: 92, 437338: 81, 437449: 79, 438645: 76, 438782: 90, 439287: 87, 440058: 87, 440243: 59, 440604: 30, 441110: 91, 441609: 5, 441685: 22, 442247: 90, 442429: 90, 442591: 11, 443269: 18, 443881: 76, 444139: 64, 444597: 24, 445845: 13, 446330: 65, 446592: 15, 446626: 38, 446768: 90, 447096: 77, 447373: 29, 447815: 84, 448337: 68, 449096: 44, 449262: 15, 449376: 94, 449691: 10, 449868: 76, 449921: 64, 450826: 84, 451055: 31, 451249: 37, 451394: 46, 451665: 7, 451757: 96, 453159: 99, 453349: 32, 453584: 54, 453722: 77, 453728: 63, 453837: 45, 453895: 52, 454505: 77, 454658: 3, 454801: 49, 455130: 86, 455190: 35, 456138: 85, 457321: 1, 457406: 44, 457469: 33, 458491: 72, 458663: 64, 459596: 1, 460195: 98, 460268: 29, 462672: 47, 462936: 8, 463124: 85, 463566: 11, 464054: 6, 465212: 44, 465457: 93, 466048: 92, 466190: 32, 466819: 47, 467051: 40, 467138: 79, 467139: 26, 467584: 78, 468203: 37, 468206: 40, 468817: 50, 469011: 1, 469071: 86, 469482: 2, 471416: 74, 471705: 41, 471769: 1, 472098: 96, 472327: 45, 474214: 84, 474381: 35, 474433: 86, 474484: 79, 475413: 6, 
476750: 20, 477017: 78, 477297: 12, 477722: 59, 478436: 94, 478843: 33, 479297: 62, 479481: 70, 480629: 0, 480676: 85, 481302: 0, 481920: 21, 482500: 23, 482644: 99, 484333: 96, 485042: 73, 486116: 67, 486133: 50, 486580: 2, 487952: 64, 488225: 93, 488474: 67, 488494: 8, 488584: 6, 489717: 69, 491525: 85, 491789: 53, 492131: 33, 492269: 25, 492368: 5, 492444: 95, 492478: 15, 494533: 55, 494995: 79, 495017: 64, 495334: 74, 495625: 81, 495929: 48, 496356: 27, 496959: 43, 497329: 71, 498945: 53, 499065: 53, 499094: 44, 499626: 11, 499967: 11, 500588: 48, 501678: 19, 502185: 43, 502433: 46, 502900: 82, 503795: 65, 503951: 46, 503960: 53, 504053: 13, 504187: 48, 504653: 83, 504705: 69, 505687: 72, 506049: 86, 507134: 74, 507363: 20, 507681: 28, 507815: 6, 509081: 2, 510133: 97, 510863: 21, 512446: 52, 512953: 17, 512965: 4, 513216: 82, 513625: 59, 518501: 79, 519320: 8, 519545: 31, 520629: 37, 520647: 63, 521100: 20, 522201: 39, 522218: 88, 522244: 20, 524535: 51, 524628: 66, 524739: 63, 524830: 8, 525585: 37, 525811: 40, 526081: 8, 526527: 49, 526532: 13, 526799: 46, 527065: 70, 528512: 47, 528685: 79, 528789: 12, 528992: 1, 529602: 79, 529969: 61, 531288: 15, 531483: 38, 532155: 99, 532873: 51, 533017: 46, 533101: 32, 533994: 72, 534014: 69, 534081: 96, 534134: 11, 534854: 17, 535339: 64, 535556: 98, 535697: 51, 535959: 32, 536394: 10, 536441: 79, 537700: 36, 538812: 19, 539087: 31, 539135: 97, 539456: 92, 539672: 89, 539693: 31, 539947: 92, 540646: 40, 541023: 35, 541659: 74, 541925: 16, 542171: 12, 542393: 86, 544267: 32, 545798: 71, 547255: 35, 547390: 82, 547565: 32, 547603: 20, 548345: 97, 549639: 85, 549661: 81, 550014: 57, 550073: 20, 550307: 14, 550510: 6, 550772: 45, 551332: 70, 551654: 34, 552751: 76, 553648: 29, 554271: 1, 554289: 97, 554359: 56, 554878: 80, 555147: 3, 555799: 77, 556717: 26, 556757: 6, 557310: 30, 558512: 13, 558637: 51, 558800: 45, 559708: 13, 559811: 10, 560213: 86, 560897: 19, 560932: 46, 561549: 10, 562185: 15, 562285: 12, 562306: 24, 
562388: 88, 562396: 44, 563336: 89, 563750: 21, 563784: 68, 563796: 51, 564069: 56, 564617: 16, 565290: 46, 567378: 81, 567881: 28, 568320: 14, 568658: 33, 569207: 76, 569585: 52, 569658: 31, 569963: 26, 570061: 85, 570443: 87, 571032: 69, 571998: 66, 572143: 4, 573272: 32, 573304: 20, 574104: 59, 574566: 19, 574760: 49, 574829: 38, 574903: 74, 574979: 26, 575233: 14, 575421: 56, 576744: 72, 577560: 30, 577782: 49, 577959: 69, 578033: 14, 578204: 69, 578574: 36, 578939: 42, 579917: 66, 580596: 88, 580674: 72, 580832: 20, 581452: 94, 581792: 0, 581801: 37, 581960: 42, 582130: 35, 582663: 39, 582749: 12, 583097: 43, 583544: 48, 583745: 33, 583939: 63, 584453: 54, 584741: 10, 584888: 85, 585515: 56, 585771: 49, 586042: 20, 586360: 27, 586370: 83, 586398: 33, 586442: 62, 586694: 44, 586932: 51, 586964: 92, 588329: 99, 589029: 33, 589047: 40, 589632: 38, 589661: 51, 589914: 21, 590455: 47, 590850: 75, 591077: 2, 591205: 14, 591634: 37, 592517: 76, 592729: 75, 593378: 39, 593449: 34, 593478: 61, 594541: 71, 594986: 9, 595962: 50, 597273: 90, 597462: 4, 598736: 86, 599117: 63, 599157: 83, 599482: 20, 601553: 42, 601738: 67, 601752: 24, 601857: 61, 602488: 60, 602506: 24, 602560: 55, 602763: 32, 602837: 8, 603023: 0, 603592: 31, 604328: 72, 604517: 2, 605571: 96, 605676: 99, 606893: 79, 607403: 42, 608063: 5, 608307: 33, 608993: 19, 609046: 50, 609171: 95, 609374: 4, 609696: 95, 609768: 90, 610234: 68, 611311: 91, 611313: 75, 611711: 82, 612164: 41, 612561: 47, 612663: 74, 613058: 44, 613233: 97, 613827: 57, 614161: 36, 614226: 40, 616020: 89, 616339: 12, 616492: 50, 616519: 85, 617576: 29, 617870: 21, 618728: 99, 619121: 38, 620862: 42, 621029: 97, 621172: 92, 621915: 8, 622086: 67, 623420: 47, 624713: 87, 624960: 36, 625132: 46, 625913: 89, 626006: 19, 626007: 73, 626044: 5, 626156: 50, 627618: 29, 628386: 49, 628736: 94, 628943: 51, 629589: 14, 629913: 30, 631554: 73, 631599: 41, 632685: 11, 633217: 70, 633708: 4, 633722: 67, 634205: 41, 634684: 91, 635013: 26, 635094: 
91, 635189: 68, 637370: 48, 637626: 13, 638969: 85, 639888: 27, 640093: 81, 640174: 43, 640415: 63, 640829: 46, 640847: 53, 642042: 61, 642085: 74, 642118: 89, 642587: 28, 643154: 74, 643933: 15, 644343: 64, 644554: 76, 644994: 58, 645020: 59, 645813: 49, 645962: 83, 646212: 20, 646542: 42, 646607: 85, 646776: 79, 646923: 19, 647034: 29, 647080: 1, 647904: 85, 648641: 36, 649050: 41, 649247: 19, 649969: 68, 651463: 3, 652052: 96, 652518: 13, 653368: 22, 654091: 1, 654201: 80, 654408: 97, 654607: 12, 655524: 3, 655541: 25, 655959: 33, 656045: 89, 657025: 12, 657428: 47, 657928: 78, 658225: 0, 658598: 60, 658926: 48, 659430: 31, 659574: 45, 661646: 30, 661764: 48, 662135: 53, 662238: 7, 662412: 53, 662495: 89, 663690: 30, 664486: 22, 664622: 65, 664652: 40, 665414: 60, 665569: 27, 665588: 15, 666110: 80, 666661: 92, 667329: 76, 669684: 60, 670825: 30, 670897: 50, 671160: 53, 672151: 55, 672666: 84, 673049: 6, 673131: 54, 674383: 70, 674465: 49, 676864: 54, 677108: 56, 677480: 1, 677938: 49, 680422: 48, 680704: 50, 680722: 55, 680990: 13, 681685: 80, 681855: 32, 682291: 92, 683498: 54, 683638: 84, 684487: 27, 685000: 76, 686131: 60, 686726: 38, 686770: 47, 688290: 20, 688943: 3, 689074: 92, 689279: 23, 689281: 76, 689608: 1, 689695: 28, 690458: 6, 690610: 81, 690665: 54, 690795: 37, 691111: 95, 692329: 60, 692672: 89, 693303: 55, 693483: 52, 693535: 68, 694192: 26, 694700: 34, 694981: 5, 695206: 24, 695245: 5, 696088: 92, 696262: 72, 696503: 57, 696543: 69, 698007: 20, 698162: 9, 698296: 62, 698317: 41, 698675: 55, 699156: 35, 699219: 88, 699246: 37, 699585: 71, 700154: 72, 700363: 92, 700496: 26, 702210: 41, 702648: 35, 702785: 94, 702980: 31, 703455: 59, 703540: 23, 703745: 26, 704037: 35, 705858: 27, 706205: 99, 706247: 47, 706592: 43, 707868: 14, 708020: 2, 708500: 62, 709239: 40, 709531: 58, 709545: 34, 709986: 33, 710792: 26, 710994: 2, 711841: 50, 711855: 63, 712388: 40, 712521: 31, 713075: 55, 713918: 55, 714713: 54, 717065: 67, 717445: 14, 717531: 90, 717646: 
84, 718101: 85, 718226: 28, 718413: 60, 719337: 72, 719348: 21, 719836: 72, 720359: 41, 720857: 64, 720981: 18, 721343: 98, 722285: 37, 722419: 65, 723438: 23, 724037: 69, 724062: 21, 724456: 38, 724719: 41, 724872: 20, 725082: 48, 725182: 97, 725332: 80, 725719: 7, 725742: 11, 725832: 4, 726387: 28, 728880: 10, 729552: 14, 729696: 20, 731211: 28, 731382: 13, 732042: 39, 732108: 9, 732176: 64, 732211: 95, 732393: 40, 732876: 91, 732928: 65, 733673: 47, 733989: 63, 734715: 51, 734979: 40, 735030: 35, 735101: 70, 735191: 3, 735219: 54, 736927: 58, 736963: 97, 737015: 10, 737183: 76, 737371: 78, 738234: 7, 738239: 30, 738629: 46, 738958: 80, 739877: 7, 739882: 21, 740041: 53, 740365: 33, 740801: 44, 741004: 14, 742492: 65, 742514: 64, 743235: 93, 743874: 71, 744672: 70, 745274: 35, 745346: 78, 745641: 12, 746033: 82, 746475: 2, 746509: 98, 748123: 92, 748872: 65, 750054: 80, 750693: 58, 750732: 18, 751048: 7, 751770: 42, 751779: 83, 753592: 74, 753871: 41, 754738: 78, 755360: 1, 755549: 53, 755584: 82, 755651: 48, 755924: 26, 756898: 28, 758287: 9, 758525: 73, 759110: 6, 759266: 34, 759538: 49, 760378: 71, 761018: 22, 761749: 24, 761776: 69, 761823: 85, 762930: 59, 763117: 54, 763409: 9, 763632: 86, 763768: 61, 764488: 71, 764562: 60, 765077: 76, 765342: 86, 765429: 73, 765707: 88, 765714: 22, 766079: 43, 766592: 91, 767403: 4, 768081: 8, 768484: 89, 768632: 9, 768769: 20, 769080: 25, 769218: 72, 769872: 91, 769969: 58, 770016: 58, 770831: 84, 771025: 58, 771438: 20, 772348: 98, 772545: 2, 773013: 45, 773239: 61, 773479: 69, 773514: 47, 773730: 58, 775282: 21, 775471: 29, 775683: 75, 776976: 26, 777295: 33, 777443: 91, 778328: 57, 779787: 97, 780331: 28, 781066: 45, 782513: 99, 782745: 62, 782848: 60, 783394: 42, 783753: 5, 784264: 35, 784382: 70, 784985: 19, 785436: 92, 785605: 23, 785843: 55, 786112: 27, 786332: 96, 786916: 4, 787298: 81, 788343: 32, 790625: 30, 790659: 66, 791536: 14, 792251: 75, 792418: 66, 793177: 94, 793298: 91, 793750: 18, 793941: 92, 794342: 
61, 794794: 54, 794967: 87, 795069: 77, 795112: 97, 795413: 85, 796146: 61, 796217: 82, 796504: 66, 796713: 8, 797400: 90, 797880: 50, 798399: 36, 798555: 26, 799071: 92, 800962: 95, 801035: 90, 801274: 82, 801871: 93, 801886: 13, 802271: 3, 802710: 47, 803771: 22, 803850: 39, 804118: 44, 805504: 45, 805592: 90, 805660: 47, 805736: 13, 806134: 55, 806504: 13, 807892: 98, 808113: 60, 808613: 24, 808663: 8, 809303: 23, 809397: 88, 809541: 20, 809627: 63, 810041: 53, 810935: 86, 810950: 85, 811416: 67, 811551: 1, 811687: 86, 812941: 44, 813491: 44, 813523: 93, 814684: 63, 814960: 13, 816030: 71, 816287: 48, 816578: 15, 817093: 54, 817191: 47, 817274: 77, 817385: 11, 819330: 70, 819541: 3, 819736: 55, 819872: 73, 819953: 51, 820427: 75, 820628: 64, 821199: 97, 821274: 38, 821665: 76, 822157: 50, 823056: 49, 825124: 92, 825373: 41, 825532: 33, 825696: 50, 825775: 56, 826584: 54, 827127: 52, 827382: 75, 827924: 65, 829049: 65, 829553: 6, 830116: 69, 830945: 80, 832344: 10, 832403: 45, 832975: 90, 834404: 3, 834587: 18, 834901: 30, 834950: 84, 835412: 49, 835512: 98, 835606: 50, 835877: 67, 836109: 85, 836303: 9, 836654: 82, 836800: 26, 837088: 13, 837516: 90, 838517: 34, 839321: 53, 839396: 84, 839523: 79, 839744: 51, 839887: 54, 840081: 73, 841745: 65, 841992: 39, 842878: 57, 842913: 68, 843592: 57, 844107: 58, 844144: 43, 844573: 67, 845766: 74, 845903: 76, 846375: 18, 847106: 6, 847528: 86, 847778: 58, 847838: 68, 847915: 97, 848589: 46, 848921: 63, 849653: 88, 850644: 2, 850988: 98, 850998: 65, 851268: 37, 851356: 94, 851864: 2, 852188: 88, 852534: 67, 852891: 46, 853190: 80, 853594: 67, 853818: 42, 853989: 73, 854351: 37, 854517: 96, 855712: 77, 856208: 34, 856298: 24, 856334: 66, 856423: 17, 856571: 78, 857097: 63, 857239: 19, 858276: 27, 858689: 58, 859179: 29, 859829: 63, 859945: 0, 860792: 35, 861010: 1, 861590: 76, 861603: 9, 862660: 48, 863056: 77, 864294: 81, 865698: 28, 866179: 31, 866371: 0, 866463: 40, 866517: 47, 866688: 72, 866903: 65, 867050: 32, 
867107: 37, 867111: 7, 867260: 97, 867289: 91, 867818: 54, 867890: 95, 868897: 88, 869632: 8, 869879: 4, 870554: 69, 870788: 69, 871230: 70, 871328: 89, 871380: 40, 871567: 57, 871787: 86, 871887: 96, 872655: 42, 873916: 84, 874052: 82, 874367: 82, 874566: 8, 874999: 97, 875355: 54, 876393: 44, 876889: 65, 877006: 25, 877116: 25, 877320: 47, 877356: 66, 877852: 14, 878372: 43, 878773: 81, 879667: 12, 879733: 15, 879995: 30, 881202: 61, 881535: 80, 881881: 66, 882012: 8, 883196: 28, 883324: 89, 884565: 55, 884594: 62, 884622: 32, 884977: 1, 885601: 51, 885949: 23, 886016: 91, 886497: 3, 887012: 61, 887377: 56, 887994: 4, 888812: 38, 890385: 60, 891253: 17, 892188: 6, 892319: 0, 892516: 66, 892692: 76, 892855: 5, 892911: 62, 893184: 9, 893551: 81, 894407: 89, 895009: 15, 898357: 68, 898564: 46, 901102: 60, 903164: 50, 903249: 16, 903533: 12, 903872: 59, 903974: 35, 904538: 70, 904597: 40, 905929: 7, 906460: 80, 906561: 25, 906751: 59, 907172: 65, 907602: 23, 908701: 16, 908708: 31, 909201: 11, 910288: 38, 910407: 77, 910723: 87, 910982: 34, 911304: 18, 911648: 86, 914196: 6, 915170: 25, 915608: 23, 916202: 6, 917150: 45, 917318: 17, 917650: 17, 918530: 38, 918980: 0, 919096: 15, 919248: 27, 919559: 66, 920467: 36, 921062: 49, 921267: 74, 921288: 31, 921590: 83, 921726: 21, 921854: 57, 922272: 70, 923001: 97, 923133: 85, 924462: 25, 924479: 7, 925722: 6, 926124: 20, 926349: 75, 926355: 9, 927664: 14, 928426: 47, 928504: 19, 928705: 92, 929482: 45, 930044: 64, 930833: 57, 931402: 32, 931782: 11, 931927: 19, 932452: 56, 933267: 15, 933412: 1, 933923: 18, 934436: 59, 934754: 60, 934945: 63, 935825: 38, 936402: 70, 936525: 0, 936762: 56, 937921: 59, 938472: 53, 939008: 28, 939367: 82, 939981: 18, 940232: 87, 940831: 71, 941125: 40, 941212: 82, 941344: 77, 941769: 53, 941911: 46, 942221: 31, 942308: 48, 943013: 25, 943397: 13, 943603: 37, 945024: 35, 945797: 28, 946457: 72, 946937: 91, 948326: 82, 948384: 63, 949373: 73, 950910: 86, 950968: 70, 950982: 70, 951778: 18, 
951960: 21, 952202: 38, 952577: 3, 953852: 73, 955881: 3, 955921: 44, 956079: 63, 956820: 12, 957224: 92, 957308: 20, 957598: 72, 957802: 73, 957981: 34, 958068: 84, 958105: 87, 958199: 53, 958218: 75, 958949: 59, 959081: 56, 960025: 36, 960036: 17, 960136: 28, 960219: 31, 961008: 97, 961400: 71, 961573: 57, 961687: 15, 961705: 80, 961727: 97, 961786: 80, 961912: 15, 963104: 32, 963561: 10, 964612: 94, 964676: 91, 965027: 14, 965242: 53, 965753: 56, 966923: 13, 967133: 29, 967136: 82, 967458: 36, 967721: 80, 968382: 55, 968466: 3, 968607: 0, 969225: 44, 969767: 21, 970248: 19, 970251: 46, 970405: 0, 970615: 39, 970696: 94, 970960: 82, 971468: 14, 971724: 22, 972114: 8, 972199: 30, 973090: 2, 973104: 67, 973488: 18, 973680: 63, 973775: 36, 973976: 1, 974093: 29, 974645: 26, 975518: 82, 975592: 14, 975653: 14, 976142: 85, 976282: 70, 977219: 17, 978295: 39, 978567: 39, 980461: 37, 982196: 59, 982356: 51, 983101: 43, 983467: 76, 983872: 29, 984230: 24, 985009: 85, 985960: 55, 986134: 43, 986237: 17, 986667: 5, 987035: 36, 987341: 23, 988004: 66, 988066: 93, 988766: 34, 989664: 9, 989826: 37, 990237: 40, 990813: 9, 990860: 4, 992260: 41, 992547: 0, 992600: 14, 993084: 52, 993257: 96, 993770: 20, 993951: 51, 994549: 27, 994832: 79, 995166: 31, 995488: 82, 997500: 4, 998084: 86, 998800: 91, 999355: 30}, + {1060: 88, 5767: 44, 6015: 55, 6303: 77, 9905: 6, 10705: 78, 12523: 68, 13669: 12, 15534: 65, 17092: 52, 21072: 65, 21990: 58, 22096: 68, 24342: 82, 25407: 35, 25694: 95, 25824: 49, 27122: 32, 27215: 39, 28541: 64, 29084: 16, 33303: 70, 34230: 97, 34572: 57, 34850: 12, 36310: 36, 37586: 57, 37643: 31, 38047: 31, 38829: 3, 39312: 87, 39714: 45, 40073: 93, 43222: 36, 43669: 21, 44710: 92, 44952: 92, 46800: 9, 47080: 95, 51340: 17, 51351: 66, 51807: 73, 53156: 41, 55669: 84, 55884: 70, 59397: 99, 59827: 87, 60926: 57, 62644: 54, 63794: 97, 65417: 96, 66546: 4, 67160: 85, 67586: 18, 69670: 76, 72785: 78, 73060: 31, 73719: 53, 73929: 87, 74134: 5, 75771: 45, 76038: 72, 
76607: 31, 79852: 37, 81148: 41, 81307: 88, 81488: 87, 82917: 33, 83532: 55, 85592: 75, 86789: 59, 87090: 88, 87682: 83, 88153: 92, 89394: 99, 92519: 90, 94628: 49, 95524: 70, 96813: 69, 98375: 89, 98585: 12, 101078: 15, 101999: 0, 102067: 60, 103924: 41, 107863: 61, 108757: 84, 111436: 52, 112912: 23, 113155: 85, 113227: 48, 114151: 63, 114372: 1, 115080: 61, 115349: 31, 116694: 81, 116862: 50, 117750: 10, 118350: 29, 118436: 18, 119512: 90, 120194: 23, 120216: 85, 120284: 56, 122926: 59, 125422: 51, 128434: 4, 128846: 1, 130765: 19, 133364: 10, 133924: 60, 135055: 67, 136187: 0, 137013: 89, 138896: 3, 140707: 26, 141348: 21, 142934: 80, 147327: 86, 148553: 51, 148831: 66, 150022: 64, 150061: 75, 150674: 27, 151968: 29, 153016: 92, 160761: 14, 163613: 79, 163679: 92, 163686: 31, 164414: 24, 165610: 72, 166174: 35, 166823: 23, 167550: 79, 169820: 42, 170269: 19, 171816: 83, 172444: 59, 174412: 6, 175384: 70, 177352: 41, 178237: 34, 179019: 98, 180092: 91, 183382: 19, 185273: 87, 186721: 94, 186858: 22, 187252: 36, 188195: 81, 188747: 83, 189718: 46, 192094: 46, 193537: 50, 193811: 25, 195363: 15, 196022: 96, 196863: 58, 197656: 7, 200508: 89, 201072: 18, 201140: 23, 206274: 32, 207846: 64, 207847: 76, 208484: 58, 211773: 34, 212316: 80, 213847: 3, 213952: 82, 216235: 57, 216636: 93, 220514: 32, 220876: 2, 226726: 3, 226932: 12, 229386: 15, 231216: 29, 231802: 12, 233483: 27, 236066: 33, 237869: 74, 238118: 23, 239410: 14, 240527: 67, 242800: 49, 242856: 96, 245335: 88, 246279: 80, 246714: 40, 247069: 78, 247359: 98, 252279: 59, 253730: 41, 253996: 5, 254683: 75, 254788: 49, 254943: 46, 256895: 4, 257083: 95, 257600: 6, 257660: 94, 257668: 97, 261702: 62, 263561: 20, 265301: 5, 265471: 56, 267570: 82, 268869: 76, 269825: 66, 269935: 14, 270046: 1, 271262: 55, 271422: 74, 274390: 82, 277421: 74, 277861: 8, 278233: 57, 278617: 78, 279305: 28, 280675: 24, 282166: 58, 282406: 97, 285371: 37, 287849: 79, 287865: 17, 288172: 87, 289938: 75, 292464: 19, 293254: 57, 293269: 
98, 297959: 66, 298112: 0, 298736: 42, 300307: 85, 304850: 79, 305554: 2, 307580: 46, 307802: 94, 308827: 74, 313139: 17, 314620: 96, 314683: 32, 315281: 86, 316364: 93, 318225: 57, 319669: 74, 320094: 72, 320460: 48, 321574: 75, 323624: 18, 323710: 56, 325273: 80, 326318: 18, 327229: 62, 330470: 13, 330475: 0, 334580: 42, 336079: 34, 337130: 27, 337427: 55, 337512: 31, 338099: 60, 340468: 99, 340541: 67, 340745: 70, 340887: 26, 342005: 23, 343186: 75, 347266: 11, 348186: 74, 350681: 70, 351889: 57, 352628: 17, 352846: 94, 354053: 44, 356654: 52, 356928: 81, 357069: 63, 357548: 54, 359018: 53, 360920: 63, 361001: 68, 361472: 98, 362283: 78, 363210: 41, 365059: 77, 365561: 67, 367195: 74, 367644: 19, 367799: 14, 369031: 8, 370372: 51, 371774: 30, 372140: 83, 372142: 90, 374691: 27, 375297: 77, 375541: 80, 376008: 84, 377301: 49, 378554: 13, 380558: 84, 381382: 12, 381489: 48, 381549: 73, 383492: 49, 388947: 86, 389858: 94, 390252: 48, 391416: 12, 391459: 92, 392830: 79, 393341: 74, 393836: 22, 395130: 38, 395579: 67, 396588: 59, 397178: 28, 397885: 8, 399204: 65, 399544: 71, 400380: 4, 400721: 53, 403814: 82, 406200: 56, 406421: 83, 406656: 78, 408078: 92, 408197: 11, 413042: 70, 415022: 82, 415222: 58, 416060: 46, 416766: 9, 417160: 11, 418639: 82, 419121: 50, 420086: 83, 420781: 46, 426520: 61, 427273: 10, 427710: 48, 428136: 65, 428158: 13, 431939: 4, 432377: 13, 434048: 30, 434707: 42, 434963: 84, 435016: 23, 436601: 36, 438380: 79, 440722: 26, 441227: 85, 442074: 72, 442363: 73, 446344: 42, 447303: 17, 448190: 27, 448224: 51, 449687: 78, 451303: 39, 451515: 52, 452014: 49, 452088: 40, 452624: 29, 454137: 26, 454250: 97, 454324: 59, 454362: 2, 455244: 6, 455297: 18, 458251: 68, 461675: 32, 462768: 3, 462889: 2, 463417: 23, 465948: 78, 466148: 90, 467034: 4, 467547: 18, 467916: 68, 468407: 52, 469458: 73, 469567: 26, 471082: 88, 472476: 92, 473001: 76, 474153: 36, 475732: 62, 476853: 3, 477206: 48, 477788: 78, 478625: 14, 478752: 71, 479993: 90, 480243: 6, 
480514: 5, 482263: 58, 487861: 83, 487913: 18, 489763: 13, 494077: 79, 495061: 10, 495218: 81, 495277: 44, 497620: 83, 504318: 48, 506161: 83, 508381: 64, 508468: 6, 508622: 51, 509387: 2, 510563: 89, 512903: 89, 514086: 22, 515229: 13, 516264: 62, 516325: 9, 516823: 12, 518092: 90, 518852: 31, 519567: 38, 522063: 18, 523235: 93, 523692: 50, 524477: 44, 524721: 46, 524922: 72, 525275: 72, 527381: 90, 529163: 61, 531458: 64, 532379: 31, 532672: 45, 533962: 88, 535201: 57, 536566: 52, 541858: 58, 542389: 72, 543252: 95, 543716: 51, 545529: 94, 546577: 25, 549140: 61, 550864: 96, 551455: 42, 551838: 92, 552151: 57, 553282: 65, 554324: 30, 554707: 38, 555215: 29, 556253: 2, 556915: 91, 557338: 78, 559803: 55, 560044: 15, 560926: 62, 561310: 11, 561625: 64, 561924: 50, 563155: 42, 569331: 10, 569960: 10, 570788: 0, 571030: 3, 572679: 37, 573096: 64, 574491: 7, 575152: 49, 575326: 72, 575511: 46, 575642: 45, 579027: 33, 581035: 21, 582044: 86, 583405: 81, 583702: 57, 584901: 41, 585216: 68, 585904: 99, 586643: 95, 587834: 66, 587945: 87, 589665: 45, 590478: 22, 595663: 33, 596037: 87, 596572: 63, 596994: 57, 597555: 16, 599138: 92, 599516: 72, 600059: 28, 600724: 63, 601396: 50, 602187: 25, 602287: 14, 604286: 46, 604977: 26, 610429: 60, 610857: 24, 611384: 97, 612685: 7, 613668: 91, 613733: 45, 614438: 93, 616916: 6, 621132: 28, 623381: 25, 624567: 94, 626670: 19, 628772: 47, 629628: 50, 630703: 18, 631304: 43, 637151: 40, 637268: 41, 637923: 54, 639403: 11, 639674: 82, 642231: 1, 645564: 61, 646394: 67, 647729: 75, 648762: 92, 650610: 44, 651102: 87, 653865: 37, 653900: 77, 656077: 82, 656204: 22, 657185: 43, 659756: 95, 660564: 95, 662908: 8, 663655: 85, 664042: 19, 665078: 15, 665867: 67, 665891: 50, 668093: 42, 668095: 40, 671953: 19, 672842: 63, 673173: 22, 675176: 34, 675620: 9, 676609: 95, 678712: 89, 680371: 9, 680915: 47, 680979: 94, 681486: 55, 681969: 30, 683149: 17, 684532: 11, 685131: 25, 685270: 49, 685839: 21, 687065: 63, 687588: 40, 689915: 41, 690218: 
34, 691091: 77, 691758: 87, 692634: 81, 695927: 58, 696493: 10, 696808: 98, 697197: 77, 697756: 89, 697844: 44, 698134: 32, 701685: 53, 701730: 19, 704936: 23, 709034: 18, 709042: 67, 709769: 67, 714409: 30, 715669: 34, 716974: 63, 719369: 26, 719859: 33, 720169: 7, 725232: 54, 729296: 18, 731904: 79, 732729: 58, 733178: 62, 735327: 59, 736710: 33, 740600: 89, 744558: 24, 744987: 11, 746422: 94, 746598: 22, 746885: 27, 750078: 45, 751475: 30, 752596: 91, 753156: 74, 754303: 6, 755024: 91, 755083: 59, 755156: 2, 755416: 82, 755841: 64, 758128: 63, 758952: 43, 762002: 54, 764910: 31, 765486: 3, 767970: 50, 769297: 69, 771052: 82, 771866: 84, 771893: 54, 772957: 71, 776422: 89, 777118: 31, 777455: 23, 779623: 40, 780655: 36, 780972: 16, 781701: 50, 782142: 34, 786740: 16, 786783: 50, 787120: 87, 789945: 17, 792741: 92, 794690: 8, 796875: 78, 797325: 77, 798943: 46, 799635: 69, 799920: 21, 801758: 45, 802419: 15, 803085: 22, 803612: 62, 804023: 80, 804682: 76, 805046: 70, 805818: 2, 806605: 74, 806972: 12, 808945: 16, 810244: 42, 810683: 30, 810935: 84, 812700: 96, 815984: 29, 817484: 22, 818627: 42, 818858: 41, 820093: 67, 820781: 75, 822433: 46, 823235: 21, 824210: 12, 824479: 32, 824878: 9, 825596: 5, 825623: 83, 825818: 67, 825875: 3, 830397: 1, 833517: 97, 833621: 32, 833806: 51, 836786: 39, 837105: 79, 837133: 95, 837414: 44, 839871: 4, 840345: 35, 841120: 27, 841314: 45, 845102: 11, 850453: 89, 851081: 87, 853220: 41, 854976: 11, 856204: 78, 856271: 94, 857052: 85, 857372: 9, 858394: 0, 858768: 44, 860173: 99, 862931: 71, 863464: 23, 864285: 28, 866882: 59, 867312: 84, 870455: 12, 871297: 82, 874168: 90, 875774: 52, 878000: 74, 878845: 46, 879641: 48, 880744: 46, 881227: 38, 881352: 51, 881481: 52, 881932: 27, 881980: 13, 883299: 83, 883380: 91, 885103: 55, 885884: 73, 886293: 1, 890155: 35, 890515: 20, 891916: 91, 893079: 56, 893818: 94, 893839: 15, 894233: 15, 896948: 28, 897360: 79, 900791: 60, 901158: 79, 901805: 74, 901898: 7, 902878: 52, 904155: 72, 
904496: 84, 904932: 43, 905019: 35, 905021: 4, 907251: 78, 907922: 16, 908768: 60, 909854: 85, 910036: 34, 911882: 85, 912167: 29, 913048: 67, 913188: 15, 913735: 44, 914355: 35, 915191: 48, 917412: 39, 917721: 71, 921550: 6, 921565: 62, 921828: 28, 922564: 22, 923576: 64, 927537: 52, 927568: 89, 931575: 92, 931729: 50, 932272: 1, 932634: 54, 934574: 53, 934946: 93, 938467: 93, 941107: 82, 943981: 66, 946700: 42, 946746: 45, 946965: 3, 947957: 91, 948723: 3, 949811: 23, 951249: 69, 953533: 30, 953828: 67, 955808: 29, 955858: 39, 957811: 15, 960256: 20, 960639: 41, 961361: 11, 961655: 52, 961711: 69, 964037: 64, 964714: 33, 967672: 0, 969120: 90, 969501: 46, 970467: 31, 971858: 58, 972594: 29, 973809: 76, 973858: 62, 974023: 78, 974826: 74, 975979: 37, 976587: 22, 976718: 55, 978113: 59, 978526: 51, 978660: 88, 980455: 36, 982015: 8, 986034: 73, 988656: 62, 991680: 85, 992862: 6, 993134: 15, 993227: 53, 994088: 39, 994112: 38, 994742: 20, 994926: 66, 995006: 27, 999193: 41}, + {4591: 18, 5879: 14, 6009: 92, 6276: 92, 10130: 37, 11676: 13, 18118: 94, 21817: 45, 23678: 63, 28422: 80, 34831: 86, 35091: 58, 35142: 87, 41438: 53, 42375: 86, 42775: 84, 47043: 46, 47638: 81, 49336: 32, 49419: 75, 50033: 51, 51204: 13, 51227: 74, 52888: 69, 53375: 50, 53503: 37, 54054: 29, 57570: 70, 60212: 38, 60428: 63, 62036: 32, 63878: 54, 64755: 86, 66975: 69, 67099: 86, 68192: 26, 70384: 90, 71863: 95, 73283: 93, 74048: 21, 76412: 18, 83168: 76, 85557: 25, 86061: 74, 86582: 79, 87289: 9, 92416: 11, 94889: 93, 96580: 84, 97776: 88, 98109: 60, 98787: 43, 99542: 69, 99563: 2, 99810: 9, 102882: 63, 103692: 65, 105528: 10, 108150: 73, 108287: 40, 110744: 58, 113090: 42, 116196: 1, 116842: 17, 117080: 43, 119678: 27, 122903: 28, 123779: 21, 124134: 64, 126452: 34, 127814: 72, 128245: 35, 133803: 62, 134684: 74, 135291: 43, 136660: 12, 137208: 38, 138055: 8, 139637: 54, 139799: 23, 142332: 0, 142454: 17, 145536: 10, 146898: 59, 150008: 68, 155344: 70, 157760: 77, 159003: 38, 164577: 91, 
166829: 11, 168530: 69, 170045: 40, 171619: 12, 172727: 0, 174003: 12, 174727: 95, 175531: 89, 177797: 20, 178705: 88, 179161: 44, 179298: 70, 185309: 19, 187456: 60, 187867: 42, 187932: 20, 190296: 36, 191828: 40, 191847: 76, 193677: 20, 194766: 81, 205968: 68, 206485: 79, 207379: 68, 211656: 58, 212321: 28, 215526: 60, 216217: 12, 224013: 62, 224473: 73, 225481: 90, 226171: 14, 228461: 12, 229520: 98, 229697: 87, 229741: 51, 230466: 2, 233247: 32, 234027: 46, 234807: 31, 235003: 2, 235501: 35, 235866: 87, 239545: 67, 240046: 54, 240193: 17, 243621: 18, 244185: 40, 245709: 72, 245901: 73, 245920: 83, 249404: 82, 254807: 36, 256213: 98, 259858: 92, 260324: 92, 262468: 94, 269031: 17, 270516: 73, 271996: 94, 273200: 76, 273947: 49, 275320: 33, 280387: 9, 280613: 62, 280619: 21, 284764: 18, 285419: 62, 285659: 3, 286253: 93, 289076: 64, 292280: 60, 293957: 51, 295418: 76, 296044: 72, 296619: 37, 296622: 73, 297582: 57, 297997: 76, 298086: 48, 298234: 98, 299395: 70, 300352: 63, 300894: 52, 302299: 74, 304569: 12, 304839: 7, 305472: 92, 308107: 41, 314421: 42, 317099: 46, 321477: 2, 325384: 96, 326565: 61, 333665: 84, 334624: 2, 335333: 5, 335391: 35, 339639: 43, 342063: 92, 344626: 84, 348992: 98, 349075: 54, 350279: 58, 359179: 54, 367061: 21, 367408: 61, 368647: 22, 369067: 30, 372411: 4, 374727: 36, 374846: 61, 381555: 17, 384926: 22, 386185: 48, 386541: 4, 389358: 5, 394776: 84, 395528: 4, 396319: 41, 397741: 85, 401138: 9, 401440: 19, 405238: 50, 406822: 79, 406934: 5, 417249: 31, 417378: 19, 420239: 20, 424437: 42, 427017: 1, 431729: 47, 432099: 43, 438883: 72, 440549: 78, 448480: 80, 448607: 33, 450051: 28, 450939: 94, 452260: 26, 454563: 20, 456980: 70, 457418: 31, 458723: 49, 460194: 61, 461598: 78, 461723: 7, 462031: 58, 462073: 57, 462123: 69, 463505: 76, 465566: 53, 469143: 58, 472362: 44, 473285: 11, 474988: 51, 475277: 30, 476345: 58, 479850: 8, 482223: 71, 482754: 78, 483706: 30, 484803: 0, 485501: 2, 485739: 95, 485873: 73, 496315: 85, 498856: 67, 
498920: 17, 500620: 37, 500758: 59, 501412: 62, 503668: 42, 504359: 75, 508889: 62, 510982: 23, 511678: 43, 511977: 52, 517043: 70, 517151: 92, 517892: 17, 518133: 85, 520871: 27, 523989: 92, 526211: 39, 526688: 29, 530194: 66, 538710: 1, 539969: 61, 540679: 10, 541121: 77, 543939: 6, 547547: 5, 547718: 78, 549263: 90, 555368: 24, 555846: 66, 557663: 49, 563464: 17, 565128: 37, 567956: 34, 568005: 54, 569151: 59, 571200: 44, 572868: 92, 576660: 7, 576718: 92, 576730: 29, 578625: 6, 579038: 31, 579477: 63, 584170: 14, 586633: 78, 586670: 97, 588044: 76, 588869: 36, 592670: 28, 594667: 63, 599186: 13, 601398: 7, 606925: 74, 608891: 76, 611602: 27, 614160: 57, 621027: 87, 621344: 82, 621933: 54, 624914: 78, 628835: 90, 631242: 24, 631306: 34, 631634: 81, 632251: 41, 632303: 12, 634855: 52, 637160: 67, 637701: 55, 639498: 95, 640218: 74, 640538: 53, 641119: 66, 644425: 4, 644960: 90, 647767: 51, 647976: 97, 648378: 5, 648555: 82, 649482: 26, 651596: 45, 655101: 26, 655601: 7, 656916: 94, 660750: 34, 665619: 90, 673095: 23, 674051: 1, 676243: 26, 678172: 38, 678293: 93, 685240: 63, 688862: 60, 689676: 91, 696223: 81, 697419: 78, 701692: 13, 703018: 72, 703590: 57, 704967: 52, 705229: 85, 705352: 61, 706180: 83, 707148: 30, 708784: 25, 710551: 71, 712923: 1, 714094: 48, 715264: 60, 718802: 33, 718946: 52, 720042: 83, 721522: 68, 723175: 43, 723186: 67, 724855: 72, 726893: 13, 730698: 71, 731250: 82, 732730: 32, 737016: 33, 737063: 74, 738266: 93, 738517: 57, 745126: 46, 748412: 31, 749994: 91, 750249: 83, 752271: 3, 756040: 31, 757247: 67, 758414: 44, 758654: 84, 759762: 11, 760207: 48, 763143: 3, 763341: 8, 764824: 73, 767125: 57, 770056: 16, 773766: 47, 775587: 3, 777544: 99, 781813: 7, 782464: 68, 783882: 33, 783930: 32, 785391: 98, 786639: 29, 786691: 39, 788546: 38, 792873: 12, 793419: 78, 795876: 12, 797049: 93, 797226: 63, 798076: 85, 799191: 13, 803889: 37, 804414: 24, 806779: 54, 809955: 81, 813310: 39, 817561: 88, 826117: 79, 827176: 6, 827408: 50, 831113: 48, 
833142: 69, 837240: 84, 839035: 56, 840402: 10, 842750: 58, 844593: 22, 848244: 81, 850570: 94, 855640: 68, 857794: 69, 858563: 90, 858576: 92, 862713: 39, 863883: 14, 864082: 85, 866918: 59, 867943: 86, 869983: 74, 874864: 90, 876898: 40, 879031: 95, 880256: 39, 884440: 60, 884686: 93, 886224: 8, 894980: 86, 897323: 2, 901035: 50, 902073: 77, 904519: 35, 904775: 93, 905113: 33, 908658: 98, 910545: 61, 914125: 13, 914282: 34, 916474: 23, 920107: 28, 920388: 89, 921022: 3, 925047: 36, 925657: 4, 927615: 10, 928776: 68, 929727: 70, 935196: 94, 940166: 92, 942181: 86, 942651: 56, 945293: 73, 945706: 93, 952597: 46, 953794: 0, 956688: 5, 960163: 36, 961233: 21, 963916: 73, 965067: 65, 966030: 88, 968542: 67, 970117: 77, 970189: 2, 970803: 26, 974621: 57, 975099: 9, 977671: 85, 981660: 81, 982267: 28, 986247: 67, 993259: 25, 994943: 3, 997397: 66, 997627: 37, 997631: 45, 999575: 42}, + {891: 31, 1544: 65, 4765: 80, 5850: 28, 6768: 7, 9039: 99, 9577: 28, 10651: 80, 15746: 55, 17139: 45, 20769: 7, 20782: 50, 25088: 25, 25955: 15, 31832: 64, 32204: 81, 33318: 37, 33419: 67, 34359: 87, 41477: 22, 42337: 76, 43902: 3, 45155: 22, 46737: 91, 58179: 98, 60897: 89, 61752: 63, 62778: 60, 63762: 59, 67804: 60, 74091: 54, 74900: 53, 76120: 92, 76251: 42, 78076: 85, 78120: 14, 78161: 70, 80667: 89, 80850: 69, 81444: 38, 82657: 83, 84770: 77, 86718: 13, 91768: 74, 91853: 21, 93121: 3, 94776: 11, 97085: 57, 98401: 66, 99070: 52, 100433: 8, 100852: 92, 108097: 15, 109045: 20, 109546: 79, 109777: 60, 110972: 54, 112412: 42, 114111: 72, 117154: 89, 117271: 14, 117661: 5, 121278: 30, 126662: 56, 127878: 82, 133830: 72, 133957: 32, 139737: 68, 140589: 64, 149819: 52, 150289: 41, 150994: 68, 156056: 6, 157410: 47, 158434: 16, 159047: 91, 160966: 96, 163006: 34, 164276: 20, 165688: 71, 165967: 65, 169905: 59, 170819: 36, 178941: 49, 179209: 84, 180855: 37, 183217: 20, 185777: 46, 186080: 23, 186152: 11, 186863: 44, 189823: 71, 190530: 80, 190901: 25, 192065: 25, 192604: 97, 193194: 93, 
196182: 6, 196898: 99, 197906: 31, 199016: 65, 200452: 8, 200646: 85, 208553: 34, 215151: 77, 217639: 59, 217890: 17, 218479: 93, 225138: 63, 232800: 36, 233270: 11, 233495: 57, 234500: 82, 235595: 13, 237050: 2, 238125: 12, 239679: 31, 240794: 18, 241021: 33, 244195: 77, 244857: 74, 251889: 31, 253870: 97, 258054: 93, 259720: 30, 261361: 59, 265101: 9, 265629: 90, 266720: 23, 267907: 12, 270227: 97, 270586: 43, 270824: 62, 271902: 56, 274176: 54, 275130: 66, 275785: 43, 275840: 21, 276541: 83, 277199: 90, 278706: 66, 279406: 75, 282700: 95, 285376: 76, 286959: 35, 287247: 75, 288014: 74, 289805: 29, 292360: 96, 293820: 64, 294667: 48, 297047: 69, 297497: 92, 298064: 14, 299389: 26, 300536: 79, 300594: 8, 302603: 46, 304587: 37, 307113: 83, 308536: 3, 310164: 57, 310232: 77, 311363: 48, 311546: 63, 311564: 89, 311803: 12, 312136: 85, 312166: 15, 313228: 90, 313279: 44, 314497: 90, 318123: 41, 320304: 32, 322636: 54, 326322: 61, 329689: 80, 330304: 51, 331597: 27, 333870: 53, 337397: 46, 341001: 70, 342067: 22, 343954: 95, 353737: 44, 354352: 0, 354359: 2, 356213: 90, 357307: 96, 359515: 68, 360132: 15, 360184: 31, 364029: 85, 369139: 78, 369326: 36, 369740: 98, 370055: 98, 370352: 0, 371499: 72, 374917: 84, 377371: 43, 378389: 75, 378529: 93, 379702: 70, 380727: 52, 386254: 52, 389384: 54, 394815: 32, 395006: 10, 395409: 51, 397130: 19, 402616: 1, 409460: 91, 409581: 66, 411845: 94, 412386: 72, 413062: 47, 415195: 83, 415260: 42, 415648: 22, 425514: 77, 425810: 18, 426461: 33, 427427: 60, 427776: 65, 431345: 81, 431874: 36, 433746: 78, 436602: 81, 437774: 83, 438600: 70, 446729: 98, 448834: 70, 455484: 45, 455585: 26, 459968: 28, 460156: 88, 460543: 94, 461261: 36, 463403: 29, 471250: 80, 472910: 50, 474616: 25, 480510: 25, 482376: 89, 490948: 83, 491084: 90, 494088: 29, 497140: 48, 500187: 42, 500723: 10, 501599: 3, 502629: 48, 505230: 26, 513688: 9, 514151: 39, 516909: 37, 517165: 96, 517249: 71, 520015: 90, 525269: 39, 526566: 61, 531875: 56, 533518: 13, 533916: 
76, 534139: 47, 535735: 20, 544175: 14, 545254: 53, 546587: 50, 549404: 39, 549980: 16, 550988: 31, 552640: 44, 555461: 56, 555894: 58, 556219: 60, 556221: 74, 558969: 76, 560859: 64, 561940: 96, 565574: 86, 567649: 46, 567760: 49, 567946: 21, 568788: 10, 572299: 19, 572316: 27, 573235: 25, 574575: 70, 576695: 22, 577082: 88, 578945: 92, 579057: 28, 581335: 91, 581473: 66, 585764: 26, 589311: 46, 589538: 77, 589594: 3, 590202: 30, 591564: 8, 592218: 11, 592327: 60, 593456: 74, 593481: 8, 593785: 71, 594378: 60, 604138: 54, 607046: 25, 611878: 14, 624704: 30, 629141: 33, 634088: 80, 637616: 0, 638525: 59, 641007: 71, 642453: 56, 646457: 95, 654076: 47, 654098: 95, 654178: 35, 656013: 45, 656188: 57, 656773: 50, 660732: 8, 662252: 44, 675650: 39, 679371: 65, 682183: 51, 682236: 5, 684544: 66, 686292: 39, 686972: 95, 687732: 56, 689855: 82, 690499: 38, 690815: 45, 692217: 17, 692727: 11, 693127: 51, 696045: 40, 696344: 70, 697010: 7, 697120: 29, 698245: 91, 698349: 72, 698664: 17, 701986: 68, 702741: 81, 707582: 11, 708129: 35, 709967: 20, 711879: 74, 712550: 12, 713229: 53, 714379: 16, 716300: 65, 717011: 97, 718897: 92, 719006: 28, 719883: 79, 719967: 95, 720528: 91, 724988: 22, 726419: 91, 726638: 11, 726782: 93, 729852: 26, 732373: 81, 732577: 60, 734469: 64, 734680: 32, 736589: 71, 737004: 53, 737958: 5, 739425: 77, 739432: 78, 751662: 68, 755154: 73, 756804: 31, 757637: 67, 758906: 28, 761198: 22, 762571: 75, 764933: 7, 767826: 36, 768596: 75, 768760: 77, 769542: 53, 769982: 43, 774182: 27, 775738: 88, 777239: 12, 780054: 73, 781495: 84, 782893: 63, 785749: 2, 788053: 78, 788093: 67, 789550: 20, 794040: 66, 794249: 5, 796353: 63, 798621: 66, 800683: 40, 801522: 50, 804326: 25, 804514: 49, 805766: 75, 806473: 30, 809483: 11, 810992: 59, 815366: 6, 817354: 90, 818080: 29, 818145: 57, 818625: 36, 819371: 35, 823024: 81, 824470: 7, 825025: 92, 828106: 46, 829812: 33, 831517: 3, 834657: 22, 838806: 41, 844648: 77, 846493: 73, 848770: 14, 850843: 76, 851841: 69, 
853062: 73, 853852: 54, 855376: 37, 855595: 95, 856571: 8, 856909: 55, 859595: 68, 860100: 67, 861458: 78, 862586: 97, 864225: 39, 867426: 73, 872073: 13, 873517: 12, 874097: 1, 875409: 80, 878605: 40, 879666: 44, 880574: 5, 881038: 90, 885471: 23, 887483: 89, 887701: 19, 888681: 57, 888849: 91, 891368: 19, 892762: 18, 894367: 77, 900262: 63, 900723: 42, 903007: 7, 907614: 81, 913303: 3, 914122: 44, 917228: 83, 918194: 59, 919442: 59, 920189: 67, 920637: 25, 929339: 27, 929695: 8, 930968: 80, 931341: 80, 932216: 55, 935001: 96, 939849: 98, 940316: 65, 942002: 35, 944394: 74, 946605: 30, 946923: 65, 947001: 56, 947361: 81, 948490: 26, 949268: 97, 957808: 20, 958338: 93, 958366: 83, 960459: 38, 960574: 94, 960706: 26, 962481: 82, 962881: 32, 974633: 62, 978642: 26, 982009: 18, 985011: 69, 986651: 65, 989052: 48, 991466: 2, 991785: 18, 998539: 25, 999169: 39}, + {626: 4, 1215: 97, 4282: 47, 4992: 16, 8673: 79, 16710: 62, 16754: 35, 17182: 13, 18255: 29, 19095: 17, 21460: 68, 21897: 80, 22677: 95, 24337: 21, 24678: 69, 27032: 5, 27105: 77, 28859: 48, 29641: 5, 31407: 76, 31958: 11, 32397: 38, 33477: 26, 34649: 98, 35625: 29, 37398: 24, 37660: 40, 40579: 8, 40737: 54, 41132: 64, 41742: 44, 46326: 79, 46758: 71, 47012: 72, 50947: 29, 51067: 44, 53108: 41, 54587: 9, 59717: 49, 60435: 86, 61016: 58, 61063: 48, 63297: 35, 63764: 51, 64678: 41, 67596: 49, 67798: 52, 70043: 47, 70124: 17, 72118: 24, 74100: 75, 76443: 99, 79540: 82, 80669: 35, 83400: 91, 87551: 48, 87931: 53, 88254: 58, 89937: 45, 90492: 34, 92074: 70, 95555: 57, 97925: 5, 98126: 4, 98693: 68, 98966: 64, 99397: 23, 102458: 6, 102706: 58, 105193: 90, 107330: 40, 112289: 91, 112474: 50, 114256: 30, 122125: 41, 123459: 2, 127672: 10, 131020: 73, 131986: 41, 132112: 75, 133285: 85, 133813: 30, 133868: 80, 135509: 30, 138191: 85, 138457: 43, 140546: 0, 141218: 61, 144896: 22, 145847: 75, 145920: 86, 146681: 16, 147062: 46, 147312: 9, 148461: 32, 149789: 39, 150083: 28, 150596: 95, 153651: 11, 154954: 64, 156735: 
16, 157418: 47, 157798: 84, 158922: 69, 159154: 31, 162653: 0, 162881: 70, 164764: 61, 166894: 59, 167973: 82, 169756: 87, 170359: 60, 170974: 14, 171132: 74, 173961: 13, 175303: 40, 177682: 76, 178955: 56, 179311: 55, 181195: 80, 182698: 12, 182702: 34, 184548: 43, 185643: 66, 185874: 43, 186582: 39, 188016: 52, 191234: 41, 191425: 83, 193308: 38, 194667: 7, 195084: 16, 198861: 64, 198917: 39, 200500: 63, 203327: 2, 206104: 8, 208417: 34, 210463: 23, 210613: 48, 210957: 16, 211002: 70, 214479: 67, 215557: 33, 216479: 15, 218689: 30, 219908: 25, 220226: 29, 222605: 95, 223923: 69, 226020: 56, 227822: 60, 232163: 45, 232526: 9, 232992: 49, 233750: 22, 234050: 82, 240520: 4, 241323: 16, 244949: 26, 246464: 51, 248307: 73, 249528: 28, 250186: 47, 253073: 35, 253567: 12, 254223: 67, 254339: 65, 257071: 50, 260965: 38, 263213: 67, 267681: 56, 269120: 27, 272202: 66, 274700: 4, 278661: 40, 278855: 28, 279453: 84, 279914: 41, 282197: 49, 285297: 78, 290586: 30, 293730: 88, 295843: 96, 299952: 97, 300088: 35, 301501: 3, 302911: 70, 303074: 75, 305127: 97, 308955: 22, 310225: 30, 313627: 17, 313707: 22, 316223: 75, 316968: 96, 317253: 39, 319278: 19, 320705: 55, 321172: 78, 323177: 22, 327983: 29, 328605: 5, 330557: 29, 331869: 20, 332723: 13, 334571: 4, 334820: 79, 334981: 55, 335056: 63, 335991: 33, 337742: 20, 341333: 53, 341887: 89, 341915: 27, 343078: 26, 343458: 92, 344361: 94, 345990: 18, 346134: 1, 346809: 94, 348618: 66, 348929: 45, 350786: 28, 351353: 3, 351472: 94, 351573: 38, 354058: 20, 354817: 2, 358183: 7, 359330: 55, 359703: 10, 361334: 78, 361834: 32, 362474: 24, 363788: 42, 364265: 48, 366030: 28, 366112: 42, 367886: 90, 372350: 6, 372595: 3, 374984: 92, 375299: 18, 381014: 69, 382178: 82, 384471: 48, 385468: 41, 385854: 58, 387621: 11, 388685: 61, 389471: 11, 389717: 4, 389818: 34, 392016: 77, 392737: 2, 393551: 4, 396826: 22, 400407: 9, 401466: 14, 402241: 78, 402301: 96, 403336: 17, 404088: 66, 404756: 82, 406138: 93, 410272: 42, 410812: 70, 413961: 64, 
414543: 73, 415612: 36, 416940: 33, 418690: 84, 418788: 67, 419159: 23, 422605: 34, 429026: 2, 429191: 71, 432953: 1, 433082: 33, 435652: 31, 439214: 85, 440654: 41, 440672: 51, 440916: 35, 443084: 86, 445788: 32, 446002: 27, 446476: 78, 448001: 69, 449216: 3, 450390: 84, 450482: 26, 455600: 79, 457238: 7, 457454: 64, 458114: 92, 458290: 72, 463324: 49, 463599: 82, 464053: 2, 464101: 62, 464140: 82, 465682: 0, 465721: 4, 466769: 2, 467268: 31, 468004: 42, 468018: 78, 469432: 89, 470699: 91, 472833: 84, 473668: 2, 474127: 59, 475801: 98, 479999: 18, 482647: 30, 482936: 10, 488119: 95, 488286: 12, 488841: 82, 489518: 63, 493008: 1, 494296: 63, 496669: 61, 497907: 33, 500054: 33, 506271: 94, 507429: 22, 507685: 55, 508815: 16, 510481: 3, 510597: 51, 511067: 42, 511380: 79, 511630: 71, 512138: 30, 512932: 46, 516573: 19, 518398: 1, 519970: 23, 520067: 60, 526069: 95, 526462: 15, 528428: 42, 530236: 99, 533122: 14, 533716: 86, 536229: 50, 538642: 75, 543387: 11, 545618: 67, 547254: 27, 549269: 78, 550523: 50, 552648: 47, 553543: 64, 553799: 83, 555796: 5, 562616: 99, 563844: 15, 564494: 72, 570264: 96, 570444: 18, 570558: 75, 575175: 64, 576404: 64, 576566: 77, 577338: 17, 582891: 9, 585850: 26, 586049: 29, 588927: 4, 589299: 2, 590058: 60, 591401: 34, 596586: 72, 598075: 44, 598492: 78, 599712: 32, 600551: 9, 601221: 17, 602588: 24, 603878: 59, 605047: 74, 607018: 99, 611930: 57, 612305: 35, 613268: 2, 613910: 59, 613916: 86, 615062: 45, 615178: 31, 616078: 2, 617673: 64, 618567: 76, 618752: 75, 618808: 90, 618879: 79, 618958: 73, 620672: 63, 621198: 0, 622181: 22, 623897: 1, 624263: 49, 624905: 14, 626232: 11, 627065: 22, 629632: 79, 630041: 61, 630177: 54, 630731: 76, 631520: 71, 633599: 75, 637302: 72, 637962: 14, 638163: 6, 640553: 92, 641160: 32, 641339: 71, 643097: 67, 647764: 71, 647909: 51, 652821: 70, 653069: 90, 653646: 32, 654768: 36, 654803: 25, 655100: 98, 657934: 37, 660397: 44, 660556: 66, 661149: 88, 661998: 34, 662464: 50, 665998: 98, 668396: 83, 
668815: 35, 669281: 56, 671361: 90, 672142: 11, 675393: 60, 676746: 89, 677911: 19, 679743: 92, 680500: 27, 682534: 53, 683111: 4, 683130: 46, 683773: 3, 684168: 20, 687875: 19, 689655: 97, 689932: 82, 690712: 3, 692409: 44, 692641: 21, 693523: 45, 695441: 80, 696912: 54, 700356: 18, 700393: 52, 707969: 89, 709747: 40, 714389: 19, 715767: 11, 717267: 77, 718293: 78, 719453: 18, 720103: 60, 721830: 75, 723964: 56, 725530: 74, 727159: 67, 727845: 85, 728035: 25, 730178: 64, 731027: 74, 734417: 37, 736097: 76, 736714: 49, 737586: 70, 737631: 33, 740282: 97, 742273: 20, 742813: 1, 743057: 62, 743208: 59, 744490: 30, 746255: 57, 747929: 83, 748043: 23, 749269: 89, 749802: 57, 750700: 96, 751196: 89, 751870: 18, 753282: 6, 753377: 59, 754568: 25, 755985: 40, 756371: 53, 757721: 93, 757930: 1, 758147: 40, 759268: 70, 761419: 41, 761622: 99, 762093: 68, 762300: 86, 762776: 46, 764724: 55, 769993: 63, 770541: 99, 771074: 6, 771532: 99, 772563: 56, 776564: 51, 780088: 67, 780146: 25, 780386: 26, 780779: 71, 781521: 1, 781713: 40, 781939: 26, 782495: 68, 783661: 82, 785751: 9, 786950: 15, 789963: 91, 794227: 68, 794280: 22, 796429: 65, 798287: 22, 799662: 19, 802968: 82, 803245: 43, 804296: 19, 804368: 50, 806702: 94, 807619: 60, 808326: 0, 810441: 65, 811557: 40, 812486: 83, 812985: 89, 814157: 56, 815018: 22, 815938: 19, 817555: 3, 821683: 89, 821858: 50, 822501: 24, 823489: 99, 825220: 74, 830742: 22, 832073: 42, 834352: 15, 836171: 81, 836898: 4, 837696: 53, 841127: 54, 841884: 80, 845892: 31, 847361: 65, 848418: 88, 848609: 93, 848964: 15, 850121: 86, 850967: 3, 851852: 89, 852763: 61, 855836: 64, 860628: 7, 861953: 99, 864237: 80, 867419: 90, 873654: 53, 873732: 11, 874185: 70, 875193: 55, 876216: 76, 876319: 77, 878259: 92, 878964: 3, 880238: 77, 880850: 42, 881557: 62, 882027: 24, 883147: 39, 884923: 25, 885634: 29, 886317: 41, 891012: 92, 892873: 57, 894854: 73, 895071: 33, 896893: 97, 897192: 97, 898378: 94, 898628: 56, 899431: 38, 899561: 98, 901710: 84, 901942: 
19, 902196: 21, 902253: 32, 903668: 74, 903980: 81, 906099: 90, 908488: 42, 914516: 35, 916587: 68, 917276: 79, 922940: 78, 924988: 83, 928679: 25, 931279: 72, 932462: 37, 932735: 39, 932744: 52, 933022: 78, 937473: 9, 940263: 76, 940690: 68, 942270: 36, 944449: 33, 946313: 71, 951278: 99, 955877: 96, 957950: 32, 960114: 44, 960502: 8, 966016: 44, 967150: 47, 969165: 71, 976663: 85, 977799: 24, 978247: 96, 978858: 14, 981123: 32, 982374: 91, 983105: 11, 986752: 27, 986966: 83, 990861: 37, 992341: 64, 993139: 69, 994726: 20, 996206: 32}, + {3196: 2, 3500: 50, 4025: 96, 4828: 59, 5771: 34, 5792: 91, 6045: 81, 6177: 63, 6258: 45, 9181: 8, 9930: 40, 11243: 58, 11893: 44, 11978: 39, 12327: 90, 13921: 91, 16258: 30, 17977: 29, 18730: 90, 18945: 3, 18958: 43, 19226: 22, 19352: 70, 19620: 83, 19739: 70, 19805: 65, 19941: 50, 20661: 4, 20803: 99, 21587: 77, 21921: 79, 22531: 83, 24524: 18, 26092: 52, 26294: 57, 27249: 52, 30717: 53, 31299: 57, 31537: 39, 32420: 47, 33758: 93, 35503: 29, 35538: 97, 38009: 93, 38614: 50, 38735: 56, 40714: 43, 40778: 58, 40988: 69, 42952: 48, 44535: 12, 45822: 63, 47412: 7, 47770: 90, 48446: 89, 48527: 90, 50501: 88, 50721: 17, 50806: 47, 51862: 35, 53816: 78, 54867: 78, 55295: 40, 55723: 3, 56192: 52, 56787: 46, 56906: 71, 58923: 37, 59614: 42, 62140: 63, 62684: 64, 63166: 29, 63183: 71, 63481: 5, 66314: 3, 68609: 11, 68841: 46, 70064: 11, 71883: 42, 71983: 22, 72523: 13, 72563: 1, 72827: 16, 73705: 55, 74270: 50, 74872: 59, 76087: 6, 76830: 17, 77794: 27, 77912: 54, 79147: 13, 79289: 20, 81308: 58, 81825: 84, 84443: 90, 84878: 15, 86218: 80, 88702: 74, 88730: 58, 91482: 14, 91941: 66, 92131: 48, 93826: 29, 94193: 64, 94431: 9, 96305: 46, 96890: 24, 97119: 23, 98156: 38, 99411: 55, 99605: 23, 100346: 73, 102358: 80, 102920: 22, 103603: 57, 103791: 32, 105122: 71, 107978: 82, 108439: 42, 109098: 73, 109209: 26, 110009: 9, 111365: 64, 111384: 45, 111458: 5, 111733: 90, 112366: 71, 113047: 76, 113989: 62, 114080: 14, 115645: 31, 116332: 56, 
117110: 62, 117685: 9, 118125: 73, 118279: 14, 118844: 24, 119180: 34, 119378: 26, 120958: 85, 121005: 83, 121302: 24, 121316: 37, 121496: 53, 121692: 31, 122346: 46, 122769: 79, 124824: 53, 125306: 96, 126524: 66, 128509: 73, 129340: 58, 129413: 12, 130012: 5, 130745: 2, 131634: 35, 131998: 33, 132567: 68, 133759: 30, 135160: 56, 136473: 69, 137048: 74, 138009: 4, 138680: 66, 141057: 83, 141425: 92, 142411: 31, 142494: 7, 142682: 95, 144111: 15, 145930: 35, 147496: 91, 147922: 12, 148113: 84, 148542: 85, 149184: 40, 150896: 95, 150902: 9, 151292: 60, 151437: 71, 151865: 49, 152935: 80, 153586: 78, 153901: 33, 154843: 34, 155238: 66, 157165: 2, 157316: 61, 157377: 89, 158144: 59, 160573: 42, 162031: 33, 162331: 88, 163695: 97, 164507: 35, 164777: 51, 167449: 65, 167555: 14, 168721: 19, 168945: 72, 170018: 26, 171007: 13, 171289: 36, 171701: 76, 172454: 13, 172627: 27, 173001: 38, 173760: 40, 173983: 71, 174599: 9, 174830: 70, 176149: 25, 176894: 31, 178044: 35, 178933: 23, 179913: 72, 181405: 63, 182498: 45, 182536: 55, 183228: 68, 184389: 41, 184436: 95, 185878: 89, 187157: 37, 188809: 84, 189593: 0, 190116: 81, 190287: 4, 192293: 7, 193201: 63, 193260: 74, 193373: 64, 193422: 7, 193458: 72, 195138: 61, 195467: 51, 196085: 6, 196563: 30, 198751: 72, 201078: 57, 202053: 63, 202860: 47, 203433: 71, 204779: 32, 205170: 21, 205261: 49, 206042: 46, 206525: 17, 206621: 15, 206879: 84, 207020: 11, 207427: 83, 209791: 16, 210618: 47, 210934: 21, 212836: 20, 214057: 82, 214444: 32, 216239: 26, 216240: 99, 216825: 69, 216853: 45, 219461: 79, 219656: 11, 220208: 95, 221189: 10, 222235: 8, 223163: 63, 224058: 1, 224534: 38, 225984: 73, 226233: 33, 227155: 99, 228053: 61, 230238: 40, 230333: 64, 230375: 28, 230876: 94, 232630: 7, 234447: 38, 234697: 47, 235382: 86, 235432: 66, 236949: 10, 237970: 28, 238603: 4, 238711: 81, 239012: 66, 239300: 32, 240996: 4, 241736: 61, 243012: 69, 243360: 64, 244699: 97, 245018: 65, 245810: 72, 246812: 6, 247396: 66, 247617: 74, 247896: 86, 
248105: 45, 250180: 64, 250370: 59, 250765: 4, 253052: 4, 253380: 15, 253562: 55, 253883: 0, 253951: 25, 254552: 50, 255452: 75, 255615: 99, 255673: 12, 255933: 76, 257173: 54, 257481: 46, 258078: 19, 258742: 64, 259739: 27, 261310: 6, 263000: 13, 264738: 60, 265241: 17, 267555: 54, 269652: 29, 269655: 47, 270503: 73, 271300: 53, 272901: 39, 273819: 60, 275563: 19, 275773: 76, 276344: 5, 277072: 62, 279041: 67, 280097: 30, 282515: 45, 283061: 15, 284653: 36, 284655: 98, 284704: 23, 285310: 97, 286033: 55, 286144: 13, 287504: 21, 287596: 13, 287716: 31, 287811: 21, 288778: 74, 290032: 2, 290883: 33, 291782: 54, 291919: 12, 292683: 24, 294140: 72, 294376: 67, 295202: 33, 297014: 39, 297183: 6, 301737: 69, 304423: 88, 305101: 20, 305919: 99, 306200: 84, 306219: 64, 306276: 45, 307179: 58, 308060: 33, 308841: 2, 309943: 94, 311902: 69, 311925: 54, 312457: 14, 313263: 24, 314436: 70, 317446: 98, 319647: 62, 320307: 21, 320598: 91, 320622: 49, 320681: 53, 321205: 37, 321248: 84, 321966: 23, 324167: 7, 324938: 58, 325599: 82, 325722: 15, 326081: 10, 326736: 4, 327178: 88, 327474: 23, 327885: 21, 327907: 99, 328624: 19, 329017: 51, 330069: 53, 330341: 87, 331515: 41, 331816: 40, 335162: 61, 336248: 92, 336496: 19, 338903: 24, 339906: 25, 341356: 67, 344464: 50, 345652: 39, 346582: 82, 347054: 70, 347075: 94, 349530: 27, 350586: 59, 350616: 33, 351040: 50, 351336: 64, 352428: 72, 352849: 66, 353556: 47, 353838: 79, 353882: 40, 354065: 6, 357263: 0, 359913: 90, 360646: 1, 360827: 49, 362515: 13, 362555: 71, 362565: 3, 362881: 33, 363215: 64, 363261: 38, 363432: 35, 364498: 3, 366541: 59, 367317: 0, 367822: 39, 368036: 0, 368098: 62, 369820: 25, 371266: 95, 372082: 24, 373088: 63, 373758: 59, 373841: 30, 374497: 68, 377627: 77, 378651: 9, 379095: 58, 379588: 35, 379996: 49, 380544: 49, 382564: 1, 382576: 44, 383065: 19, 383611: 22, 385211: 56, 386980: 7, 387673: 12, 388115: 68, 388263: 94, 389705: 74, 391188: 29, 391251: 55, 391572: 92, 392966: 68, 393019: 33, 393344: 87, 
395517: 42, 395600: 71, 395964: 44, 396327: 6, 397115: 98, 398451: 70, 399886: 3, 400325: 61, 401504: 80, 401770: 65, 403112: 88, 403328: 69, 403821: 39, 404026: 45, 405161: 84, 406779: 85, 406962: 12, 407086: 87, 407890: 31, 408630: 77, 409604: 24, 412421: 48, 412893: 55, 414003: 92, 415826: 30, 417779: 9, 418999: 54, 420775: 50, 423245: 22, 423289: 17, 423712: 88, 424255: 95, 425507: 30, 425755: 18, 426858: 66, 427414: 86, 429301: 73, 429710: 19, 431244: 2, 433624: 22, 434443: 94, 434631: 83, 435803: 91, 436602: 79, 438881: 45, 439310: 26, 439380: 92, 439998: 35, 440513: 18, 441054: 94, 442435: 68, 443982: 42, 445907: 65, 446710: 98, 448936: 64, 449327: 25, 449508: 80, 449660: 91, 450394: 71, 450493: 82, 450628: 48, 451928: 19, 452904: 58, 453008: 70, 453264: 77, 453420: 73, 454258: 46, 459182: 32, 459968: 32, 461678: 54, 462062: 43, 462888: 42, 463711: 16, 465441: 74, 465699: 53, 466367: 15, 467406: 30, 467705: 55, 468791: 88, 468940: 44, 469995: 79, 471724: 75, 471996: 24, 472242: 19, 472964: 60, 474428: 95, 475411: 18, 476356: 81, 479528: 94, 480106: 87, 480361: 32, 481829: 49, 483272: 44, 483685: 39, 484804: 16, 485065: 69, 485130: 66, 485639: 54, 487430: 23, 487458: 65, 487705: 49, 489199: 26, 489512: 37, 490168: 15, 492921: 60, 495206: 15, 495816: 96, 500206: 58, 501569: 16, 504214: 39, 504231: 27, 504393: 52, 506010: 74, 508354: 83, 508463: 83, 509519: 94, 510306: 28, 511755: 90, 512454: 1, 514843: 84, 515021: 17, 515109: 96, 516162: 4, 516579: 10, 516888: 10, 517608: 24, 518272: 70, 520054: 25, 520494: 17, 521619: 14, 521643: 42, 522093: 93, 524125: 50, 526285: 24, 527009: 28, 528813: 92, 532155: 55, 533659: 15, 533674: 43, 535041: 83, 535043: 27, 535513: 55, 537803: 78, 539091: 14, 541962: 24, 542850: 26, 543429: 65, 543814: 71, 546125: 48, 546494: 9, 548402: 50, 548766: 37, 549661: 13, 550084: 85, 550600: 38, 551393: 44, 553448: 63, 554302: 19, 554664: 96, 554843: 61, 556240: 58, 556636: 44, 556761: 0, 557982: 53, 558153: 35, 558260: 7, 559718: 72, 
560220: 67, 561672: 66, 561814: 9, 562767: 48, 563270: 41, 563808: 67, 565781: 46, 565796: 14, 566628: 21, 566830: 69, 568859: 57, 570892: 22, 571022: 83, 571498: 12, 572686: 46, 573747: 59, 576618: 38, 583046: 58, 584131: 98, 584490: 96, 584708: 25, 585665: 18, 587000: 81, 588040: 34, 588692: 67, 588713: 5, 589348: 52, 589410: 50, 589430: 1, 589496: 94, 590494: 12, 590771: 42, 592833: 98, 595858: 83, 596849: 17, 597571: 3, 598489: 83, 598707: 0, 600061: 63, 600159: 62, 600791: 19, 601806: 76, 603048: 46, 603823: 25, 605614: 67, 605636: 6, 607538: 74, 608079: 34, 608201: 96, 608422: 95, 610549: 91, 612199: 57, 612371: 70, 613011: 53, 614329: 24, 614447: 59, 615298: 78, 615317: 80, 616274: 85, 617466: 49, 619328: 24, 620058: 32, 620521: 37, 621474: 3, 621864: 45, 622458: 87, 622944: 81, 623921: 83, 624389: 73, 624491: 15, 626085: 35, 627279: 20, 627363: 60, 627737: 30, 629631: 64, 631681: 86, 632522: 7, 632631: 7, 632847: 27, 633451: 49, 633835: 61, 634929: 47, 635783: 37, 636857: 3, 637819: 56, 637862: 91, 639413: 80, 639638: 45, 640404: 32, 642957: 12, 643324: 3, 643972: 2, 645006: 8, 645478: 7, 645600: 98, 645784: 42, 646298: 80, 646832: 50, 647389: 32, 648498: 92, 650908: 42, 652456: 92, 653928: 49, 654318: 25, 656792: 6, 656938: 33, 657310: 34, 658275: 39, 658726: 45, 659744: 81, 661068: 35, 662418: 22, 663941: 78, 664825: 11, 666431: 82, 666998: 68, 667040: 30, 669435: 13, 671004: 89, 671603: 49, 672106: 20, 672217: 75, 672691: 24, 672838: 54, 673250: 95, 673418: 12, 673424: 59, 674730: 39, 674975: 70, 675510: 50, 676039: 70, 676279: 69, 676561: 19, 680801: 42, 680843: 82, 681554: 42, 681787: 76, 682182: 66, 684033: 45, 686126: 24, 687206: 83, 687225: 85, 688317: 46, 688767: 41, 689581: 48, 689747: 87, 691643: 61, 692099: 96, 692458: 2, 693936: 65, 694449: 57, 695024: 46, 695093: 59, 695335: 61, 695870: 28, 696362: 77, 696772: 36, 696871: 21, 696929: 87, 697292: 84, 697297: 16, 698016: 46, 698040: 96, 699253: 21, 699532: 35, 700752: 75, 700795: 99, 701665: 32, 
704898: 72, 705250: 19, 706104: 48, 708118: 89, 708353: 47, 709176: 21, 709965: 12, 709968: 61, 710059: 20, 712106: 81, 712785: 79, 713546: 53, 713713: 65, 714116: 16, 715053: 57, 716185: 31, 716228: 85, 717083: 43, 718554: 19, 718782: 37, 720606: 92, 721080: 25, 721361: 70, 722309: 8, 723516: 57, 723545: 69, 723915: 42, 725507: 77, 726232: 99, 726574: 73, 727071: 25, 729286: 20, 729945: 19, 729981: 15, 730846: 73, 731931: 16, 734999: 68, 736330: 71, 737658: 69, 737724: 61, 739121: 64, 739650: 88, 740044: 27, 740050: 4, 740358: 89, 742924: 89, 743269: 38, 743791: 5, 746270: 64, 752742: 38, 753307: 64, 756725: 10, 758739: 23, 759048: 13, 759135: 41, 760341: 31, 760425: 89, 761078: 19, 761587: 21, 764351: 47, 765118: 86, 766570: 80, 766910: 82, 770947: 88, 770978: 88, 771944: 30, 772947: 27, 773219: 58, 774367: 73, 775969: 26, 776650: 8, 777389: 12, 777822: 2, 781316: 98, 781363: 96, 781636: 24, 782471: 3, 783454: 97, 783746: 38, 784695: 97, 785597: 54, 785695: 79, 785699: 12, 786888: 94, 787495: 29, 788007: 55, 788650: 16, 789150: 37, 789170: 84, 789618: 90, 789789: 54, 790289: 35, 792670: 13, 792794: 44, 792969: 10, 793460: 3, 795356: 99, 795893: 83, 796339: 13, 796390: 34, 796636: 72, 796669: 19, 797023: 91, 798715: 17, 799182: 28, 799803: 8, 801657: 7, 802376: 51, 804444: 36, 806527: 44, 807094: 95, 807636: 72, 808867: 64, 809011: 18, 809242: 88, 809383: 74, 811766: 90, 812207: 69, 812857: 24, 813891: 88, 814296: 41, 815121: 38, 815499: 93, 815617: 69, 816483: 69, 817010: 52, 818298: 94, 819372: 39, 819471: 82, 821008: 46, 821171: 80, 821793: 86, 822149: 96, 822250: 78, 823081: 71, 823436: 29, 824209: 99, 824285: 60, 824724: 70, 826736: 57, 828963: 60, 829074: 76, 829186: 79, 830311: 77, 830451: 11, 832395: 82, 833056: 3, 834124: 10, 836137: 80, 836943: 35, 837218: 18, 837873: 64, 838013: 39, 839829: 11, 841031: 11, 844084: 11, 844612: 14, 846249: 2, 846930: 1, 847466: 18, 847514: 22, 849132: 14, 850671: 48, 852570: 43, 853294: 48, 855456: 6, 856214: 20, 856463: 
56, 857088: 30, 858120: 12, 858601: 27, 858833: 99, 859539: 56, 859817: 99, 860210: 48, 860930: 16, 861238: 64, 863113: 46, 863592: 2, 863611: 78, 864283: 83, 864341: 17, 866885: 82, 868534: 83, 870346: 65, 871209: 71, 871595: 22, 874467: 2, 875148: 2, 875614: 55, 877808: 80, 878682: 69, 878960: 35, 879137: 6, 880326: 50, 880702: 35, 881202: 43, 881210: 98, 881298: 0, 882895: 64, 883347: 38, 884521: 9, 885228: 39, 886412: 56, 886878: 53, 887199: 41, 889319: 11, 889769: 94, 890299: 23, 890971: 1, 892290: 14, 892898: 87, 894507: 43, 894745: 49, 895501: 24, 895736: 73, 895790: 39, 898191: 65, 898460: 71, 898494: 20, 899431: 96, 899853: 83, 900099: 32, 901015: 41, 901026: 69, 901092: 25, 902076: 48, 902464: 23, 903491: 25, 904523: 49, 904543: 58, 905080: 8, 905968: 8, 906382: 85, 906747: 74, 907923: 47, 909160: 56, 909197: 59, 910263: 85, 910813: 21, 910868: 55, 911081: 16, 911548: 42, 912112: 86, 913144: 70, 913201: 84, 915325: 9, 915335: 49, 915381: 52, 915604: 93, 915795: 17, 916449: 87, 917062: 9, 917495: 93, 918019: 33, 919594: 80, 919609: 36, 920876: 78, 921103: 74, 921277: 74, 922831: 38, 923770: 25, 924988: 62, 925464: 28, 925719: 9, 926693: 84, 928358: 50, 928495: 56, 929972: 64, 929981: 52, 932722: 55, 932723: 30, 933161: 15, 933983: 77, 934528: 4, 935592: 13, 937158: 33, 937825: 80, 938419: 46, 938808: 72, 939317: 67, 939333: 97, 939707: 48, 939834: 68, 940873: 98, 940951: 49, 942556: 27, 942634: 36, 942720: 62, 946001: 69, 946345: 57, 947218: 0, 948572: 78, 949488: 53, 950393: 15, 955674: 20, 956473: 87, 956840: 80, 957170: 94, 958539: 66, 958883: 69, 959060: 40, 960599: 65, 961679: 5, 961855: 18, 962650: 60, 963611: 22, 964189: 51, 964277: 19, 964418: 95, 964511: 66, 965853: 73, 966572: 8, 966605: 88, 966772: 7, 968259: 80, 968671: 63, 970289: 31, 970567: 7, 976469: 18, 979131: 4, 979666: 27, 982385: 9, 982537: 67, 982895: 54, 983250: 2, 984342: 9, 986437: 26, 986696: 66, 987249: 88, 987269: 94, 988214: 40, 988944: 91, 991525: 84, 991703: 83, 992033: 37, 
992174: 99, 992884: 42, 993041: 80, 993310: 46, 994541: 37, 995171: 74, 995195: 36, 995800: 2, 996174: 72, 998344: 3, 998629: 36}, + {4463: 25, 8728: 67, 8822: 73, 10441: 77, 11210: 2, 22646: 14, 23393: 16, 23778: 34, 24469: 17, 28910: 3, 30667: 96, 32551: 50, 35208: 26, 39387: 73, 41225: 60, 44021: 74, 44218: 45, 48490: 32, 58798: 29, 59728: 39, 65470: 63, 66986: 78, 67916: 36, 73869: 73, 75073: 25, 79982: 64, 81732: 24, 82403: 81, 86292: 72, 87434: 52, 92467: 41, 92814: 87, 97831: 19, 98684: 50, 105952: 75, 107075: 46, 107130: 13, 109199: 81, 109567: 43, 116612: 83, 120843: 6, 121324: 20, 121637: 24, 124351: 93, 126743: 96, 128936: 80, 132183: 51, 134728: 99, 135536: 29, 135727: 88, 143362: 11, 145495: 72, 145542: 86, 145705: 35, 147157: 28, 147670: 80, 149794: 15, 155306: 13, 158739: 75, 159068: 74, 159735: 66, 161048: 66, 162105: 0, 164279: 33, 169814: 68, 175252: 13, 176420: 76, 179391: 74, 179498: 47, 179957: 82, 180973: 33, 183789: 86, 184226: 55, 186466: 41, 192195: 81, 192554: 89, 193667: 87, 194549: 18, 197578: 50, 202018: 50, 205540: 53, 208702: 99, 209162: 90, 209316: 88, 209720: 89, 214134: 91, 215018: 76, 215559: 35, 218498: 59, 226456: 70, 228432: 8, 229084: 95, 230587: 5, 233548: 38, 234780: 73, 234975: 85, 238547: 8, 245717: 15, 247044: 60, 248354: 24, 248598: 86, 250325: 44, 256550: 43, 261021: 52, 263690: 17, 268759: 53, 271943: 72, 274162: 72, 274445: 16, 274673: 50, 274967: 65, 276222: 84, 277653: 13, 280027: 25, 280864: 18, 283548: 5, 285036: 41, 287183: 29, 291069: 3, 296323: 34, 299554: 24, 300354: 8, 300945: 0, 302540: 13, 302817: 25, 303439: 10, 307934: 96, 310248: 6, 311349: 26, 311458: 25, 311510: 51, 315106: 76, 315160: 62, 317165: 22, 318056: 62, 320662: 75, 321391: 41, 322892: 17, 324065: 22, 324556: 37, 324607: 9, 325272: 23, 329228: 69, 330817: 84, 332307: 32, 332772: 79, 335418: 39, 336495: 35, 336804: 16, 339802: 93, 341736: 44, 343345: 90, 344515: 35, 344674: 70, 346924: 35, 349077: 75, 349320: 51, 349877: 66, 355516: 89, 355868: 
13, 356726: 72, 357880: 30, 357958: 4, 359718: 37, 359933: 2, 361255: 5, 362240: 42, 366534: 35, 367097: 44, 375973: 73, 376953: 81, 379730: 30, 379844: 37, 382443: 18, 383146: 13, 383276: 38, 386130: 91, 390958: 29, 393220: 77, 394491: 40, 396914: 59, 397902: 10, 400433: 0, 400704: 82, 401796: 19, 407680: 98, 411813: 93, 419448: 8, 420231: 85, 420466: 21, 421094: 57, 421655: 83, 424351: 11, 430787: 31, 438613: 3, 442082: 10, 444463: 95, 446469: 73, 448309: 50, 448478: 8, 448645: 5, 449666: 55, 451752: 67, 451820: 71, 452419: 4, 456267: 76, 456413: 33, 458714: 85, 459974: 40, 460735: 44, 463042: 13, 465411: 2, 466338: 21, 469392: 99, 469913: 6, 471895: 18, 479697: 48, 481943: 0, 485915: 92, 488510: 48, 489810: 35, 491861: 99, 492798: 25, 493987: 14, 497878: 71, 498846: 47, 500406: 31, 501034: 23, 502198: 9, 507185: 80, 512670: 93, 514487: 96, 515368: 90, 516234: 74, 517250: 80, 517665: 81, 521326: 29, 522472: 83, 525473: 90, 525503: 23, 526587: 12, 528304: 37, 528352: 0, 530535: 74, 530864: 16, 531848: 43, 531871: 6, 532801: 97, 534974: 23, 538160: 37, 541552: 5, 542436: 22, 542712: 38, 548966: 76, 555057: 27, 570013: 77, 573292: 98, 574608: 26, 575009: 11, 578180: 97, 579345: 43, 581704: 44, 585168: 84, 586215: 91, 587862: 43, 591016: 15, 599232: 95, 601506: 73, 606349: 76, 610547: 61, 610622: 47, 610843: 19, 611257: 52, 611510: 71, 613505: 74, 616283: 33, 617078: 28, 623013: 95, 628573: 83, 629352: 4, 631988: 57, 632415: 84, 633522: 1, 633686: 33, 635837: 7, 636947: 91, 639590: 86, 639854: 43, 641703: 6, 642557: 52, 643398: 84, 647355: 42, 648707: 55, 653410: 46, 655850: 53, 656873: 77, 658934: 55, 660628: 38, 666912: 80, 666917: 15, 667559: 24, 667837: 51, 668041: 17, 672392: 93, 676340: 63, 677238: 89, 679430: 63, 679981: 49, 683124: 79, 690441: 52, 692611: 36, 695941: 95, 697618: 3, 701975: 80, 702574: 38, 704298: 32, 705294: 93, 707925: 37, 714736: 51, 717038: 30, 717209: 42, 724020: 36, 724545: 91, 726644: 79, 728321: 75, 730517: 61, 734790: 84, 735514: 34, 
738238: 32, 738259: 75, 739419: 41, 744659: 36, 744725: 59, 745915: 61, 748392: 20, 748876: 19, 749037: 69, 749583: 68, 752833: 12, 756355: 98, 767693: 95, 769892: 18, 771909: 61, 772781: 25, 774624: 89, 785322: 18, 785984: 9, 790840: 68, 793505: 9, 793647: 37, 794220: 38, 795799: 30, 800655: 8, 802693: 12, 805801: 39, 809799: 83, 812749: 72, 823231: 98, 823911: 90, 828948: 67, 830379: 92, 831195: 80, 833136: 73, 833659: 87, 836201: 47, 838950: 79, 840764: 55, 841986: 0, 843606: 36, 844719: 41, 846505: 22, 858598: 31, 859293: 30, 863795: 69, 864847: 78, 865249: 32, 866680: 35, 867614: 73, 868004: 93, 869344: 27, 869623: 20, 873628: 49, 876331: 20, 879452: 43, 879462: 76, 882430: 63, 887383: 74, 887511: 52, 892797: 77, 893194: 4, 894587: 39, 897336: 35, 905897: 33, 905979: 26, 906768: 69, 909776: 82, 913035: 90, 914162: 27, 915750: 58, 917363: 68, 920725: 57, 921916: 52, 922191: 15, 927830: 17, 929848: 18, 930481: 1, 931565: 1, 931822: 29, 932291: 62, 934950: 97, 935858: 54, 936010: 74, 939671: 7, 939786: 19, 940419: 39, 944655: 15, 945416: 66, 948601: 19, 954225: 22, 955188: 75, 960172: 62, 963793: 47, 965822: 37, 969991: 59, 970465: 45, 974633: 74, 979331: 52, 980567: 27, 981201: 83, 985460: 40, 986763: 39, 986793: 75, 988881: 71, 989450: 96, 990099: 69, 990538: 29, 990901: 75, 992558: 53, 992728: 96, 994003: 63, 995089: 56, 995619: 8, 995763: 27, 996012: 85, 997513: 72}, + {1304: 85, 1346: 43, 1380: 93, 2425: 58, 2843: 7, 3202: 44, 4333: 95, 4699: 88, 5249: 46, 6258: 98, 6810: 63, 8799: 17, 10289: 62, 10393: 37, 11161: 20, 11392: 50, 15249: 41, 15667: 97, 16427: 80, 19823: 63, 20465: 20, 21826: 82, 22926: 63, 23021: 86, 24004: 31, 24614: 81, 26069: 73, 27654: 54, 30572: 96, 31192: 85, 32302: 57, 33873: 47, 35017: 98, 36148: 68, 37424: 17, 38349: 91, 38848: 40, 41433: 10, 42488: 21, 43925: 53, 45692: 92, 46465: 13, 47633: 36, 47925: 71, 48596: 43, 49838: 90, 50829: 67, 51853: 84, 57151: 94, 57332: 28, 57939: 9, 58871: 27, 60843: 11, 62540: 53, 63990: 4, 64500: 39, 
65267: 14, 66426: 53, 68435: 43, 69572: 19, 69888: 64, 70579: 73, 70855: 61, 71446: 28, 71633: 22, 73019: 26, 78382: 57, 79052: 96, 81045: 91, 83994: 49, 84223: 49, 84267: 87, 85135: 35, 85918: 52, 88172: 12, 88560: 68, 93300: 38, 93801: 69, 97734: 91, 99439: 52, 99642: 28, 101397: 96, 111307: 77, 114223: 58, 115653: 74, 117997: 31, 118792: 96, 119010: 65, 120633: 42, 121213: 48, 124709: 46, 125439: 30, 126022: 29, 126806: 1, 127203: 84, 129813: 28, 130438: 95, 132123: 1, 137145: 4, 137959: 53, 138714: 55, 139464: 39, 141103: 1, 141754: 92, 142228: 58, 144393: 17, 145419: 22, 145999: 57, 148659: 32, 148762: 96, 151053: 40, 151896: 14, 152618: 22, 154508: 68, 154631: 14, 154657: 73, 155950: 84, 158963: 1, 160967: 74, 161061: 86, 165002: 83, 165496: 52, 166053: 84, 167768: 44, 168270: 58, 168872: 33, 172594: 72, 172936: 26, 173354: 46, 176629: 28, 177546: 88, 179933: 52, 179982: 52, 180236: 0, 180780: 20, 183160: 75, 184834: 95, 185091: 36, 190177: 40, 190462: 88, 190602: 45, 193828: 90, 194503: 19, 194581: 39, 200236: 45, 200523: 80, 203860: 39, 204283: 77, 206646: 25, 208862: 73, 211168: 33, 211249: 3, 212099: 71, 214477: 9, 214638: 87, 214953: 90, 217859: 5, 218155: 11, 218241: 28, 218570: 65, 218723: 12, 219328: 38, 219482: 23, 220355: 25, 222507: 36, 223103: 61, 226319: 1, 228637: 46, 231724: 98, 232038: 12, 232607: 66, 232638: 20, 235178: 21, 236315: 73, 237124: 56, 237856: 97, 240636: 29, 242349: 87, 242774: 41, 243746: 54, 246725: 43, 250211: 11, 250370: 7, 251999: 22, 252511: 50, 252933: 53, 253608: 99, 254126: 69, 254490: 34, 258255: 75, 260314: 56, 261248: 98, 261592: 47, 261682: 61, 262348: 47, 263594: 40, 265105: 61, 265624: 3, 265875: 78, 266786: 22, 267043: 64, 267427: 31, 271281: 34, 272217: 43, 272529: 91, 272699: 96, 278766: 35, 279667: 22, 279779: 47, 280385: 21, 280940: 6, 283446: 45, 283714: 79, 283839: 30, 287036: 95, 288250: 99, 288349: 91, 288702: 95, 291135: 55, 293883: 36, 293918: 34, 294182: 39, 294722: 24, 298055: 86, 298151: 56, 299127: 
88, 300579: 41, 304935: 9, 305474: 59, 308105: 67, 310650: 81, 313250: 22, 314294: 44, 316142: 66, 317733: 91, 319896: 10, 320232: 82, 320682: 25, 321154: 34, 321163: 40, 323380: 77, 326535: 3, 327867: 0, 330621: 83, 332381: 42, 334983: 47, 336543: 13, 337058: 25, 337704: 43, 339663: 30, 339738: 84, 340021: 49, 340292: 97, 341100: 96, 342171: 40, 345359: 71, 348287: 62, 349376: 85, 349916: 35, 351424: 59, 353308: 85, 355850: 11, 356003: 81, 357265: 61, 358914: 19, 359191: 90, 361119: 0, 362767: 44, 363146: 7, 364314: 98, 366799: 8, 367738: 84, 369518: 51, 370240: 48, 373120: 84, 373216: 76, 373628: 71, 374827: 60, 376061: 47, 377119: 83, 377498: 47, 380303: 84, 380328: 5, 381182: 21, 387060: 0, 387877: 68, 389645: 80, 397672: 81, 397876: 20, 400002: 65, 400441: 51, 400505: 3, 401574: 15, 402742: 40, 403189: 71, 403396: 86, 403638: 15, 406215: 18, 406867: 47, 408849: 14, 409941: 59, 410184: 22, 410288: 41, 410893: 58, 410957: 34, 410976: 56, 412159: 78, 413596: 23, 413685: 71, 413785: 30, 416249: 91, 418336: 3, 418443: 79, 420439: 23, 421542: 39, 424914: 50, 425137: 79, 425745: 32, 426693: 72, 428276: 81, 428577: 59, 429064: 26, 429585: 28, 430235: 54, 431555: 11, 432019: 57, 432370: 56, 437095: 12, 439638: 17, 441500: 69, 444730: 88, 445083: 9, 450432: 56, 451166: 33, 451336: 31, 455436: 27, 456078: 66, 456690: 55, 457042: 65, 457066: 94, 458004: 88, 460787: 24, 462569: 46, 463999: 89, 464509: 75, 465613: 93, 466809: 36, 467848: 60, 468796: 47, 471150: 94, 471818: 81, 472923: 32, 474883: 43, 475696: 66, 476678: 69, 477806: 61, 478808: 98, 478897: 11, 480046: 11, 482081: 21, 482241: 99, 484083: 87, 484323: 50, 488483: 61, 493611: 84, 498792: 50, 499061: 70, 500093: 76, 501143: 54, 501403: 28, 501843: 29, 502549: 63, 503633: 72, 505241: 2, 505315: 35, 507440: 92, 508240: 43, 508975: 8, 509576: 91, 509711: 6, 510440: 92, 514759: 61, 515102: 76, 515673: 26, 515863: 13, 516853: 17, 517287: 85, 517468: 3, 519447: 74, 520048: 5, 521186: 51, 524555: 7, 525257: 13, 526463: 
56, 527461: 49, 529642: 12, 530428: 97, 530950: 4, 536011: 71, 536443: 85, 537848: 95, 540989: 92, 544113: 21, 545217: 24, 545382: 62, 546716: 55, 546975: 29, 548471: 4, 548499: 94, 548553: 11, 549548: 74, 549685: 58, 550406: 8, 550619: 48, 550901: 66, 552497: 85, 554741: 36, 556581: 81, 559232: 65, 560673: 97, 567875: 94, 572495: 56, 572534: 87, 572944: 96, 573663: 8, 575246: 22, 575418: 35, 578043: 82, 580863: 75, 582135: 93, 582300: 29, 583753: 84, 588106: 28, 592246: 82, 596803: 24, 599280: 27, 600389: 87, 600461: 63, 604623: 47, 605295: 24, 605918: 26, 606650: 76, 608813: 40, 608824: 39, 611204: 44, 611459: 16, 612693: 93, 612819: 60, 614604: 10, 617213: 82, 617792: 4, 618135: 88, 623828: 74, 624700: 79, 626184: 92, 627484: 42, 628313: 37, 631080: 60, 632495: 61, 633402: 84, 634115: 88, 634666: 82, 637919: 49, 638316: 38, 638341: 49, 638595: 33, 638951: 56, 639226: 35, 642214: 30, 642942: 38, 644401: 27, 646165: 68, 649740: 61, 654153: 13, 654411: 17, 654510: 79, 654526: 17, 654865: 92, 656402: 5, 657430: 59, 658622: 93, 659908: 89, 666696: 68, 667481: 14, 667807: 69, 668617: 41, 670743: 39, 671897: 81, 673993: 38, 674062: 86, 677127: 83, 677155: 62, 677277: 98, 678332: 53, 679179: 70, 679853: 24, 680795: 10, 682279: 57, 683136: 57, 684407: 54, 687234: 23, 688990: 20, 689445: 94, 689727: 36, 690302: 35, 690375: 47, 691628: 53, 691636: 84, 692284: 86, 694604: 96, 695403: 0, 696193: 17, 697297: 53, 698628: 72, 699900: 12, 700310: 50, 700501: 13, 702035: 86, 703781: 64, 705544: 85, 705546: 97, 706974: 79, 710250: 86, 712243: 92, 712745: 61, 713879: 25, 714075: 70, 716519: 96, 716828: 50, 721119: 79, 722589: 19, 722770: 82, 722981: 77, 723510: 21, 725749: 19, 726280: 13, 728849: 26, 730201: 68, 731196: 29, 732257: 23, 738171: 74, 738177: 10, 741516: 76, 743013: 69, 744560: 79, 745267: 58, 747514: 37, 748277: 40, 750927: 58, 751007: 23, 752947: 83, 753211: 79, 755256: 60, 758246: 39, 759422: 22, 759475: 62, 760836: 34, 761842: 18, 762368: 94, 764538: 39, 764974: 
91, 765611: 29, 768012: 97, 770083: 13, 770124: 6, 772349: 75, 774244: 91, 776684: 65, 778283: 25, 779022: 75, 779252: 88, 779279: 61, 782035: 77, 782707: 30, 782762: 79, 785994: 15, 786980: 74, 788477: 40, 788736: 26, 790020: 48, 790048: 38, 790594: 66, 792433: 19, 792911: 91, 794773: 28, 797219: 85, 799275: 8, 799319: 65, 799693: 70, 802270: 67, 805943: 89, 806099: 52, 806144: 50, 808246: 92, 808614: 58, 809540: 77, 809690: 83, 811757: 50, 811981: 0, 812279: 72, 812764: 24, 813538: 97, 813865: 62, 815445: 55, 817212: 86, 818173: 59, 818860: 22, 821381: 23, 821416: 6, 824121: 52, 824307: 54, 826237: 98, 826308: 42, 826765: 19, 829279: 65, 830037: 14, 831031: 29, 832237: 98, 832503: 80, 832795: 94, 833960: 46, 835217: 55, 836088: 40, 837858: 55, 838563: 56, 838831: 37, 838945: 86, 839036: 26, 840246: 43, 840384: 77, 841312: 94, 843417: 69, 843736: 22, 845179: 1, 847413: 19, 848072: 46, 848220: 83, 852602: 13, 856787: 33, 857907: 16, 859767: 3, 859796: 2, 862852: 56, 863260: 10, 864564: 7, 865665: 50, 867534: 92, 867597: 34, 868399: 72, 869613: 25, 871200: 99, 872249: 72, 875212: 23, 875788: 22, 876236: 84, 880647: 56, 881196: 90, 883051: 8, 885002: 60, 885958: 55, 888321: 8, 888415: 61, 889266: 84, 891745: 42, 892127: 72, 893983: 3, 894836: 23, 895423: 33, 896914: 44, 897229: 35, 898953: 50, 899371: 21, 899593: 56, 899709: 19, 901276: 10, 901930: 8, 902296: 23, 902706: 79, 903722: 58, 904112: 39, 904255: 27, 904278: 63, 906501: 74, 907620: 99, 913586: 0, 914671: 19, 915171: 99, 916155: 14, 917039: 51, 917353: 74, 919108: 97, 919506: 5, 919711: 27, 921264: 8, 921713: 98, 922696: 15, 922788: 32, 924248: 46, 924574: 71, 929686: 71, 930687: 84, 931983: 29, 935775: 65, 936321: 62, 938911: 91, 939426: 29, 944280: 22, 947023: 74, 947793: 27, 948431: 73, 949049: 2, 954278: 78, 957523: 53, 958360: 72, 959478: 48, 960766: 57, 961182: 83, 962679: 54, 963285: 48, 964179: 28, 970544: 92, 975061: 94, 981380: 70, 981765: 97, 982518: 42, 984764: 85, 984930: 29, 987013: 16, 987757: 
22, 988670: 3, 989758: 53, 991787: 16, 992619: 86, 992993: 24, 996092: 9, 996555: 53, 998323: 96, 998461: 49, 999835: 1, 999936: 37}, + } + + deletions := []map[uint64]struct{}{ + {5158: {}, 22282: {}, 26625: {}, 30464: {}, 32276: {}, 39406: {}, 45829: {}, 49653: {}, 138857: {}, 139624: {}, 147712: {}, 169597: {}, 174727: {}, 185264: {}, 194404: {}, 206469: {}, 285942: {}, 290568: {}, 337242: {}, 348698: {}, 378784: {}, 386443: {}, 405301: {}, 410675: {}, 419614: {}, 425138: {}, 434550: {}, 485705: {}, 497978: {}, 549654: {}, 557748: {}, 564079: {}, 586240: {}, 589415: {}, 601790: {}, 608517: {}, 614523: {}, 620589: {}, 634586: {}, 645052: {}, 656207: {}, 664575: {}, 670473: {}, 674601: {}, 683011: {}, 686123: {}, 688524: {}, 689926: {}, 691224: {}, 691424: {}, 716584: {}, 737597: {}, 756233: {}, 817565: {}, 834273: {}, 835674: {}, 843958: {}, 855323: {}, 859622: {}, 875131: {}, 878883: {}, 905881: {}, 913565: {}, 915876: {}, 924544: {}, 938320: {}, 949593: {}, 957490: {}, 958340: {}, 960272: {}, 961744: {}, 981075: {}, 996546: {}}, + {1670: {}, 15244: {}, 19759: {}, 32442: {}, 32679: {}, 32986: {}, 46757: {}, 48853: {}, 53264: {}, 55916: {}, 58096: {}, 60847: {}, 73138: {}, 83351: {}, 89429: {}, 90182: {}, 112112: {}, 113801: {}, 122272: {}, 126406: {}, 134535: {}, 135558: {}, 138510: {}, 140540: {}, 141735: {}, 147340: {}, 153321: {}, 157112: {}, 160937: {}, 161243: {}, 164574: {}, 196972: {}, 198736: {}, 205425: {}, 218105: {}, 218263: {}, 220110: {}, 221218: {}, 226221: {}, 243603: {}, 246545: {}, 251669: {}, 258510: {}, 265861: {}, 268183: {}, 273011: {}, 282006: {}, 283042: {}, 286410: {}, 291378: {}, 291794: {}, 294225: {}, 298630: {}, 298923: {}, 310448: {}, 310808: {}, 317001: {}, 324642: {}, 326424: {}, 328942: {}, 333803: {}, 338863: {}, 344224: {}, 344283: {}, 350586: {}, 351893: {}, 368390: {}, 368863: {}, 373620: {}, 376862: {}, 380629: {}, 384258: {}, 387176: {}, 387309: {}, 390920: {}, 402819: {}, 403165: {}, 405780: {}, 407894: {}, 409206: {}, 
409244: {}, 411759: {}, 413125: {}, 415512: {}, 417914: {}, 423447: {}, 428217: {}, 432413: {}, 434673: {}, 435388: {}, 439939: {}, 443231: {}, 443779: {}, 444281: {}, 447088: {}, 448414: {}, 452958: {}, 456867: {}, 467354: {}, 474480: {}, 493342: {}, 504752: {}, 507387: {}, 512063: {}, 513624: {}, 518528: {}, 521166: {}, 522076: {}, 525752: {}, 530582: {}, 536669: {}, 546914: {}, 549647: {}, 550482: {}, 558867: {}, 562064: {}, 562359: {}, 576889: {}, 584299: {}, 599101: {}, 615164: {}, 616858: {}, 623644: {}, 634673: {}, 639800: {}, 642274: {}, 646201: {}, 646649: {}, 656731: {}, 657747: {}, 670842: {}, 671493: {}, 674596: {}, 681633: {}, 681917: {}, 689703: {}, 698103: {}, 702740: {}, 711014: {}, 730100: {}, 731111: {}, 732358: {}, 736800: {}, 744204: {}, 745690: {}, 751993: {}, 756793: {}, 758521: {}, 768092: {}, 771774: {}, 774079: {}, 778012: {}, 781028: {}, 781855: {}, 783301: {}, 789312: {}, 790883: {}, 794225: {}, 795070: {}, 795602: {}, 796438: {}, 798460: {}, 803608: {}, 806721: {}, 809539: {}, 813844: {}, 817088: {}, 826185: {}, 830373: {}, 833175: {}, 844964: {}, 848515: {}, 854143: {}, 854210: {}, 856841: {}, 861722: {}, 870211: {}, 872311: {}, 882582: {}, 884939: {}, 888756: {}, 899099: {}, 905083: {}, 912337: {}, 914438: {}, 914645: {}, 918657: {}, 920789: {}, 923228: {}, 925633: {}, 925768: {}, 932217: {}, 945536: {}, 951565: {}, 953265: {}, 953344: {}, 959174: {}, 969560: {}, 971625: {}, 985901: {}, 988307: {}, 998830: {}}, + {230: {}, 9171: {}, 10017: {}, 11774: {}, 15921: {}, 17111: {}, 32945: {}, 37053: {}, 42004: {}, 52276: {}, 72718: {}, 78267: {}, 94048: {}, 100823: {}, 105501: {}, 117557: {}, 122303: {}, 123329: {}, 134817: {}, 138300: {}, 143103: {}, 144135: {}, 148593: {}, 157049: {}, 161434: {}, 161575: {}, 171081: {}, 179850: {}, 180682: {}, 181650: {}, 185019: {}, 192986: {}, 197098: {}, 202299: {}, 203830: {}, 208265: {}, 216550: {}, 221033: {}, 229841: {}, 230327: {}, 243084: {}, 244852: {}, 262904: {}, 269371: {}, 274193: {}, 281878: 
{}, 282131: {}, 282827: {}, 283436: {}, 288729: {}, 301048: {}, 321164: {}, 321878: {}, 324579: {}, 330616: {}, 331597: {}, 336592: {}, 339177: {}, 354393: {}, 354432: {}, 354610: {}, 358031: {}, 358892: {}, 363424: {}, 367029: {}, 367697: {}, 369444: {}, 371654: {}, 375169: {}, 380061: {}, 391525: {}, 394589: {}, 396845: {}, 398553: {}, 399230: {}, 402384: {}, 422035: {}, 425419: {}, 427800: {}, 429143: {}, 429150: {}, 430041: {}, 438716: {}, 442357: {}, 442385: {}, 446415: {}, 446822: {}, 456980: {}, 458424: {}, 461708: {}, 463685: {}, 465661: {}, 471348: {}, 473669: {}, 480251: {}, 492242: {}, 492279: {}, 497307: {}, 500225: {}, 501878: {}, 503117: {}, 503269: {}, 517587: {}, 523854: {}, 528094: {}, 547324: {}, 547557: {}, 549743: {}, 569258: {}, 594258: {}, 599347: {}, 602441: {}, 604071: {}, 604649: {}, 612989: {}, 613926: {}, 614762: {}, 615426: {}, 615450: {}, 623338: {}, 638153: {}, 638348: {}, 646079: {}, 650541: {}, 652613: {}, 655790: {}, 656460: {}, 658961: {}, 660481: {}, 666636: {}, 667998: {}, 668270: {}, 675631: {}, 689386: {}, 694256: {}, 698834: {}, 700520: {}, 704395: {}, 722188: {}, 723284: {}, 728834: {}, 730975: {}, 732586: {}, 740905: {}, 763342: {}, 771537: {}, 776940: {}, 781934: {}, 783064: {}, 785573: {}, 785968: {}, 803106: {}, 805313: {}, 807320: {}, 811875: {}, 829791: {}, 831010: {}, 833714: {}, 836578: {}, 840712: {}, 843070: {}, 862883: {}, 868463: {}, 870996: {}, 873847: {}, 876788: {}, 883263: {}, 884478: {}, 903996: {}, 907082: {}, 916467: {}, 928289: {}, 931349: {}, 944705: {}, 972898: {}, 973354: {}, 976583: {}, 980025: {}, 980040: {}, 986292: {}, 988765: {}, 998142: {}}, + {1310: {}, 4830: {}, 14416: {}, 23668: {}, 25886: {}, 36924: {}, 39926: {}, 56649: {}, 60048: {}, 65335: {}, 66706: {}, 72564: {}, 86292: {}, 98389: {}, 102878: {}, 117971: {}, 130251: {}, 134714: {}, 140994: {}, 168509: {}, 170831: {}, 171220: {}, 174936: {}, 182954: {}, 193947: {}, 198041: {}, 199200: {}, 210965: {}, 212914: {}, 224559: {}, 255060: {}, 
271038: {}, 271866: {}, 275017: {}, 276357: {}, 281966: {}, 293453: {}, 301337: {}, 302076: {}, 305864: {}, 309570: {}, 330769: {}, 338014: {}, 349554: {}, 353582: {}, 361045: {}, 377268: {}, 393124: {}, 399013: {}, 401794: {}, 411217: {}, 422986: {}, 428447: {}, 437937: {}, 446697: {}, 447665: {}, 452386: {}, 453667: {}, 456186: {}, 461207: {}, 465564: {}, 476646: {}, 481083: {}, 482062: {}, 484883: {}, 503210: {}, 516330: {}, 518184: {}, 533574: {}, 541363: {}, 543742: {}, 544879: {}, 553121: {}, 554225: {}, 568910: {}, 570280: {}, 581300: {}, 581322: {}, 589655: {}, 599522: {}, 612893: {}, 615816: {}, 622272: {}, 634599: {}, 644164: {}, 644835: {}, 651861: {}, 652952: {}, 655278: {}, 671854: {}, 682906: {}, 685084: {}, 685862: {}, 705319: {}, 725610: {}, 729164: {}, 732593: {}, 739018: {}, 739929: {}, 751909: {}, 752658: {}, 755396: {}, 762356: {}, 764668: {}, 781960: {}, 792103: {}, 802875: {}, 808640: {}, 809937: {}, 827348: {}, 835279: {}, 839480: {}, 841321: {}, 860038: {}, 871288: {}, 891454: {}, 893765: {}, 894473: {}, 897727: {}, 901657: {}, 902069: {}, 906039: {}, 908168: {}, 911085: {}, 912309: {}, 913217: {}, 913647: {}, 915451: {}, 917587: {}, 926545: {}, 930857: {}, 931364: {}, 936420: {}, 939282: {}, 941380: {}, 950454: {}, 952362: {}, 954392: {}, 982918: {}, 985548: {}, 985651: {}, 987805: {}}, + {1441: {}, 3540: {}, 5357: {}, 8130: {}, 12519: {}, 14897: {}, 24822: {}, 25324: {}, 25681: {}, 26461: {}, 26721: {}, 28762: {}, 35617: {}, 37358: {}, 38551: {}, 42260: {}, 42645: {}, 43249: {}, 44226: {}, 48453: {}, 49347: {}, 53407: {}, 53463: {}, 62786: {}, 63290: {}, 64018: {}, 73518: {}, 75663: {}, 78876: {}, 82961: {}, 100810: {}, 108069: {}, 109159: {}, 118093: {}, 120407: {}, 127018: {}, 134112: {}, 136883: {}, 149287: {}, 149868: {}, 150394: {}, 152904: {}, 155702: {}, 161857: {}, 164435: {}, 166967: {}, 172536: {}, 175949: {}, 177306: {}, 184256: {}, 186810: {}, 187418: {}, 197877: {}, 201508: {}, 204326: {}, 206409: {}, 207903: {}, 212641: {}, 
216701: {}, 229921: {}, 231483: {}, 231509: {}, 238334: {}, 243465: {}, 256977: {}, 257507: {}, 260531: {}, 263216: {}, 263944: {}, 274939: {}, 276775: {}, 278555: {}, 285829: {}, 290362: {}, 290474: {}, 291863: {}, 297294: {}, 298447: {}, 304352: {}, 305169: {}, 307837: {}, 313750: {}, 314031: {}, 319711: {}, 320989: {}, 322715: {}, 331780: {}, 335089: {}, 335679: {}, 348845: {}, 353341: {}, 354250: {}, 354983: {}, 358531: {}, 363474: {}, 364014: {}, 365599: {}, 366482: {}, 394768: {}, 401926: {}, 405562: {}, 406339: {}, 415014: {}, 416268: {}, 420249: {}, 429397: {}, 435623: {}, 437560: {}, 439822: {}, 440111: {}, 440234: {}, 441604: {}, 441794: {}, 442085: {}, 445008: {}, 451865: {}, 454879: {}, 455212: {}, 456680: {}, 460510: {}, 462417: {}, 469137: {}, 482030: {}, 482495: {}, 483131: {}, 493497: {}, 497625: {}, 499713: {}, 501621: {}, 505510: {}, 506075: {}, 507459: {}, 509359: {}, 510207: {}, 511126: {}, 513013: {}, 516132: {}, 516386: {}, 517093: {}, 518736: {}, 526286: {}, 532489: {}, 535153: {}, 536650: {}, 543543: {}, 544087: {}, 544658: {}, 545236: {}, 549066: {}, 549662: {}, 549698: {}, 549886: {}, 550597: {}, 552387: {}, 552494: {}, 555567: {}, 557116: {}, 559384: {}, 566830: {}, 571293: {}, 572630: {}, 577523: {}, 581414: {}, 584025: {}, 585884: {}, 587521: {}, 590423: {}, 590651: {}, 603067: {}, 605464: {}, 607641: {}, 614885: {}, 620596: {}, 620977: {}, 622243: {}, 629263: {}, 629333: {}, 629789: {}, 632609: {}, 634512: {}, 636206: {}, 642264: {}, 643384: {}, 645316: {}, 655582: {}, 658432: {}, 660365: {}, 676628: {}, 678642: {}, 686819: {}, 690768: {}, 694285: {}, 696507: {}, 702038: {}, 702242: {}, 709243: {}, 710754: {}, 711690: {}, 712914: {}, 717191: {}, 717695: {}, 718223: {}, 718915: {}, 719179: {}, 721726: {}, 722942: {}, 735566: {}, 738393: {}, 744404: {}, 744696: {}, 749720: {}, 755387: {}, 764240: {}, 765136: {}, 768853: {}, 772481: {}, 772710: {}, 783860: {}, 786447: {}, 787460: {}, 792089: {}, 796711: {}, 797142: {}, 802504: {}, 805807: 
{}, 807481: {}, 807706: {}, 808924: {}, 810981: {}, 811520: {}, 813840: {}, 815607: {}, 817332: {}, 817363: {}, 832584: {}, 839838: {}, 852521: {}, 857539: {}, 864707: {}, 864748: {}, 870258: {}, 885850: {}, 889916: {}, 891849: {}, 893310: {}, 896852: {}, 898862: {}, 900050: {}, 900732: {}, 904004: {}, 908024: {}, 912130: {}, 912705: {}, 916279: {}, 917032: {}, 923460: {}, 925252: {}, 928542: {}, 928585: {}, 937856: {}, 938757: {}, 941107: {}, 951266: {}, 952365: {}, 953912: {}, 959249: {}, 966553: {}, 968960: {}, 970018: {}, 977484: {}, 980796: {}, 991058: {}, 992151: {}, 997506: {}}, + {35277: {}, 44224: {}, 65905: {}, 71222: {}, 84923: {}, 101362: {}, 106323: {}, 107019: {}, 154290: {}, 167904: {}, 172701: {}, 179129: {}, 187133: {}, 202967: {}, 213598: {}, 228585: {}, 257131: {}, 259691: {}, 266969: {}, 294103: {}, 300660: {}, 310341: {}, 314430: {}, 332156: {}, 339779: {}, 342972: {}, 343067: {}, 350487: {}, 386587: {}, 426807: {}, 430301: {}, 430778: {}, 433214: {}, 460518: {}, 468912: {}, 470598: {}, 470606: {}, 472423: {}, 506719: {}, 510410: {}, 520683: {}, 530859: {}, 536257: {}, 543864: {}, 571389: {}, 575146: {}, 575640: {}, 581231: {}, 587696: {}, 599205: {}, 609032: {}, 627172: {}, 637921: {}, 650569: {}, 656569: {}, 680383: {}, 702382: {}, 703474: {}, 726705: {}, 734261: {}, 745267: {}, 779289: {}, 782663: {}, 788771: {}, 806065: {}, 814407: {}, 823806: {}, 841446: {}, 841496: {}, 855680: {}, 892547: {}, 904767: {}, 905616: {}, 921488: {}, 922278: {}, 947382: {}, 948985: {}, 963684: {}, 966861: {}, 974988: {}, 982871: {}}, + {935: {}, 1797: {}, 5265: {}, 5381: {}, 8363: {}, 10810: {}, 15241: {}, 21403: {}, 21663: {}, 23001: {}, 24223: {}, 26259: {}, 27071: {}, 27245: {}, 27685: {}, 29397: {}, 31192: {}, 34862: {}, 35972: {}, 36427: {}, 39805: {}, 42574: {}, 48731: {}, 49683: {}, 50798: {}, 51385: {}, 51897: {}, 55773: {}, 59104: {}, 61970: {}, 64567: {}, 65780: {}, 66218: {}, 72926: {}, 79767: {}, 83343: {}, 84192: {}, 85496: {}, 88961: {}, 89348: 
{}, 89911: {}, 90346: {}, 91860: {}, 91871: {}, 93799: {}, 96566: {}, 96762: {}, 98931: {}, 106663: {}, 107060: {}, 108198: {}, 109595: {}, 111651: {}, 116320: {}, 118359: {}, 126717: {}, 127103: {}, 129549: {}, 130295: {}, 132404: {}, 133248: {}, 135545: {}, 144992: {}, 150710: {}, 151872: {}, 153028: {}, 156486: {}, 158115: {}, 158991: {}, 159658: {}, 162917: {}, 163188: {}, 172209: {}, 179404: {}, 182991: {}, 191545: {}, 191884: {}, 197816: {}, 199345: {}, 199376: {}, 200521: {}, 204064: {}, 206193: {}, 211466: {}, 213857: {}, 214630: {}, 215354: {}, 220516: {}, 221103: {}, 227586: {}, 227748: {}, 228191: {}, 232441: {}, 239181: {}, 242116: {}, 245659: {}, 259454: {}, 263029: {}, 263506: {}, 264527: {}, 266456: {}, 268589: {}, 271629: {}, 273639: {}, 278633: {}, 285908: {}, 286667: {}, 288812: {}, 298395: {}, 299377: {}, 303372: {}, 304828: {}, 306851: {}, 309625: {}, 310088: {}, 311740: {}, 315493: {}, 317853: {}, 320400: {}, 322183: {}, 328074: {}, 329370: {}, 330077: {}, 336027: {}, 339254: {}, 342267: {}, 344419: {}, 344581: {}, 357055: {}, 358865: {}, 364153: {}, 366987: {}, 379081: {}, 381671: {}, 381726: {}, 383624: {}, 385468: {}, 387385: {}, 397102: {}, 403296: {}, 404329: {}, 407294: {}, 410896: {}, 411354: {}, 413124: {}, 419971: {}, 421210: {}, 422145: {}, 429913: {}, 441402: {}, 442420: {}, 448418: {}, 450814: {}, 451486: {}, 458271: {}, 464824: {}, 471710: {}, 474777: {}, 474971: {}, 480563: {}, 483460: {}, 485254: {}, 491638: {}, 493826: {}, 497464: {}, 498636: {}, 499291: {}, 504798: {}, 508038: {}, 508591: {}, 515547: {}, 516669: {}, 521261: {}, 525927: {}, 527852: {}, 528004: {}, 528712: {}, 534686: {}, 536653: {}, 539609: {}, 542195: {}, 542270: {}, 542921: {}, 548041: {}, 549579: {}, 551402: {}, 554311: {}, 555734: {}, 558630: {}, 562073: {}, 562549: {}, 575254: {}, 580103: {}, 582641: {}, 589427: {}, 598467: {}, 598506: {}, 598530: {}, 600859: {}, 601768: {}, 604918: {}, 605886: {}, 606162: {}, 606760: {}, 609330: {}, 612581: {}, 614379: {}, 
615722: {}, 618392: {}, 623592: {}, 624012: {}, 626818: {}, 629553: {}, 630306: {}, 630845: {}, 639819: {}, 639920: {}, 640405: {}, 641226: {}, 644295: {}, 644534: {}, 646786: {}, 647594: {}, 648724: {}, 656095: {}, 665006: {}, 665637: {}, 669484: {}, 669624: {}, 672143: {}, 675118: {}, 678055: {}, 681093: {}, 684825: {}, 687568: {}, 688986: {}, 689687: {}, 703873: {}, 703893: {}, 707177: {}, 708767: {}, 708996: {}, 709529: {}, 711760: {}, 715488: {}, 716814: {}, 722392: {}, 726801: {}, 730433: {}, 731016: {}, 731515: {}, 731842: {}, 738537: {}, 738589: {}, 741694: {}, 743394: {}, 743784: {}, 746852: {}, 748482: {}, 750464: {}, 751759: {}, 755212: {}, 755416: {}, 757430: {}, 762081: {}, 762817: {}, 764267: {}, 764668: {}, 767888: {}, 769136: {}, 770754: {}, 770872: {}, 771432: {}, 777141: {}, 778148: {}, 782475: {}, 783653: {}, 787557: {}, 787734: {}, 790799: {}, 794457: {}, 798794: {}, 802398: {}, 810399: {}, 811732: {}, 818254: {}, 819373: {}, 822012: {}, 832062: {}, 833491: {}, 834379: {}, 838703: {}, 838795: {}, 846230: {}, 848905: {}, 849930: {}, 858420: {}, 859721: {}, 862787: {}, 867172: {}, 867420: {}, 871767: {}, 875434: {}, 880697: {}, 882179: {}, 883995: {}, 888280: {}, 888456: {}, 892665: {}, 893706: {}, 893830: {}, 896033: {}, 897148: {}, 897673: {}, 897728: {}, 897987: {}, 898046: {}, 898955: {}, 902130: {}, 905494: {}, 909287: {}, 910009: {}, 916363: {}, 918339: {}, 921229: {}, 925620: {}, 932543: {}, 940846: {}, 941160: {}, 947148: {}, 951694: {}, 957272: {}, 960696: {}, 965080: {}, 965836: {}, 968619: {}, 971501: {}, 977754: {}, 992816: {}, 993182: {}, 994686: {}, 996962: {}}, + {36727: {}, 63977: {}, 72993: {}, 90014: {}, 105370: {}, 106134: {}, 117051: {}, 132242: {}, 181476: {}, 211564: {}, 234585: {}, 243859: {}, 271716: {}, 274197: {}, 275464: {}, 278409: {}, 280966: {}, 291792: {}, 325050: {}, 339183: {}, 346230: {}, 349251: {}, 358273: {}, 371417: {}, 383867: {}, 396738: {}, 409562: {}, 424683: {}, 443002: {}, 462179: {}, 471853: {}, 477680: 
{}, 495461: {}, 499685: {}, 502087: {}, 546208: {}, 571231: {}, 573200: {}, 577997: {}, 600505: {}, 635713: {}, 644978: {}, 668823: {}, 682823: {}, 694011: {}, 702753: {}, 705444: {}, 731912: {}, 737054: {}, 748653: {}, 758586: {}, 760702: {}, 768510: {}, 772207: {}, 782158: {}, 806484: {}, 808616: {}, 854672: {}, 864974: {}, 881724: {}, 883754: {}, 901859: {}, 907161: {}, 915000: {}, 915082: {}, 930095: {}, 967507: {}, 990307: {}, 992895: {}}, + {9308: {}, 13282: {}, 17990: {}, 18646: {}, 19082: {}, 50132: {}, 51049: {}, 56162: {}, 58767: {}, 60340: {}, 61754: {}, 71206: {}, 77017: {}, 79145: {}, 85589: {}, 86455: {}, 90386: {}, 91054: {}, 103294: {}, 108307: {}, 109840: {}, 111117: {}, 112300: {}, 114691: {}, 115818: {}, 119583: {}, 140496: {}, 144512: {}, 152155: {}, 152808: {}, 155920: {}, 156626: {}, 159443: {}, 163732: {}, 165392: {}, 168715: {}, 168718: {}, 190887: {}, 194692: {}, 194780: {}, 195515: {}, 204633: {}, 204895: {}, 208933: {}, 217467: {}, 224074: {}, 224519: {}, 225522: {}, 226503: {}, 232077: {}, 235736: {}, 242518: {}, 266058: {}, 266669: {}, 273578: {}, 274896: {}, 280454: {}, 286259: {}, 290965: {}, 300481: {}, 311583: {}, 311643: {}, 312753: {}, 320514: {}, 322851: {}, 329193: {}, 330321: {}, 349609: {}, 352805: {}, 359143: {}, 366130: {}, 371283: {}, 372706: {}, 374537: {}, 379808: {}, 380258: {}, 383794: {}, 387772: {}, 388629: {}, 389407: {}, 399447: {}, 402593: {}, 403985: {}, 407544: {}, 418709: {}, 419151: {}, 421158: {}, 431309: {}, 433362: {}, 435071: {}, 444864: {}, 462667: {}, 464806: {}, 465058: {}, 465630: {}, 467096: {}, 468918: {}, 474922: {}, 475249: {}, 483780: {}, 484014: {}, 485677: {}, 488375: {}, 489894: {}, 493215: {}, 495346: {}, 498012: {}, 508671: {}, 514580: {}, 517112: {}, 519343: {}, 520414: {}, 521508: {}, 522117: {}, 523796: {}, 526074: {}, 526181: {}, 534914: {}, 536722: {}, 541767: {}, 550700: {}, 551441: {}, 555274: {}, 555571: {}, 558364: {}, 564239: {}, 568764: {}, 571194: {}, 572063: {}, 574445: {}, 
584197: {}, 584704: {}, 586346: {}, 586884: {}, 595444: {}, 595810: {}, 596723: {}, 615149: {}, 615753: {}, 615826: {}, 620280: {}, 624966: {}, 630045: {}, 633956: {}, 643540: {}, 643941: {}, 653184: {}, 663478: {}, 666227: {}, 683459: {}, 684784: {}, 694578: {}, 698195: {}, 699610: {}, 704307: {}, 711125: {}, 711481: {}, 714524: {}, 715981: {}, 717167: {}, 717915: {}, 718924: {}, 724533: {}, 736034: {}, 744647: {}, 746360: {}, 748703: {}, 750065: {}, 750374: {}, 753126: {}, 774565: {}, 776289: {}, 778626: {}, 781610: {}, 781742: {}, 782240: {}, 784276: {}, 784338: {}, 793600: {}, 796424: {}, 801413: {}, 801668: {}, 812299: {}, 813959: {}, 815197: {}, 817234: {}, 819762: {}, 820026: {}, 822050: {}, 824647: {}, 826899: {}, 828441: {}, 831909: {}, 835653: {}, 836320: {}, 838251: {}, 839656: {}, 846261: {}, 846803: {}, 847503: {}, 849791: {}, 855441: {}, 858183: {}, 861126: {}, 872439: {}, 873167: {}, 875416: {}, 892861: {}, 893489: {}, 894839: {}, 897106: {}, 901009: {}, 903689: {}, 916087: {}, 919183: {}, 919540: {}, 919707: {}, 921360: {}, 921816: {}, 923034: {}, 926093: {}, 929991: {}, 932919: {}, 934418: {}, 934578: {}, 939799: {}, 941874: {}, 944978: {}, 959952: {}, 960546: {}, 961809: {}, 963425: {}, 966482: {}, 967623: {}, 981136: {}, 983194: {}, 983248: {}, 983761: {}, 995818: {}, 997527: {}}, + {4375: {}, 5000: {}, 5399: {}, 10825: {}, 16832: {}, 35121: {}, 36542: {}, 36800: {}, 38866: {}, 40663: {}, 47665: {}, 52811: {}, 57345: {}, 59858: {}, 62319: {}, 66158: {}, 78401: {}, 83724: {}, 84129: {}, 86368: {}, 90922: {}, 98816: {}, 118426: {}, 121855: {}, 124812: {}, 128128: {}, 130896: {}, 131680: {}, 134240: {}, 140790: {}, 143213: {}, 149100: {}, 151196: {}, 154101: {}, 160197: {}, 175142: {}, 178104: {}, 183559: {}, 185197: {}, 187654: {}, 187927: {}, 199626: {}, 203638: {}, 211789: {}, 218678: {}, 222410: {}, 223392: {}, 230039: {}, 230325: {}, 230556: {}, 234621: {}, 235477: {}, 253717: {}, 258475: {}, 261382: {}, 261515: {}, 267997: {}, 272732: {}, 
275886: {}, 282192: {}, 288087: {}, 297418: {}, 299076: {}, 307638: {}, 318054: {}, 328895: {}, 332636: {}, 335216: {}, 347466: {}, 349911: {}, 350297: {}, 360529: {}, 368348: {}, 374846: {}, 384018: {}, 385729: {}, 391486: {}, 407928: {}, 419657: {}, 420951: {}, 423482: {}, 429609: {}, 431323: {}, 441870: {}, 453333: {}, 453648: {}, 456283: {}, 464049: {}, 466087: {}, 469889: {}, 476456: {}, 478507: {}, 489329: {}, 490452: {}, 513713: {}, 514009: {}, 516838: {}, 537002: {}, 541528: {}, 543640: {}, 548697: {}, 549336: {}, 553282: {}, 562591: {}, 562727: {}, 573556: {}, 615021: {}, 619158: {}, 625925: {}, 630194: {}, 633098: {}, 635919: {}, 637882: {}, 646702: {}, 656093: {}, 657027: {}, 661873: {}, 666175: {}, 673748: {}, 677374: {}, 700397: {}, 711075: {}, 714907: {}, 721857: {}, 722980: {}, 724696: {}, 730292: {}, 735942: {}, 744330: {}, 759087: {}, 771457: {}, 771830: {}, 774058: {}, 782998: {}, 785103: {}, 785585: {}, 803313: {}, 806450: {}, 810933: {}, 811261: {}, 815112: {}, 819662: {}, 822187: {}, 823721: {}, 829759: {}, 841131: {}, 844000: {}, 845705: {}, 854884: {}, 855104: {}, 858749: {}, 859536: {}, 862212: {}, 863541: {}, 863586: {}, 866551: {}, 869718: {}, 877527: {}, 881631: {}, 891139: {}, 899390: {}, 904078: {}, 913180: {}, 919207: {}, 922422: {}, 928295: {}, 933418: {}, 943856: {}, 954979: {}, 955257: {}, 959731: {}, 960288: {}, 960797: {}, 965184: {}, 971667: {}, 979949: {}, 985377: {}, 986551: {}, 989161: {}, 989481: {}}, + {1645: {}, 6322: {}, 24095: {}, 30207: {}, 104963: {}, 108641: {}, 130380: {}, 143066: {}, 170470: {}, 172603: {}, 173181: {}, 200570: {}, 203987: {}, 222124: {}, 239206: {}, 267446: {}, 339858: {}, 369716: {}, 372838: {}, 377757: {}, 387589: {}, 390921: {}, 400041: {}, 423613: {}, 430565: {}, 456879: {}, 466464: {}, 500206: {}, 507082: {}, 534506: {}, 560540: {}, 566495: {}, 618916: {}, 638246: {}, 654013: {}, 750914: {}, 812691: {}, 818103: {}, 863629: {}, 886273: {}, 889301: {}, 938088: {}, 966619: {}}, + {1677: {}, 34572: 
{}, 48924: {}, 62149: {}, 71900: {}, 81981: {}, 87476: {}, 139022: {}, 175158: {}, 217871: {}, 263746: {}, 401201: {}, 406575: {}, 426203: {}, 441456: {}, 458252: {}, 459712: {}, 468382: {}, 471072: {}, 517961: {}, 608381: {}, 617541: {}, 661548: {}, 685259: {}, 703165: {}, 706760: {}, 732019: {}, 736767: {}, 739255: {}, 799856: {}, 802157: {}, 806965: {}, 807618: {}, 815933: {}, 848166: {}, 868218: {}, 895859: {}, 906095: {}, 948367: {}, 989307: {}}, + {16: {}, 3166: {}, 9087: {}, 19854: {}, 21238: {}, 21327: {}, 23389: {}, 55777: {}, 66279: {}, 68842: {}, 81481: {}, 83509: {}, 86448: {}, 88590: {}, 96084: {}, 96642: {}, 101028: {}, 117472: {}, 118962: {}, 119839: {}, 126260: {}, 126974: {}, 129687: {}, 131114: {}, 133262: {}, 133763: {}, 134340: {}, 137956: {}, 139992: {}, 149256: {}, 153942: {}, 160613: {}, 161141: {}, 161414: {}, 163446: {}, 165151: {}, 167074: {}, 170699: {}, 173808: {}, 179750: {}, 180191: {}, 182053: {}, 182227: {}, 186835: {}, 190211: {}, 190887: {}, 193821: {}, 201561: {}, 203858: {}, 204234: {}, 204868: {}, 210492: {}, 211311: {}, 228019: {}, 228259: {}, 236797: {}, 238986: {}, 242949: {}, 248328: {}, 253549: {}, 262486: {}, 268999: {}, 275601: {}, 281448: {}, 289871: {}, 292676: {}, 293714: {}, 295674: {}, 306018: {}, 307410: {}, 307518: {}, 309439: {}, 320201: {}, 330635: {}, 338515: {}, 345848: {}, 351229: {}, 355525: {}, 358746: {}, 362203: {}, 362230: {}, 363631: {}, 367715: {}, 386426: {}, 392884: {}, 401825: {}, 404016: {}, 416394: {}, 417130: {}, 418514: {}, 420598: {}, 426747: {}, 430418: {}, 434572: {}, 436394: {}, 444197: {}, 446241: {}, 472763: {}, 477352: {}, 479288: {}, 479507: {}, 479860: {}, 480422: {}, 482561: {}, 488182: {}, 494436: {}, 495140: {}, 495768: {}, 497438: {}, 498897: {}, 498979: {}, 500352: {}, 508728: {}, 509648: {}, 512059: {}, 526398: {}, 530410: {}, 534226: {}, 544170: {}, 554050: {}, 558392: {}, 562415: {}, 570842: {}, 578602: {}, 579252: {}, 582600: {}, 584599: {}, 586809: {}, 587061: {}, 594939: {}, 
595350: {}, 611219: {}, 618415: {}, 620645: {}, 628167: {}, 640718: {}, 645191: {}, 645267: {}, 651711: {}, 652772: {}, 653108: {}, 655223: {}, 656684: {}, 659736: {}, 660484: {}, 666568: {}, 669200: {}, 675504: {}, 678046: {}, 678614: {}, 680468: {}, 687067: {}, 692667: {}, 693485: {}, 697422: {}, 719529: {}, 727131: {}, 732916: {}, 736719: {}, 739774: {}, 740166: {}, 740633: {}, 745023: {}, 751435: {}, 755638: {}, 758499: {}, 778070: {}, 784320: {}, 784947: {}, 787912: {}, 788936: {}, 800857: {}, 801258: {}, 805499: {}, 805961: {}, 807047: {}, 836135: {}, 843851: {}, 847589: {}, 847950: {}, 860099: {}, 863373: {}, 864239: {}, 864315: {}, 865556: {}, 872350: {}, 892150: {}, 901407: {}, 901698: {}, 906316: {}, 910928: {}, 916348: {}, 916888: {}, 916972: {}, 918142: {}, 919413: {}, 939421: {}, 939551: {}, 948164: {}, 963522: {}, 969469: {}, 971729: {}, 975099: {}, 978332: {}, 980619: {}, 981129: {}, 981829: {}, 986886: {}, 994881: {}, 998918: {}}, + {9911: {}, 11546: {}, 17579: {}, 31468: {}, 34798: {}, 39903: {}, 42345: {}, 46022: {}, 62499: {}, 65854: {}, 69825: {}, 71824: {}, 82868: {}, 84868: {}, 91871: {}, 92172: {}, 92863: {}, 94790: {}, 95453: {}, 96871: {}, 100622: {}, 111860: {}, 112353: {}, 119770: {}, 120436: {}, 124471: {}, 129764: {}, 134186: {}, 142083: {}, 142105: {}, 145178: {}, 161027: {}, 165232: {}, 169254: {}, 177546: {}, 186302: {}, 195286: {}, 196585: {}, 199110: {}, 209688: {}, 216812: {}, 217144: {}, 224627: {}, 236535: {}, 240425: {}, 248230: {}, 254884: {}, 255666: {}, 257886: {}, 266663: {}, 270233: {}, 270330: {}, 273202: {}, 280912: {}, 285072: {}, 291668: {}, 296277: {}, 316010: {}, 316151: {}, 327098: {}, 333803: {}, 333996: {}, 346561: {}, 358868: {}, 361520: {}, 363869: {}, 367041: {}, 367276: {}, 374964: {}, 382168: {}, 382651: {}, 383480: {}, 394514: {}, 397417: {}, 402082: {}, 402941: {}, 411156: {}, 415445: {}, 418478: {}, 425634: {}, 463342: {}, 466375: {}, 467145: {}, 472818: {}, 475329: {}, 477370: {}, 479941: {}, 495455: {}, 
507936: {}, 513941: {}, 514557: {}, 520157: {}, 524718: {}, 541790: {}, 548126: {}, 552890: {}, 557313: {}, 559468: {}, 568372: {}, 568956: {}, 572051: {}, 576027: {}, 578252: {}, 578511: {}, 586718: {}, 588213: {}, 592971: {}, 597652: {}, 599835: {}, 619295: {}, 619619: {}, 621571: {}, 624838: {}, 633361: {}, 635084: {}, 641477: {}, 647806: {}, 661048: {}, 663340: {}, 663618: {}, 672474: {}, 689921: {}, 690835: {}, 692450: {}, 695380: {}, 695807: {}, 702692: {}, 703025: {}, 704191: {}, 709468: {}, 709561: {}, 716491: {}, 731368: {}, 732643: {}, 744584: {}, 745922: {}, 753489: {}, 754410: {}, 757051: {}, 759848: {}, 760962: {}, 785201: {}, 796258: {}, 797462: {}, 821453: {}, 822717: {}, 834716: {}, 836607: {}, 838977: {}, 840095: {}, 848747: {}, 851099: {}, 851459: {}, 852033: {}, 852415: {}, 853403: {}, 855130: {}, 855575: {}, 857876: {}, 862611: {}, 866853: {}, 868938: {}, 871487: {}, 880106: {}, 886290: {}, 886357: {}, 887792: {}, 892041: {}, 893432: {}, 899034: {}, 901943: {}, 902632: {}, 902670: {}, 905320: {}, 905514: {}, 923394: {}, 930005: {}, 931595: {}, 933502: {}, 946736: {}, 947615: {}, 954874: {}, 984931: {}, 987958: {}, 997024: {}}, + {19193: {}, 47119: {}, 56979: {}, 61554: {}, 96132: {}, 277648: {}, 299398: {}, 344316: {}, 442966: {}, 475323: {}, 528237: {}, 547268: {}, 547940: {}, 626161: {}, 631251: {}, 669935: {}, 706305: {}, 712821: {}, 730871: {}, 802246: {}, 819042: {}, 847156: {}, 870746: {}, 886785: {}, 950542: {}, 961534: {}}, + {17909: {}, 54668: {}, 76579: {}, 107386: {}, 117899: {}, 121458: {}, 129708: {}, 133325: {}, 256731: {}, 258551: {}, 273517: {}, 308758: {}, 345442: {}, 363264: {}, 390851: {}, 410432: {}, 448472: {}, 460802: {}, 470534: {}, 491344: {}, 542972: {}, 559454: {}, 610637: {}, 613421: {}, 623550: {}, 653443: {}, 654854: {}, 666105: {}, 685176: {}, 712048: {}, 715522: {}, 736112: {}, 737071: {}, 785559: {}, 786868: {}, 794950: {}, 839713: {}, 851730: {}, 862608: {}, 863467: {}, 867116: {}, 870868: {}, 895345: {}, 918647: 
{}, 941267: {}, 942262: {}, 965118: {}, 970670: {}, 973720: {}, 981351: {}, 987995: {}, 998305: {}}, + {1439: {}, 5874: {}, 8852: {}, 10849: {}, 13322: {}, 14493: {}, 14865: {}, 18047: {}, 22700: {}, 22828: {}, 22829: {}, 22871: {}, 23039: {}, 24094: {}, 24411: {}, 31039: {}, 32219: {}, 32831: {}, 33088: {}, 37851: {}, 37927: {}, 38612: {}, 38978: {}, 41878: {}, 44057: {}, 44144: {}, 44578: {}, 45922: {}, 54139: {}, 56708: {}, 59940: {}, 62673: {}, 65432: {}, 66801: {}, 69079: {}, 73545: {}, 74136: {}, 75182: {}, 75444: {}, 76569: {}, 79071: {}, 80528: {}, 83452: {}, 83825: {}, 87715: {}, 93289: {}, 97004: {}, 98159: {}, 103402: {}, 103420: {}, 104857: {}, 106034: {}, 108090: {}, 108312: {}, 108467: {}, 108814: {}, 108914: {}, 112987: {}, 115621: {}, 118167: {}, 118663: {}, 121059: {}, 122008: {}, 123883: {}, 124160: {}, 125601: {}, 125994: {}, 126754: {}, 130136: {}, 130539: {}, 131876: {}, 135001: {}, 135193: {}, 135701: {}, 142372: {}, 147446: {}, 148971: {}, 149756: {}, 151278: {}, 153866: {}, 155575: {}, 155733: {}, 156623: {}, 157121: {}, 157895: {}, 163645: {}, 165152: {}, 171421: {}, 174207: {}, 174824: {}, 175654: {}, 175867: {}, 176797: {}, 180053: {}, 181908: {}, 185212: {}, 187682: {}, 189207: {}, 190194: {}, 190832: {}, 193604: {}, 199287: {}, 200102: {}, 203519: {}, 205643: {}, 206543: {}, 207741: {}, 207976: {}, 208017: {}, 208942: {}, 212809: {}, 214693: {}, 216885: {}, 217179: {}, 217318: {}, 218700: {}, 219199: {}, 222805: {}, 234805: {}, 242257: {}, 242393: {}, 242741: {}, 244642: {}, 245196: {}, 247512: {}, 247566: {}, 250673: {}, 256428: {}, 259531: {}, 259799: {}, 260054: {}, 261045: {}, 261934: {}, 262831: {}, 265637: {}, 276779: {}, 277433: {}, 278593: {}, 280613: {}, 281866: {}, 282041: {}, 282094: {}, 282945: {}, 283077: {}, 283843: {}, 288097: {}, 293379: {}, 294366: {}, 294401: {}, 296314: {}, 299041: {}, 301377: {}, 301806: {}, 302313: {}, 304060: {}, 305674: {}, 306940: {}, 310056: {}, 310612: {}, 310704: {}, 313355: {}, 317199: {}, 
317481: {}, 318896: {}, 321003: {}, 324642: {}, 324800: {}, 324871: {}, 327133: {}, 331134: {}, 334998: {}, 335726: {}, 338984: {}, 340070: {}, 340541: {}, 341129: {}, 342237: {}, 344249: {}, 344865: {}, 346673: {}, 348817: {}, 348826: {}, 349396: {}, 350806: {}, 353619: {}, 355085: {}, 355277: {}, 360258: {}, 360278: {}, 360348: {}, 369288: {}, 373476: {}, 374403: {}, 377555: {}, 378789: {}, 378883: {}, 378933: {}, 380065: {}, 385639: {}, 386467: {}, 387753: {}, 387853: {}, 390282: {}, 400321: {}, 404735: {}, 410487: {}, 413046: {}, 413522: {}, 415322: {}, 416398: {}, 419379: {}, 422111: {}, 422217: {}, 423566: {}, 424542: {}, 425013: {}, 428546: {}, 428761: {}, 428943: {}, 429460: {}, 432486: {}, 438404: {}, 438692: {}, 439824: {}, 441686: {}, 445655: {}, 445869: {}, 452578: {}, 453862: {}, 457407: {}, 457716: {}, 459182: {}, 459970: {}, 462666: {}, 463047: {}, 463804: {}, 464514: {}, 465899: {}, 470690: {}, 471791: {}, 473846: {}, 474712: {}, 475333: {}, 476165: {}, 484617: {}, 485722: {}, 486072: {}, 486120: {}, 486279: {}, 486402: {}, 487505: {}, 491030: {}, 491107: {}, 494676: {}, 496470: {}, 497918: {}, 500639: {}, 502164: {}, 502604: {}, 503057: {}, 505639: {}, 507929: {}, 509051: {}, 509972: {}, 509986: {}, 510907: {}, 511029: {}, 511329: {}, 511461: {}, 511633: {}, 512193: {}, 512793: {}, 519476: {}, 520260: {}, 521312: {}, 523607: {}, 525819: {}, 528128: {}, 529811: {}, 530651: {}, 530778: {}, 531799: {}, 534093: {}, 534526: {}, 535086: {}, 535576: {}, 538652: {}, 549644: {}, 551073: {}, 551931: {}, 559987: {}, 561019: {}, 566451: {}, 566728: {}, 568369: {}, 570033: {}, 573724: {}, 574702: {}, 575362: {}, 580216: {}, 581516: {}, 582641: {}, 582659: {}, 583152: {}, 583545: {}, 584673: {}, 588157: {}, 591258: {}, 591571: {}, 592356: {}, 594982: {}, 597226: {}, 608859: {}, 610127: {}, 611018: {}, 612090: {}, 614895: {}, 615897: {}, 617791: {}, 623991: {}, 625699: {}, 626953: {}, 629191: {}, 632090: {}, 633704: {}, 634284: {}, 636008: {}, 636075: {}, 639278: 
{}, 644652: {}, 644788: {}, 645851: {}, 647320: {}, 652216: {}, 655508: {}, 656942: {}, 657423: {}, 658313: {}, 659922: {}, 661103: {}, 663750: {}, 664033: {}, 670384: {}, 671283: {}, 674403: {}, 676213: {}, 676792: {}, 679116: {}, 680516: {}, 684478: {}, 685742: {}, 686192: {}, 687236: {}, 690093: {}, 693770: {}, 695471: {}, 696466: {}, 697968: {}, 698646: {}, 698862: {}, 702494: {}, 702651: {}, 703387: {}, 706803: {}, 707297: {}, 709479: {}, 709580: {}, 709737: {}, 714740: {}, 721395: {}, 724097: {}, 724634: {}, 727060: {}, 727912: {}, 729715: {}, 732863: {}, 733032: {}, 735959: {}, 739636: {}, 741315: {}, 742481: {}, 745801: {}, 745998: {}, 746722: {}, 747682: {}, 752847: {}, 755337: {}, 757679: {}, 763247: {}, 765275: {}, 765400: {}, 767718: {}, 769304: {}, 771240: {}, 772430: {}, 773865: {}, 775840: {}, 780179: {}, 781594: {}, 783177: {}, 783248: {}, 783714: {}, 784328: {}, 786835: {}, 790628: {}, 793902: {}, 794601: {}, 794723: {}, 794976: {}, 799655: {}, 800657: {}, 801597: {}, 802851: {}, 803413: {}, 805708: {}, 808125: {}, 810121: {}, 812472: {}, 813132: {}, 818634: {}, 818882: {}, 823833: {}, 824901: {}, 826174: {}, 827011: {}, 829461: {}, 833531: {}, 833646: {}, 834217: {}, 834535: {}, 838270: {}, 842918: {}, 844001: {}, 846096: {}, 846294: {}, 846968: {}, 847237: {}, 849627: {}, 849917: {}, 850260: {}, 850654: {}, 850783: {}, 852059: {}, 853140: {}, 856452: {}, 858608: {}, 858655: {}, 859866: {}, 862581: {}, 863765: {}, 870669: {}, 871220: {}, 874904: {}, 875386: {}, 878663: {}, 880873: {}, 885351: {}, 885819: {}, 888527: {}, 890183: {}, 892148: {}, 896606: {}, 897015: {}, 899108: {}, 900578: {}, 901827: {}, 903214: {}, 903865: {}, 904435: {}, 907529: {}, 907921: {}, 909860: {}, 913723: {}, 914605: {}, 914948: {}, 915133: {}, 916944: {}, 917278: {}, 919798: {}, 920385: {}, 926312: {}, 928738: {}, 935791: {}, 936159: {}, 937301: {}, 938095: {}, 941790: {}, 942073: {}, 943694: {}, 945297: {}, 946306: {}, 948399: {}, 948876: {}, 950521: {}, 952628: {}, 
954044: {}, 958379: {}, 958429: {}, 959388: {}, 960042: {}, 960713: {}, 962391: {}, 963079: {}, 963360: {}, 963959: {}, 965833: {}, 966707: {}, 969139: {}, 971526: {}, 971695: {}, 972925: {}, 977207: {}, 977922: {}, 978645: {}, 979548: {}, 982745: {}, 986582: {}, 991850: {}, 992866: {}, 992990: {}, 999291: {}}, + {6046: {}, 7217: {}, 10462: {}, 12763: {}, 15591: {}, 21994: {}, 23509: {}, 23863: {}, 47312: {}, 49265: {}, 49444: {}, 59742: {}, 77894: {}, 82724: {}, 98806: {}, 102215: {}, 102350: {}, 104411: {}, 107925: {}, 109795: {}, 117476: {}, 120153: {}, 120157: {}, 121790: {}, 128205: {}, 132062: {}, 133108: {}, 136362: {}, 147689: {}, 150995: {}, 158381: {}, 173363: {}, 175936: {}, 183874: {}, 185958: {}, 189225: {}, 193499: {}, 197787: {}, 201808: {}, 206812: {}, 213533: {}, 223850: {}, 224554: {}, 226659: {}, 232118: {}, 233334: {}, 235973: {}, 249864: {}, 254176: {}, 272810: {}, 274278: {}, 277120: {}, 279236: {}, 280117: {}, 281236: {}, 281381: {}, 285836: {}, 288820: {}, 289508: {}, 290441: {}, 296475: {}, 296910: {}, 301630: {}, 302367: {}, 303361: {}, 304020: {}, 305095: {}, 311760: {}, 314119: {}, 315869: {}, 329855: {}, 331000: {}, 334541: {}, 339480: {}, 345246: {}, 352455: {}, 353103: {}, 354147: {}, 354564: {}, 362591: {}, 365328: {}, 368383: {}, 369065: {}, 376118: {}, 379424: {}, 382196: {}, 382987: {}, 391534: {}, 392250: {}, 393704: {}, 394112: {}, 397246: {}, 399639: {}, 403487: {}, 417008: {}, 419810: {}, 428993: {}, 437155: {}, 452741: {}, 480133: {}, 480221: {}, 504438: {}, 504521: {}, 506408: {}, 511534: {}, 527015: {}, 530031: {}, 535252: {}, 537880: {}, 539204: {}, 541175: {}, 545832: {}, 546192: {}, 546609: {}, 548147: {}, 548284: {}, 548943: {}, 558572: {}, 560944: {}, 563900: {}, 567766: {}, 573993: {}, 577495: {}, 581182: {}, 584793: {}, 587448: {}, 589825: {}, 590100: {}, 593044: {}, 599186: {}, 604160: {}, 606511: {}, 616061: {}, 619031: {}, 620917: {}, 621540: {}, 625149: {}, 626791: {}, 629756: {}, 629939: {}, 632546: {}, 636577: 
{}, 637445: {}, 638132: {}, 651092: {}, 651413: {}, 655763: {}, 660988: {}, 663275: {}, 664520: {}, 666689: {}, 679932: {}, 695117: {}, 701901: {}, 703729: {}, 704227: {}, 711524: {}, 714539: {}, 717373: {}, 724182: {}, 727745: {}, 733532: {}, 737611: {}, 747065: {}, 750840: {}, 755202: {}, 756167: {}, 757137: {}, 757772: {}, 762202: {}, 772822: {}, 772910: {}, 784788: {}, 807400: {}, 822846: {}, 822944: {}, 823434: {}, 829593: {}, 840176: {}, 841259: {}, 842312: {}, 842352: {}, 843506: {}, 849235: {}, 849239: {}, 858623: {}, 860024: {}, 863984: {}, 866414: {}, 870277: {}, 877769: {}, 885696: {}, 886889: {}, 896973: {}, 904267: {}, 904946: {}, 913960: {}, 918113: {}, 918207: {}, 919342: {}, 924163: {}, 929898: {}, 930521: {}, 930798: {}, 937296: {}, 947772: {}, 947874: {}, 951341: {}, 955547: {}, 962297: {}, 963290: {}, 967519: {}, 972772: {}, 973315: {}, 990383: {}, 995103: {}}, + {15815: {}, 16299: {}, 37184: {}, 42449: {}, 43437: {}, 56201: {}, 69697: {}, 72566: {}, 72904: {}, 77761: {}, 109062: {}, 124996: {}, 126439: {}, 128830: {}, 140757: {}, 145701: {}, 154916: {}, 167381: {}, 178980: {}, 188957: {}, 197694: {}, 201649: {}, 202209: {}, 211543: {}, 232571: {}, 239488: {}, 248660: {}, 249397: {}, 258105: {}, 275137: {}, 278306: {}, 281311: {}, 291533: {}, 292270: {}, 301586: {}, 302600: {}, 304447: {}, 306932: {}, 323122: {}, 324693: {}, 328611: {}, 329414: {}, 333379: {}, 342494: {}, 343333: {}, 354432: {}, 354790: {}, 355382: {}, 358813: {}, 360467: {}, 373417: {}, 399215: {}, 401486: {}, 404786: {}, 406125: {}, 407176: {}, 411169: {}, 442435: {}, 443442: {}, 446315: {}, 451425: {}, 457785: {}, 458203: {}, 465578: {}, 472129: {}, 473301: {}, 479995: {}, 486890: {}, 495299: {}, 498064: {}, 547279: {}, 547966: {}, 549461: {}, 565637: {}, 574186: {}, 584930: {}, 586310: {}, 588696: {}, 591988: {}, 602039: {}, 603773: {}, 604229: {}, 620623: {}, 633591: {}, 644008: {}, 645968: {}, 678678: {}, 684015: {}, 697544: {}, 698047: {}, 700276: {}, 704835: {}, 707935: 
{}, 708926: {}, 736557: {}, 737944: {}, 741454: {}, 754894: {}, 760136: {}, 775223: {}, 778331: {}, 781641: {}, 794311: {}, 807657: {}, 811277: {}, 811341: {}, 814696: {}, 815223: {}, 815510: {}, 829259: {}, 841083: {}, 856271: {}, 861952: {}, 863706: {}, 871196: {}, 873081: {}, 884233: {}, 896550: {}, 908747: {}, 909490: {}, 915049: {}, 936063: {}, 954893: {}, 956229: {}, 959018: {}, 959801: {}, 962345: {}, 962592: {}, 970944: {}, 985044: {}, 985355: {}, 989228: {}, 992399: {}, 997871: {}}, + {260: {}, 5582: {}, 8087: {}, 16397: {}, 20770: {}, 33084: {}, 38342: {}, 40313: {}, 42508: {}, 45231: {}, 51219: {}, 67166: {}, 70029: {}, 72339: {}, 75162: {}, 83277: {}, 94824: {}, 99542: {}, 136331: {}, 145482: {}, 159949: {}, 160277: {}, 161513: {}, 162867: {}, 169031: {}, 180324: {}, 181409: {}, 185066: {}, 193026: {}, 196680: {}, 207214: {}, 207433: {}, 210875: {}, 215441: {}, 216768: {}, 235613: {}, 247370: {}, 257170: {}, 275910: {}, 277395: {}, 278561: {}, 279628: {}, 282181: {}, 298128: {}, 301304: {}, 314990: {}, 317459: {}, 321850: {}, 329663: {}, 333838: {}, 335411: {}, 343538: {}, 343951: {}, 346552: {}, 356005: {}, 358226: {}, 369608: {}, 370223: {}, 374883: {}, 378660: {}, 389997: {}, 405016: {}, 424953: {}, 430149: {}, 456418: {}, 460896: {}, 469159: {}, 500105: {}, 505548: {}, 507847: {}, 517590: {}, 521230: {}, 523621: {}, 528724: {}, 531988: {}, 538542: {}, 568105: {}, 576627: {}, 577085: {}, 583176: {}, 586967: {}, 592866: {}, 603159: {}, 608156: {}, 620117: {}, 645165: {}, 660340: {}, 670893: {}, 677419: {}, 677483: {}, 681523: {}, 685808: {}, 717432: {}, 717943: {}, 731485: {}, 731737: {}, 748218: {}, 749880: {}, 751795: {}, 756219: {}, 762362: {}, 767371: {}, 769279: {}, 790460: {}, 791034: {}, 791531: {}, 818144: {}, 829312: {}, 844668: {}, 845321: {}, 851251: {}, 864864: {}, 885479: {}, 908530: {}, 940087: {}, 963599: {}, 966577: {}, 969060: {}, 970427: {}, 984632: {}, 995312: {}}, + {7167: {}, 8466: {}, 10869: {}, 15822: {}, 18905: {}, 19996: {}, 
21118: {}, 27702: {}, 31732: {}, 39982: {}, 56899: {}, 59451: {}, 67530: {}, 69936: {}, 72464: {}, 76877: {}, 79537: {}, 85400: {}, 92664: {}, 92941: {}, 104920: {}, 107184: {}, 119403: {}, 126761: {}, 136094: {}, 145481: {}, 160344: {}, 164738: {}, 166859: {}, 167169: {}, 167369: {}, 168933: {}, 178199: {}, 179016: {}, 194710: {}, 197676: {}, 216725: {}, 223815: {}, 229747: {}, 232908: {}, 233772: {}, 236948: {}, 260081: {}, 262300: {}, 263586: {}, 265103: {}, 277766: {}, 278479: {}, 299520: {}, 302915: {}, 310817: {}, 315762: {}, 317966: {}, 320768: {}, 327276: {}, 332259: {}, 347134: {}, 348428: {}, 354239: {}, 358070: {}, 365282: {}, 378677: {}, 386327: {}, 390697: {}, 394208: {}, 400014: {}, 401521: {}, 410061: {}, 410296: {}, 421791: {}, 423415: {}, 424233: {}, 427549: {}, 442586: {}, 445829: {}, 455136: {}, 456301: {}, 460942: {}, 464243: {}, 479470: {}, 489179: {}, 506300: {}, 521118: {}, 528877: {}, 536679: {}, 536929: {}, 541440: {}, 559259: {}, 572129: {}, 608506: {}, 615742: {}, 624153: {}, 631850: {}, 633546: {}, 637710: {}, 641040: {}, 665243: {}, 669051: {}, 671582: {}, 686940: {}, 690947: {}, 701032: {}, 702092: {}, 715946: {}, 725621: {}, 729823: {}, 735590: {}, 736767: {}, 742598: {}, 744612: {}, 750890: {}, 756090: {}, 770222: {}, 780676: {}, 780798: {}, 796987: {}, 797608: {}, 803542: {}, 806457: {}, 829013: {}, 846369: {}, 848881: {}, 854425: {}, 857279: {}, 860868: {}, 866109: {}, 866230: {}, 868331: {}, 868782: {}, 871077: {}, 871731: {}, 875412: {}, 880219: {}, 882106: {}, 885187: {}, 888093: {}, 891943: {}, 899525: {}, 899664: {}, 907656: {}, 914588: {}, 914900: {}, 915802: {}, 919145: {}, 922486: {}, 924227: {}, 941769: {}, 962944: {}, 966810: {}, 980708: {}, 984893: {}, 993741: {}, 996672: {}}, + {1780: {}, 2843: {}, 4835: {}, 17596: {}, 19010: {}, 19623: {}, 19958: {}, 21527: {}, 38993: {}, 39273: {}, 40365: {}, 42556: {}, 43362: {}, 45744: {}, 45766: {}, 48361: {}, 53828: {}, 60364: {}, 61498: {}, 63452: {}, 64915: {}, 70895: {}, 77403: 
{}, 95358: {}, 101692: {}, 103624: {}, 109953: {}, 110262: {}, 111147: {}, 115687: {}, 120044: {}, 123535: {}, 132719: {}, 133559: {}, 135903: {}, 136360: {}, 138776: {}, 146044: {}, 149399: {}, 159602: {}, 161101: {}, 165159: {}, 170830: {}, 174900: {}, 176376: {}, 179105: {}, 181498: {}, 183092: {}, 184205: {}, 185498: {}, 185811: {}, 187324: {}, 190302: {}, 195164: {}, 202392: {}, 203032: {}, 208671: {}, 213815: {}, 219547: {}, 220517: {}, 223121: {}, 223460: {}, 225898: {}, 230152: {}, 235755: {}, 241202: {}, 241960: {}, 242447: {}, 246068: {}, 264122: {}, 265303: {}, 266098: {}, 267278: {}, 274497: {}, 275825: {}, 276707: {}, 277253: {}, 293667: {}, 295064: {}, 297024: {}, 300927: {}, 301680: {}, 307223: {}, 307407: {}, 311114: {}, 318204: {}, 318741: {}, 323570: {}, 330779: {}, 344744: {}, 347845: {}, 353342: {}, 356969: {}, 360238: {}, 361896: {}, 373236: {}, 375434: {}, 385311: {}, 386461: {}, 392676: {}, 402777: {}, 403357: {}, 412964: {}, 414504: {}, 416033: {}, 417732: {}, 421237: {}, 431636: {}, 432118: {}, 433386: {}, 434015: {}, 437665: {}, 439507: {}, 443445: {}, 450655: {}, 455907: {}, 466576: {}, 467240: {}, 470073: {}, 470923: {}, 479849: {}, 485155: {}, 486776: {}, 491518: {}, 493966: {}, 495314: {}, 499893: {}, 502503: {}, 503824: {}, 506724: {}, 508274: {}, 508531: {}, 520378: {}, 524250: {}, 525824: {}, 526435: {}, 532319: {}, 535476: {}, 539188: {}, 540678: {}, 543139: {}, 545509: {}, 545846: {}, 554361: {}, 554523: {}, 554641: {}, 562503: {}, 569187: {}, 573851: {}, 582233: {}, 582588: {}, 599332: {}, 601598: {}, 608471: {}, 609359: {}, 623057: {}, 624986: {}, 627637: {}, 629617: {}, 631366: {}, 631691: {}, 632543: {}, 634443: {}, 639087: {}, 641398: {}, 641769: {}, 646057: {}, 650857: {}, 656797: {}, 657660: {}, 658303: {}, 658344: {}, 658441: {}, 660667: {}, 667611: {}, 670532: {}, 671110: {}, 671644: {}, 673798: {}, 674474: {}, 674955: {}, 675118: {}, 676480: {}, 676542: {}, 684142: {}, 690283: {}, 691819: {}, 695601: {}, 698044: {}, 
699044: {}, 710355: {}, 713458: {}, 714289: {}, 716624: {}, 718720: {}, 720317: {}, 722880: {}, 724961: {}, 725113: {}, 729926: {}, 732993: {}, 736815: {}, 744136: {}, 744241: {}, 747265: {}, 749244: {}, 759322: {}, 759779: {}, 762423: {}, 769903: {}, 776406: {}, 778348: {}, 780747: {}, 785710: {}, 787129: {}, 788359: {}, 790332: {}, 793377: {}, 800938: {}, 801740: {}, 806492: {}, 807438: {}, 817949: {}, 819499: {}, 822844: {}, 824212: {}, 825791: {}, 828474: {}, 828558: {}, 830552: {}, 831784: {}, 833279: {}, 834099: {}, 837145: {}, 839895: {}, 843791: {}, 850486: {}, 855151: {}, 857995: {}, 859656: {}, 863372: {}, 866958: {}, 869486: {}, 875582: {}, 878999: {}, 880004: {}, 883148: {}, 885479: {}, 886787: {}, 886824: {}, 890161: {}, 890765: {}, 893536: {}, 900480: {}, 906712: {}, 907562: {}, 910859: {}, 911296: {}, 912164: {}, 915797: {}, 918976: {}, 922786: {}, 923742: {}, 927903: {}, 928354: {}, 933639: {}, 937221: {}, 938433: {}, 945610: {}, 954107: {}, 957992: {}, 966251: {}, 970887: {}, 972156: {}, 975277: {}, 978065: {}, 980595: {}, 980979: {}, 981295: {}, 984535: {}, 986395: {}, 990909: {}, 994690: {}, 995036: {}, 995158: {}, 995410: {}}, + {334: {}, 32655: {}, 55046: {}, 62022: {}, 73613: {}, 77343: {}, 101124: {}, 107068: {}, 117499: {}, 117730: {}, 120673: {}, 124794: {}, 132620: {}, 146974: {}, 147517: {}, 150501: {}, 156095: {}, 172428: {}, 173315: {}, 175385: {}, 175574: {}, 190869: {}, 198353: {}, 206146: {}, 216949: {}, 217954: {}, 242237: {}, 245294: {}, 264560: {}, 278889: {}, 291762: {}, 292801: {}, 300075: {}, 300481: {}, 302243: {}, 318737: {}, 324658: {}, 324864: {}, 329315: {}, 329700: {}, 337062: {}, 341116: {}, 342497: {}, 350780: {}, 354770: {}, 368156: {}, 375739: {}, 379571: {}, 382754: {}, 384423: {}, 392739: {}, 394348: {}, 398285: {}, 411003: {}, 412595: {}, 425690: {}, 432712: {}, 445636: {}, 465052: {}, 465365: {}, 468093: {}, 469866: {}, 475584: {}, 476164: {}, 477804: {}, 487777: {}, 488760: {}, 490033: {}, 510553: {}, 513808: {}, 
520217: {}, 526976: {}, 529753: {}, 538410: {}, 546002: {}, 554632: {}, 564668: {}, 569539: {}, 581783: {}, 606930: {}, 607918: {}, 608347: {}, 611796: {}, 623435: {}, 642322: {}, 651086: {}, 651191: {}, 662426: {}, 663401: {}, 665029: {}, 678084: {}, 683432: {}, 683683: {}, 692495: {}, 694074: {}, 699463: {}, 707528: {}, 712394: {}, 717653: {}, 731394: {}, 734593: {}, 745234: {}, 751297: {}, 751837: {}, 755654: {}, 762967: {}, 768704: {}, 769565: {}, 773686: {}, 778473: {}, 804277: {}, 821133: {}, 832543: {}, 841831: {}, 851930: {}, 857658: {}, 867008: {}, 871904: {}, 872283: {}, 874563: {}, 886715: {}, 890451: {}, 897991: {}, 904759: {}, 907308: {}, 907359: {}, 910313: {}, 912964: {}, 915634: {}, 917989: {}, 932513: {}, 939019: {}, 940626: {}, 949595: {}, 961152: {}, 967112: {}, 992688: {}, 995334: {}, 997260: {}}, + {428: {}, 1889: {}, 6110: {}, 14464: {}, 30078: {}, 41633: {}, 43356: {}, 43435: {}, 47634: {}, 56164: {}, 67752: {}, 70826: {}, 84292: {}, 94432: {}, 99168: {}, 100153: {}, 107749: {}, 113178: {}, 113582: {}, 127241: {}, 134574: {}, 137433: {}, 139057: {}, 146335: {}, 152544: {}, 154286: {}, 168427: {}, 169087: {}, 170463: {}, 174853: {}, 183261: {}, 184904: {}, 188281: {}, 189127: {}, 190975: {}, 194390: {}, 201095: {}, 202816: {}, 205157: {}, 207939: {}, 211872: {}, 212590: {}, 223859: {}, 250815: {}, 256597: {}, 256750: {}, 269329: {}, 278881: {}, 280093: {}, 283752: {}, 292910: {}, 300938: {}, 306840: {}, 313670: {}, 314989: {}, 319535: {}, 321635: {}, 328144: {}, 340233: {}, 341936: {}, 344531: {}, 352863: {}, 372511: {}, 382752: {}, 395161: {}, 397062: {}, 399074: {}, 401217: {}, 403516: {}, 403845: {}, 406988: {}, 410969: {}, 412686: {}, 413753: {}, 428567: {}, 432535: {}, 443525: {}, 451282: {}, 467986: {}, 471367: {}, 473274: {}, 474429: {}, 478817: {}, 495301: {}, 503719: {}, 511865: {}, 512989: {}, 515783: {}, 528073: {}, 547924: {}, 547990: {}, 559159: {}, 567034: {}, 571787: {}, 577668: {}, 600386: {}, 601085: {}, 604438: {}, 606388: 
{}, 606810: {}, 619201: {}, 631228: {}, 644327: {}, 646549: {}, 649628: {}, 656910: {}, 658084: {}, 665166: {}, 666827: {}, 670414: {}, 672141: {}, 672434: {}, 672476: {}, 689902: {}, 693412: {}, 697202: {}, 701615: {}, 705986: {}, 711631: {}, 711975: {}, 727545: {}, 730273: {}, 734081: {}, 738142: {}, 740576: {}, 750787: {}, 752974: {}, 760598: {}, 761706: {}, 763263: {}, 775671: {}, 776423: {}, 779169: {}, 786672: {}, 795932: {}, 800366: {}, 804031: {}, 805465: {}, 812387: {}, 812916: {}, 822114: {}, 824099: {}, 825068: {}, 827134: {}, 828613: {}, 832090: {}, 847079: {}, 856342: {}, 857505: {}, 866123: {}, 866348: {}, 869166: {}, 880868: {}, 882259: {}, 884414: {}, 892521: {}, 905634: {}, 908987: {}, 909092: {}, 916607: {}, 922033: {}, 923039: {}, 926333: {}, 926473: {}, 940457: {}, 940780: {}, 943392: {}, 951710: {}, 957253: {}, 961014: {}, 961118: {}, 965514: {}, 991782: {}, 997101: {}}, + } + + for i := range additions { + for v, k := range additions[i] { + b.RoaringSetRangeAdd(k, v) + } + for v := range deletions[i] { + b.RoaringSetRangeRemove(0, v) + } + require.Nil(t, b.FlushAndSwitch()) + } + + control := make([]*sroar.Bitmap, maxKey) + for i := range control { + control[i] = sroar.NewBitmap() + } + for v, k := range map[uint64]uint64{103: 49, 104: 93, 147: 7, 250: 58, 519: 67, 556: 42, 582: 31, 598: 30, 626: 4, 653: 28, 891: 31, 1060: 88, 1065: 56, 1215: 97, 1260: 60, 1280: 83, 1304: 85, 1335: 43, 1346: 43, 1380: 93, 1528: 42, 1544: 65, 1713: 73, 1788: 48, 1875: 0, 1929: 87, 2033: 3, 2090: 78, 2092: 83, 2199: 23, 2257: 63, 2425: 58, 2457: 1, 2522: 5, 2553: 4, 2568: 92, 2596: 84, 2617: 81, 2826: 54, 2843: 7, 2902: 22, 3083: 77, 3121: 5, 3196: 2, 3202: 44, 3230: 15, 3246: 49, 3464: 66, 3492: 84, 3500: 50, 3631: 11, 3770: 61, 3773: 63, 3811: 65, 3817: 3, 3892: 48, 3908: 29, 3940: 9, 3945: 65, 4002: 79, 4022: 99, 4025: 96, 4026: 0, 4083: 54, 4229: 12, 4261: 13, 4282: 47, 4333: 95, 4463: 25, 4591: 18, 4632: 82, 4699: 88, 4731: 63, 4752: 76, 4765: 80, 4828: 59, 
4907: 6, 4992: 16, 5106: 28, 5249: 46, 5368: 50, 5464: 9, 5466: 59, 5484: 20, 5489: 37, 5500: 25, 5555: 68, 5692: 36, 5767: 44, 5771: 34, 5791: 32, 5792: 91, 5850: 28, 5879: 14, 5975: 90, 5997: 44, 6009: 92, 6015: 55, 6045: 81, 6108: 74, 6158: 42, 6177: 63, 6206: 18, 6258: 98, 6276: 92, 6303: 77, 6497: 23, 6516: 34, 6587: 51, 6768: 7, 6810: 63, 6834: 53, 7014: 84, 7107: 61, 7201: 4, 7277: 72, 7307: 40, 7337: 54, 7370: 2, 7573: 18, 7700: 23, 7704: 34, 7821: 39, 7838: 47, 7873: 8, 7881: 78, 7971: 7, 8310: 26, 8505: 18, 8590: 46, 8635: 50, 8673: 79, 8728: 67, 8770: 66, 8799: 17, 8822: 73, 8841: 96, 8860: 38, 8987: 80, 9008: 45, 9029: 82, 9039: 99, 9179: 44, 9181: 8, 9286: 61, 9337: 27, 9419: 54, 9423: 40, 9577: 28, 9607: 66, 9905: 6, 9930: 40, 10067: 18, 10074: 19, 10130: 37, 10160: 3, 10211: 83, 10289: 62, 10292: 63, 10338: 6, 10393: 37, 10441: 77, 10475: 58, 10574: 65, 10613: 24, 10651: 80, 10705: 78, 10744: 86, 11161: 20, 11187: 11, 11196: 33, 11210: 2, 11243: 58, 11276: 99, 11392: 50, 11542: 14, 11663: 40, 11676: 13, 11680: 54, 11699: 49, 11825: 58, 11857: 95, 11893: 44, 11927: 60, 11934: 29, 11978: 39, 12165: 48, 12273: 7, 12327: 90, 12332: 44, 12357: 66, 12358: 76, 12367: 12, 12513: 18, 12523: 68, 12559: 81, 12614: 6, 12713: 1, 12741: 44, 12779: 40, 12860: 33, 12967: 69, 13175: 57, 13193: 60, 13209: 25, 13301: 2, 13510: 97, 13598: 88, 13631: 65, 13669: 12, 13714: 80, 13726: 39, 13843: 45, 13921: 91, 14105: 31, 14133: 26, 14225: 45, 14251: 53, 14744: 67, 14764: 41, 14863: 14, 14890: 58, 15020: 82, 15249: 41, 15262: 89, 15303: 8, 15427: 78, 15534: 65, 15667: 97, 15697: 88, 15746: 55, 15808: 25, 15857: 21, 15910: 71, 16113: 51, 16143: 39, 16189: 16, 16246: 49, 16258: 30, 16318: 32, 16427: 80, 16436: 43, 16440: 62, 16503: 77, 16549: 3, 16675: 56, 16710: 62, 16754: 35, 16859: 8, 16925: 18, 17023: 76, 17092: 52, 17139: 45, 17141: 38, 17146: 62, 17182: 13, 17232: 55, 17375: 64, 17389: 59, 17542: 22, 17617: 23, 17716: 77, 17751: 64, 17918: 34, 17977: 29, 18118: 94, 
18155: 86, 18255: 29, 18344: 34, 18436: 40, 18441: 32, 18538: 17, 18685: 97, 18730: 90, 18735: 94, 18749: 44, 18789: 23, 18858: 50, 18884: 25, 18945: 3, 18958: 43, 19027: 22, 19032: 21, 19055: 36, 19095: 17, 19159: 73, 19226: 22, 19352: 70, 19373: 92, 19620: 83, 19650: 57, 19652: 43, 19739: 70, 19781: 24, 19805: 65, 19816: 1, 19823: 63, 19843: 64, 19941: 50, 19971: 66, 20147: 33, 20464: 25, 20465: 20, 20540: 94, 20557: 67, 20618: 81, 20641: 39, 20661: 4, 20665: 81, 20719: 97, 20769: 7, 20782: 50, 20803: 99, 20949: 41, 21028: 30, 21063: 45, 21072: 65, 21137: 26, 21255: 24, 21389: 58, 21423: 72, 21460: 68, 21477: 53, 21587: 77, 21606: 51, 21803: 35, 21817: 45, 21826: 82, 21865: 99, 21897: 80, 21921: 79, 21927: 60, 21990: 58, 22096: 68, 22107: 99, 22124: 39, 22143: 54, 22369: 49, 22412: 53, 22433: 67, 22531: 83, 22646: 14, 22649: 60, 22654: 31, 22665: 25, 22676: 95, 22677: 95, 22831: 63, 22837: 64, 22926: 63, 23008: 2, 23021: 86, 23041: 18, 23126: 75, 23281: 48, 23294: 51, 23336: 58, 23361: 87, 23393: 16, 23597: 48, 23678: 63, 23761: 85, 23778: 34, 23792: 91, 23972: 29, 24004: 31, 24046: 32, 24057: 61, 24069: 16, 24137: 86, 24171: 65, 24216: 27, 24337: 21, 24338: 26, 24342: 82, 24416: 65, 24427: 16, 24469: 17, 24474: 29, 24524: 18, 24614: 81, 24678: 69, 24696: 53, 24821: 45, 24908: 48, 24943: 10, 25044: 37, 25088: 25, 25093: 26, 25407: 35, 25567: 46, 25665: 37, 25694: 95, 25731: 92, 25824: 49, 25908: 66, 25955: 15, 26069: 73, 26078: 83, 26092: 52, 26099: 91, 26164: 54, 26216: 43, 26228: 73, 26243: 52, 26248: 12, 26294: 57, 26394: 40, 26464: 21, 26503: 89, 26615: 68, 27005: 57, 27011: 69, 27025: 73, 27032: 5, 27105: 77, 27122: 32, 27215: 39, 27233: 56, 27249: 52, 27275: 65, 27369: 59, 27627: 37, 27643: 29, 27645: 53, 27654: 54, 27840: 5, 27879: 73, 27941: 12, 27990: 66, 28000: 12, 28034: 63, 28059: 1, 28062: 99, 28215: 72, 28250: 64, 28422: 80, 28448: 7, 28535: 22, 28541: 64, 28576: 67, 28640: 17, 28840: 13, 28859: 48, 28883: 56, 28910: 3, 28915: 77, 28947: 55, 29048: 
40, 29084: 16, 29183: 94, 29209: 93, 29220: 82, 29235: 80, 29261: 9, 29317: 35, 29382: 37, 29536: 82, 29628: 77, 29641: 5, 29695: 81, 30022: 35, 30093: 55, 30170: 71, 30202: 12, 30335: 26, 30385: 99, 30444: 27, 30539: 63, 30556: 84, 30572: 96, 30667: 96, 30717: 53, 30755: 82, 30818: 38, 30892: 46, 30927: 32, 30929: 56, 31043: 94, 31064: 77, 31123: 44, 31192: 85, 31299: 57, 31340: 97, 31407: 76, 31420: 19, 31432: 23, 31537: 39, 31748: 98, 31832: 64, 31844: 11, 31860: 62, 31893: 39, 31958: 11, 32052: 42, 32183: 6, 32185: 1, 32204: 81, 32285: 55, 32302: 57, 32397: 38, 32405: 14, 32408: 21, 32420: 47, 32421: 81, 32442: 97, 32461: 89, 32484: 85, 32551: 50, 32846: 6, 32950: 26, 33023: 2, 33080: 28, 33163: 87, 33292: 75, 33294: 97, 33303: 70, 33318: 37, 33387: 62, 33419: 67, 33471: 13, 33477: 26, 33517: 30, 33612: 13, 33758: 93, 33789: 40, 33873: 47, 33896: 92, 34048: 64, 34224: 10, 34230: 97, 34246: 36, 34359: 87, 34572: 57, 34629: 88, 34649: 98, 34810: 34, 34831: 86, 34850: 12, 34871: 3, 34921: 32, 34952: 14, 34987: 80, 35017: 98, 35026: 36, 35083: 57, 35091: 58, 35142: 87, 35208: 26, 35330: 32, 35503: 29, 35538: 97, 35625: 29, 35661: 8, 35732: 44, 35835: 77, 35910: 23, 35922: 8, 35995: 13, 36088: 85, 36098: 82, 36122: 24, 36148: 68, 36155: 42, 36259: 41, 36310: 36, 36381: 39, 36534: 25, 36909: 67, 36932: 24, 37061: 89, 37092: 36, 37255: 33, 37294: 31, 37354: 76, 37360: 61, 37398: 24, 37424: 17, 37586: 57, 37643: 31, 37660: 40, 37815: 61, 37841: 58, 38009: 93, 38047: 31, 38216: 53, 38219: 25, 38349: 91, 38356: 40, 38445: 82, 38505: 7, 38565: 39, 38614: 50, 38735: 56, 38739: 58, 38829: 3, 38833: 44, 38848: 40, 38849: 21, 38877: 8, 38909: 27, 38992: 24, 39015: 98, 39153: 78, 39258: 75, 39312: 87, 39387: 73, 39479: 94, 39564: 42, 39569: 31, 39606: 94, 39657: 22, 39714: 45, 39828: 75, 39895: 18, 39913: 45, 39961: 81, 40073: 93, 40135: 23, 40247: 87, 40404: 44, 40579: 8, 40590: 63, 40714: 43, 40727: 64, 40737: 54, 40778: 58, 40928: 18, 40970: 47, 40988: 69, 41005: 68, 41094: 
45, 41132: 64, 41225: 60, 41308: 15, 41312: 39, 41433: 10, 41438: 53, 41477: 22, 41510: 94, 41520: 43, 41558: 5, 41592: 54, 41742: 44, 41750: 67, 41858: 78, 41893: 73, 41907: 12, 41961: 81, 42070: 28, 42230: 47, 42267: 88, 42272: 72, 42337: 76, 42375: 86, 42468: 18, 42488: 21, 42490: 39, 42525: 8, 42557: 6, 42581: 1, 42775: 84, 42879: 46, 42889: 74, 42952: 48, 43098: 85, 43115: 45, 43120: 25, 43146: 76, 43191: 25, 43222: 36, 43308: 92, 43313: 82, 43389: 15, 43433: 3, 43488: 90, 43510: 35, 43669: 21, 43747: 38, 43748: 32, 43760: 28, 43850: 63, 43886: 62, 43902: 3, 43925: 53, 44021: 74, 44035: 43, 44062: 66, 44073: 68, 44099: 80, 44205: 65, 44211: 38, 44218: 45, 44320: 17, 44348: 55, 44379: 64, 44410: 5, 44445: 94, 44460: 19, 44508: 68, 44511: 64, 44535: 12, 44549: 50, 44571: 93, 44632: 64, 44642: 59, 44643: 78, 44652: 62, 44704: 31, 44710: 92, 44714: 95, 44754: 98, 44778: 73, 44782: 66, 44828: 16, 44917: 6, 44952: 92, 45015: 83, 45066: 15, 45155: 22, 45216: 58, 45362: 54, 45427: 19, 45504: 52, 45639: 60, 45692: 92, 45728: 61, 45772: 4, 45777: 31, 45781: 31, 45822: 63, 45845: 94, 46066: 98, 46140: 34, 46151: 82, 46188: 79, 46234: 59, 46245: 3, 46292: 20, 46303: 0, 46326: 79, 46377: 47, 46465: 13, 46499: 58, 46510: 37, 46613: 83, 46737: 91, 46758: 71, 46800: 9, 46813: 25, 47012: 72, 47022: 18, 47043: 46, 47077: 81, 47080: 95, 47160: 69, 47280: 8, 47389: 9, 47412: 7, 47430: 44, 47431: 76, 47527: 95, 47633: 36, 47638: 81, 47770: 90, 47773: 36, 47827: 82, 47884: 19, 47907: 60, 47925: 71, 47933: 48, 47965: 62, 48005: 66, 48181: 47, 48226: 20, 48248: 21, 48286: 77, 48325: 37, 48446: 89, 48490: 32, 48527: 90, 48596: 43, 48752: 33, 48847: 79, 48899: 63, 48951: 48, 49005: 32, 49015: 95, 49098: 14, 49201: 24, 49329: 35, 49336: 32, 49355: 12, 49361: 79, 49419: 75, 49483: 26, 49758: 65, 49805: 7, 49818: 29, 49838: 90, 49865: 26, 49939: 24, 49942: 94, 50000: 89, 50033: 51, 50098: 40, 50124: 12, 50131: 91, 50202: 83, 50321: 21, 50501: 88, 50607: 82, 50618: 87, 50630: 32, 50659: 3, 
50680: 41, 50721: 17, 50806: 47, 50829: 67, 50866: 68, 50880: 24, 50943: 96, 50947: 29, 51031: 6, 51067: 44, 51143: 4, 51204: 13, 51227: 74, 51314: 4, 51340: 17, 51344: 48, 51351: 66, 51373: 25, 51406: 89, 51437: 91, 51487: 15, 51556: 59, 51578: 43, 51725: 6, 51807: 73, 51845: 65, 51853: 84, 51862: 35, 52040: 84, 52055: 35, 52078: 49, 52116: 46, 52143: 90, 52623: 26, 52673: 25, 52697: 10, 52712: 51, 52723: 32, 52733: 59, 52798: 64, 52828: 14, 52845: 57, 52879: 29, 52888: 69, 52940: 45, 52986: 52, 53108: 41, 53136: 32, 53156: 41, 53375: 50, 53476: 7, 53503: 37, 53513: 30, 53593: 7, 53627: 90, 53634: 7, 53645: 96, 53769: 77, 53816: 78, 53903: 59, 53967: 78, 54054: 29, 54093: 53, 54234: 67, 54255: 57, 54336: 40, 54419: 5, 54475: 58, 54587: 9, 54592: 33, 54670: 96, 54766: 21, 54849: 12, 54867: 78, 54997: 9, 55105: 54, 55139: 89, 55169: 12, 55206: 22, 55230: 91, 55284: 58, 55295: 40, 55331: 74, 55367: 52, 55669: 84, 55723: 3, 55731: 5, 55884: 70, 55934: 11, 56007: 52, 56076: 13, 56192: 52, 56247: 67, 56400: 39, 56454: 37, 56587: 33, 56602: 68, 56662: 8, 56787: 46, 56906: 71, 56965: 80, 57151: 94, 57241: 0, 57331: 11, 57332: 28, 57337: 69, 57351: 73, 57361: 5, 57402: 78, 57452: 24, 57466: 91, 57477: 48, 57492: 11, 57501: 26, 57570: 70, 57587: 84, 57607: 6, 57666: 40, 57841: 82, 57871: 66, 57926: 66, 57928: 35, 57939: 9, 57956: 80, 58160: 54, 58162: 89, 58179: 98, 58327: 17, 58337: 84, 58388: 13, 58406: 33, 58495: 81, 58643: 76, 58669: 56, 58779: 32, 58798: 29, 58823: 9, 58854: 92, 58871: 27, 58923: 37, 58945: 99, 58971: 52, 59184: 20, 59261: 92, 59288: 49, 59397: 99, 59448: 39, 59614: 42, 59624: 60, 59643: 71, 59647: 44, 59690: 30, 59713: 39, 59717: 49, 59728: 39, 59827: 87, 59919: 19, 60045: 3, 60117: 84, 60212: 38, 60386: 40, 60428: 63, 60431: 83, 60435: 86, 60441: 48, 60448: 2, 60450: 92, 60532: 92, 60577: 23, 60743: 91, 60836: 79, 60843: 11, 60870: 10, 60876: 18, 60897: 89, 60908: 5, 60926: 57, 61016: 58, 61063: 48, 61140: 92, 61230: 4, 61358: 22, 61370: 31, 61393: 
38, 61464: 58, 61621: 19, 61752: 63, 61771: 82, 61789: 70, 62036: 32, 62140: 63, 62220: 70, 62266: 16, 62352: 45, 62360: 71, 62365: 18, 62430: 78, 62478: 52, 62540: 53, 62644: 54, 62666: 29, 62684: 64, 62778: 60, 62872: 89, 63166: 29, 63183: 71, 63196: 69, 63264: 99, 63297: 35, 63382: 10, 63481: 5, 63543: 87, 63548: 29, 63625: 99, 63668: 19, 63762: 59, 63764: 51, 63794: 97, 63860: 52, 63878: 54, 63948: 46, 63986: 59, 63990: 4, 64023: 7, 64309: 20, 64449: 64, 64500: 39, 64678: 41, 64693: 40, 64755: 86, 64908: 25, 64944: 45, 65046: 89, 65112: 86, 65132: 22, 65165: 0, 65172: 30, 65199: 18, 65267: 14, 65279: 88, 65327: 61, 65404: 25, 65417: 96, 65470: 63, 65684: 11, 65695: 48, 65807: 70, 65910: 43, 65935: 26, 65936: 73, 65982: 76, 66070: 1, 66198: 11, 66314: 3, 66349: 64, 66391: 19, 66426: 53, 66546: 4, 66742: 96, 66936: 83, 66975: 69, 66986: 78, 67052: 19, 67099: 86, 67152: 86, 67160: 85, 67178: 80, 67347: 38, 67396: 69, 67446: 13, 67542: 53, 67559: 9, 67560: 7, 67586: 18, 67596: 49, 67599: 85, 67617: 76, 67798: 52, 67804: 60, 67916: 36, 67977: 78, 68017: 58, 68192: 26, 68260: 29, 68359: 26, 68435: 43, 68533: 86, 68609: 11, 68719: 93, 68753: 28, 68841: 46, 68910: 94, 69056: 17, 69076: 92, 69172: 27, 69268: 97, 69322: 15, 69366: 18, 69376: 80, 69486: 5, 69572: 19, 69640: 84, 69645: 59, 69670: 76, 69770: 64, 69847: 66, 69888: 64, 69983: 73, 70043: 47, 70064: 11, 70124: 17, 70175: 83, 70338: 11, 70354: 53, 70384: 90, 70401: 69, 70449: 78, 70535: 48, 70547: 57, 70579: 73, 70654: 96, 70850: 62, 70855: 61, 70977: 64, 71090: 53, 71096: 52, 71166: 42, 71246: 46, 71356: 98, 71446: 28, 71489: 15, 71571: 84, 71633: 22, 71781: 29, 71863: 95, 71883: 42, 71895: 71, 71915: 24, 71983: 22, 71995: 14, 72041: 0, 72071: 51, 72118: 24, 72235: 30, 72267: 56, 72285: 87, 72523: 13, 72563: 1, 72785: 78, 72827: 16, 72862: 14, 72869: 87, 72877: 2, 73019: 26, 73058: 91, 73060: 31, 73061: 54, 73108: 40, 73283: 93, 73312: 87, 73375: 13, 73426: 8, 73437: 17, 73443: 14, 73468: 63, 73583: 62, 73617: 
59, 73638: 7, 73705: 55, 73719: 53, 73765: 84, 73802: 56, 73869: 73, 73870: 98, 73929: 87, 73996: 16, 74009: 70, 74039: 58, 74048: 21, 74091: 54, 74100: 75, 74134: 5, 74203: 89, 74270: 50, 74550: 8, 74616: 27, 74839: 93, 74872: 59, 74900: 53, 74926: 16, 75007: 15, 75073: 25, 75077: 6, 75337: 2, 75351: 11, 75397: 14, 75413: 44, 75442: 76, 75539: 37, 75645: 15, 75771: 45, 75818: 45, 75987: 28, 76035: 64, 76038: 72, 76087: 6, 76108: 88, 76111: 86, 76120: 92, 76251: 42, 76279: 77, 76320: 8, 76412: 18, 76443: 99, 76551: 24, 76572: 79, 76592: 8, 76607: 31, 76623: 69, 76641: 4, 76661: 25, 76679: 1, 76712: 20, 76830: 17, 76840: 50, 76935: 34, 77114: 9, 77158: 12, 77177: 35, 77481: 62, 77616: 60, 77650: 4, 77682: 13, 77794: 27, 77830: 31, 77912: 54, 77975: 38, 77977: 40, 78053: 80, 78076: 85, 78107: 0, 78120: 14, 78135: 85, 78161: 70, 78246: 60, 78328: 58, 78382: 57, 78405: 94, 78423: 64, 78424: 47, 78468: 22, 78597: 36, 78656: 54, 78670: 63, 78792: 94, 78941: 45, 78998: 65, 79017: 7, 79052: 96, 79075: 87, 79147: 13, 79289: 20, 79312: 93, 79540: 82, 79632: 11, 79655: 33, 79750: 80, 79780: 99, 79852: 37, 79871: 46, 79919: 16, 79943: 38, 79954: 58, 79982: 64, 80134: 53, 80172: 2, 80196: 55, 80203: 1, 80254: 67, 80346: 16, 80372: 29, 80571: 63, 80654: 33, 80667: 89, 80669: 35, 80684: 61, 80711: 46, 80850: 69, 80867: 72, 80916: 98, 80951: 83, 81045: 91, 81086: 77, 81148: 41, 81233: 61, 81270: 6, 81307: 88, 81308: 58, 81340: 40, 81444: 38, 81488: 87, 81534: 40, 81610: 28, 81732: 24, 81758: 72, 81779: 12, 81825: 84, 81929: 6, 81972: 14, 82403: 81, 82450: 25, 82466: 0, 82534: 78, 82604: 0, 82657: 83, 82734: 86, 82753: 96, 82766: 68, 82844: 82, 82854: 27, 82904: 19, 82917: 33, 83003: 28, 83168: 76, 83395: 82, 83400: 91, 83532: 55, 83556: 81, 83682: 90, 83696: 63, 83744: 4, 83994: 49, 84166: 8, 84205: 39, 84223: 49, 84267: 87, 84395: 89, 84443: 90, 84529: 45, 84602: 7, 84646: 8, 84717: 42, 84770: 77, 84773: 6, 84775: 71, 84822: 62, 84844: 60, 84878: 15, 85008: 47, 85135: 35, 85206: 
25, 85213: 3, 85266: 48, 85464: 32, 85489: 83, 85501: 10, 85520: 90, 85557: 25, 85592: 75, 85624: 73, 85662: 65, 85691: 89, 85723: 2, 85803: 86, 85831: 61, 85918: 52, 85954: 92, 86061: 74, 86107: 62, 86128: 86, 86211: 83, 86218: 80, 86277: 10, 86292: 72, 86296: 15, 86323: 56, 86324: 79, 86361: 47, 86396: 57, 86532: 82, 86578: 88, 86582: 79, 86663: 8, 86691: 8, 86718: 13, 86789: 59, 86916: 11, 86947: 21, 86979: 44, 86983: 42, 87090: 88, 87229: 0, 87289: 9, 87330: 24, 87434: 52, 87523: 89, 87551: 48, 87554: 52, 87682: 83, 87764: 39, 87906: 52, 87931: 53, 88000: 35, 88128: 32, 88130: 22, 88152: 22, 88153: 92, 88172: 12, 88181: 87, 88254: 58, 88293: 13, 88457: 91, 88560: 68, 88702: 74, 88730: 58, 89029: 31, 89060: 29, 89178: 53, 89344: 86, 89394: 99, 89401: 29, 89404: 34, 89618: 23, 89737: 26, 89812: 70, 89937: 45, 89986: 84, 90040: 25, 90060: 74, 90123: 11, 90181: 81, 90189: 78, 90195: 45, 90216: 38, 90228: 74, 90419: 58, 90492: 34, 90540: 61, 90566: 5, 90746: 15, 90794: 64, 90815: 66, 90868: 27, 90884: 18, 90976: 34, 91294: 14, 91304: 9, 91394: 60, 91459: 17, 91482: 14, 91494: 61, 91583: 40, 91755: 57, 91768: 74, 91824: 7, 91828: 63, 91853: 21, 91921: 18, 91939: 13, 91941: 66, 92032: 78, 92074: 70, 92086: 85, 92131: 48, 92340: 91, 92416: 11, 92467: 41, 92519: 90, 92677: 8, 92753: 29, 92773: 2, 92783: 57, 92814: 87, 92833: 34, 92904: 78, 92920: 99, 93121: 3, 93167: 65, 93281: 76, 93296: 85, 93300: 38, 93301: 61, 93356: 59, 93393: 39, 93740: 8, 93801: 69, 93826: 29, 93993: 8, 94193: 64, 94228: 81, 94325: 57, 94431: 9, 94481: 54, 94528: 90, 94620: 58, 94628: 49, 94776: 11, 94802: 51, 94889: 93, 94951: 36, 94969: 46, 95017: 99, 95138: 48, 95169: 42, 95256: 20, 95314: 60, 95442: 85, 95524: 70, 95555: 57, 95577: 99, 95669: 48, 95712: 77, 95998: 3, 96128: 13, 96196: 2, 96274: 20, 96305: 46, 96371: 47, 96515: 99, 96580: 84, 96763: 50, 96813: 69, 96827: 33, 96890: 24, 96947: 90, 96964: 74, 97002: 41, 97010: 22, 97022: 8, 97025: 17, 97044: 68, 97085: 57, 97094: 84, 97119: 23, 
97141: 66, 97168: 89, 97242: 79, 97278: 16, 97302: 88, 97473: 85, 97529: 18, 97600: 56, 97641: 37, 97684: 46, 97727: 53, 97734: 91, 97757: 18, 97763: 75, 97776: 88, 97831: 19, 97835: 18, 97883: 17, 97918: 2, 97924: 66, 97925: 5, 98012: 35, 98109: 60, 98126: 4, 98156: 38, 98172: 69, 98202: 57, 98281: 15, 98368: 66, 98375: 89, 98398: 9, 98401: 66, 98469: 28, 98585: 12, 98660: 44, 98664: 48, 98684: 50, 98693: 68, 98778: 81, 98787: 43, 98805: 93, 98966: 64, 99070: 52, 99110: 90, 99166: 53, 99216: 15, 99226: 79, 99251: 83, 99350: 36, 99397: 23, 99411: 55, 99438: 5, 99439: 52, 99508: 87, 99563: 2, 99605: 23, 99622: 63, 99642: 28, 99667: 30, 99722: 32, 99810: 9, 99935: 46, 99942: 41, 100031: 93, 100051: 10, 100058: 2, 100112: 71, 100346: 73, 100424: 13, 100433: 8, 100438: 53, 100506: 27, 100631: 42, 100760: 71, 100852: 92, 100996: 89, 101078: 15, 101090: 69, 101277: 44, 101308: 95, 101372: 27, 101397: 96, 101582: 48, 101854: 20, 101886: 5, 101999: 0, 102067: 60, 102154: 70, 102323: 38, 102358: 80, 102447: 57, 102457: 41, 102458: 6, 102548: 63, 102556: 88, 102567: 40, 102572: 12, 102688: 57, 102706: 58, 102762: 41, 102766: 57, 102862: 55, 102873: 48, 102882: 63, 102908: 85, 102920: 22, 102995: 91, 103050: 74, 103056: 23, 103076: 72, 103103: 67, 103320: 66, 103377: 67, 103382: 54, 103603: 57, 103692: 65, 103735: 38, 103769: 46, 103791: 32, 103911: 79, 103924: 41, 103999: 19, 104083: 74, 104234: 42, 104331: 9, 104568: 66, 104775: 16, 104840: 30, 105041: 52, 105122: 71, 105193: 90, 105241: 86, 105242: 68, 105506: 92, 105528: 10, 105717: 27, 105795: 20, 105925: 90, 105939: 99, 105952: 75, 106045: 22, 106273: 7, 106330: 50, 106558: 38, 106583: 10, 106658: 22, 106691: 58, 106731: 39, 106743: 54, 106996: 27, 107041: 70, 107075: 46, 107103: 66, 107122: 7, 107130: 13, 107152: 2, 107330: 40, 107402: 37, 107409: 39, 107431: 49, 107440: 97, 107772: 65, 107863: 61, 107912: 59, 107978: 82, 108022: 65, 108035: 84, 108097: 15, 108150: 73, 108193: 78, 108204: 62, 108287: 40, 108334: 37, 
108338: 65, 108388: 80, 108439: 42, 108449: 99, 108590: 15, 108635: 49, 108664: 59, 108757: 84, 108843: 3, 108966: 5, 109045: 20, 109066: 71, 109098: 73, 109166: 43, 109185: 91, 109199: 81, 109209: 26, 109272: 80, 109458: 73, 109546: 79, 109552: 13, 109567: 43, 109654: 63, 109777: 60, 109813: 11, 109858: 51, 109919: 94, 109983: 75, 109996: 37, 110000: 86, 110009: 9, 110037: 73, 110071: 7, 110149: 99, 110322: 30, 110373: 4, 110374: 16, 110404: 96, 110626: 32, 110713: 23, 110726: 21, 110744: 58, 110921: 48, 110972: 54, 111124: 10, 111143: 18, 111177: 79, 111249: 12, 111299: 17, 111307: 77, 111365: 64, 111384: 45, 111412: 26, 111426: 86, 111436: 52, 111458: 5, 111572: 75, 111615: 69, 111724: 66, 111733: 90, 111972: 60, 112046: 32, 112107: 56, 112271: 27, 112289: 91, 112341: 88, 112366: 71, 112393: 45, 112412: 42, 112474: 50, 112475: 2, 112580: 67, 112836: 74, 112876: 22, 112912: 23, 112979: 70, 113014: 65, 113047: 76, 113090: 42, 113155: 85, 113227: 48, 113272: 51, 113543: 43, 113583: 15, 113689: 18, 113783: 9, 113890: 5, 113926: 31, 113989: 62, 114080: 14, 114111: 72, 114144: 49, 114151: 63, 114223: 58, 114256: 30, 114337: 93, 114372: 1, 114506: 79, 114519: 72, 114594: 34, 114598: 2, 114630: 1, 114648: 1, 114684: 64, 114726: 96, 114913: 72, 115064: 94, 115080: 61, 115120: 68, 115164: 36, 115167: 62, 115283: 15, 115349: 31, 115370: 28, 115373: 78, 115409: 5, 115509: 61, 115562: 82, 115645: 31, 115653: 74, 115707: 2, 116004: 92, 116062: 41, 116196: 1, 116332: 56, 116513: 44, 116592: 4, 116612: 83, 116694: 81, 116731: 0, 116776: 56, 116808: 13, 116842: 17, 116859: 98, 116862: 50, 116934: 20, 117022: 58, 117080: 43, 117110: 62, 117154: 89, 117197: 27, 117224: 14, 117271: 14, 117347: 4, 117386: 66, 117398: 41, 117441: 66, 117448: 40, 117489: 31, 117522: 67, 117661: 5, 117685: 9, 117750: 10, 117775: 38, 117828: 34, 117886: 5, 117997: 31, 118122: 28, 118125: 73, 118174: 22, 118204: 32, 118269: 75, 118279: 14, 118350: 29, 118436: 18, 118465: 85, 118792: 96, 118800: 79, 
118844: 24, 118847: 75, 119010: 65, 119027: 51, 119180: 34, 119378: 26, 119469: 51, 119512: 90, 119597: 51, 119678: 27, 119682: 52, 119733: 67, 119814: 52, 119815: 57, 119836: 65, 119873: 85, 119924: 59, 119938: 85, 119968: 20, 120060: 10, 120127: 19, 120194: 23, 120216: 85, 120284: 56, 120387: 96, 120467: 61, 120546: 67, 120568: 17, 120614: 19, 120633: 42, 120795: 3, 120843: 6, 120846: 0, 120852: 11, 120901: 80, 120913: 59, 120946: 56, 120958: 85, 120977: 4, 121005: 83, 121069: 41, 121077: 5, 121176: 57, 121213: 48, 121278: 30, 121302: 24, 121316: 37, 121324: 20, 121363: 25, 121432: 16, 121494: 17, 121496: 53, 121542: 81, 121603: 7, 121610: 99, 121616: 99, 121637: 24, 121670: 65, 121692: 31, 121753: 16, 121789: 90, 121806: 9, 121811: 84, 121952: 7, 122125: 41, 122134: 88, 122212: 47, 122238: 25, 122259: 62, 122346: 46, 122549: 32, 122577: 58, 122676: 65, 122769: 79, 122808: 88, 122903: 28, 122925: 17, 122926: 59, 122935: 23, 122959: 91, 123148: 80, 123172: 89, 123184: 93, 123319: 35, 123322: 22, 123459: 2, 123492: 32, 123578: 13, 123779: 21, 123819: 65, 123849: 2, 124028: 4, 124075: 92, 124134: 64, 124225: 69, 124351: 93, 124357: 65, 124406: 24, 124413: 60, 124555: 60, 124609: 9, 124674: 84, 124709: 46, 124723: 64, 124824: 53, 124929: 96, 124945: 39, 124964: 85, 125272: 72, 125289: 32, 125306: 96, 125422: 51, 125438: 50, 125439: 30, 125459: 97, 125637: 48, 125770: 78, 125773: 1, 125860: 14, 125877: 66, 125925: 70, 125933: 15, 125995: 46, 126022: 29, 126030: 74, 126087: 60, 126151: 19, 126161: 23, 126194: 70, 126355: 46, 126372: 38, 126383: 7, 126452: 34, 126524: 66, 126531: 8, 126626: 75, 126662: 56, 126730: 42, 126743: 96, 126806: 1, 126998: 46, 127003: 12, 127203: 84, 127533: 35, 127672: 10, 127721: 84, 127814: 72, 127860: 80, 127878: 82, 127967: 39, 128112: 84, 128215: 15, 128245: 35, 128399: 2, 128434: 4, 128509: 73, 128529: 69, 128846: 1, 128936: 80, 128969: 47, 128984: 48, 129277: 66, 129340: 58, 129413: 12, 129625: 65, 129760: 91, 129800: 43, 129813: 28, 
129897: 18, 129980: 58, 130002: 20, 130012: 5, 130058: 44, 130237: 0, 130400: 45, 130438: 95, 130477: 18, 130522: 84, 130594: 68, 130694: 1, 130723: 33, 130734: 6, 130745: 2, 130765: 19, 130964: 51, 131020: 73, 131046: 18, 131232: 83, 131239: 73, 131241: 72, 131298: 9, 131505: 48, 131514: 22, 131516: 18, 131564: 55, 131588: 46, 131589: 84, 131634: 35, 131788: 80, 131975: 86, 131986: 41, 131998: 33, 132057: 4, 132112: 75, 132114: 52, 132123: 1, 132183: 51, 132232: 89, 132257: 70, 132384: 24, 132403: 28, 132530: 89, 132562: 38, 132567: 68, 132668: 30, 132679: 94, 132680: 4, 132817: 65, 132843: 55, 132865: 90, 132893: 82, 133085: 23, 133101: 66, 133124: 10, 133184: 85, 133200: 89, 133243: 15, 133247: 70, 133285: 85, 133323: 43, 133364: 10, 133380: 93, 133395: 75, 133437: 2, 133554: 85, 133645: 37, 133759: 30, 133793: 46, 133803: 62, 133813: 30, 133830: 72, 133868: 80, 133924: 60, 133957: 32, 134027: 18, 134095: 52, 134100: 92, 134265: 37, 134304: 54, 134313: 34, 134512: 62, 134597: 51, 134651: 40, 134684: 74, 134728: 99, 134735: 67, 134753: 42, 135053: 84, 135055: 67, 135084: 66, 135160: 56, 135242: 65, 135290: 24, 135291: 43, 135356: 55, 135509: 30, 135522: 87, 135536: 29, 135671: 41, 135727: 88, 135822: 9, 135920: 96, 135984: 41, 136027: 56, 136111: 3, 136137: 92, 136144: 77, 136187: 0, 136445: 49, 136473: 69, 136496: 52, 136647: 52, 136660: 12, 136762: 77, 136789: 20, 136805: 96, 136979: 0, 137013: 89, 137036: 17, 137048: 74, 137141: 97, 137145: 4, 137195: 5, 137208: 38, 137229: 39, 137241: 65, 137340: 86, 137511: 55, 137566: 57, 137639: 13, 137806: 93, 137856: 51, 137917: 1, 137959: 53, 138009: 4, 138055: 8, 138191: 85, 138214: 43, 138286: 35, 138360: 11, 138452: 93, 138457: 43, 138480: 77, 138574: 19, 138680: 66, 138714: 55, 138784: 57, 138794: 86, 138885: 30, 138896: 3, 138987: 42, 139415: 73, 139432: 57, 139464: 39, 139522: 8, 139589: 73, 139637: 54, 139737: 68, 139756: 36, 139786: 86, 139799: 23, 139825: 42, 139868: 94, 139901: 52, 139949: 15, 140035: 49, 
140118: 13, 140546: 0, 140589: 64, 140689: 25, 140703: 72, 140707: 26, 140929: 78, 141056: 47, 141057: 83, 141103: 1, 141196: 6, 141218: 61, 141330: 7, 141348: 21, 141425: 92, 141433: 55, 141451: 12, 141590: 73, 141703: 89, 141709: 31, 141737: 1, 141754: 92, 141771: 72, 141856: 6, 141896: 0, 141946: 95, 142100: 28, 142110: 9, 142152: 45, 142228: 58, 142316: 16, 142319: 74, 142332: 0, 142343: 26, 142411: 31, 142433: 71, 142454: 17, 142494: 7, 142632: 51, 142669: 22, 142682: 95, 142750: 14, 142787: 52, 142934: 80, 142969: 98, 143121: 71, 143202: 20, 143362: 11, 143428: 20, 143514: 32, 143582: 15, 143612: 2, 143614: 80, 143681: 21, 143782: 82, 143802: 81, 143901: 40, 143934: 46, 144018: 58, 144022: 74, 144056: 17, 144081: 57, 144101: 41, 144111: 15, 144124: 47, 144170: 32, 144203: 76, 144353: 74, 144362: 86, 144393: 17, 144774: 95, 144813: 12, 144866: 33, 144886: 53, 144896: 22, 144988: 33, 145000: 90, 145080: 30, 145218: 52, 145230: 30, 145235: 79, 145241: 91, 145416: 64, 145419: 22, 145495: 72, 145536: 10, 145542: 86, 145620: 45, 145624: 43, 145639: 59, 145705: 35, 145783: 22, 145790: 85, 145844: 88, 145847: 75, 145920: 86, 145930: 35, 145970: 46, 145982: 0, 145999: 57, 146185: 28, 146215: 26, 146361: 79, 146681: 16, 146854: 11, 146898: 59, 147062: 46, 147081: 0, 147100: 0, 147151: 62, 147157: 28, 147312: 9, 147327: 86, 147496: 91, 147504: 30, 147526: 44, 147563: 61, 147573: 56, 147670: 80, 147726: 19, 147765: 96, 147895: 1, 147922: 12, 147987: 21, 148036: 46, 148113: 84, 148114: 14, 148285: 74, 148461: 32, 148535: 69, 148542: 85, 148553: 51, 148605: 8, 148659: 32, 148759: 37, 148762: 96, 148801: 76, 148831: 66, 148837: 59, 148909: 58, 148922: 86, 148967: 94, 149051: 24, 149184: 40, 149198: 34, 149204: 81, 149240: 18, 149329: 32, 149448: 47, 149471: 62, 149502: 4, 149643: 96, 149693: 85, 149789: 39, 149794: 15, 149819: 52, 149892: 6, 149972: 78, 150008: 68, 150022: 64, 150061: 75, 150083: 28, 150147: 2, 150173: 33, 150289: 41, 150596: 95, 150666: 65, 150674: 27, 
150797: 36, 150896: 95, 150902: 9, 150994: 68, 151005: 12, 151053: 40, 151095: 75, 151180: 65, 151292: 60, 151324: 2, 151342: 49, 151344: 29, 151437: 71, 151535: 82, 151627: 0, 151659: 88, 151752: 14, 151865: 49, 151896: 14, 151968: 29, 152096: 28, 152111: 14, 152123: 12, 152150: 16, 152238: 58, 152348: 7, 152366: 39, 152613: 41, 152618: 22, 152674: 92, 152700: 58, 152834: 36, 152878: 30, 152935: 80, 153016: 92, 153043: 38, 153209: 23, 153250: 69, 153275: 44, 153464: 53, 153467: 98, 153551: 69, 153576: 36, 153579: 32, 153586: 78, 153587: 97, 153651: 11, 153768: 87, 153795: 96, 153901: 33, 153959: 12, 153973: 37, 154068: 50, 154170: 22, 154264: 23, 154340: 83, 154456: 53, 154508: 68, 154563: 77, 154631: 14, 154657: 73, 154843: 34, 154941: 33, 154954: 64, 154963: 61, 154996: 55, 155007: 7, 155105: 63, 155150: 9, 155238: 66, 155306: 13, 155335: 20, 155343: 51, 155344: 70, 155399: 85, 155431: 59, 155581: 83, 155799: 25, 155920: 84, 155939: 20, 155950: 84, 155962: 65, 156056: 6, 156172: 61, 156272: 13, 156362: 92, 156379: 78, 156522: 35, 156572: 26, 156601: 81, 156605: 67, 156667: 79, 156669: 41, 156735: 16, 156751: 23, 156829: 46, 157064: 36, 157097: 21, 157165: 2, 157193: 68, 157254: 38, 157316: 61, 157317: 45, 157377: 89, 157396: 30, 157410: 47, 157418: 47, 157621: 7, 157639: 66, 157651: 24, 157760: 77, 157798: 84, 157804: 31, 157825: 0, 157984: 71, 158066: 37, 158144: 59, 158160: 55, 158222: 99, 158380: 48, 158434: 16, 158576: 46, 158614: 82, 158739: 75, 158741: 28, 158922: 69, 158963: 1, 159003: 38, 159047: 91, 159068: 74, 159114: 48, 159123: 6, 159154: 31, 159220: 96, 159372: 71, 159374: 71, 159431: 97, 159571: 50, 159597: 72, 159702: 85, 159735: 66, 159738: 91, 159903: 49, 159943: 39, 160124: 64, 160136: 46, 160158: 35, 160247: 10, 160257: 4, 160326: 73, 160410: 35, 160538: 34, 160573: 42, 160598: 82, 160628: 55, 160644: 22, 160728: 51, 160761: 14, 160831: 63, 160894: 63, 160941: 1, 160966: 96, 160967: 74, 161012: 66, 161048: 66, 161061: 86, 161065: 18, 161090: 
65, 161321: 48, 161594: 91, 161938: 20, 161949: 88, 161976: 92, 161981: 20, 162031: 33, 162087: 32, 162105: 0, 162331: 88, 162502: 85, 162653: 0, 162746: 81, 162785: 97, 162881: 70, 162887: 38, 163006: 34, 163035: 90, 163061: 32, 163083: 8, 163404: 18, 163482: 85, 163562: 16, 163613: 79, 163625: 69, 163678: 59, 163679: 92, 163686: 31, 163695: 97, 163752: 59, 164011: 82, 164175: 92, 164205: 64, 164224: 96, 164235: 68, 164276: 20, 164279: 33, 164414: 24, 164433: 52, 164505: 31, 164507: 35, 164551: 8, 164577: 91, 164605: 71, 164639: 37, 164685: 7, 164764: 61, 164777: 51, 164778: 55, 164865: 33, 164899: 47, 165002: 83, 165114: 97, 165206: 13, 165361: 25, 165372: 47, 165496: 52, 165509: 41, 165610: 72, 165618: 45, 165688: 71, 165892: 27, 165967: 65, 165969: 39, 166001: 27, 166053: 84, 166070: 82, 166166: 50, 166174: 35, 166207: 38, 166218: 49, 166246: 35, 166247: 11, 166254: 57, 166284: 2, 166427: 73, 166442: 76, 166483: 21, 166566: 81, 166823: 23, 166829: 11, 166877: 72, 166894: 59, 166913: 74, 167170: 12, 167219: 7, 167310: 76, 167366: 58, 167370: 77, 167449: 65, 167478: 74, 167487: 79, 167550: 79, 167555: 14, 167768: 44, 167823: 42, 167920: 77, 167973: 82, 168107: 22, 168270: 58, 168352: 83, 168465: 44, 168530: 69, 168550: 11, 168589: 39, 168626: 65, 168631: 3, 168636: 23, 168721: 19, 168872: 33, 168903: 5, 168944: 48, 168945: 72, 169047: 52, 169144: 1, 169194: 97, 169202: 18, 169315: 73, 169362: 26, 169548: 33, 169617: 45, 169648: 52, 169686: 5, 169754: 78, 169756: 87, 169814: 68, 169820: 42, 169905: 59, 170018: 26, 170045: 40, 170113: 1, 170269: 19, 170359: 60, 170410: 24, 170412: 26, 170511: 52, 170519: 51, 170715: 83, 170761: 69, 170819: 36, 170850: 37, 170974: 14, 170977: 74, 170989: 77, 171007: 13, 171132: 74, 171289: 36, 171316: 24, 171358: 27, 171409: 86, 171465: 43, 171601: 17, 171619: 12, 171661: 83, 171701: 76, 171703: 4, 171816: 83, 172046: 28, 172186: 89, 172211: 3, 172227: 63, 172290: 52, 172412: 7, 172444: 59, 172447: 49, 172454: 13, 172472: 51, 
172496: 65, 172584: 35, 172594: 72, 172627: 27, 172675: 86, 172695: 78, 172716: 79, 172719: 59, 172727: 0, 172872: 76, 172936: 26, 173001: 38, 173044: 95, 173057: 59, 173079: 81, 173141: 78, 173354: 46, 173458: 55, 173550: 80, 173721: 39, 173760: 40, 173796: 5, 173803: 95, 173961: 13, 173970: 6, 173983: 71, 173991: 47, 174002: 34, 174003: 12, 174031: 52, 174045: 60, 174061: 12, 174229: 10, 174231: 19, 174289: 81, 174291: 68, 174412: 6, 174463: 52, 174599: 9, 174727: 95, 174830: 70, 174960: 15, 175064: 56, 175072: 33, 175151: 56, 175234: 28, 175243: 30, 175252: 13, 175303: 40, 175384: 70, 175531: 89, 175775: 61, 175821: 44, 175825: 31, 175852: 66, 175938: 32, 176039: 96, 176149: 25, 176274: 55, 176286: 81, 176404: 83, 176413: 36, 176420: 76, 176461: 28, 176517: 36, 176543: 44, 176549: 13, 176558: 50, 176614: 79, 176629: 28, 176757: 14, 176780: 52, 176818: 15, 176888: 73, 176894: 31, 176899: 73, 176974: 11, 177008: 52, 177043: 65, 177262: 95, 177342: 26, 177352: 41, 177358: 9, 177359: 75, 177388: 37, 177496: 57, 177546: 88, 177578: 23, 177617: 64, 177682: 76, 177727: 17, 177797: 20, 177906: 8, 177909: 34, 178044: 35, 178130: 37, 178173: 29, 178234: 68, 178237: 34, 178378: 91, 178432: 72, 178460: 77, 178481: 43, 178539: 73, 178705: 88, 178783: 52, 178933: 23, 178940: 16, 178941: 49, 178955: 56, 179004: 34, 179019: 98, 179026: 4, 179161: 44, 179190: 17, 179209: 84, 179238: 40, 179298: 70, 179300: 50, 179311: 55, 179391: 74, 179498: 47, 179572: 47, 179579: 18, 179591: 32, 179595: 96, 179703: 32, 179737: 74, 179873: 22, 179885: 19, 179913: 72, 179933: 52, 179957: 82, 179982: 52, 180042: 20, 180090: 6, 180092: 91, 180236: 0, 180301: 65, 180354: 43, 180357: 22, 180360: 2, 180380: 73, 180457: 27, 180466: 95, 180739: 35, 180780: 20, 180820: 48, 180827: 37, 180855: 37, 180859: 99, 180894: 27, 180896: 6, 180973: 33, 181144: 37, 181195: 80, 181298: 3, 181302: 66, 181316: 41, 181405: 63, 181424: 10, 181706: 13, 181890: 42, 181934: 25, 181998: 53, 182043: 46, 182054: 87, 182220: 
22, 182312: 3, 182332: 46, 182376: 91, 182498: 45, 182536: 55, 182562: 7, 182604: 99, 182655: 87, 182698: 12, 182702: 34, 182985: 80, 183160: 75, 183217: 20, 183228: 68, 183370: 55, 183382: 19, 183386: 85, 183513: 76, 183789: 86, 183970: 3, 183971: 77, 184103: 66, 184167: 44, 184206: 28, 184226: 55, 184327: 28, 184346: 64, 184389: 41, 184407: 94, 184415: 16, 184436: 95, 184541: 99, 184543: 45, 184548: 43, 184790: 10, 184825: 34, 184834: 95, 184855: 41, 184875: 89, 184897: 75, 185037: 92, 185051: 15, 185076: 86, 185091: 36, 185244: 59, 185273: 87, 185282: 63, 185309: 19, 185381: 13, 185441: 92, 185551: 84, 185630: 49, 185643: 66, 185661: 19, 185696: 89, 185777: 46, 185874: 43, 185878: 89, 185879: 15, 185935: 96, 185953: 99, 186074: 61, 186080: 23, 186086: 98, 186152: 11, 186200: 14, 186352: 58, 186466: 41, 186485: 35, 186582: 39, 186632: 31, 186639: 25, 186674: 11, 186721: 94, 186858: 22, 186863: 44, 187112: 69, 187157: 37, 187178: 21, 187252: 36, 187374: 79, 187389: 76, 187456: 60, 187557: 49, 187619: 30, 187718: 48, 187796: 22, 187809: 95, 187823: 90, 187833: 77, 187842: 72, 187867: 42, 187890: 5, 187909: 10, 187932: 20, 188016: 52, 188054: 59, 188195: 81, 188285: 46, 188539: 43, 188634: 43, 188747: 83, 188809: 84, 188827: 7, 188891: 2, 189017: 33, 189035: 21, 189036: 79, 189177: 4, 189178: 13, 189236: 83, 189282: 64, 189296: 80, 189408: 76, 189593: 0, 189628: 33, 189718: 46, 189823: 71, 189864: 40, 190083: 93, 190116: 81, 190140: 31, 190177: 40, 190231: 68, 190287: 4, 190296: 36, 190318: 72, 190375: 62, 190424: 62, 190428: 43, 190462: 88, 190488: 26, 190530: 80, 190602: 45, 190679: 83, 190795: 28, 190872: 21, 190893: 9, 190901: 25, 190944: 29, 190976: 65, 190993: 24, 191140: 48, 191234: 41, 191361: 53, 191425: 83, 191456: 36, 191723: 23, 191741: 10, 191828: 40, 191847: 76, 191871: 44, 191909: 54, 191912: 89, 191950: 67, 192065: 25, 192094: 46, 192102: 38, 192195: 81, 192210: 24, 192263: 39, 192271: 73, 192293: 7, 192333: 80, 192415: 44, 192554: 89, 192570: 46, 
192604: 97, 192654: 58, 192664: 17, 192941: 62, 192971: 70, 193011: 74, 193194: 93, 193201: 63, 193260: 74, 193308: 38, 193373: 64, 193422: 7, 193458: 72, 193537: 50, 193540: 4, 193643: 98, 193654: 51, 193667: 87, 193677: 20, 193811: 25, 193828: 90, 193876: 74, 193880: 9, 193891: 16, 193938: 2, 193990: 67, 194245: 81, 194293: 39, 194384: 39, 194401: 94, 194420: 17, 194443: 70, 194503: 19, 194549: 18, 194581: 39, 194667: 7, 194760: 47, 194766: 81, 194842: 57, 194871: 74, 194948: 21, 194952: 91, 194976: 79, 195006: 3, 195084: 16, 195138: 61, 195157: 3, 195186: 56, 195309: 94, 195337: 81, 195363: 15, 195467: 51, 195549: 44, 195624: 93, 195751: 26, 196022: 96, 196043: 80, 196049: 81, 196083: 18, 196085: 6, 196114: 67, 196157: 37, 196182: 6, 196286: 2, 196289: 7, 196338: 53, 196388: 16, 196432: 81, 196451: 1, 196539: 13, 196563: 30, 196706: 10, 196832: 89, 196854: 70, 196863: 58, 196898: 99, 196954: 74, 196956: 21, 197115: 3, 197135: 96, 197191: 98, 197332: 11, 197398: 36, 197426: 93, 197438: 67, 197449: 99, 197578: 50, 197614: 42, 197656: 7, 197701: 88, 197767: 56, 197828: 42, 197889: 91, 197904: 45, 197906: 31, 197977: 82, 198089: 84, 198215: 28, 198221: 52, 198371: 55, 198578: 93, 198630: 82, 198751: 72, 198799: 15, 198846: 86, 198861: 64, 198917: 39, 198934: 30, 198978: 61, 199016: 65, 199048: 31, 199159: 84, 199187: 15, 199194: 19, 199253: 5, 199254: 66, 199335: 92, 199440: 84, 199501: 82, 199883: 21, 199906: 23, 200014: 19, 200092: 87, 200162: 99, 200236: 45, 200296: 72, 200452: 8, 200500: 63, 200508: 89, 200518: 33, 200523: 80, 200646: 85, 200843: 43, 200870: 68, 200954: 9, 201017: 33, 201032: 99, 201070: 50, 201072: 18, 201078: 57, 201140: 23, 201178: 57, 201179: 5, 201299: 76, 201329: 8, 201510: 16, 201583: 61, 201854: 30, 201959: 39, 202002: 38, 202018: 50, 202025: 62, 202029: 76, 202053: 63, 202081: 62, 202085: 53, 202123: 4, 202187: 31, 202188: 88, 202256: 62, 202347: 49, 202361: 80, 202459: 46, 202526: 86, 202593: 58, 202627: 10, 202632: 72, 202705: 9, 
202814: 6, 202860: 47, 203048: 23, 203107: 79, 203128: 96, 203307: 63, 203327: 2, 203395: 42, 203433: 71, 203442: 91, 203592: 68, 203648: 90, 203705: 11, 203738: 49, 203799: 95, 203849: 8, 203860: 39, 204045: 65, 204165: 15, 204283: 77, 204296: 45, 204343: 87, 204600: 69, 204779: 32, 204808: 30, 204852: 47, 204931: 85, 204959: 80, 205000: 11, 205019: 40, 205063: 58, 205091: 84, 205158: 45, 205170: 21, 205244: 32, 205261: 49, 205422: 79, 205476: 10, 205477: 93, 205490: 13, 205539: 43, 205540: 53, 205701: 7, 205823: 80, 205827: 64, 205859: 53, 205864: 52, 205968: 68, 206042: 46, 206104: 8, 206265: 55, 206274: 32, 206320: 91, 206485: 79, 206525: 17, 206540: 33, 206606: 28, 206621: 15, 206646: 25, 206664: 91, 206740: 55, 206870: 21, 206879: 84, 206900: 22, 207020: 11, 207051: 3, 207062: 11, 207139: 76, 207198: 83, 207201: 81, 207229: 36, 207265: 47, 207379: 68, 207407: 37, 207427: 83, 207556: 56, 207820: 37, 207846: 64, 207847: 76, 207848: 86, 207862: 36, 208095: 40, 208402: 0, 208417: 34, 208419: 95, 208430: 41, 208457: 48, 208484: 58, 208553: 34, 208619: 10, 208640: 76, 208673: 1, 208702: 99, 208746: 96, 208862: 73, 209079: 8, 209157: 3, 209162: 90, 209316: 88, 209432: 50, 209497: 60, 209613: 23, 209720: 89, 209791: 16, 209812: 46, 209834: 93, 209944: 17, 209948: 9, 210160: 62, 210209: 36, 210211: 83, 210309: 53, 210334: 49, 210341: 16, 210462: 14, 210463: 23, 210540: 54, 210613: 48, 210618: 47, 210655: 3, 210933: 17, 210934: 21, 210938: 37, 210957: 16, 210970: 61, 211002: 70, 211006: 91, 211077: 27, 211120: 8, 211168: 33, 211249: 3, 211321: 65, 211327: 53, 211342: 99, 211379: 43, 211471: 91, 211656: 58, 211680: 33, 211722: 74, 211773: 34, 211800: 82, 211842: 43, 211860: 35, 211887: 28, 212009: 92, 212013: 26, 212099: 71, 212137: 75, 212164: 30, 212247: 30, 212281: 27, 212316: 80, 212321: 28, 212374: 53, 212574: 76, 212751: 26, 212781: 46, 212836: 20, 212905: 3, 213101: 8, 213145: 21, 213217: 78, 213341: 26, 213499: 79, 213518: 70, 213575: 99, 213579: 86, 213589: 41, 
213597: 58, 213734: 74, 213758: 85, 213812: 55, 213824: 83, 213847: 3, 213952: 82, 214057: 82, 214122: 31, 214134: 91, 214176: 8, 214206: 56, 214305: 56, 214444: 32, 214477: 9, 214479: 67, 214587: 72, 214638: 87, 214642: 23, 214648: 7, 214672: 91, 214882: 49, 214907: 79, 214953: 90, 215011: 59, 215015: 91, 215018: 76, 215039: 26, 215087: 55, 215151: 77, 215160: 90, 215162: 38, 215286: 1, 215305: 94, 215467: 56, 215468: 25, 215520: 23, 215526: 60, 215557: 33, 215559: 35, 215713: 61, 215799: 34, 215904: 36, 216090: 68, 216157: 64, 216217: 12, 216235: 57, 216239: 26, 216240: 99, 216278: 87, 216292: 11, 216320: 75, 216479: 15, 216531: 48, 216591: 15, 216636: 93, 216825: 69, 216853: 45, 216956: 18, 217049: 83, 217166: 92, 217190: 30, 217456: 2, 217546: 98, 217639: 59, 217693: 24, 217821: 31, 217859: 5, 217890: 17, 217920: 13, 218082: 61, 218155: 11, 218238: 77, 218241: 28, 218264: 60, 218277: 73, 218311: 75, 218323: 19, 218479: 93, 218498: 59, 218514: 85, 218570: 65, 218575: 20, 218660: 89, 218689: 30, 218723: 12, 218731: 98, 218851: 61, 219108: 43, 219247: 81, 219290: 14, 219328: 38, 219388: 72, 219461: 79, 219470: 6, 219482: 23, 219555: 1, 219577: 39, 219595: 56, 219656: 11, 219766: 57, 219783: 96, 219908: 25, 219994: 64, 219998: 20, 220140: 71, 220208: 95, 220226: 29, 220355: 25, 220375: 74, 220377: 51, 220428: 61, 220448: 70, 220514: 32, 220557: 95, 220589: 91, 220770: 15, 220876: 2, 220924: 10, 220975: 24, 221064: 46, 221189: 10, 221277: 63, 221518: 52, 221590: 77, 221654: 77, 221914: 22, 222235: 8, 222281: 58, 222290: 40, 222295: 96, 222306: 35, 222316: 51, 222343: 97, 222360: 28, 222505: 18, 222507: 36, 222593: 11, 222605: 95, 222708: 92, 222794: 73, 222953: 30, 223013: 10, 223103: 61, 223144: 13, 223163: 63, 223182: 77, 223191: 40, 223387: 17, 223672: 94, 223698: 2, 223766: 49, 223786: 1, 223875: 59, 223923: 69, 223991: 61, 224013: 62, 224054: 13, 224058: 1, 224150: 67, 224280: 29, 224368: 37, 224405: 89, 224473: 73, 224527: 48, 224534: 38, 224744: 36, 224892: 
61, 225121: 93, 225138: 63, 225481: 90, 225549: 19, 225672: 37, 225907: 53, 225984: 73, 225989: 19, 226020: 56, 226171: 14, 226233: 33, 226319: 1, 226339: 9, 226389: 3, 226438: 44, 226456: 70, 226505: 0, 226583: 33, 226726: 3, 226810: 21, 226932: 12, 227033: 31, 227049: 2, 227155: 99, 227231: 70, 227241: 86, 227243: 72, 227252: 88, 227264: 90, 227287: 53, 227297: 12, 227298: 45, 227314: 95, 227542: 38, 227553: 53, 227573: 48, 227705: 80, 227746: 11, 227751: 21, 227796: 99, 227822: 60, 227850: 46, 227853: 19, 227862: 85, 227925: 64, 228022: 39, 228053: 61, 228064: 40, 228067: 98, 228224: 71, 228226: 50, 228310: 94, 228316: 67, 228432: 8, 228460: 78, 228461: 12, 228637: 46, 228665: 43, 228806: 96, 228870: 37, 228952: 28, 229084: 95, 229132: 58, 229287: 55, 229386: 15, 229419: 74, 229450: 26, 229520: 98, 229527: 86, 229532: 15, 229697: 87, 229741: 51, 229970: 25, 229971: 33, 229984: 59, 230083: 97, 230238: 40, 230333: 64, 230375: 28, 230450: 73, 230466: 2, 230544: 4, 230587: 5, 230625: 47, 230681: 15, 230715: 64, 230876: 94, 230880: 9, 230898: 69, 230991: 2, 231167: 34, 231216: 29, 231223: 51, 231274: 85, 231401: 28, 231535: 6, 231547: 27, 231654: 19, 231657: 57, 231672: 81, 231724: 98, 231774: 81, 231797: 10, 231802: 12, 231880: 77, 231997: 60, 232038: 12, 232095: 4, 232115: 3, 232163: 45, 232316: 95, 232526: 9, 232564: 54, 232607: 66, 232630: 7, 232638: 20, 232700: 12, 232791: 67, 232800: 36, 232827: 95, 232933: 12, 232936: 38, 232992: 49, 233015: 88, 233158: 40, 233247: 32, 233270: 11, 233281: 36, 233336: 60, 233407: 66, 233447: 63, 233483: 27, 233495: 57, 233519: 72, 233542: 77, 233545: 62, 233548: 38, 233635: 56, 233750: 22, 233829: 4, 234027: 46, 234050: 82, 234381: 20, 234447: 38, 234465: 1, 234500: 82, 234559: 54, 234603: 7, 234606: 33, 234629: 16, 234697: 47, 234732: 5, 234780: 73, 234807: 31, 234904: 82, 234975: 85, 235003: 2, 235099: 46, 235156: 86, 235178: 21, 235212: 62, 235232: 21, 235318: 78, 235382: 86, 235432: 66, 235501: 35, 235591: 24, 235595: 13, 
235641: 6, 235663: 77, 235816: 10, 235866: 87, 235972: 44, 235984: 27, 236046: 78, 236066: 33, 236145: 10, 236212: 48, 236315: 73, 236333: 53, 236397: 29, 236430: 20, 236451: 85, 236470: 44, 236501: 32, 236547: 64, 236561: 88, 236637: 34, 236799: 41, 236842: 11, 236906: 25, 236949: 10, 236992: 72, 237016: 93, 237018: 37, 237050: 2, 237124: 56, 237221: 32, 237380: 6, 237409: 23, 237458: 17, 237759: 73, 237856: 97, 237866: 36, 237869: 74, 237967: 80, 237970: 28, 238055: 0, 238094: 64, 238118: 23, 238125: 12, 238162: 92, 238191: 72, 238363: 99, 238451: 31, 238547: 8, 238603: 4, 238711: 81, 238751: 47, 238769: 39, 238803: 75, 238818: 48, 238846: 28, 238850: 50, 238950: 29, 238968: 20, 239012: 66, 239016: 45, 239057: 75, 239066: 96, 239172: 75, 239188: 63, 239202: 69, 239300: 32, 239328: 4, 239339: 55, 239355: 99, 239410: 14, 239545: 67, 239679: 31, 239921: 78, 240046: 54, 240193: 17, 240204: 59, 240219: 68, 240491: 80, 240520: 4, 240527: 67, 240563: 33, 240636: 29, 240654: 41, 240667: 96, 240788: 16, 240794: 18, 240803: 43, 240904: 90, 240952: 0, 240959: 23, 240996: 4, 241021: 33, 241029: 77, 241045: 66, 241069: 79, 241250: 19, 241323: 16, 241361: 86, 241428: 93, 241517: 82, 241733: 37, 241736: 61, 241764: 84, 241881: 31, 241899: 54, 241901: 81, 241999: 23, 242083: 23, 242153: 3, 242259: 82, 242327: 61, 242349: 87, 242498: 2, 242665: 65, 242690: 62, 242731: 17, 242774: 41, 242800: 49, 242828: 78, 242856: 96, 242930: 39, 243012: 69, 243300: 78, 243321: 72, 243360: 64, 243431: 9, 243545: 92, 243559: 4, 243621: 18, 243643: 21, 243746: 54, 243797: 63, 243922: 8, 243936: 84, 244026: 81, 244185: 40, 244195: 77, 244243: 63, 244328: 71, 244427: 29, 244444: 13, 244699: 97, 244708: 65, 244757: 59, 244816: 36, 244857: 74, 244949: 26, 245018: 65, 245080: 73, 245082: 7, 245106: 69, 245115: 54, 245238: 50, 245296: 78, 245335: 88, 245397: 12, 245627: 96, 245655: 97, 245668: 72, 245688: 28, 245709: 72, 245717: 15, 245740: 96, 245748: 19, 245810: 72, 245901: 73, 245920: 83, 245948: 2, 
246043: 17, 246164: 34, 246240: 75, 246243: 52, 246279: 80, 246318: 54, 246464: 51, 246476: 59, 246485: 85, 246509: 23, 246572: 5, 246714: 40, 246725: 43, 246812: 6, 246815: 47, 246829: 36, 246895: 77, 247016: 16, 247044: 60, 247046: 59, 247069: 78, 247151: 32, 247359: 98, 247373: 62, 247385: 86, 247396: 66, 247399: 64, 247457: 12, 247475: 95, 247527: 10, 247617: 74, 247745: 2, 247789: 15, 247896: 86, 248016: 19, 248101: 49, 248105: 45, 248229: 42, 248307: 73, 248354: 24, 248387: 50, 248475: 46, 248598: 86, 248664: 37, 248696: 43, 248711: 36, 248725: 70, 248801: 18, 248906: 11, 248917: 25, 249193: 96, 249339: 87, 249393: 89, 249404: 82, 249462: 47, 249511: 93, 249528: 28, 249559: 24, 249654: 2, 249743: 63, 249801: 1, 249900: 48, 250006: 53, 250119: 42, 250161: 36, 250180: 64, 250186: 47, 250211: 11, 250268: 64, 250325: 44, 250370: 7, 250395: 29, 250601: 70, 250702: 55, 250765: 4, 250964: 30, 251111: 67, 251155: 74, 251204: 62, 251273: 38, 251566: 99, 251889: 31, 251999: 22, 252014: 59, 252101: 8, 252279: 59, 252326: 82, 252353: 15, 252413: 72, 252436: 82, 252484: 89, 252511: 50, 252537: 19, 252582: 37, 252773: 71, 252776: 80, 252815: 21, 252926: 98, 252933: 53, 253052: 4, 253073: 35, 253091: 45, 253104: 83, 253141: 41, 253166: 5, 253241: 5, 253257: 39, 253359: 12, 253380: 15, 253541: 65, 253562: 55, 253567: 12, 253608: 99, 253621: 33, 253730: 41, 253744: 3, 253791: 97, 253812: 98, 253870: 97, 253883: 0, 253951: 25, 253954: 12, 253996: 5, 254007: 86, 254040: 17, 254126: 69, 254199: 15, 254212: 84, 254218: 46, 254223: 67, 254329: 21, 254339: 65, 254342: 74, 254396: 87, 254488: 15, 254490: 34, 254552: 50, 254558: 91, 254683: 75, 254721: 68, 254788: 49, 254807: 36, 254836: 78, 254894: 75, 254910: 97, 254943: 46, 254975: 29, 254989: 80, 255152: 44, 255416: 85, 255452: 75, 255486: 1, 255569: 98, 255615: 99, 255673: 12, 255700: 97, 255904: 0, 255933: 76, 256014: 46, 256166: 1, 256213: 98, 256320: 53, 256334: 43, 256352: 84, 256404: 96, 256550: 43, 256725: 96, 256832: 3, 
256891: 29, 256895: 4, 257002: 69, 257071: 50, 257083: 95, 257100: 56, 257169: 47, 257173: 54, 257230: 49, 257473: 27, 257481: 46, 257521: 14, 257558: 63, 257600: 6, 257660: 94, 257668: 97, 257847: 96, 257882: 69, 258054: 93, 258078: 19, 258181: 91, 258255: 75, 258277: 10, 258316: 64, 258317: 60, 258472: 38, 258566: 98, 258599: 23, 258649: 82, 258742: 64, 259023: 98, 259079: 12, 259170: 30, 259208: 73, 259359: 73, 259372: 43, 259430: 43, 259442: 38, 259464: 12, 259544: 88, 259720: 30, 259730: 75, 259739: 27, 259850: 7, 259858: 92, 259873: 84, 260037: 83, 260280: 38, 260314: 56, 260324: 92, 260358: 67, 260460: 24, 260492: 17, 260734: 5, 260766: 54, 260775: 77, 260841: 30, 260863: 23, 260877: 79, 260899: 22, 260920: 64, 260930: 56, 260965: 38, 261004: 26, 261021: 52, 261102: 42, 261159: 87, 261175: 62, 261248: 98, 261291: 64, 261310: 6, 261360: 22, 261361: 59, 261383: 53, 261440: 38, 261458: 82, 261482: 84, 261544: 34, 261589: 67, 261592: 47, 261682: 61, 261702: 62, 261967: 63, 262077: 93, 262222: 85, 262348: 47, 262369: 9, 262424: 22, 262444: 65, 262468: 94, 262588: 74, 262640: 2, 262653: 36, 262659: 62, 262835: 29, 263000: 13, 263107: 78, 263129: 91, 263135: 62, 263161: 86, 263182: 14, 263213: 67, 263345: 53, 263421: 98, 263561: 20, 263594: 40, 263690: 17, 263957: 55, 264132: 65, 264328: 68, 264355: 31, 264519: 32, 264531: 56, 264626: 60, 264634: 85, 264647: 35, 264728: 67, 264738: 60, 264914: 92, 265101: 9, 265105: 61, 265194: 47, 265210: 30, 265241: 17, 265301: 5, 265334: 11, 265471: 56, 265527: 30, 265539: 20, 265581: 79, 265605: 48, 265624: 3, 265629: 90, 265863: 66, 265875: 78, 265899: 27, 265964: 36, 265998: 4, 266123: 44, 266419: 63, 266490: 35, 266562: 56, 266630: 21, 266632: 1, 266720: 23, 266758: 82, 266786: 22, 266799: 49, 267043: 64, 267162: 11, 267300: 14, 267427: 31, 267481: 97, 267553: 18, 267555: 54, 267570: 82, 267578: 35, 267661: 69, 267681: 56, 267706: 49, 267753: 49, 267907: 12, 268088: 51, 268210: 8, 268255: 74, 268256: 45, 268299: 35, 268315: 
54, 268374: 69, 268378: 88, 268400: 91, 268424: 84, 268444: 26, 268759: 53, 268787: 5, 268869: 76, 268984: 26, 269031: 17, 269120: 27, 269301: 17, 269349: 47, 269351: 44, 269422: 3, 269429: 17, 269475: 19, 269627: 94, 269652: 29, 269654: 3, 269655: 47, 269762: 18, 269825: 66, 269935: 14, 270043: 34, 270046: 1, 270216: 26, 270227: 97, 270503: 73, 270516: 73, 270559: 26, 270586: 43, 270728: 11, 270804: 81, 270824: 62, 270941: 63, 270972: 98, 271097: 0, 271116: 19, 271262: 55, 271281: 34, 271285: 53, 271300: 53, 271324: 94, 271370: 60, 271422: 74, 271449: 86, 271492: 49, 271572: 43, 271902: 56, 271940: 38, 271943: 72, 271996: 94, 272175: 26, 272202: 66, 272217: 43, 272250: 12, 272267: 38, 272279: 38, 272312: 79, 272360: 73, 272505: 58, 272529: 91, 272556: 85, 272640: 55, 272648: 98, 272687: 44, 272699: 96, 272802: 40, 272901: 39, 272906: 85, 273068: 2, 273155: 19, 273200: 76, 273215: 20, 273473: 12, 273526: 0, 273527: 5, 273717: 1, 273727: 29, 273760: 10, 273819: 60, 273947: 49, 274162: 72, 274176: 54, 274192: 58, 274363: 35, 274380: 23, 274390: 82, 274445: 16, 274646: 30, 274673: 50, 274690: 36, 274700: 4, 274722: 54, 274786: 89, 274831: 14, 274845: 47, 274923: 43, 274967: 65, 274976: 94, 275130: 66, 275153: 70, 275180: 35, 275268: 48, 275320: 33, 275418: 82, 275504: 21, 275563: 19, 275727: 94, 275773: 76, 275785: 43, 275840: 21, 275861: 1, 275899: 63, 275927: 41, 275930: 87, 276006: 90, 276035: 14, 276222: 84, 276230: 16, 276271: 15, 276330: 37, 276344: 5, 276540: 27, 276541: 83, 276566: 68, 276583: 39, 276692: 69, 276859: 5, 277041: 6, 277072: 62, 277116: 86, 277184: 78, 277198: 62, 277199: 90, 277248: 89, 277276: 8, 277355: 86, 277421: 74, 277435: 49, 277489: 45, 277653: 13, 277723: 92, 277729: 61, 277736: 33, 277798: 76, 277835: 19, 277841: 95, 277861: 8, 277917: 40, 277993: 62, 277996: 94, 278083: 43, 278233: 57, 278461: 86, 278565: 73, 278617: 78, 278661: 40, 278706: 66, 278766: 35, 278801: 58, 278837: 25, 278855: 28, 278953: 89, 279041: 67, 279190: 47, 279263: 
25, 279305: 28, 279333: 38, 279369: 58, 279406: 75, 279453: 84, 279463: 26, 279501: 56, 279667: 22, 279779: 47, 279914: 41, 279929: 56, 279940: 23, 279996: 44, 280027: 25, 280097: 30, 280206: 82, 280221: 51, 280385: 21, 280387: 9, 280557: 65, 280613: 62, 280619: 21, 280675: 24, 280823: 56, 280843: 46, 280864: 18, 280940: 6, 281001: 36, 281080: 46, 281123: 32, 281218: 18, 281233: 25, 281342: 89, 281375: 64, 281399: 90, 281640: 34, 281647: 85, 281858: 17, 281864: 49, 281871: 88, 281897: 1, 281902: 15, 281995: 41, 282055: 2, 282066: 73, 282106: 3, 282166: 58, 282197: 49, 282205: 25, 282335: 16, 282380: 10, 282385: 71, 282406: 97, 282438: 5, 282515: 45, 282700: 95, 282711: 75, 282745: 88, 282810: 99, 282832: 83, 282976: 48, 283000: 0, 283032: 70, 283061: 15, 283117: 67, 283122: 85, 283166: 24, 283319: 2, 283446: 45, 283548: 5, 283611: 11, 283705: 3, 283714: 79, 283839: 30, 284100: 27, 284132: 2, 284148: 89, 284149: 27, 284233: 13, 284279: 13, 284327: 72, 284383: 87, 284650: 60, 284653: 36, 284655: 98, 284704: 23, 284764: 18, 284798: 76, 284866: 53, 284904: 62, 285036: 41, 285043: 29, 285073: 93, 285096: 27, 285168: 15, 285297: 78, 285310: 97, 285323: 28, 285371: 37, 285376: 76, 285406: 39, 285419: 62, 285540: 10, 285603: 85, 285653: 77, 285659: 3, 285692: 75, 285700: 6, 285745: 22, 285757: 94, 285782: 79, 285883: 96, 286033: 55, 286057: 39, 286067: 25, 286144: 13, 286173: 98, 286224: 17, 286253: 93, 286257: 96, 286546: 49, 286563: 24, 286594: 95, 286657: 41, 286796: 74, 286924: 69, 286959: 35, 287036: 95, 287169: 74, 287183: 29, 287247: 75, 287322: 82, 287504: 21, 287596: 13, 287635: 39, 287716: 31, 287719: 48, 287790: 38, 287811: 21, 287849: 79, 287865: 17, 288014: 74, 288024: 94, 288066: 3, 288074: 57, 288143: 99, 288172: 87, 288220: 29, 288250: 99, 288343: 0, 288349: 91, 288491: 14, 288702: 95, 288758: 47, 288778: 74, 289008: 24, 289076: 64, 289336: 97, 289387: 93, 289472: 24, 289532: 5, 289567: 56, 289577: 68, 289713: 51, 289727: 64, 289805: 29, 289898: 69, 289906: 
55, 289909: 82, 289938: 75, 289951: 27, 289994: 58, 290032: 2, 290156: 26, 290206: 86, 290349: 5, 290484: 15, 290494: 41, 290549: 92, 290586: 30, 290634: 23, 290710: 29, 290794: 23, 290883: 33, 291069: 3, 291135: 55, 291174: 70, 291417: 82, 291451: 43, 291475: 69, 291594: 69, 291685: 46, 291687: 83, 291702: 90, 291782: 54, 291833: 85, 291919: 12, 292280: 60, 292281: 25, 292360: 96, 292385: 91, 292441: 50, 292464: 19, 292499: 16, 292503: 48, 292534: 90, 292565: 18, 292613: 43, 292649: 54, 292681: 59, 292683: 24, 292704: 70, 292834: 54, 292886: 3, 292994: 6, 293118: 36, 293254: 57, 293269: 98, 293486: 35, 293574: 45, 293730: 88, 293754: 68, 293777: 51, 293820: 64, 293880: 94, 293883: 36, 293918: 34, 293921: 54, 293957: 51, 294003: 97, 294127: 50, 294140: 72, 294141: 53, 294182: 39, 294262: 75, 294376: 67, 294479: 50, 294512: 37, 294597: 54, 294667: 48, 294722: 24, 294744: 45, 294949: 85, 295196: 5, 295202: 33, 295213: 32, 295276: 77, 295304: 87, 295418: 76, 295572: 88, 295592: 71, 295596: 10, 295843: 96, 295915: 11, 296044: 72, 296070: 88, 296079: 25, 296206: 47, 296264: 85, 296323: 34, 296465: 7, 296551: 35, 296619: 37, 296622: 73, 296698: 15, 296700: 55, 296733: 19, 296983: 69, 297014: 39, 297047: 69, 297076: 75, 297086: 52, 297120: 7, 297183: 6, 297262: 95, 297497: 92, 297582: 57, 297607: 45, 297789: 31, 297824: 13, 297959: 66, 297997: 76, 298055: 86, 298064: 14, 298086: 48, 298112: 0, 298124: 73, 298151: 56, 298234: 98, 298270: 40, 298369: 80, 298523: 55, 298604: 57, 298674: 29, 298683: 85, 298731: 5, 298735: 51, 298736: 42, 298800: 5, 298818: 27, 298832: 40, 298880: 0, 298979: 7, 299078: 63, 299127: 88, 299382: 3, 299389: 26, 299395: 70, 299397: 32, 299466: 16, 299480: 47, 299515: 30, 299554: 24, 299644: 9, 299788: 85, 299922: 10, 299952: 97, 299963: 88, 300082: 44, 300088: 35, 300270: 26, 300307: 85, 300352: 63, 300354: 8, 300420: 86, 300536: 79, 300579: 41, 300594: 8, 300630: 66, 300715: 38, 300792: 43, 300894: 52, 300945: 0, 300986: 35, 301069: 31, 301227: 
44, 301261: 79, 301292: 26, 301325: 91, 301338: 98, 301346: 1, 301501: 3, 301595: 56, 301610: 4, 301611: 63, 301637: 99, 301701: 27, 301737: 69, 301742: 66, 301743: 27, 301744: 70, 301765: 83, 301811: 75, 302121: 8, 302137: 62, 302299: 74, 302540: 13, 302603: 46, 302817: 25, 302911: 70, 303073: 82, 303074: 75, 303078: 50, 303096: 43, 303220: 60, 303238: 53, 303354: 81, 303439: 10, 303452: 38, 303487: 82, 303507: 4, 303576: 11, 303626: 26, 303835: 89, 303889: 68, 303998: 2, 304297: 33, 304423: 88, 304443: 27, 304532: 57, 304569: 12, 304587: 37, 304590: 57, 304756: 30, 304769: 34, 304796: 89, 304839: 7, 304850: 79, 304935: 9, 304949: 90, 304959: 6, 305017: 33, 305099: 74, 305100: 13, 305101: 20, 305127: 97, 305341: 84, 305357: 19, 305384: 66, 305472: 92, 305474: 59, 305554: 2, 305604: 95, 305919: 99, 305950: 64, 305965: 37, 306200: 84, 306219: 64, 306243: 10, 306276: 45, 306373: 17, 306590: 15, 306655: 85, 307050: 27, 307113: 83, 307179: 58, 307188: 8, 307329: 67, 307353: 63, 307369: 97, 307384: 96, 307496: 56, 307580: 46, 307581: 86, 307582: 40, 307592: 26, 307657: 63, 307659: 62, 307674: 83, 307754: 76, 307802: 94, 307891: 77, 307934: 96, 307938: 78, 308050: 50, 308060: 33, 308105: 67, 308107: 41, 308127: 1, 308377: 83, 308394: 41, 308459: 46, 308536: 3, 308543: 9, 308613: 60, 308616: 33, 308812: 34, 308827: 74, 308841: 2, 308899: 20, 308955: 22, 309017: 49, 309093: 61, 309102: 71, 309107: 0, 309253: 87, 309270: 42, 309355: 48, 309474: 17, 309703: 35, 309754: 77, 309795: 0, 309938: 42, 309943: 94, 309955: 82, 310022: 17, 310164: 57, 310201: 75, 310203: 81, 310225: 30, 310232: 77, 310248: 6, 310597: 54, 310603: 38, 310643: 51, 310650: 81, 310988: 58, 311273: 66, 311338: 74, 311349: 26, 311363: 48, 311381: 34, 311393: 11, 311394: 65, 311458: 25, 311510: 51, 311546: 63, 311564: 89, 311773: 22, 311803: 12, 311823: 87, 311869: 66, 311884: 86, 311902: 69, 311925: 54, 311972: 10, 312136: 85, 312166: 15, 312405: 36, 312457: 14, 312626: 23, 313100: 37, 313139: 17, 313228: 
90, 313250: 22, 313263: 24, 313279: 44, 313366: 59, 313446: 30, 313627: 17, 313655: 29, 313707: 22, 313725: 65, 313844: 16, 313967: 75, 314028: 80, 314054: 43, 314081: 34, 314104: 15, 314111: 91, 314184: 85, 314222: 20, 314278: 79, 314294: 44, 314421: 42, 314436: 70, 314464: 98, 314465: 41, 314497: 90, 314620: 96, 314650: 20, 314683: 32, 314781: 54, 314962: 93, 314982: 33, 315106: 76, 315135: 21, 315160: 62, 315163: 22, 315203: 75, 315281: 86, 315321: 9, 315489: 6, 315549: 8, 315593: 29, 315617: 44, 315703: 93, 315722: 93, 315739: 26, 315784: 88, 315840: 42, 315870: 66, 315955: 57, 316103: 50, 316142: 66, 316223: 75, 316279: 28, 316325: 93, 316339: 65, 316364: 93, 316623: 82, 316700: 31, 316814: 17, 316821: 14, 316929: 40, 316948: 6, 316968: 96, 317005: 67, 317017: 62, 317099: 46, 317104: 23, 317147: 26, 317165: 22, 317180: 30, 317237: 63, 317253: 39, 317286: 32, 317301: 4, 317446: 98, 317559: 45, 317599: 68, 317610: 43, 317627: 55, 317702: 27, 317733: 91, 317752: 57, 317790: 48, 317848: 70, 317903: 79, 318045: 47, 318056: 62, 318123: 41, 318225: 57, 318315: 80, 318422: 69, 318486: 74, 318715: 92, 318821: 31, 318857: 98, 318935: 11, 318974: 25, 318991: 43, 319024: 65, 319041: 32, 319155: 57, 319278: 19, 319441: 65, 319508: 38, 319518: 52, 319548: 96, 319625: 26, 319647: 62, 319650: 97, 319669: 74, 319691: 15, 319724: 99, 319760: 35, 319896: 10, 320094: 72, 320232: 82, 320304: 32, 320307: 21, 320379: 2, 320460: 48, 320478: 51, 320570: 40, 320598: 91, 320608: 40, 320622: 49, 320662: 75, 320669: 86, 320681: 53, 320682: 25, 320705: 55, 320733: 30, 320798: 95, 320998: 39, 321154: 34, 321163: 40, 321172: 78, 321205: 37, 321248: 84, 321335: 67, 321391: 41, 321476: 39, 321477: 2, 321500: 5, 321507: 77, 321521: 6, 321551: 4, 321574: 75, 321623: 99, 321628: 55, 321637: 95, 321801: 87, 321840: 58, 321915: 90, 321966: 23, 322131: 47, 322274: 51, 322307: 80, 322372: 91, 322378: 67, 322439: 17, 322590: 58, 322611: 24, 322636: 54, 322892: 17, 322912: 95, 322968: 43, 323039: 14, 
323177: 22, 323201: 73, 323209: 96, 323277: 36, 323339: 10, 323380: 77, 323406: 17, 323624: 18, 323674: 18, 323710: 56, 323752: 35, 323754: 82, 323959: 6, 324034: 50, 324065: 22, 324067: 50, 324167: 7, 324193: 16, 324264: 68, 324293: 61, 324528: 14, 324539: 86, 324556: 37, 324607: 9, 324673: 28, 324702: 27, 324895: 38, 324938: 58, 325019: 69, 325100: 64, 325129: 82, 325272: 23, 325273: 80, 325384: 96, 325470: 59, 325557: 38, 325599: 82, 325676: 85, 325695: 25, 325716: 95, 325722: 15, 325723: 38, 325779: 25, 325822: 57, 325914: 25, 326041: 43, 326081: 10, 326168: 74, 326245: 31, 326318: 18, 326322: 61, 326379: 41, 326395: 37, 326482: 3, 326535: 3, 326565: 61, 326719: 40, 326736: 4, 326795: 65, 326944: 12, 326996: 70, 327146: 96, 327178: 88, 327229: 62, 327474: 23, 327530: 25, 327582: 72, 327601: 0, 327668: 53, 327670: 85, 327678: 67, 327693: 55, 327781: 2, 327795: 15, 327867: 0, 327885: 21, 327907: 99, 327983: 29, 327997: 62, 328231: 53, 328295: 85, 328477: 85, 328502: 76, 328547: 9, 328557: 7, 328605: 5, 328624: 19, 328915: 86, 329017: 51, 329043: 97, 329180: 0, 329228: 69, 329239: 80, 329243: 74, 329264: 98, 329396: 37, 329456: 70, 329504: 29, 329525: 80, 329538: 32, 329600: 51, 329689: 80, 329740: 99, 329782: 64, 330062: 77, 330069: 53, 330179: 49, 330304: 51, 330341: 87, 330403: 71, 330427: 4, 330433: 25, 330470: 13, 330475: 0, 330500: 44, 330531: 92, 330552: 34, 330557: 29, 330617: 20, 330621: 83, 330690: 5, 330725: 7, 330765: 12, 330817: 84, 331024: 66, 331200: 98, 331202: 8, 331414: 4, 331479: 15, 331515: 41, 331597: 27, 331719: 3, 331781: 25, 331816: 40, 331837: 85, 331869: 20, 331966: 71, 332211: 52, 332239: 76, 332307: 32, 332381: 42, 332497: 95, 332612: 29, 332620: 66, 332637: 75, 332672: 28, 332723: 13, 332772: 79, 332912: 34, 333093: 18, 333255: 11, 333257: 42, 333337: 59, 333371: 71, 333388: 62, 333520: 24, 333527: 51, 333611: 66, 333665: 84, 333674: 34, 333727: 36, 333856: 30, 333870: 53, 333983: 21, 334150: 90, 334363: 35, 334511: 78, 334571: 4, 
334580: 42, 334604: 83, 334607: 3, 334624: 2, 334787: 10, 334820: 79, 334869: 89, 334981: 55, 334983: 47, 334988: 94, 335056: 63, 335154: 53, 335162: 61, 335266: 68, 335333: 5, 335363: 68, 335391: 35, 335418: 39, 335557: 16, 335560: 99, 335607: 37, 335637: 83, 335674: 41, 335991: 33, 336079: 34, 336248: 92, 336316: 58, 336339: 45, 336351: 43, 336495: 35, 336496: 19, 336543: 13, 336622: 71, 336706: 48, 336717: 79, 336766: 57, 336800: 17, 336804: 16, 336816: 50, 336837: 80, 336901: 32, 336931: 2, 336933: 5, 337010: 12, 337048: 18, 337058: 25, 337130: 27, 337185: 36, 337269: 87, 337397: 46, 337427: 55, 337512: 31, 337696: 23, 337704: 43, 337742: 20, 337803: 68, 337968: 67, 337980: 54, 338057: 61, 338063: 89, 338099: 60, 338142: 78, 338150: 83, 338278: 96, 338285: 58, 338473: 37, 338496: 21, 338500: 76, 338653: 50, 338780: 15, 338826: 40, 338876: 41, 338903: 24, 339008: 64, 339028: 89, 339109: 89, 339126: 89, 339135: 77, 339158: 73, 339206: 12, 339221: 73, 339236: 42, 339253: 11, 339267: 59, 339454: 1, 339513: 35, 339596: 51, 339639: 43, 339663: 30, 339677: 2, 339738: 84, 339775: 0, 339802: 93, 339840: 28, 339846: 3, 339895: 30, 339906: 25, 339922: 70, 340021: 49, 340143: 39, 340179: 23, 340292: 97, 340445: 19, 340458: 12, 340468: 99, 340541: 67, 340558: 49, 340568: 24, 340616: 18, 340637: 33, 340684: 63, 340711: 30, 340745: 70, 340854: 86, 340887: 26, 340891: 56, 341001: 70, 341100: 96, 341186: 63, 341333: 53, 341356: 67, 341535: 26, 341559: 58, 341583: 64, 341602: 65, 341736: 44, 341887: 89, 341915: 27, 342005: 23, 342033: 21, 342045: 41, 342063: 92, 342067: 22, 342142: 18, 342171: 40, 342188: 37, 342232: 1, 342357: 80, 342396: 26, 342399: 75, 342522: 84, 342785: 61, 342865: 71, 342888: 66, 342892: 75, 342961: 23, 342993: 81, 343078: 26, 343186: 75, 343295: 26, 343339: 86, 343345: 90, 343424: 97, 343451: 17, 343458: 92, 343502: 24, 343647: 8, 343954: 95, 343974: 74, 344219: 38, 344361: 94, 344464: 50, 344515: 35, 344578: 99, 344618: 46, 344621: 83, 344626: 84, 
344674: 70, 344679: 91, 344723: 14, 344724: 7, 344778: 64, 345000: 52, 345166: 71, 345174: 25, 345359: 71, 345548: 50, 345652: 39, 345712: 98, 345990: 18, 346041: 17, 346134: 1, 346186: 48, 346281: 64, 346446: 84, 346570: 92, 346573: 24, 346582: 82, 346632: 84, 346730: 31, 346770: 12, 346798: 77, 346809: 94, 346821: 66, 346924: 35, 346942: 84, 347054: 70, 347075: 94, 347182: 53, 347236: 48, 347266: 11, 347314: 30, 347372: 62, 347382: 86, 347418: 58, 347510: 81, 347894: 70, 348071: 93, 348186: 74, 348287: 62, 348364: 49, 348477: 2, 348618: 66, 348667: 13, 348929: 45, 348939: 21, 348963: 8, 348992: 98, 349073: 66, 349075: 54, 349077: 75, 349320: 51, 349340: 73, 349376: 85, 349423: 15, 349472: 46, 349510: 33, 349530: 27, 349703: 41, 349742: 62, 349750: 69, 349772: 80, 349788: 22, 349842: 98, 349877: 66, 349908: 40, 349913: 70, 349916: 35, 349966: 67, 350038: 65, 350083: 97, 350146: 88, 350173: 27, 350279: 58, 350337: 33, 350352: 80, 350464: 21, 350583: 63, 350586: 59, 350597: 13, 350616: 33, 350667: 26, 350677: 27, 350681: 70, 350779: 41, 350786: 28, 350960: 81, 350970: 28, 350979: 60, 351040: 50, 351105: 16, 351193: 85, 351334: 41, 351336: 64, 351353: 3, 351389: 33, 351406: 56, 351424: 59, 351472: 94, 351484: 71, 351501: 84, 351552: 12, 351573: 38, 351727: 17, 351762: 14, 351769: 18, 351889: 57, 351975: 30, 351984: 42, 352038: 38, 352047: 61, 352137: 89, 352145: 80, 352253: 54, 352428: 72, 352598: 9, 352617: 37, 352628: 17, 352675: 80, 352696: 39, 352699: 85, 352803: 72, 352846: 94, 352849: 66, 352860: 56, 352915: 67, 352964: 70, 353026: 93, 353055: 69, 353155: 69, 353308: 85, 353408: 8, 353540: 65, 353544: 42, 353556: 47, 353737: 44, 353822: 86, 353838: 79, 353882: 40, 353980: 67, 353998: 93, 354053: 44, 354058: 20, 354065: 6, 354102: 52, 354108: 39, 354352: 0, 354359: 2, 354381: 56, 354407: 60, 354547: 64, 354574: 33, 354637: 29, 354711: 27, 354796: 62, 354817: 2, 354855: 21, 355067: 94, 355200: 79, 355212: 69, 355310: 86, 355388: 93, 355420: 10, 355434: 90, 
355516: 89, 355629: 62, 355772: 60, 355789: 83, 355831: 59, 355850: 11, 355868: 13, 356003: 81, 356031: 93, 356152: 21, 356204: 86, 356213: 90, 356297: 95, 356352: 63, 356394: 17, 356457: 5, 356486: 59, 356561: 25, 356577: 61, 356654: 52, 356668: 93, 356726: 72, 356783: 27, 356856: 34, 356894: 16, 356915: 49, 356928: 81, 357069: 63, 357074: 37, 357131: 18, 357189: 5, 357243: 10, 357263: 0, 357265: 61, 357307: 96, 357412: 2, 357508: 45, 357548: 54, 357578: 34, 357584: 12, 357720: 35, 357880: 30, 357958: 4, 357989: 8, 358019: 78, 358100: 13, 358183: 7, 358238: 18, 358274: 8, 358414: 41, 358431: 37, 358443: 54, 358550: 79, 358578: 86, 358588: 83, 358687: 58, 358785: 46, 358888: 76, 358914: 19, 359018: 53, 359179: 54, 359191: 90, 359316: 47, 359330: 55, 359346: 47, 359467: 47, 359508: 97, 359515: 68, 359701: 81, 359703: 10, 359718: 37, 359841: 15, 359913: 90, 359933: 2, 360011: 21, 360053: 85, 360055: 59, 360132: 15, 360183: 68, 360184: 31, 360194: 76, 360206: 86, 360279: 9, 360299: 16, 360316: 57, 360337: 27, 360357: 88, 360443: 33, 360522: 80, 360535: 0, 360580: 91, 360583: 72, 360632: 89, 360646: 1, 360732: 72, 360783: 83, 360827: 49, 360893: 22, 360920: 63, 360997: 33, 361001: 68, 361119: 0, 361159: 94, 361255: 5, 361274: 56, 361330: 60, 361334: 78, 361335: 57, 361356: 60, 361365: 24, 361406: 40, 361411: 44, 361439: 32, 361446: 51, 361472: 98, 361495: 0, 361713: 85, 361800: 79, 361834: 32, 361841: 70, 361887: 75, 362020: 24, 362160: 69, 362182: 81, 362240: 42, 362283: 78, 362339: 33, 362353: 80, 362400: 15, 362473: 88, 362474: 24, 362515: 13, 362536: 86, 362555: 71, 362565: 3, 362767: 44, 362881: 33, 362885: 72, 362935: 32, 362986: 91, 363146: 7, 363210: 41, 363215: 64, 363261: 38, 363302: 99, 363307: 28, 363432: 35, 363537: 92, 363544: 69, 363714: 89, 363737: 11, 363788: 42, 363802: 28, 363821: 86, 363979: 37, 364029: 85, 364134: 33, 364215: 50, 364253: 42, 364265: 48, 364289: 35, 364290: 27, 364314: 98, 364372: 70, 364446: 21, 364450: 60, 364498: 3, 364522: 57, 
364671: 58, 364703: 21, 364711: 24, 364877: 74, 365030: 89, 365059: 77, 365116: 55, 365213: 18, 365316: 69, 365512: 84, 365554: 79, 365561: 67, 365622: 6, 365839: 31, 365867: 22, 365879: 9, 365910: 71, 366020: 17, 366030: 28, 366103: 12, 366112: 42, 366146: 18, 366189: 49, 366344: 73, 366534: 35, 366539: 86, 366541: 59, 366569: 59, 366598: 60, 366611: 51, 366725: 44, 366779: 22, 366799: 8, 366869: 51, 366929: 6, 366954: 79, 367002: 96, 367061: 21, 367097: 44, 367110: 18, 367133: 49, 367195: 74, 367317: 0, 367408: 61, 367617: 44, 367644: 19, 367658: 63, 367738: 84, 367799: 14, 367822: 39, 367886: 90, 367910: 80, 368004: 54, 368036: 0, 368098: 62, 368254: 89, 368307: 12, 368374: 94, 368431: 5, 368495: 55, 368496: 84, 368647: 22, 368670: 64, 368694: 50, 368789: 84, 368831: 8, 368895: 62, 368923: 30, 369031: 8, 369067: 30, 369139: 78, 369157: 2, 369262: 92, 369326: 36, 369496: 17, 369518: 51, 369664: 59, 369666: 2, 369678: 97, 369740: 98, 369790: 91, 369820: 25, 369854: 15, 370055: 98, 370178: 1, 370214: 7, 370240: 48, 370256: 8, 370309: 87, 370310: 68, 370341: 86, 370352: 0, 370372: 51, 370394: 27, 370677: 98, 370884: 85, 371153: 40, 371188: 61, 371266: 95, 371338: 3, 371396: 32, 371463: 48, 371499: 72, 371774: 30, 372050: 71, 372082: 24, 372140: 83, 372142: 90, 372350: 6, 372393: 71, 372411: 4, 372484: 83, 372578: 84, 372595: 3, 372683: 79, 372708: 21, 372742: 59, 372814: 49, 372863: 55, 372883: 5, 372954: 77, 373027: 92, 373088: 63, 373097: 47, 373120: 84, 373171: 48, 373198: 24, 373207: 37, 373216: 76, 373360: 46, 373615: 33, 373628: 71, 373634: 99, 373758: 59, 373841: 30, 373853: 96, 373939: 6, 373975: 38, 373991: 9, 374062: 76, 374149: 17, 374201: 63, 374251: 80, 374292: 67, 374321: 25, 374391: 56, 374497: 68, 374548: 76, 374691: 27, 374717: 8, 374727: 36, 374731: 36, 374749: 64, 374757: 19, 374759: 99, 374827: 60, 374846: 61, 374862: 84, 374917: 84, 374984: 92, 375075: 8, 375111: 39, 375297: 77, 375299: 18, 375405: 94, 375541: 80, 375542: 21, 375696: 51, 375905: 
92, 375955: 41, 375973: 73, 376008: 84, 376061: 47, 376107: 57, 376126: 2, 376269: 66, 376272: 35, 376373: 55, 376419: 73, 376517: 94, 376587: 60, 376649: 90, 376953: 81, 376957: 94, 377119: 83, 377158: 71, 377209: 14, 377301: 49, 377371: 43, 377380: 75, 377399: 0, 377468: 71, 377498: 47, 377580: 47, 377606: 55, 377627: 77, 377987: 70, 378151: 82, 378186: 91, 378294: 49, 378383: 27, 378389: 75, 378504: 29, 378529: 93, 378554: 13, 378622: 27, 378651: 9, 378862: 63, 378931: 98, 378966: 69, 379003: 42, 379007: 16, 379021: 13, 379095: 58, 379166: 88, 379198: 65, 379234: 70, 379290: 84, 379588: 35, 379702: 70, 379730: 30, 379787: 74, 379796: 60, 379844: 37, 379860: 71, 379996: 49, 380303: 84, 380328: 5, 380544: 49, 380553: 14, 380558: 84, 380727: 52, 380732: 99, 380769: 76, 380921: 74, 381014: 69, 381090: 34, 381148: 8, 381182: 21, 381214: 58, 381257: 34, 381262: 20, 381382: 12, 381412: 38, 381456: 4, 381489: 48, 381519: 87, 381539: 93, 381549: 73, 381555: 17, 381645: 45, 381733: 44, 381838: 55, 381857: 45, 381958: 65, 382021: 9, 382042: 66, 382128: 5, 382159: 8, 382178: 82, 382207: 84, 382309: 39, 382394: 91, 382410: 52, 382434: 88, 382443: 18, 382473: 51, 382564: 1, 382576: 44, 382605: 35, 382745: 65, 382815: 47, 383065: 19, 383102: 46, 383146: 13, 383227: 89, 383276: 38, 383379: 0, 383454: 57, 383492: 49, 383503: 49, 383567: 13, 383584: 15, 383611: 22, 383614: 61, 383615: 68, 383824: 31, 384012: 26, 384019: 83, 384040: 1, 384231: 64, 384350: 15, 384363: 45, 384471: 48, 384600: 0, 384635: 79, 384682: 38, 384698: 84, 384746: 94, 384851: 63, 384872: 24, 384905: 75, 384926: 22, 385074: 4, 385211: 56, 385269: 48, 385307: 9, 385361: 12, 385457: 77, 385468: 41, 385523: 23, 385677: 81, 385690: 27, 385740: 81, 385790: 64, 385846: 79, 385854: 58, 385967: 15, 386109: 5, 386122: 13, 386130: 91, 386141: 68, 386185: 48, 386193: 62, 386204: 89, 386254: 52, 386388: 22, 386463: 55, 386493: 12, 386541: 4, 386558: 70, 386617: 25, 386620: 68, 386905: 51, 386943: 57, 386980: 7, 387051: 
79, 387060: 0, 387104: 31, 387198: 74, 387244: 90, 387309: 27, 387316: 63, 387621: 11, 387673: 12, 387724: 58, 387773: 43, 387823: 82, 387877: 68, 388000: 10, 388003: 50, 388011: 97, 388027: 49, 388030: 59, 388115: 68, 388156: 41, 388226: 45, 388263: 94, 388278: 62, 388326: 0, 388500: 44, 388570: 76, 388584: 89, 388590: 2, 388609: 89, 388685: 61, 388730: 87, 388822: 13, 388839: 98, 388947: 86, 388970: 85, 389319: 57, 389358: 5, 389380: 38, 389384: 54, 389410: 36, 389413: 32, 389471: 11, 389613: 77, 389645: 80, 389705: 74, 389717: 4, 389808: 55, 389818: 34, 389858: 94, 389883: 72, 389973: 72, 390026: 46, 390076: 56, 390222: 96, 390252: 48, 390373: 90, 390598: 38, 390899: 50, 390921: 13, 390948: 67, 390958: 29, 390968: 41, 391106: 18, 391188: 29, 391197: 40, 391214: 37, 391251: 55, 391376: 40, 391384: 34, 391388: 38, 391416: 12, 391459: 92, 391555: 78, 391572: 92, 391630: 70, 391647: 32, 391727: 23, 391756: 49, 391942: 67, 391995: 8, 392016: 77, 392118: 0, 392343: 30, 392371: 3, 392404: 30, 392482: 0, 392617: 87, 392737: 2, 392830: 79, 392893: 48, 392952: 97, 392966: 68, 393019: 33, 393110: 15, 393129: 71, 393172: 70, 393220: 77, 393235: 18, 393249: 63, 393341: 74, 393344: 87, 393368: 33, 393551: 4, 393556: 3, 393721: 53, 393763: 7, 393836: 22, 393887: 83, 393968: 29, 394015: 8, 394027: 96, 394091: 26, 394175: 13, 394240: 65, 394491: 40, 394537: 4, 394574: 67, 394675: 30, 394762: 1, 394776: 84, 394815: 32, 394899: 21, 394917: 79, 395006: 10, 395010: 94, 395130: 38, 395290: 7, 395323: 83, 395379: 26, 395409: 51, 395414: 31, 395517: 42, 395528: 4, 395579: 67, 395600: 71, 395601: 32, 395611: 71, 395631: 96, 395636: 97, 395784: 10, 395821: 77, 395964: 44, 396000: 38, 396081: 53, 396143: 34, 396213: 40, 396231: 75, 396319: 41, 396327: 6, 396467: 95, 396513: 71, 396565: 41, 396588: 59, 396596: 57, 396681: 60, 396826: 22, 396914: 59, 396953: 59, 396975: 3, 397115: 98, 397130: 19, 397165: 37, 397178: 28, 397304: 29, 397334: 85, 397396: 8, 397672: 81, 397741: 85, 397876: 20, 
397885: 8, 397902: 10, 398056: 30, 398137: 92, 398373: 13, 398451: 70, 398463: 72, 398517: 87, 398585: 96, 398607: 83, 398647: 95, 398704: 11, 398774: 80, 398869: 97, 398907: 36, 398910: 59, 398931: 28, 399204: 65, 399363: 10, 399405: 49, 399424: 6, 399429: 57, 399497: 64, 399544: 71, 399558: 17, 399561: 70, 399577: 86, 399578: 47, 399580: 97, 399620: 1, 399641: 16, 399886: 3, 400002: 65, 400142: 54, 400226: 88, 400325: 61, 400361: 95, 400380: 4, 400407: 9, 400433: 0, 400441: 51, 400470: 18, 400505: 3, 400518: 16, 400551: 12, 400704: 82, 400721: 53, 400729: 61, 400779: 76, 400900: 22, 400916: 61, 401084: 39, 401138: 9, 401180: 83, 401226: 42, 401312: 83, 401314: 33, 401440: 19, 401457: 42, 401466: 14, 401489: 87, 401504: 80, 401574: 15, 401604: 5, 401626: 1, 401770: 65, 401789: 89, 401796: 19, 401837: 3, 401863: 71, 401891: 55, 402077: 61, 402125: 92, 402241: 78, 402301: 96, 402312: 66, 402405: 15, 402421: 99, 402517: 69, 402526: 49, 402573: 0, 402616: 1, 402742: 40, 402948: 96, 402959: 45, 402996: 42, 403027: 82, 403034: 62, 403041: 23, 403112: 88, 403189: 71, 403190: 48, 403245: 75, 403302: 34, 403328: 69, 403336: 17, 403396: 86, 403514: 76, 403638: 15, 403700: 76, 403740: 68, 403747: 56, 403814: 82, 403821: 39, 403907: 97, 403908: 30, 403968: 12, 404021: 24, 404026: 45, 404059: 72, 404088: 66, 404133: 46, 404271: 27, 404278: 35, 404284: 87, 404296: 40, 404384: 90, 404468: 8, 404487: 77, 404570: 47, 404701: 81, 404744: 23, 404756: 82, 404770: 45, 404931: 84, 405161: 84, 405238: 50, 405267: 74, 405297: 17, 405334: 4, 405639: 40, 405844: 28, 405884: 42, 405890: 21, 405951: 99, 406058: 53, 406063: 87, 406138: 93, 406169: 89, 406200: 56, 406215: 18, 406280: 92, 406344: 35, 406345: 12, 406353: 76, 406421: 83, 406462: 60, 406635: 96, 406656: 78, 406667: 34, 406779: 85, 406794: 23, 406822: 79, 406867: 47, 406934: 5, 406962: 12, 406997: 75, 407086: 87, 407132: 60, 407163: 39, 407287: 67, 407347: 68, 407378: 64, 407486: 67, 407654: 91, 407680: 98, 407890: 31, 408058: 14, 
408078: 92, 408197: 11, 408294: 0, 408425: 91, 408574: 18, 408630: 77, 408673: 55, 408849: 14, 408866: 1, 408965: 73, 409000: 92, 409204: 14, 409230: 64, 409236: 55, 409339: 68, 409460: 91, 409581: 66, 409604: 24, 409633: 86, 409697: 37, 409720: 91, 409881: 98, 409941: 59, 409948: 49, 410175: 45, 410184: 22, 410229: 99, 410245: 38, 410265: 41, 410272: 42, 410288: 41, 410408: 11, 410546: 76, 410591: 49, 410604: 9, 410812: 70, 410817: 17, 410873: 88, 410876: 86, 410893: 58, 410957: 34, 410976: 56, 411039: 7, 411078: 90, 411083: 61, 411157: 45, 411285: 87, 411309: 74, 411599: 39, 411813: 93, 411845: 94, 412059: 22, 412062: 69, 412097: 5, 412159: 78, 412308: 91, 412377: 0, 412386: 72, 412421: 48, 412471: 13, 412502: 33, 412506: 69, 412529: 5, 412566: 36, 412570: 28, 412586: 83, 412649: 3, 412652: 13, 412670: 80, 412688: 42, 412712: 47, 412773: 28, 412786: 56, 412802: 69, 412846: 45, 412876: 76, 412893: 55, 412921: 14, 412937: 16, 413042: 70, 413053: 74, 413062: 47, 413596: 23, 413685: 71, 413755: 70, 413785: 30, 413814: 68, 413961: 64, 413993: 24, 414003: 92, 414054: 27, 414209: 20, 414219: 46, 414324: 81, 414331: 48, 414436: 7, 414521: 70, 414543: 73, 414819: 4, 414855: 1, 415022: 82, 415162: 72, 415189: 98, 415195: 83, 415222: 58, 415260: 42, 415329: 39, 415355: 40, 415396: 31, 415456: 40, 415612: 36, 415648: 22, 415735: 3, 415826: 30, 415891: 19, 415926: 37, 416060: 46, 416075: 48, 416090: 58, 416153: 44, 416249: 91, 416766: 9, 416879: 92, 416916: 55, 416940: 33, 416955: 95, 417000: 32, 417057: 66, 417160: 11, 417249: 31, 417293: 22, 417370: 12, 417378: 19, 417392: 70, 417493: 42, 417506: 86, 417553: 68, 417779: 9, 417822: 39, 417836: 90, 417949: 74, 418002: 33, 418005: 10, 418014: 67, 418124: 63, 418229: 30, 418335: 14, 418336: 3, 418443: 79, 418565: 7, 418588: 36, 418639: 82, 418690: 84, 418788: 67, 418793: 68, 418865: 15, 418892: 42, 418910: 97, 418918: 2, 418999: 54, 419085: 51, 419121: 50, 419159: 23, 419166: 40, 419184: 30, 419221: 96, 419302: 45, 419371: 35, 
419413: 22, 419448: 8, 419534: 59, 419619: 81, 419635: 4, 419678: 50, 419696: 16, 419731: 22, 419735: 75, 419861: 47, 419994: 80, 420008: 66, 420070: 50, 420078: 57, 420086: 83, 420214: 54, 420231: 85, 420239: 20, 420400: 64, 420439: 23, 420447: 29, 420466: 21, 420775: 50, 420781: 46, 420815: 44, 420847: 4, 420955: 17, 421009: 32, 421094: 57, 421095: 76, 421226: 49, 421253: 92, 421308: 57, 421331: 31, 421345: 44, 421372: 63, 421373: 63, 421385: 80, 421389: 65, 421447: 73, 421484: 70, 421533: 8, 421542: 39, 421593: 68, 421605: 56, 421652: 52, 421655: 83, 421770: 18, 421818: 73, 422050: 40, 422090: 25, 422187: 99, 422354: 13, 422424: 84, 422439: 87, 422533: 87, 422536: 39, 422573: 2, 422605: 34, 422670: 71, 422699: 0, 422701: 67, 422884: 14, 422915: 6, 423147: 4, 423195: 99, 423245: 22, 423273: 20, 423289: 17, 423376: 10, 423646: 58, 423712: 88, 423765: 15, 423812: 70, 423831: 49, 423950: 89, 424069: 11, 424078: 26, 424086: 47, 424130: 92, 424169: 17, 424255: 95, 424307: 47, 424351: 11, 424437: 42, 424640: 89, 424724: 84, 424914: 50, 424934: 7, 425070: 47, 425073: 68, 425137: 79, 425228: 85, 425370: 66, 425434: 37, 425469: 29, 425474: 85, 425507: 30, 425514: 77, 425529: 94, 425567: 18, 425745: 32, 425748: 17, 425755: 18, 425810: 18, 425856: 8, 425964: 99, 426043: 5, 426124: 10, 426168: 92, 426169: 81, 426358: 13, 426365: 43, 426394: 13, 426461: 33, 426469: 10, 426520: 61, 426693: 72, 426705: 56, 426710: 80, 426711: 40, 426844: 88, 426858: 66, 426930: 64, 427017: 1, 427145: 73, 427273: 10, 427297: 28, 427414: 86, 427427: 60, 427575: 98, 427587: 78, 427675: 1, 427698: 87, 427710: 48, 427776: 65, 427887: 16, 428125: 85, 428136: 65, 428158: 13, 428226: 71, 428276: 81, 428283: 30, 428339: 23, 428360: 11, 428397: 51, 428453: 54, 428478: 35, 428523: 96, 428561: 4, 428577: 59, 428638: 70, 428802: 25, 428810: 96, 428857: 1, 428991: 30, 429026: 2, 429064: 26, 429081: 71, 429191: 71, 429278: 93, 429301: 73, 429329: 95, 429382: 89, 429397: 5, 429477: 87, 429534: 57, 429585: 28, 
429601: 11, 429710: 19, 429778: 93, 429818: 37, 429944: 27, 429955: 28, 429980: 46, 430009: 55, 430039: 53, 430095: 19, 430111: 45, 430235: 54, 430255: 63, 430261: 11, 430585: 10, 430655: 42, 430787: 31, 430867: 86, 430871: 19, 430882: 14, 430913: 37, 431244: 2, 431260: 13, 431345: 81, 431555: 11, 431568: 63, 431601: 9, 431729: 47, 431829: 73, 431874: 36, 431939: 4, 432019: 57, 432099: 43, 432348: 89, 432360: 84, 432370: 56, 432377: 13, 432610: 44, 432705: 17, 432953: 1, 432974: 94, 432976: 66, 433082: 33, 433570: 48, 433624: 22, 433669: 98, 433746: 78, 433854: 11, 433878: 9, 433908: 3, 434048: 30, 434200: 59, 434235: 99, 434443: 94, 434631: 83, 434639: 98, 434675: 33, 434681: 72, 434707: 42, 434733: 15, 434755: 79, 434776: 33, 434780: 78, 434850: 53, 434853: 23, 434872: 40, 434963: 84, 435016: 23, 435020: 97, 435184: 93, 435490: 5, 435540: 59, 435652: 31, 435803: 91, 435962: 41, 435966: 91, 436026: 24, 436351: 68, 436437: 54, 436499: 85, 436502: 92, 436601: 36, 436602: 79, 436603: 44, 436613: 95, 436622: 57, 436703: 13, 436794: 57, 437095: 12, 437143: 37, 437315: 4, 437338: 81, 437449: 79, 437632: 85, 437774: 83, 437855: 76, 437892: 61, 437932: 28, 438116: 14, 438325: 76, 438380: 79, 438506: 67, 438600: 70, 438613: 3, 438645: 76, 438651: 75, 438663: 53, 438664: 42, 438696: 27, 438782: 90, 438862: 97, 438881: 45, 438883: 72, 438906: 78, 439168: 40, 439214: 85, 439272: 76, 439287: 87, 439310: 26, 439341: 79, 439380: 92, 439446: 94, 439458: 3, 439522: 59, 439583: 48, 439595: 83, 439624: 74, 439638: 17, 439683: 17, 439697: 45, 439848: 14, 439924: 42, 439928: 92, 439937: 57, 439998: 35, 440001: 53, 440043: 28, 440058: 87, 440074: 32, 440160: 98, 440243: 59, 440258: 25, 440331: 44, 440371: 4, 440513: 18, 440549: 78, 440604: 30, 440654: 41, 440672: 51, 440722: 26, 440916: 35, 441054: 94, 441110: 91, 441121: 42, 441227: 85, 441344: 13, 441368: 99, 441449: 62, 441495: 37, 441500: 69, 441609: 5, 441617: 74, 441685: 22, 441703: 50, 441834: 57, 441895: 72, 442074: 72, 442080: 
3, 442082: 10, 442139: 98, 442247: 90, 442363: 73, 442381: 86, 442429: 90, 442435: 68, 442591: 11, 442653: 20, 442698: 11, 442721: 93, 442820: 81, 442836: 19, 442882: 17, 442916: 87, 443032: 79, 443084: 86, 443093: 0, 443134: 53, 443200: 23, 443207: 71, 443269: 18, 443321: 67, 443419: 47, 443571: 1, 443617: 87, 443624: 91, 443630: 34, 443680: 33, 443881: 76, 443982: 42, 444139: 64, 444243: 72, 444264: 87, 444463: 95, 444597: 24, 444679: 82, 444730: 88, 444907: 91, 445083: 9, 445152: 8, 445216: 39, 445237: 51, 445249: 61, 445251: 69, 445265: 1, 445282: 33, 445358: 48, 445532: 17, 445543: 93, 445595: 74, 445678: 90, 445788: 32, 445802: 35, 445838: 51, 445845: 13, 445907: 65, 446002: 27, 446112: 40, 446132: 11, 446330: 65, 446344: 42, 446469: 73, 446476: 78, 446592: 15, 446626: 38, 446710: 98, 446729: 98, 446736: 74, 446768: 90, 446979: 77, 447096: 77, 447303: 17, 447373: 29, 447596: 14, 447677: 40, 447799: 87, 447815: 84, 447818: 84, 447971: 45, 448001: 69, 448140: 47, 448190: 27, 448224: 51, 448226: 54, 448259: 26, 448309: 50, 448337: 68, 448379: 96, 448424: 75, 448443: 0, 448478: 8, 448480: 80, 448512: 34, 448607: 33, 448640: 5, 448645: 5, 448677: 30, 448834: 70, 448936: 64, 449082: 37, 449096: 44, 449099: 29, 449216: 3, 449262: 15, 449327: 25, 449343: 61, 449376: 94, 449473: 7, 449508: 80, 449660: 91, 449666: 55, 449687: 78, 449691: 10, 449706: 97, 449724: 42, 449868: 76, 449921: 64, 450051: 28, 450246: 56, 450256: 69, 450373: 81, 450390: 84, 450394: 71, 450432: 56, 450461: 23, 450482: 26, 450493: 82, 450511: 91, 450522: 75, 450628: 48, 450658: 70, 450659: 61, 450661: 24, 450748: 16, 450795: 29, 450807: 3, 450826: 84, 450871: 16, 450939: 94, 451021: 40, 451055: 31, 451062: 18, 451087: 45, 451164: 66, 451166: 33, 451249: 37, 451303: 39, 451336: 31, 451394: 46, 451515: 52, 451665: 7, 451752: 67, 451757: 96, 451820: 71, 451909: 74, 451928: 19, 451938: 20, 452014: 49, 452088: 40, 452104: 7, 452260: 26, 452299: 80, 452419: 4, 452421: 7, 452559: 99, 452624: 29, 452707: 
55, 452794: 66, 452904: 58, 453008: 70, 453159: 99, 453183: 76, 453225: 23, 453257: 39, 453264: 77, 453327: 32, 453349: 32, 453386: 61, 453420: 73, 453564: 49, 453584: 54, 453591: 43, 453669: 82, 453676: 35, 453722: 77, 453728: 63, 453768: 7, 453837: 45, 453888: 14, 453895: 52, 453916: 72, 453988: 81, 454137: 26, 454197: 93, 454250: 97, 454258: 46, 454324: 59, 454338: 67, 454362: 2, 454447: 62, 454505: 77, 454563: 20, 454658: 3, 454801: 49, 454806: 60, 454836: 90, 454908: 35, 454990: 85, 455130: 86, 455190: 35, 455244: 6, 455258: 90, 455297: 18, 455388: 44, 455422: 27, 455435: 38, 455436: 27, 455484: 45, 455585: 26, 455600: 79, 455657: 61, 455994: 67, 456040: 51, 456054: 99, 456078: 66, 456138: 85, 456242: 48, 456267: 76, 456353: 68, 456374: 75, 456406: 72, 456413: 33, 456523: 90, 456537: 95, 456541: 71, 456609: 70, 456632: 74, 456661: 70, 456664: 86, 456690: 55, 456702: 55, 456704: 97, 456727: 27, 456763: 96, 456826: 95, 456861: 65, 456899: 7, 456974: 3, 456980: 70, 457018: 74, 457042: 65, 457066: 94, 457181: 53, 457238: 7, 457321: 1, 457383: 69, 457406: 44, 457418: 31, 457454: 64, 457469: 33, 457675: 9, 457678: 7, 457704: 96, 457732: 34, 457738: 32, 457858: 56, 457932: 90, 458004: 88, 458114: 92, 458251: 68, 458290: 72, 458491: 72, 458599: 64, 458607: 61, 458663: 64, 458714: 85, 458723: 49, 458746: 91, 458771: 90, 458898: 18, 459047: 56, 459157: 21, 459182: 32, 459221: 12, 459230: 60, 459457: 50, 459536: 10, 459596: 1, 459968: 32, 459974: 40, 460039: 87, 460156: 88, 460194: 61, 460195: 98, 460222: 91, 460268: 29, 460410: 51, 460487: 9, 460529: 5, 460543: 94, 460730: 74, 460735: 44, 460747: 3, 460787: 24, 460827: 68, 460905: 75, 460965: 70, 460968: 93, 461123: 24, 461261: 36, 461585: 70, 461598: 78, 461675: 32, 461678: 54, 461723: 7, 462031: 58, 462062: 43, 462073: 57, 462123: 69, 462348: 67, 462546: 18, 462569: 46, 462672: 47, 462768: 3, 462881: 56, 462884: 5, 462888: 42, 462889: 2, 462936: 8, 463042: 13, 463124: 85, 463184: 71, 463299: 88, 463317: 69, 463324: 
49, 463403: 29, 463417: 23, 463505: 76, 463509: 31, 463566: 11, 463599: 82, 463711: 16, 463895: 36, 463999: 89, 464053: 2, 464054: 6, 464085: 51, 464101: 62, 464140: 82, 464225: 81, 464373: 45, 464397: 34, 464421: 28, 464509: 75, 464575: 10, 464650: 27, 464853: 39, 464922: 71, 464994: 48, 465212: 44, 465343: 6, 465411: 2, 465441: 74, 465457: 93, 465566: 53, 465605: 87, 465613: 93, 465682: 0, 465699: 53, 465721: 4, 465777: 26, 465784: 11, 465803: 60, 465904: 53, 465948: 78, 465993: 63, 465998: 93, 466048: 92, 466128: 46, 466148: 90, 466190: 32, 466338: 21, 466367: 15, 466454: 31, 466486: 70, 466633: 42, 466748: 39, 466762: 10, 466769: 2, 466809: 36, 466819: 47, 467027: 82, 467034: 4, 467051: 40, 467079: 12, 467138: 79, 467139: 26, 467268: 31, 467318: 61, 467397: 80, 467406: 30, 467425: 40, 467454: 1, 467547: 18, 467584: 78, 467585: 83, 467663: 3, 467705: 55, 467790: 74, 467836: 31, 467842: 62, 467848: 60, 467916: 68, 468004: 42, 468018: 78, 468090: 88, 468100: 0, 468127: 11, 468183: 25, 468203: 37, 468206: 40, 468276: 42, 468377: 90, 468394: 20, 468407: 52, 468608: 34, 468680: 23, 468744: 30, 468791: 88, 468796: 47, 468817: 50, 468940: 44, 468960: 93, 468986: 84, 469011: 1, 469018: 67, 469071: 86, 469143: 58, 469392: 99, 469432: 89, 469458: 73, 469463: 38, 469470: 47, 469475: 76, 469482: 2, 469508: 4, 469544: 60, 469567: 26, 469734: 59, 469741: 82, 469913: 6, 469995: 79, 470363: 37, 470406: 11, 470492: 89, 470576: 78, 470578: 43, 470664: 84, 470669: 25, 470695: 8, 470699: 91, 470708: 25, 470784: 41, 471082: 88, 471112: 58, 471150: 94, 471250: 80, 471307: 62, 471398: 21, 471416: 74, 471515: 94, 471705: 41, 471724: 75, 471762: 13, 471769: 1, 471818: 81, 471827: 19, 471871: 57, 471895: 18, 471906: 27, 471996: 24, 472075: 87, 472098: 96, 472170: 54, 472242: 19, 472322: 23, 472327: 45, 472362: 44, 472421: 46, 472476: 92, 472543: 42, 472561: 35, 472655: 62, 472735: 19, 472775: 89, 472784: 3, 472833: 84, 472910: 50, 472923: 32, 472964: 60, 473001: 76, 473002: 9, 473019: 
90, 473059: 68, 473214: 80, 473285: 11, 473319: 66, 473320: 29, 473668: 2, 473701: 97, 473915: 49, 474013: 69, 474023: 10, 474081: 86, 474127: 59, 474153: 36, 474177: 10, 474178: 46, 474214: 84, 474381: 35, 474428: 95, 474433: 86, 474484: 79, 474540: 33, 474603: 7, 474616: 25, 474768: 66, 474784: 21, 474883: 43, 474988: 51, 475096: 46, 475277: 30, 475411: 18, 475413: 6, 475502: 72, 475696: 66, 475732: 62, 475758: 84, 475790: 3, 475801: 98, 475940: 29, 476002: 67, 476176: 76, 476345: 58, 476356: 81, 476675: 3, 476678: 69, 476750: 20, 476853: 3, 476897: 87, 476944: 16, 476967: 29, 477017: 78, 477111: 19, 477150: 90, 477206: 48, 477297: 12, 477304: 57, 477428: 60, 477441: 26, 477671: 22, 477722: 59, 477782: 30, 477788: 78, 477806: 61, 477845: 76, 477903: 47, 477969: 14, 478153: 12, 478416: 63, 478436: 94, 478591: 91, 478625: 14, 478692: 49, 478752: 71, 478808: 98, 478843: 33, 478897: 11, 478921: 21, 479031: 36, 479297: 62, 479481: 70, 479519: 64, 479528: 94, 479592: 28, 479697: 48, 479745: 70, 479850: 8, 479993: 90, 479999: 18, 480025: 74, 480046: 11, 480106: 87, 480124: 76, 480143: 96, 480220: 90, 480243: 6, 480361: 32, 480466: 90, 480510: 25, 480514: 5, 480516: 35, 480626: 41, 480629: 0, 480644: 83, 480676: 85, 480764: 47, 480882: 5, 480929: 44, 481103: 41, 481162: 97, 481174: 66, 481202: 11, 481302: 0, 481450: 85, 481520: 40, 481617: 86, 481653: 29, 481678: 66, 481755: 87, 481829: 49, 481842: 82, 481890: 78, 481920: 21, 481943: 0, 481989: 88, 482070: 67, 482081: 21, 482221: 84, 482223: 71, 482241: 99, 482263: 58, 482376: 89, 482385: 16, 482443: 9, 482489: 75, 482500: 23, 482644: 99, 482647: 30, 482743: 1, 482754: 78, 482826: 8, 482838: 16, 482923: 9, 482936: 10, 482966: 7, 483272: 44, 483536: 60, 483613: 41, 483685: 39, 483706: 30, 483784: 53, 483865: 68, 483881: 15, 483918: 19, 483948: 5, 483971: 22, 484083: 87, 484175: 40, 484323: 50, 484333: 96, 484397: 56, 484443: 0, 484752: 28, 484803: 0, 484804: 16, 484879: 78, 484913: 91, 484926: 83, 485042: 73, 485065: 69, 
485130: 66, 485258: 78, 485270: 45, 485352: 45, 485501: 2, 485639: 54, 485650: 55, 485739: 95, 485820: 43, 485873: 73, 485892: 53, 485915: 92, 486116: 67, 486133: 50, 486170: 29, 486206: 6, 486253: 61, 486260: 89, 486291: 33, 486491: 49, 486580: 2, 486591: 11, 486715: 13, 486731: 57, 486770: 98, 486889: 23, 486908: 92, 486924: 34, 487065: 46, 487211: 10, 487407: 53, 487430: 23, 487458: 65, 487511: 48, 487667: 75, 487705: 49, 487812: 70, 487861: 83, 487869: 76, 487913: 18, 487918: 71, 487933: 88, 487952: 64, 488016: 24, 488106: 10, 488119: 95, 488225: 93, 488230: 74, 488286: 12, 488389: 40, 488401: 57, 488474: 67, 488483: 61, 488494: 8, 488510: 48, 488533: 73, 488554: 18, 488584: 6, 488841: 82, 488876: 38, 489081: 14, 489147: 92, 489199: 26, 489244: 78, 489352: 18, 489473: 3, 489512: 37, 489518: 63, 489717: 69, 489741: 73, 489763: 13, 489810: 35, 489835: 26, 489917: 8, 489956: 60, 490044: 20, 490068: 92, 490142: 72, 490168: 15, 490196: 51, 490212: 40, 490214: 52, 490302: 88, 490316: 53, 490328: 26, 490402: 43, 490430: 75, 490492: 47, 490519: 22, 490564: 20, 490800: 62, 490889: 46, 490913: 22, 490935: 24, 490948: 83, 490991: 23, 491002: 38, 491084: 90, 491169: 45, 491258: 52, 491333: 17, 491525: 85, 491784: 17, 491789: 53, 491810: 77, 491848: 69, 491861: 99, 491952: 77, 491996: 10, 492037: 29, 492119: 51, 492131: 33, 492244: 18, 492269: 25, 492358: 44, 492368: 5, 492386: 87, 492444: 95, 492478: 15, 492482: 99, 492505: 17, 492736: 90, 492798: 25, 492833: 77, 492917: 14, 492921: 60, 493008: 1, 493079: 7, 493107: 49, 493124: 42, 493125: 91, 493145: 57, 493289: 93, 493505: 32, 493601: 64, 493611: 84, 493761: 95, 493900: 30, 493987: 14, 494013: 55, 494020: 73, 494077: 79, 494088: 29, 494154: 96, 494296: 63, 494366: 73, 494523: 67, 494533: 55, 494539: 3, 494637: 27, 494645: 19, 494687: 64, 494821: 42, 494995: 79, 495017: 64, 495035: 11, 495061: 10, 495206: 15, 495218: 81, 495248: 40, 495277: 44, 495334: 74, 495390: 19, 495412: 60, 495625: 81, 495780: 38, 495816: 96, 
495929: 48, 496119: 41, 496235: 32, 496315: 85, 496337: 96, 496356: 27, 496619: 7, 496669: 61, 496681: 97, 496800: 47, 496804: 0, 496834: 91, 496868: 88, 496959: 43, 497126: 20, 497140: 48, 497266: 95, 497329: 71, 497330: 6, 497375: 11, 497452: 1, 497567: 49, 497620: 83, 497878: 71, 497907: 33, 498022: 37, 498040: 7, 498075: 77, 498314: 3, 498337: 27, 498345: 1, 498346: 58, 498644: 70, 498645: 98, 498742: 54, 498758: 33, 498792: 50, 498846: 47, 498853: 78, 498856: 67, 498920: 17, 498945: 53, 499061: 70, 499065: 53, 499094: 44, 499107: 99, 499295: 23, 499312: 67, 499492: 57, 499507: 95, 499565: 5, 499579: 79, 499626: 11, 499651: 92, 499922: 91, 499967: 11, 500054: 33, 500093: 76, 500130: 49, 500187: 42, 500198: 19, 500206: 58, 500254: 21, 500302: 4, 500406: 31, 500484: 50, 500497: 46, 500588: 48, 500600: 23, 500620: 37, 500684: 16, 500700: 91, 500723: 10, 500746: 95, 500750: 53, 500758: 59, 500821: 28, 500895: 52, 500955: 92, 501034: 23, 501135: 77, 501143: 54, 501219: 61, 501319: 14, 501343: 93, 501403: 28, 501412: 62, 501420: 97, 501569: 16, 501599: 3, 501648: 44, 501678: 19, 501715: 64, 501843: 29, 501883: 47, 501955: 1, 502185: 43, 502198: 9, 502305: 77, 502330: 53, 502433: 46, 502438: 17, 502460: 51, 502549: 63, 502629: 48, 502641: 97, 502654: 17, 502827: 0, 502900: 82, 502910: 16, 502932: 0, 503108: 91, 503180: 92, 503220: 45, 503231: 77, 503234: 70, 503410: 69, 503415: 21, 503614: 52, 503633: 72, 503668: 42, 503795: 65, 503951: 46, 503960: 53, 503990: 80, 504053: 13, 504064: 52, 504125: 74, 504130: 20, 504146: 99, 504187: 48, 504214: 39, 504231: 27, 504261: 8, 504279: 22, 504318: 48, 504359: 75, 504393: 52, 504653: 83, 504655: 32, 504705: 69, 504717: 40, 504901: 55, 505042: 63, 505181: 48, 505197: 51, 505230: 26, 505241: 2, 505315: 35, 505339: 70, 505345: 9, 505395: 78, 505477: 78, 505589: 47, 505644: 24, 505687: 72, 505829: 28, 505910: 86, 505962: 73, 506010: 74, 506049: 86, 506161: 83, 506271: 94, 506304: 65, 506351: 15, 506497: 60, 506701: 96, 507051: 14, 
507065: 41, 507134: 74, 507140: 35, 507185: 80, 507199: 28, 507302: 17, 507332: 66, 507349: 50, 507363: 20, 507419: 16, 507429: 22, 507440: 92, 507454: 56, 507678: 73, 507681: 28, 507685: 55, 507760: 12, 507790: 92, 507815: 6, 507880: 93, 507931: 23, 507953: 94, 507992: 13, 508056: 84, 508134: 98, 508238: 86, 508240: 43, 508263: 35, 508354: 83, 508357: 6, 508381: 64, 508455: 25, 508463: 83, 508468: 6, 508622: 51, 508627: 65, 508708: 28, 508815: 16, 508862: 20, 508889: 62, 508892: 25, 508975: 8, 509081: 2, 509187: 73, 509188: 41, 509210: 49, 509260: 48, 509365: 25, 509387: 2, 509439: 83, 509445: 54, 509519: 94, 509573: 99, 509576: 91, 509711: 6, 509732: 97, 509796: 74, 509810: 42, 509846: 66, 509858: 42, 510080: 62, 510133: 97, 510154: 26, 510248: 45, 510306: 28, 510335: 95, 510345: 36, 510440: 92, 510462: 37, 510476: 89, 510481: 3, 510563: 89, 510572: 89, 510597: 51, 510863: 21, 510982: 23, 511051: 48, 511067: 42, 511330: 81, 511380: 79, 511414: 4, 511630: 71, 511646: 3, 511664: 88, 511678: 43, 511755: 90, 511977: 52, 511984: 84, 512138: 30, 512183: 26, 512221: 78, 512358: 66, 512413: 28, 512446: 52, 512454: 1, 512530: 81, 512532: 15, 512670: 93, 512683: 78, 512726: 20, 512856: 3, 512903: 89, 512932: 46, 512953: 17, 512965: 4, 512982: 15, 513039: 13, 513216: 82, 513242: 81, 513255: 75, 513390: 63, 513439: 98, 513576: 43, 513617: 69, 513625: 59, 513672: 59, 513688: 9, 514086: 22, 514131: 26, 514151: 39, 514357: 1, 514384: 81, 514487: 96, 514530: 40, 514571: 82, 514572: 9, 514656: 37, 514689: 96, 514737: 19, 514759: 61, 514820: 15, 514843: 84, 514853: 80, 515021: 17, 515052: 63, 515095: 96, 515102: 76, 515109: 96, 515128: 86, 515196: 94, 515229: 13, 515368: 90, 515508: 37, 515673: 26, 515806: 75, 515863: 13, 515891: 49, 516162: 4, 516216: 97, 516233: 61, 516234: 74, 516251: 39, 516264: 62, 516325: 9, 516326: 16, 516400: 55, 516573: 19, 516579: 10, 516656: 49, 516724: 21, 516736: 29, 516761: 89, 516823: 12, 516839: 27, 516853: 17, 516888: 10, 516909: 37, 517043: 70, 
517149: 57, 517151: 92, 517165: 96, 517170: 41, 517249: 71, 517250: 80, 517287: 85, 517463: 10, 517468: 3, 517608: 24, 517665: 81, 517751: 22, 517870: 4, 517892: 17, 517934: 7, 518028: 88, 518050: 14, 518060: 56, 518092: 90, 518133: 85, 518219: 97, 518272: 70, 518398: 1, 518501: 79, 518513: 13, 518852: 31, 518977: 71, 519320: 8, 519371: 57, 519387: 62, 519423: 40, 519447: 74, 519482: 60, 519545: 31, 519567: 38, 519738: 7, 519761: 13, 519877: 85, 519911: 9, 519929: 75, 519956: 34, 519970: 23, 520015: 90, 520048: 5, 520054: 25, 520067: 60, 520144: 14, 520223: 27, 520355: 16, 520455: 22, 520494: 17, 520629: 37, 520647: 63, 520668: 77, 520693: 34, 520853: 71, 520871: 27, 520957: 36, 521100: 20, 521125: 11, 521186: 51, 521326: 29, 521475: 1, 521619: 14, 521635: 56, 521643: 42, 521667: 88, 521702: 7, 521826: 82, 521897: 53, 522035: 85, 522063: 18, 522093: 93, 522201: 39, 522218: 88, 522244: 20, 522303: 98, 522472: 83, 522617: 79, 522876: 33, 522908: 22, 523025: 57, 523047: 4, 523235: 93, 523335: 37, 523573: 77, 523686: 11, 523692: 50, 523752: 94, 523786: 43, 523820: 92, 523823: 73, 523911: 67, 523934: 75, 523958: 24, 523989: 92, 524021: 34, 524093: 57, 524119: 23, 524125: 50, 524276: 28, 524317: 6, 524477: 44, 524535: 51, 524555: 7, 524601: 91, 524628: 66, 524721: 46, 524739: 63, 524830: 8, 524882: 46, 524886: 70, 524922: 72, 524980: 96, 525010: 13, 525078: 22, 525257: 13, 525267: 24, 525269: 39, 525275: 72, 525325: 44, 525386: 69, 525473: 90, 525485: 42, 525503: 23, 525585: 37, 525796: 55, 525811: 40, 525902: 82, 525907: 96, 526069: 95, 526081: 8, 526088: 98, 526211: 39, 526285: 24, 526462: 15, 526463: 56, 526527: 49, 526532: 13, 526566: 61, 526587: 12, 526612: 61, 526641: 25, 526644: 90, 526673: 53, 526688: 29, 526742: 34, 526780: 82, 526799: 46, 527009: 28, 527065: 70, 527213: 67, 527266: 70, 527315: 2, 527381: 90, 527425: 14, 527461: 49, 527536: 17, 527671: 18, 527822: 80, 527961: 44, 528050: 42, 528171: 9, 528212: 70, 528283: 38, 528304: 37, 528319: 56, 528352: 0, 
528428: 42, 528452: 35, 528512: 47, 528540: 48, 528685: 79, 528706: 43, 528742: 29, 528789: 12, 528813: 92, 528976: 32, 528992: 1, 529005: 6, 529104: 71, 529163: 61, 529194: 37, 529286: 25, 529481: 11, 529602: 79, 529642: 12, 529686: 47, 529815: 10, 529835: 81, 529957: 73, 529969: 61, 530003: 36, 530062: 8, 530099: 94, 530153: 27, 530186: 53, 530194: 66, 530236: 99, 530263: 19, 530428: 97, 530479: 17, 530535: 74, 530558: 58, 530590: 28, 530726: 0, 530771: 91, 530864: 16, 530893: 86, 530950: 4, 530957: 6, 530988: 0, 531013: 12, 531066: 9, 531081: 75, 531115: 96, 531261: 79, 531288: 15, 531302: 2, 531328: 80, 531458: 64, 531483: 38, 531632: 18, 531652: 97, 531661: 39, 531848: 43, 531871: 6, 531875: 56, 532076: 46, 532154: 60, 532155: 55, 532379: 31, 532465: 36, 532481: 54, 532566: 71, 532599: 61, 532609: 92, 532672: 45, 532689: 69, 532801: 97, 532806: 51, 532833: 88, 532857: 37, 532866: 81, 532873: 51, 533017: 46, 533066: 73, 533101: 32, 533122: 14, 533181: 6, 533281: 11, 533350: 30, 533518: 13, 533556: 12, 533569: 80, 533627: 77, 533659: 15, 533674: 43, 533676: 96, 533716: 86, 533867: 81, 533916: 76, 533957: 3, 533962: 88, 533994: 72, 533996: 86, 534005: 73, 534014: 69, 534024: 83, 534081: 96, 534134: 11, 534139: 47, 534158: 61, 534161: 47, 534236: 78, 534363: 42, 534411: 69, 534416: 42, 534605: 40, 534620: 60, 534719: 41, 534800: 90, 534854: 17, 534924: 16, 534974: 23, 535041: 83, 535043: 27, 535152: 92, 535200: 37, 535201: 57, 535209: 87, 535260: 52, 535339: 64, 535400: 73, 535513: 55, 535519: 41, 535556: 98, 535627: 74, 535697: 51, 535704: 91, 535733: 37, 535735: 20, 535755: 59, 535765: 32, 535834: 70, 535950: 3, 535959: 32, 535964: 83, 536011: 71, 536028: 12, 536033: 48, 536229: 50, 536267: 36, 536291: 19, 536384: 96, 536394: 10, 536441: 79, 536443: 85, 536458: 3, 536566: 52, 536610: 76, 536687: 32, 536705: 99, 536762: 26, 536789: 86, 536970: 80, 537209: 51, 537596: 63, 537599: 30, 537652: 48, 537700: 36, 537719: 50, 537732: 63, 537753: 58, 537770: 40, 537771: 
11, 537803: 78, 537848: 95, 537917: 38, 537949: 27, 538026: 21, 538160: 37, 538389: 84, 538642: 75, 538710: 1, 538757: 90, 538812: 19, 538819: 98, 538831: 69, 538835: 39, 539087: 31, 539089: 54, 539091: 14, 539100: 38, 539135: 97, 539147: 17, 539178: 4, 539357: 81, 539376: 94, 539456: 92, 539621: 79, 539672: 89, 539693: 31, 539783: 19, 539799: 75, 539947: 92, 539969: 61, 540099: 68, 540366: 35, 540404: 29, 540411: 41, 540554: 99, 540646: 40, 540648: 93, 540679: 10, 540704: 67, 540749: 37, 540989: 92, 541023: 35, 541117: 84, 541121: 77, 541344: 95, 541386: 15, 541438: 81, 541471: 64, 541552: 5, 541627: 59, 541659: 74, 541858: 58, 541886: 88, 541909: 79, 541925: 16, 541962: 24, 542171: 12, 542197: 8, 542389: 72, 542393: 86, 542436: 22, 542554: 98, 542566: 18, 542667: 48, 542698: 70, 542712: 38, 542715: 33, 542850: 26, 542948: 82, 543252: 95, 543322: 44, 543387: 11, 543429: 65, 543582: 40, 543716: 51, 543765: 44, 543783: 11, 543814: 71, 543900: 18, 543912: 73, 543939: 6, 543972: 69, 543979: 2, 544025: 11, 544029: 35, 544094: 76, 544113: 21, 544175: 14, 544220: 84, 544267: 32, 544362: 29, 544528: 81, 544741: 27, 544793: 16, 544874: 36, 544941: 20, 544952: 16, 544998: 42, 545217: 24, 545243: 18, 545254: 53, 545366: 75, 545372: 94, 545381: 28, 545382: 62, 545529: 94, 545533: 76, 545591: 98, 545618: 67, 545639: 34, 545682: 68, 545724: 46, 545798: 71, 545799: 56, 545807: 3, 545920: 62, 545945: 20, 546042: 57, 546096: 89, 546125: 48, 546191: 25, 546217: 22, 546257: 57, 546409: 59, 546494: 9, 546577: 25, 546587: 50, 546646: 13, 546716: 55, 546728: 91, 546975: 29, 547229: 73, 547254: 27, 547255: 35, 547390: 82, 547547: 5, 547565: 32, 547578: 66, 547603: 20, 547640: 93, 547716: 8, 547718: 78, 547830: 38, 547846: 34, 547891: 73, 547936: 20, 548084: 4, 548103: 4, 548263: 83, 548335: 55, 548339: 19, 548345: 97, 548402: 50, 548471: 4, 548495: 31, 548499: 94, 548553: 11, 548716: 8, 548733: 84, 548766: 37, 548966: 76, 549036: 76, 549059: 47, 549140: 61, 549263: 90, 549269: 78, 
549317: 68, 549335: 78, 549367: 70, 549384: 25, 549404: 39, 549548: 74, 549590: 16, 549621: 86, 549639: 85, 549661: 13, 549685: 58, 549770: 22, 549778: 17, 549824: 20, 549845: 50, 549980: 16, 550005: 54, 550014: 57, 550073: 20, 550084: 85, 550243: 80, 550307: 14, 550352: 70, 550379: 59, 550403: 24, 550406: 8, 550446: 64, 550510: 6, 550523: 50, 550551: 73, 550600: 38, 550619: 48, 550764: 38, 550772: 45, 550823: 29, 550864: 96, 550901: 66, 550988: 31, 551063: 59, 551121: 59, 551191: 41, 551243: 63, 551332: 70, 551391: 0, 551393: 44, 551411: 54, 551429: 92, 551455: 42, 551654: 34, 551756: 29, 551805: 69, 551807: 23, 551838: 92, 551892: 80, 551946: 21, 552037: 26, 552075: 19, 552121: 79, 552151: 57, 552155: 99, 552193: 23, 552378: 94, 552497: 85, 552640: 44, 552648: 47, 552751: 76, 552784: 49, 552855: 93, 552876: 93, 553015: 52, 553056: 85, 553210: 66, 553261: 0, 553282: 65, 553428: 63, 553448: 63, 553461: 80, 553480: 39, 553543: 64, 553641: 45, 553648: 29, 553799: 83, 553885: 38, 553952: 85, 554228: 84, 554271: 1, 554289: 97, 554302: 19, 554313: 12, 554324: 30, 554356: 60, 554359: 56, 554469: 14, 554501: 24, 554502: 14, 554544: 69, 554567: 56, 554664: 96, 554707: 38, 554741: 36, 554798: 34, 554843: 61, 554858: 29, 554878: 80, 554925: 39, 554983: 65, 555057: 27, 555105: 65, 555147: 3, 555194: 52, 555215: 29, 555277: 72, 555368: 24, 555459: 59, 555461: 56, 555753: 35, 555754: 92, 555796: 5, 555799: 77, 555846: 66, 555894: 58, 555911: 65, 556123: 94, 556178: 56, 556219: 60, 556221: 74, 556240: 58, 556253: 2, 556327: 57, 556347: 25, 556408: 40, 556429: 99, 556468: 2, 556581: 81, 556601: 46, 556636: 44, 556717: 26, 556751: 61, 556757: 6, 556761: 0, 556808: 95, 556854: 98, 556855: 50, 556915: 91, 557014: 92, 557018: 9, 557036: 63, 557260: 52, 557310: 30, 557334: 90, 557338: 78, 557382: 43, 557398: 38, 557474: 16, 557527: 50, 557611: 9, 557618: 56, 557663: 49, 557758: 70, 557853: 97, 557861: 40, 557928: 20, 557959: 88, 557982: 53, 558047: 79, 558153: 35, 558260: 7, 558278: 
69, 558512: 13, 558528: 10, 558598: 2, 558637: 51, 558651: 88, 558712: 70, 558800: 45, 558847: 17, 558969: 76, 559061: 72, 559232: 65, 559296: 26, 559420: 89, 559680: 94, 559708: 13, 559718: 72, 559803: 55, 559811: 10, 559903: 51, 560026: 30, 560044: 15, 560213: 86, 560219: 49, 560220: 67, 560474: 44, 560673: 97, 560726: 43, 560827: 73, 560845: 11, 560859: 64, 560897: 19, 560926: 62, 560932: 46, 561008: 91, 561166: 19, 561289: 44, 561310: 11, 561452: 29, 561549: 10, 561625: 64, 561647: 34, 561672: 66, 561814: 9, 561924: 50, 561940: 96, 561941: 97, 561957: 61, 562122: 54, 562144: 57, 562185: 15, 562285: 12, 562306: 24, 562388: 88, 562396: 44, 562539: 68, 562616: 99, 562681: 62, 562695: 24, 562767: 48, 562872: 77, 562895: 1, 562938: 62, 563155: 42, 563198: 80, 563270: 41, 563336: 89, 563464: 17, 563514: 45, 563527: 52, 563626: 46, 563740: 92, 563750: 21, 563779: 64, 563784: 68, 563796: 51, 563808: 67, 563844: 15, 563869: 4, 563905: 62, 564044: 82, 564069: 56, 564089: 25, 564130: 6, 564169: 32, 564474: 46, 564494: 72, 564509: 3, 564534: 83, 564617: 16, 564736: 33, 564784: 93, 565023: 69, 565061: 71, 565105: 43, 565128: 37, 565198: 66, 565290: 46, 565299: 0, 565326: 96, 565495: 73, 565522: 38, 565574: 86, 565588: 50, 565624: 17, 565781: 46, 565796: 14, 565798: 68, 565813: 87, 565859: 15, 565889: 9, 565910: 5, 566125: 71, 566205: 85, 566401: 30, 566453: 1, 566533: 71, 566573: 29, 566623: 37, 566628: 21, 566812: 36, 566830: 69, 566944: 60, 567121: 43, 567171: 95, 567185: 35, 567262: 48, 567378: 81, 567389: 18, 567512: 79, 567563: 38, 567623: 4, 567649: 46, 567676: 14, 567745: 96, 567760: 49, 567792: 38, 567832: 4, 567875: 94, 567881: 28, 567946: 21, 567956: 34, 568005: 54, 568061: 82, 568320: 14, 568596: 70, 568613: 79, 568658: 33, 568708: 23, 568788: 10, 568859: 57, 569040: 10, 569151: 59, 569162: 16, 569200: 95, 569207: 76, 569331: 10, 569585: 52, 569658: 31, 569665: 37, 569720: 42, 569740: 78, 569802: 5, 569839: 6, 569960: 10, 569963: 26, 570013: 77, 570061: 85, 
570099: 63, 570151: 53, 570220: 94, 570264: 96, 570322: 1, 570443: 87, 570444: 18, 570487: 26, 570490: 35, 570558: 75, 570634: 40, 570736: 94, 570788: 0, 570892: 22, 570938: 24, 570982: 59, 571022: 83, 571030: 3, 571032: 69, 571115: 64, 571160: 84, 571200: 44, 571249: 78, 571310: 4, 571353: 42, 571399: 34, 571458: 24, 571498: 12, 571633: 40, 571799: 49, 571864: 71, 571912: 83, 571998: 66, 572143: 4, 572228: 61, 572283: 77, 572299: 19, 572316: 27, 572382: 14, 572495: 56, 572534: 87, 572553: 54, 572679: 37, 572686: 46, 572708: 47, 572735: 62, 572749: 42, 572777: 63, 572800: 56, 572868: 92, 572885: 82, 572944: 96, 572964: 94, 572998: 51, 573096: 64, 573210: 9, 573235: 25, 573272: 32, 573292: 98, 573304: 20, 573318: 16, 573419: 23, 573472: 65, 573599: 0, 573626: 73, 573663: 8, 573747: 59, 573821: 13, 573967: 96, 574102: 77, 574104: 59, 574109: 23, 574156: 16, 574184: 67, 574188: 33, 574367: 68, 574491: 7, 574516: 42, 574566: 19, 574575: 70, 574608: 26, 574700: 0, 574760: 49, 574829: 38, 574903: 74, 574908: 56, 574921: 68, 574962: 87, 574979: 26, 575009: 11, 575077: 14, 575152: 49, 575163: 58, 575175: 64, 575233: 14, 575234: 68, 575246: 22, 575271: 94, 575322: 81, 575326: 72, 575369: 76, 575418: 35, 575421: 56, 575459: 6, 575511: 46, 575517: 54, 575642: 45, 575755: 62, 575926: 72, 576038: 92, 576115: 38, 576278: 98, 576399: 4, 576404: 64, 576405: 8, 576566: 77, 576618: 38, 576660: 7, 576665: 40, 576695: 22, 576718: 92, 576722: 77, 576730: 29, 576744: 72, 576815: 4, 576845: 98, 577082: 88, 577124: 80, 577167: 60, 577239: 71, 577338: 17, 577422: 97, 577560: 30, 577782: 49, 577959: 69, 578003: 29, 578011: 81, 578033: 14, 578043: 82, 578171: 29, 578180: 97, 578196: 99, 578204: 69, 578502: 56, 578574: 36, 578615: 99, 578619: 37, 578625: 6, 578636: 11, 578639: 62, 578939: 42, 578945: 92, 579027: 33, 579038: 31, 579057: 28, 579182: 74, 579292: 32, 579318: 93, 579345: 43, 579351: 25, 579477: 63, 579487: 43, 579671: 58, 579800: 89, 579836: 31, 579917: 66, 579965: 45, 579983: 17, 
580222: 76, 580246: 48, 580343: 11, 580462: 26, 580573: 21, 580596: 88, 580674: 72, 580783: 57, 580832: 20, 580863: 75, 581035: 21, 581200: 90, 581215: 96, 581264: 53, 581335: 91, 581397: 15, 581452: 94, 581473: 66, 581671: 48, 581704: 44, 581792: 0, 581801: 37, 581892: 4, 581925: 63, 581960: 42, 581996: 77, 582005: 25, 582044: 86, 582130: 35, 582135: 93, 582157: 25, 582174: 61, 582263: 1, 582300: 29, 582462: 11, 582533: 6, 582663: 39, 582749: 12, 582835: 84, 582852: 9, 582891: 9, 583046: 58, 583069: 2, 583097: 43, 583274: 72, 583405: 81, 583523: 61, 583544: 48, 583589: 83, 583702: 57, 583739: 95, 583745: 33, 583753: 84, 583939: 63, 584131: 98, 584170: 14, 584246: 31, 584354: 49, 584453: 54, 584490: 96, 584708: 25, 584741: 10, 584828: 79, 584888: 85, 584901: 41, 584918: 73, 585168: 84, 585216: 68, 585264: 17, 585515: 56, 585518: 36, 585575: 83, 585665: 18, 585759: 96, 585764: 26, 585771: 49, 585773: 31, 585850: 26, 585904: 99, 585973: 28, 586042: 20, 586049: 29, 586057: 77, 586066: 20, 586119: 72, 586173: 37, 586215: 91, 586336: 88, 586360: 27, 586370: 83, 586398: 33, 586442: 62, 586633: 78, 586643: 95, 586670: 97, 586692: 69, 586694: 44, 586767: 94, 586932: 51, 586964: 92, 587000: 81, 587063: 11, 587248: 88, 587483: 8, 587601: 57, 587620: 19, 587641: 33, 587654: 65, 587659: 21, 587688: 57, 587706: 0, 587808: 44, 587834: 66, 587862: 43, 587945: 87, 588035: 18, 588040: 34, 588044: 76, 588052: 15, 588106: 28, 588329: 99, 588553: 54, 588589: 44, 588651: 90, 588692: 67, 588713: 5, 588817: 33, 588869: 36, 588927: 4, 589029: 33, 589047: 40, 589181: 10, 589299: 2, 589301: 63, 589311: 46, 589344: 95, 589348: 52, 589410: 50, 589430: 1, 589496: 94, 589538: 77, 589574: 79, 589594: 3, 589632: 38, 589661: 51, 589665: 45, 589723: 64, 589731: 46, 589762: 92, 589837: 15, 589914: 21, 589975: 2, 590032: 6, 590058: 60, 590202: 30, 590225: 86, 590455: 47, 590478: 22, 590494: 12, 590645: 18, 590771: 42, 590846: 4, 590847: 68, 590850: 75, 591016: 15, 591028: 62, 591077: 2, 591138: 51, 
591205: 14, 591339: 8, 591401: 34, 591513: 22, 591564: 8, 591634: 37, 591637: 20, 591683: 87, 591854: 60, 591872: 29, 591917: 97, 591949: 41, 592104: 16, 592164: 53, 592218: 11, 592246: 82, 592301: 97, 592327: 60, 592517: 76, 592555: 44, 592670: 28, 592729: 75, 592833: 98, 593011: 8, 593246: 31, 593254: 97, 593294: 11, 593365: 83, 593378: 39, 593449: 34, 593456: 74, 593478: 61, 593481: 8, 593785: 71, 593852: 25, 593964: 20, 594093: 10, 594119: 31, 594239: 45, 594378: 60, 594414: 26, 594467: 54, 594541: 71, 594555: 82, 594604: 61, 594667: 63, 594780: 25, 594803: 53, 594986: 9, 595143: 67, 595189: 38, 595217: 53, 595224: 24, 595447: 55, 595663: 33, 595858: 83, 595897: 2, 595905: 52, 595962: 50, 596037: 87, 596052: 55, 596138: 4, 596572: 63, 596586: 72, 596667: 76, 596753: 56, 596803: 24, 596849: 17, 596895: 14, 596943: 21, 596992: 20, 596994: 57, 597030: 20, 597223: 72, 597250: 81, 597273: 90, 597283: 35, 597372: 76, 597431: 54, 597462: 4, 597555: 16, 597566: 11, 597571: 3, 597667: 3, 597702: 88, 597770: 76, 598059: 55, 598075: 44, 598426: 93, 598453: 73, 598489: 83, 598492: 78, 598707: 0, 598736: 86, 598739: 0, 598939: 34, 599002: 94, 599117: 63, 599138: 92, 599157: 83, 599186: 13, 599232: 95, 599253: 85, 599264: 24, 599280: 27, 599322: 44, 599482: 20, 599516: 72, 599546: 91, 599662: 83, 599677: 5, 599712: 32, 599858: 68, 599935: 7, 599944: 66, 599964: 80, 600059: 28, 600061: 63, 600159: 62, 600218: 9, 600239: 96, 600256: 15, 600261: 90, 600389: 87, 600461: 63, 600511: 6, 600551: 9, 600572: 46, 600587: 16, 600603: 86, 600724: 63, 600791: 19, 600793: 23, 600817: 13, 601109: 36, 601144: 66, 601183: 21, 601191: 96, 601221: 17, 601345: 76, 601396: 50, 601398: 7, 601506: 73, 601542: 55, 601553: 42, 601610: 86, 601611: 28, 601634: 26, 601635: 37, 601711: 84, 601738: 67, 601752: 24, 601799: 79, 601806: 76, 601857: 61, 601861: 74, 602010: 65, 602023: 71, 602187: 25, 602287: 14, 602311: 50, 602315: 44, 602488: 60, 602506: 24, 602548: 11, 602560: 55, 602588: 24, 602690: 50, 
602763: 32, 602837: 8, 603023: 0, 603048: 46, 603053: 4, 603106: 42, 603111: 42, 603132: 35, 603257: 33, 603343: 33, 603406: 81, 603568: 39, 603592: 31, 603619: 50, 603651: 38, 603823: 25, 603878: 59, 603879: 38, 604060: 25, 604111: 78, 604138: 54, 604286: 46, 604328: 72, 604353: 60, 604442: 87, 604452: 31, 604497: 23, 604498: 72, 604510: 49, 604517: 2, 604532: 72, 604579: 0, 604623: 47, 604866: 58, 604977: 26, 605047: 74, 605048: 71, 605058: 6, 605212: 78, 605295: 24, 605524: 6, 605561: 18, 605571: 96, 605584: 56, 605614: 67, 605636: 6, 605673: 90, 605676: 99, 605713: 63, 605897: 57, 605918: 26, 606255: 29, 606349: 76, 606431: 53, 606602: 43, 606611: 17, 606623: 68, 606650: 76, 606812: 53, 606858: 82, 606866: 42, 606893: 79, 606925: 74, 606929: 53, 607018: 99, 607046: 25, 607075: 22, 607079: 61, 607149: 89, 607284: 64, 607313: 33, 607338: 86, 607403: 42, 607466: 30, 607476: 60, 607538: 74, 607922: 42, 607950: 11, 608063: 5, 608079: 34, 608152: 89, 608201: 96, 608307: 33, 608422: 95, 608567: 92, 608658: 49, 608713: 64, 608789: 35, 608813: 40, 608824: 39, 608864: 77, 608891: 76, 608899: 73, 608910: 89, 608993: 19, 609046: 50, 609171: 95, 609214: 26, 609374: 4, 609388: 13, 609461: 10, 609675: 98, 609683: 74, 609688: 44, 609696: 95, 609768: 90, 609981: 11, 610055: 96, 610063: 73, 610076: 32, 610139: 39, 610234: 68, 610426: 84, 610429: 60, 610547: 61, 610549: 91, 610596: 27, 610622: 47, 610625: 13, 610700: 58, 610718: 70, 610747: 73, 610843: 19, 610857: 24, 610876: 12, 611144: 73, 611191: 12, 611204: 44, 611257: 52, 611311: 91, 611313: 75, 611384: 97, 611414: 51, 611459: 16, 611468: 85, 611510: 71, 611569: 14, 611602: 27, 611711: 82, 611803: 5, 611834: 10, 611841: 10, 611871: 76, 611878: 14, 611930: 57, 612164: 41, 612174: 37, 612197: 95, 612199: 57, 612301: 42, 612305: 35, 612352: 14, 612371: 70, 612412: 5, 612434: 20, 612486: 8, 612561: 47, 612663: 74, 612685: 7, 612693: 93, 612698: 25, 612704: 53, 612709: 66, 612819: 60, 612873: 32, 612893: 3, 612954: 89, 612981: 
53, 612990: 4, 613011: 53, 613058: 44, 613233: 97, 613268: 2, 613296: 3, 613322: 47, 613328: 68, 613505: 74, 613636: 86, 613668: 91, 613733: 45, 613827: 57, 613877: 27, 613895: 19, 613910: 59, 613916: 86, 614065: 49, 614066: 16, 614144: 12, 614158: 59, 614160: 57, 614161: 36, 614226: 40, 614264: 53, 614292: 67, 614299: 21, 614312: 69, 614329: 24, 614438: 93, 614447: 59, 614541: 22, 614548: 41, 614604: 10, 614656: 84, 614829: 83, 614872: 42, 614881: 10, 614891: 6, 615038: 18, 615062: 45, 615172: 72, 615178: 31, 615298: 78, 615317: 80, 615794: 37, 615815: 82, 616020: 89, 616078: 2, 616274: 85, 616283: 33, 616339: 12, 616492: 50, 616519: 85, 616538: 66, 616584: 10, 616909: 4, 616916: 6, 617031: 63, 617078: 28, 617213: 82, 617377: 6, 617466: 49, 617504: 2, 617548: 70, 617576: 29, 617641: 97, 617673: 64, 617750: 61, 617792: 4, 617870: 21, 617872: 21, 618084: 71, 618135: 88, 618317: 89, 618347: 34, 618485: 4, 618567: 76, 618728: 99, 618752: 75, 618808: 90, 618879: 79, 618890: 52, 618905: 34, 618936: 23, 618938: 35, 618958: 73, 618961: 52, 619121: 38, 619194: 95, 619222: 33, 619259: 45, 619326: 33, 619328: 24, 619333: 24, 619335: 1, 619374: 24, 619376: 93, 619396: 81, 619496: 41, 619605: 33, 619671: 38, 619737: 81, 619914: 79, 619989: 71, 620058: 32, 620060: 8, 620108: 11, 620473: 11, 620521: 37, 620610: 18, 620655: 69, 620672: 63, 620704: 83, 620714: 67, 620862: 42, 620867: 62, 621027: 87, 621029: 97, 621132: 28, 621160: 36, 621172: 92, 621198: 0, 621344: 82, 621474: 3, 621697: 25, 621725: 24, 621789: 78, 621864: 45, 621866: 72, 621870: 21, 621915: 8, 621933: 54, 622086: 67, 622121: 78, 622181: 22, 622241: 40, 622282: 54, 622331: 83, 622409: 20, 622458: 87, 622472: 40, 622534: 38, 622562: 81, 622662: 36, 622681: 78, 622683: 15, 622746: 8, 622907: 45, 622924: 64, 622944: 81, 623013: 95, 623177: 32, 623193: 96, 623381: 25, 623420: 47, 623422: 63, 623444: 93, 623578: 46, 623605: 12, 623630: 21, 623828: 74, 623897: 1, 623921: 83, 623938: 67, 624038: 97, 624060: 42, 624263: 
49, 624293: 42, 624356: 16, 624383: 10, 624389: 73, 624442: 48, 624491: 15, 624525: 24, 624549: 78, 624567: 94, 624700: 79, 624704: 30, 624713: 87, 624802: 72, 624814: 9, 624832: 43, 624905: 14, 624914: 78, 624957: 48, 624960: 36, 624978: 4, 625035: 73, 625132: 46, 625201: 96, 625326: 16, 625449: 75, 625525: 81, 625527: 60, 625602: 90, 625779: 91, 625906: 57, 625913: 89, 626004: 87, 626006: 19, 626007: 73, 626044: 5, 626057: 40, 626085: 35, 626102: 95, 626156: 50, 626184: 92, 626232: 11, 626392: 34, 626405: 52, 626452: 74, 626628: 68, 626670: 19, 626681: 11, 626797: 61, 626804: 86, 626845: 54, 627015: 42, 627030: 25, 627040: 30, 627065: 22, 627279: 20, 627339: 12, 627363: 60, 627393: 6, 627467: 28, 627484: 42, 627516: 82, 627526: 73, 627618: 29, 627625: 27, 627737: 30, 627816: 65, 627857: 58, 628203: 20, 628313: 37, 628378: 74, 628386: 49, 628573: 83, 628628: 22, 628736: 94, 628772: 47, 628835: 90, 628867: 44, 628923: 92, 628943: 51, 628986: 46, 629141: 33, 629225: 99, 629352: 4, 629498: 72, 629556: 37, 629589: 14, 629628: 50, 629631: 64, 629632: 79, 629657: 73, 629701: 67, 629806: 85, 629826: 52, 629905: 27, 629913: 30, 630002: 62, 630023: 68, 630041: 61, 630076: 17, 630091: 8, 630110: 51, 630133: 42, 630155: 92, 630169: 81, 630177: 54, 630264: 64, 630444: 19, 630531: 27, 630628: 86, 630703: 18, 630731: 76, 631046: 15, 631080: 60, 631242: 24, 631304: 43, 631306: 34, 631520: 71, 631554: 73, 631582: 76, 631599: 41, 631634: 81, 631681: 86, 631730: 8, 631781: 51, 631927: 86, 631950: 18, 631988: 57, 632141: 50, 632251: 41, 632303: 12, 632380: 80, 632415: 84, 632458: 46, 632495: 61, 632522: 7, 632631: 7, 632685: 11, 632731: 12, 632735: 81, 632847: 27, 633085: 33, 633217: 70, 633332: 30, 633350: 30, 633353: 21, 633402: 84, 633421: 39, 633451: 49, 633522: 1, 633590: 38, 633599: 75, 633686: 33, 633705: 71, 633708: 4, 633722: 67, 633835: 61, 633916: 3, 634088: 80, 634115: 88, 634205: 41, 634228: 60, 634273: 45, 634275: 46, 634371: 26, 634510: 97, 634661: 46, 634666: 82, 
634684: 91, 634745: 67, 634790: 22, 634821: 40, 634855: 52, 634929: 47, 635013: 26, 635020: 54, 635094: 91, 635189: 68, 635463: 95, 635525: 50, 635556: 32, 635602: 41, 635726: 22, 635783: 37, 635817: 27, 635837: 7, 635889: 57, 635958: 95, 636013: 12, 636039: 48, 636084: 32, 636119: 67, 636133: 77, 636159: 87, 636193: 47, 636219: 72, 636285: 86, 636408: 88, 636409: 66, 636431: 30, 636562: 26, 636572: 23, 636758: 74, 636857: 3, 636892: 0, 636896: 75, 636947: 91, 637044: 27, 637151: 40, 637160: 67, 637268: 41, 637302: 72, 637370: 48, 637582: 40, 637604: 15, 637616: 0, 637626: 13, 637659: 93, 637701: 55, 637743: 59, 637819: 56, 637862: 91, 637919: 49, 637923: 54, 637962: 14, 638163: 6, 638316: 38, 638341: 49, 638412: 64, 638448: 60, 638455: 19, 638485: 0, 638507: 67, 638525: 59, 638595: 33, 638669: 20, 638860: 60, 638951: 56, 638969: 85, 639172: 35, 639226: 35, 639345: 49, 639403: 11, 639413: 80, 639440: 82, 639467: 50, 639485: 80, 639498: 95, 639590: 86, 639638: 45, 639661: 13, 639674: 82, 639711: 62, 639717: 63, 639755: 48, 639832: 46, 639854: 43, 639888: 27, 639933: 79, 640035: 55, 640093: 81, 640142: 58, 640174: 43, 640191: 28, 640214: 37, 640218: 74, 640322: 89, 640395: 26, 640404: 32, 640415: 63, 640529: 57, 640538: 53, 640551: 64, 640553: 92, 640650: 77, 640745: 90, 640826: 22, 640829: 46, 640835: 17, 640847: 53, 640887: 79, 641007: 71, 641080: 21, 641093: 44, 641119: 66, 641160: 32, 641218: 86, 641239: 35, 641287: 87, 641339: 71, 641345: 48, 641703: 6, 641846: 32, 641873: 51, 641955: 55, 642042: 61, 642080: 90, 642085: 74, 642118: 89, 642150: 56, 642214: 30, 642231: 1, 642251: 46, 642314: 18, 642453: 56, 642557: 52, 642587: 28, 642616: 55, 642696: 57, 642772: 8, 642782: 0, 642834: 66, 642942: 38, 642957: 12, 643097: 67, 643154: 74, 643324: 3, 643398: 84, 643865: 15, 643932: 5, 643933: 15, 643972: 2, 644052: 91, 644069: 29, 644135: 28, 644181: 46, 644272: 43, 644343: 64, 644346: 49, 644401: 27, 644425: 4, 644554: 76, 644633: 11, 644686: 36, 644692: 34, 644929: 
29, 644960: 90, 644961: 53, 644994: 58, 645006: 8, 645020: 59, 645050: 84, 645102: 76, 645122: 13, 645152: 97, 645249: 57, 645288: 77, 645423: 38, 645450: 34, 645478: 7, 645564: 61, 645600: 98, 645644: 42, 645666: 84, 645784: 42, 645813: 49, 645908: 64, 645962: 83, 646006: 1, 646106: 78, 646165: 68, 646185: 75, 646212: 20, 646223: 78, 646268: 77, 646298: 80, 646394: 67, 646457: 95, 646472: 33, 646542: 42, 646607: 85, 646643: 89, 646776: 79, 646811: 61, 646832: 50, 646923: 19, 646978: 70, 647034: 29, 647067: 50, 647080: 1, 647145: 59, 647198: 18, 647232: 50, 647235: 90, 647355: 42, 647389: 32, 647541: 37, 647597: 90, 647620: 19, 647729: 75, 647764: 71, 647767: 51, 647814: 59, 647904: 85, 647909: 51, 647914: 19, 647962: 47, 647976: 97, 648061: 32, 648080: 67, 648214: 20, 648322: 30, 648351: 17, 648378: 5, 648424: 89, 648475: 43, 648498: 92, 648555: 82, 648623: 87, 648641: 36, 648707: 55, 648762: 92, 648927: 2, 649050: 41, 649081: 56, 649247: 19, 649299: 50, 649390: 60, 649399: 51, 649411: 38, 649414: 4, 649482: 26, 649627: 78, 649734: 93, 649740: 61, 649760: 23, 649856: 41, 649881: 25, 649956: 97, 649969: 68, 650156: 40, 650610: 44, 650669: 26, 650879: 38, 650908: 42, 651049: 29, 651102: 87, 651204: 93, 651263: 51, 651306: 8, 651463: 3, 651581: 60, 651596: 45, 651947: 6, 651972: 66, 651989: 44, 651991: 75, 652052: 96, 652127: 70, 652144: 88, 652284: 54, 652456: 92, 652518: 13, 652786: 8, 652821: 70, 652957: 77, 653069: 90, 653106: 38, 653242: 23, 653294: 75, 653368: 22, 653383: 29, 653410: 46, 653510: 73, 653519: 78, 653623: 89, 653646: 32, 653865: 37, 653900: 77, 653928: 49, 654050: 19, 654076: 47, 654091: 1, 654098: 95, 654153: 13, 654178: 35, 654201: 80, 654276: 2, 654286: 35, 654318: 25, 654345: 64, 654348: 43, 654408: 97, 654411: 17, 654445: 73, 654470: 59, 654510: 79, 654526: 17, 654548: 57, 654583: 3, 654607: 12, 654669: 67, 654768: 36, 654769: 93, 654803: 25, 654865: 92, 655008: 21, 655046: 52, 655100: 98, 655101: 26, 655216: 20, 655222: 34, 655248: 50, 
655449: 42, 655524: 3, 655541: 25, 655584: 16, 655595: 15, 655601: 7, 655724: 47, 655850: 53, 655881: 71, 655959: 33, 656013: 45, 656045: 89, 656077: 82, 656188: 57, 656204: 22, 656293: 62, 656345: 18, 656402: 5, 656448: 16, 656540: 98, 656598: 88, 656689: 48, 656695: 69, 656773: 50, 656792: 6, 656873: 77, 656916: 94, 656938: 33, 657025: 12, 657039: 35, 657181: 43, 657185: 43, 657298: 12, 657310: 34, 657428: 47, 657430: 59, 657473: 34, 657494: 58, 657529: 71, 657638: 61, 657750: 56, 657826: 83, 657928: 78, 657934: 37, 658225: 0, 658261: 13, 658275: 39, 658345: 45, 658362: 27, 658434: 51, 658598: 60, 658622: 93, 658638: 92, 658645: 48, 658692: 57, 658726: 45, 658826: 84, 658926: 48, 658934: 55, 659020: 45, 659135: 94, 659144: 98, 659293: 78, 659302: 83, 659430: 31, 659503: 41, 659574: 45, 659582: 95, 659718: 86, 659744: 81, 659750: 73, 659756: 95, 659761: 25, 659808: 50, 659908: 89, 659957: 89, 659974: 92, 660040: 45, 660097: 87, 660140: 37, 660166: 68, 660292: 63, 660365: 6, 660397: 44, 660399: 84, 660420: 68, 660427: 65, 660556: 66, 660564: 95, 660628: 38, 660693: 4, 660711: 17, 660732: 8, 660750: 34, 660808: 29, 660870: 59, 660928: 87, 661068: 35, 661149: 88, 661502: 5, 661582: 86, 661646: 30, 661709: 24, 661764: 48, 661793: 99, 661820: 93, 661926: 7, 661998: 34, 662031: 40, 662134: 92, 662135: 53, 662188: 45, 662216: 68, 662231: 43, 662238: 7, 662252: 44, 662412: 53, 662416: 79, 662418: 22, 662464: 50, 662495: 89, 662600: 82, 662601: 7, 662615: 46, 662734: 67, 662852: 38, 662863: 46, 662870: 51, 662892: 84, 662908: 8, 663113: 8, 663184: 47, 663292: 9, 663429: 86, 663487: 44, 663655: 85, 663677: 12, 663690: 30, 663837: 87, 663840: 99, 663897: 87, 663941: 78, 664021: 70, 664042: 19, 664080: 97, 664241: 70, 664266: 43, 664443: 19, 664486: 22, 664622: 65, 664652: 40, 664747: 17, 664780: 64, 664825: 11, 664847: 74, 664947: 53, 664950: 64, 665033: 3, 665078: 15, 665097: 98, 665133: 0, 665414: 60, 665482: 11, 665569: 27, 665588: 15, 665619: 90, 665859: 46, 665867: 67, 
665891: 50, 665998: 98, 666110: 80, 666124: 45, 666143: 79, 666259: 42, 666337: 64, 666431: 82, 666556: 68, 666661: 92, 666696: 68, 666713: 42, 666728: 88, 666912: 80, 666917: 15, 666943: 18, 666998: 68, 667040: 30, 667174: 64, 667212: 28, 667226: 59, 667329: 76, 667431: 97, 667481: 14, 667559: 24, 667684: 84, 667807: 69, 667837: 51, 667869: 8, 667896: 59, 668041: 17, 668093: 42, 668095: 40, 668283: 16, 668284: 70, 668285: 62, 668289: 86, 668396: 83, 668518: 19, 668582: 17, 668617: 41, 668750: 28, 668815: 35, 668895: 29, 668942: 94, 669110: 5, 669219: 45, 669281: 56, 669411: 2, 669425: 85, 669435: 13, 669684: 60, 669784: 20, 669888: 34, 669985: 39, 670001: 24, 670184: 35, 670447: 15, 670493: 8, 670669: 45, 670676: 17, 670743: 39, 670825: 30, 670854: 73, 670897: 50, 670917: 77, 671004: 89, 671160: 53, 671217: 90, 671274: 18, 671355: 29, 671361: 90, 671394: 38, 671461: 33, 671471: 22, 671530: 53, 671603: 49, 671630: 92, 671678: 74, 671682: 80, 671749: 21, 671897: 81, 671953: 19, 671997: 58, 672106: 20, 672130: 51, 672142: 11, 672151: 55, 672217: 75, 672392: 93, 672487: 56, 672666: 84, 672691: 24, 672762: 14, 672838: 54, 672842: 63, 672848: 78, 673049: 6, 673095: 23, 673131: 54, 673143: 10, 673173: 22, 673217: 8, 673250: 95, 673295: 82, 673385: 98, 673395: 47, 673418: 12, 673424: 59, 673443: 19, 673590: 48, 673638: 12, 673662: 41, 673803: 58, 673813: 46, 673873: 89, 673877: 44, 673916: 35, 673987: 56, 673993: 38, 674051: 1, 674062: 86, 674080: 14, 674148: 39, 674308: 42, 674383: 70, 674450: 61, 674465: 49, 674506: 3, 674536: 20, 674555: 28, 674603: 72, 674626: 56, 674730: 39, 674763: 76, 674828: 78, 674975: 70, 675010: 99, 675022: 42, 675176: 34, 675393: 60, 675483: 64, 675492: 64, 675495: 69, 675510: 50, 675614: 50, 675620: 9, 675650: 39, 675840: 96, 675925: 33, 675934: 91, 676039: 70, 676154: 90, 676228: 4, 676243: 26, 676254: 53, 676279: 69, 676293: 23, 676320: 13, 676340: 63, 676404: 34, 676435: 2, 676487: 13, 676497: 89, 676502: 15, 676561: 19, 676609: 95, 
676746: 89, 676864: 54, 676884: 6, 677035: 49, 677045: 9, 677108: 56, 677127: 83, 677150: 32, 677155: 62, 677169: 40, 677238: 89, 677240: 84, 677271: 30, 677277: 98, 677286: 80, 677480: 1, 677534: 56, 677592: 62, 677665: 46, 677689: 64, 677782: 8, 677841: 72, 677874: 27, 677911: 19, 677913: 72, 677921: 31, 677938: 49, 678172: 38, 678293: 93, 678332: 53, 678539: 62, 678569: 15, 678654: 23, 678680: 5, 678712: 89, 678751: 20, 678769: 88, 679041: 45, 679179: 70, 679371: 65, 679390: 54, 679430: 63, 679661: 87, 679682: 63, 679743: 92, 679847: 80, 679853: 24, 679981: 49, 680176: 52, 680304: 69, 680309: 54, 680358: 8, 680371: 9, 680422: 48, 680423: 34, 680500: 27, 680514: 43, 680651: 99, 680704: 50, 680722: 55, 680769: 91, 680795: 10, 680801: 42, 680843: 82, 680904: 31, 680915: 47, 680979: 94, 680990: 13, 681112: 28, 681123: 56, 681298: 80, 681355: 25, 681409: 36, 681486: 55, 681499: 55, 681554: 42, 681627: 54, 681645: 60, 681685: 80, 681787: 76, 681855: 32, 681942: 81, 681969: 30, 681982: 69, 681992: 57, 682182: 66, 682183: 51, 682187: 32, 682220: 41, 682236: 5, 682279: 57, 682291: 92, 682494: 16, 682534: 53, 682552: 4, 682604: 47, 682641: 70, 682690: 23, 682724: 36, 682726: 19, 682734: 92, 682808: 66, 682809: 5, 682876: 65, 682932: 65, 682984: 22, 683111: 4, 683124: 79, 683130: 46, 683136: 57, 683149: 17, 683188: 97, 683498: 54, 683541: 3, 683638: 84, 683656: 0, 683698: 0, 683743: 7, 683755: 8, 683773: 3, 683991: 53, 684016: 6, 684033: 45, 684166: 69, 684168: 20, 684407: 54, 684487: 27, 684532: 11, 684534: 87, 684544: 66, 684574: 90, 684662: 63, 684703: 34, 685000: 76, 685025: 78, 685131: 25, 685135: 11, 685148: 85, 685240: 63, 685253: 38, 685270: 49, 685308: 68, 685387: 65, 685461: 56, 685839: 21, 685853: 43, 685992: 17, 686126: 24, 686131: 60, 686211: 96, 686214: 37, 686292: 39, 686352: 12, 686373: 96, 686379: 86, 686414: 30, 686556: 31, 686726: 38, 686770: 47, 686824: 22, 686853: 95, 686972: 95, 687024: 50, 687054: 25, 687060: 28, 687065: 63, 687098: 39, 687204: 25, 
687206: 83, 687225: 85, 687234: 23, 687588: 40, 687728: 68, 687732: 56, 687875: 19, 688048: 19, 688144: 2, 688169: 27, 688217: 93, 688290: 20, 688317: 46, 688464: 13, 688465: 74, 688579: 51, 688609: 32, 688664: 36, 688724: 70, 688767: 41, 688862: 60, 688943: 3, 688990: 20, 689074: 92, 689153: 49, 689163: 31, 689193: 88, 689222: 14, 689279: 23, 689281: 76, 689358: 30, 689384: 85, 689445: 94, 689474: 16, 689581: 48, 689608: 1, 689655: 97, 689676: 91, 689695: 28, 689718: 88, 689727: 36, 689747: 87, 689834: 16, 689841: 62, 689844: 92, 689850: 3, 689855: 82, 689910: 79, 689915: 41, 689932: 82, 689940: 17, 690218: 34, 690262: 92, 690270: 15, 690302: 35, 690324: 3, 690375: 47, 690377: 52, 690403: 76, 690439: 56, 690441: 52, 690458: 6, 690499: 38, 690545: 65, 690610: 81, 690643: 62, 690665: 54, 690692: 19, 690712: 3, 690795: 37, 690815: 45, 690984: 79, 690993: 47, 690995: 84, 691076: 30, 691091: 77, 691111: 95, 691628: 53, 691629: 27, 691636: 84, 691643: 61, 691668: 47, 691758: 87, 691801: 42, 692002: 26, 692099: 96, 692122: 54, 692217: 17, 692284: 86, 692329: 60, 692378: 83, 692409: 44, 692458: 2, 692611: 36, 692634: 81, 692641: 21, 692672: 89, 692727: 11, 692733: 86, 692755: 65, 692825: 76, 692923: 93, 692925: 18, 693127: 51, 693139: 41, 693202: 19, 693264: 17, 693283: 66, 693291: 18, 693303: 55, 693366: 27, 693473: 90, 693483: 52, 693523: 45, 693535: 68, 693574: 95, 693683: 74, 693936: 65, 694080: 96, 694118: 59, 694145: 84, 694148: 61, 694154: 64, 694192: 26, 694231: 89, 694338: 94, 694449: 57, 694604: 96, 694700: 34, 694757: 90, 694785: 61, 694800: 72, 694808: 68, 694936: 17, 694981: 5, 695024: 46, 695041: 58, 695066: 91, 695067: 76, 695093: 59, 695104: 52, 695206: 24, 695228: 19, 695245: 5, 695335: 61, 695394: 1, 695397: 95, 695403: 0, 695441: 80, 695675: 63, 695685: 82, 695828: 36, 695870: 28, 695927: 58, 695931: 72, 695941: 95, 695987: 14, 696045: 40, 696084: 1, 696088: 92, 696181: 78, 696193: 17, 696223: 81, 696262: 72, 696273: 35, 696344: 70, 696362: 77, 696377: 
33, 696479: 4, 696493: 10, 696503: 57, 696543: 69, 696759: 33, 696772: 36, 696800: 51, 696808: 98, 696871: 21, 696881: 2, 696912: 54, 696929: 87, 697002: 57, 697010: 7, 697028: 73, 697102: 72, 697120: 29, 697129: 62, 697197: 77, 697292: 84, 697297: 53, 697352: 4, 697419: 78, 697498: 17, 697584: 82, 697618: 3, 697627: 38, 697708: 29, 697756: 89, 697817: 37, 697844: 44, 697875: 80, 697890: 59, 697912: 45, 697935: 42, 698007: 20, 698016: 46, 698040: 96, 698134: 32, 698162: 9, 698212: 98, 698237: 34, 698245: 91, 698296: 62, 698317: 41, 698349: 72, 698589: 63, 698628: 72, 698664: 17, 698675: 55, 698800: 27, 698863: 43, 698919: 70, 699011: 59, 699088: 51, 699099: 70, 699109: 81, 699156: 35, 699201: 63, 699204: 5, 699219: 88, 699246: 37, 699253: 21, 699400: 10, 699450: 59, 699532: 35, 699585: 71, 699669: 78, 699706: 26, 699732: 48, 699737: 99, 699746: 91, 699830: 65, 699900: 12, 699928: 25, 700105: 3, 700154: 72, 700163: 34, 700310: 50, 700356: 18, 700363: 92, 700393: 52, 700425: 82, 700462: 60, 700496: 26, 700501: 13, 700605: 75, 700689: 99, 700752: 75, 700757: 75, 700795: 99, 701181: 57, 701252: 68, 701321: 53, 701352: 99, 701385: 5, 701397: 44, 701424: 29, 701470: 73, 701581: 51, 701584: 99, 701665: 32, 701684: 95, 701685: 53, 701692: 13, 701730: 19, 701764: 60, 701847: 88, 701888: 35, 701975: 80, 701976: 27, 701986: 68, 702025: 44, 702035: 86, 702210: 41, 702287: 77, 702341: 93, 702396: 95, 702521: 70, 702537: 16, 702574: 38, 702619: 54, 702648: 35, 702712: 20, 702741: 81, 702759: 35, 702785: 94, 702831: 66, 702920: 15, 702980: 31, 703010: 5, 703018: 72, 703049: 21, 703058: 90, 703331: 50, 703426: 89, 703455: 59, 703467: 42, 703540: 23, 703581: 57, 703590: 57, 703673: 80, 703745: 26, 703781: 64, 704037: 35, 704095: 47, 704158: 51, 704276: 1, 704298: 32, 704641: 95, 704690: 1, 704898: 72, 704936: 23, 704956: 50, 704967: 52, 705011: 18, 705229: 85, 705250: 19, 705294: 93, 705352: 61, 705404: 95, 705454: 82, 705464: 88, 705544: 85, 705546: 97, 705646: 15, 705858: 27, 
705870: 1, 706104: 48, 706135: 78, 706137: 39, 706156: 92, 706180: 83, 706205: 99, 706247: 47, 706324: 87, 706374: 72, 706408: 71, 706592: 43, 706647: 3, 706799: 83, 706800: 32, 706967: 58, 706974: 79, 706981: 45, 707148: 30, 707167: 84, 707425: 23, 707460: 22, 707461: 80, 707486: 53, 707507: 81, 707582: 11, 707682: 35, 707745: 75, 707760: 65, 707782: 96, 707868: 14, 707925: 37, 707969: 89, 708020: 2, 708118: 89, 708129: 35, 708353: 47, 708450: 87, 708500: 62, 708738: 29, 708784: 25, 708965: 9, 709034: 18, 709042: 67, 709135: 60, 709155: 63, 709176: 21, 709239: 40, 709248: 66, 709276: 97, 709305: 4, 709337: 5, 709394: 65, 709528: 21, 709531: 58, 709545: 34, 709674: 68, 709747: 40, 709752: 29, 709766: 8, 709769: 67, 709888: 52, 709923: 7, 709937: 89, 709965: 12, 709967: 20, 709968: 61, 709986: 33, 710059: 20, 710095: 17, 710134: 39, 710250: 86, 710281: 68, 710292: 15, 710333: 12, 710416: 16, 710551: 71, 710792: 26, 710819: 95, 710989: 44, 710994: 2, 711145: 80, 711208: 37, 711288: 79, 711321: 2, 711383: 57, 711435: 69, 711539: 79, 711617: 48, 711697: 54, 711758: 45, 711841: 50, 711850: 24, 711855: 63, 711861: 78, 711865: 27, 711879: 74, 711943: 44, 712103: 84, 712106: 81, 712135: 67, 712243: 92, 712280: 95, 712284: 9, 712388: 40, 712395: 96, 712423: 43, 712424: 29, 712521: 31, 712550: 12, 712674: 93, 712724: 14, 712745: 61, 712785: 79, 712923: 1, 713012: 85, 713051: 27, 713056: 62, 713075: 55, 713229: 53, 713508: 47, 713514: 74, 713546: 53, 713646: 55, 713706: 38, 713713: 65, 713879: 25, 713918: 55, 714032: 32, 714075: 70, 714094: 48, 714116: 16, 714227: 79, 714379: 16, 714389: 19, 714395: 49, 714409: 30, 714525: 77, 714530: 69, 714608: 79, 714713: 54, 714736: 51, 714847: 74, 715038: 98, 715053: 57, 715200: 32, 715256: 48, 715264: 60, 715366: 1, 715499: 57, 715537: 12, 715669: 34, 715752: 65, 715765: 79, 715767: 11, 716057: 92, 716185: 31, 716228: 85, 716270: 36, 716300: 65, 716304: 39, 716494: 91, 716519: 96, 716594: 77, 716614: 58, 716774: 85, 716782: 72, 716828: 
50, 716974: 63, 717011: 97, 717015: 61, 717038: 30, 717065: 67, 717083: 43, 717118: 31, 717127: 7, 717199: 11, 717209: 42, 717224: 75, 717267: 77, 717343: 27, 717351: 69, 717445: 14, 717531: 90, 717635: 17, 717646: 84, 717670: 68, 717786: 72, 717835: 33, 717981: 78, 718101: 85, 718226: 28, 718293: 78, 718413: 60, 718463: 69, 718554: 19, 718628: 6, 718712: 58, 718780: 91, 718782: 37, 718802: 33, 718897: 92, 718946: 52, 719006: 28, 719079: 10, 719310: 71, 719337: 72, 719341: 54, 719348: 21, 719369: 26, 719453: 18, 719517: 89, 719540: 30, 719717: 92, 719836: 72, 719859: 33, 719883: 79, 719967: 95, 720005: 2, 720042: 83, 720103: 60, 720169: 7, 720213: 17, 720315: 68, 720359: 41, 720428: 94, 720528: 91, 720606: 92, 720857: 64, 720951: 96, 720965: 57, 720981: 18, 721080: 25, 721118: 29, 721119: 79, 721158: 93, 721161: 70, 721314: 28, 721337: 90, 721343: 98, 721361: 70, 721380: 98, 721391: 35, 721516: 17, 721522: 68, 721770: 64, 721830: 75, 721835: 89, 721950: 2, 722005: 29, 722030: 10, 722130: 87, 722172: 3, 722218: 77, 722285: 37, 722309: 8, 722326: 90, 722419: 65, 722589: 19, 722650: 79, 722680: 42, 722770: 82, 722861: 76, 722981: 77, 722991: 67, 723175: 43, 723186: 67, 723375: 34, 723390: 7, 723438: 23, 723510: 21, 723516: 57, 723545: 69, 723684: 74, 723686: 77, 723690: 63, 723880: 5, 723915: 42, 723944: 54, 723964: 56, 724020: 36, 724037: 69, 724041: 90, 724056: 98, 724062: 21, 724112: 84, 724456: 38, 724507: 33, 724545: 91, 724587: 15, 724719: 41, 724777: 62, 724781: 59, 724823: 28, 724855: 72, 724872: 20, 724907: 14, 724988: 22, 724995: 65, 725034: 41, 725082: 48, 725182: 97, 725232: 54, 725241: 23, 725246: 42, 725290: 55, 725329: 56, 725332: 80, 725396: 13, 725507: 77, 725530: 74, 725604: 43, 725686: 48, 725712: 38, 725719: 7, 725741: 90, 725742: 11, 725749: 19, 725762: 96, 725801: 77, 725832: 4, 725949: 99, 726051: 41, 726089: 88, 726136: 95, 726195: 98, 726232: 99, 726280: 13, 726325: 15, 726355: 51, 726387: 28, 726419: 91, 726574: 73, 726638: 11, 726644: 79, 
726695: 5, 726782: 93, 726829: 15, 726893: 13, 727008: 25, 727012: 15, 727071: 25, 727132: 40, 727159: 67, 727250: 51, 727355: 99, 727429: 77, 727486: 39, 727756: 85, 727765: 20, 727810: 51, 727845: 85, 727995: 17, 728035: 25, 728321: 75, 728574: 51, 728649: 25, 728725: 81, 728727: 96, 728751: 79, 728782: 35, 728849: 26, 728880: 10, 729164: 0, 729196: 44, 729286: 20, 729296: 18, 729442: 81, 729486: 54, 729546: 84, 729552: 14, 729653: 5, 729696: 20, 729852: 26, 729945: 19, 729981: 15, 730024: 69, 730178: 64, 730201: 68, 730320: 97, 730391: 9, 730409: 22, 730517: 61, 730596: 77, 730698: 71, 730825: 29, 730846: 73, 730966: 5, 731027: 74, 731196: 29, 731211: 28, 731250: 82, 731382: 13, 731409: 10, 731518: 79, 731528: 15, 731580: 32, 731874: 88, 731904: 79, 731931: 16, 731949: 72, 732042: 39, 732078: 36, 732108: 9, 732176: 64, 732183: 77, 732211: 95, 732257: 23, 732373: 81, 732393: 40, 732511: 34, 732558: 47, 732562: 22, 732577: 60, 732662: 31, 732665: 17, 732716: 45, 732729: 58, 732730: 32, 732770: 93, 732811: 31, 732876: 91, 732878: 73, 732928: 65, 732987: 54, 733042: 61, 733045: 30, 733178: 62, 733566: 77, 733593: 85, 733658: 35, 733673: 47, 733689: 67, 733980: 66, 733989: 63, 734001: 87, 734077: 45, 734298: 0, 734338: 15, 734372: 97, 734417: 37, 734462: 10, 734469: 64, 734496: 65, 734511: 16, 734532: 56, 734559: 80, 734579: 17, 734623: 54, 734636: 39, 734666: 82, 734680: 32, 734715: 51, 734790: 84, 734947: 41, 734979: 40, 734999: 68, 735030: 35, 735046: 35, 735101: 70, 735123: 11, 735191: 3, 735219: 54, 735287: 23, 735327: 59, 735382: 7, 735447: 13, 735514: 34, 735582: 54, 735597: 2, 735716: 12, 735796: 38, 735799: 81, 735862: 15, 736012: 40, 736049: 30, 736097: 76, 736146: 98, 736172: 35, 736330: 71, 736333: 21, 736375: 4, 736578: 80, 736589: 71, 736710: 33, 736714: 49, 736927: 58, 736963: 97, 737004: 53, 737015: 10, 737016: 33, 737063: 74, 737066: 76, 737183: 76, 737321: 51, 737342: 5, 737371: 78, 737473: 64, 737515: 10, 737537: 42, 737586: 70, 737631: 33, 737658: 
69, 737724: 61, 737799: 93, 737828: 85, 737878: 74, 737958: 5, 737976: 10, 738171: 74, 738177: 10, 738234: 7, 738238: 32, 738239: 30, 738259: 75, 738266: 93, 738271: 72, 738272: 81, 738337: 38, 738358: 68, 738420: 27, 738517: 57, 738519: 16, 738629: 46, 738770: 23, 738810: 90, 738840: 32, 738904: 32, 738958: 80, 739121: 64, 739296: 96, 739323: 80, 739335: 67, 739419: 41, 739425: 77, 739432: 78, 739548: 15, 739554: 42, 739650: 88, 739803: 3, 739823: 91, 739849: 22, 739877: 7, 739882: 21, 739947: 85, 740041: 53, 740044: 27, 740050: 4, 740282: 97, 740358: 89, 740365: 33, 740440: 97, 740600: 89, 740760: 13, 740801: 44, 740880: 82, 740894: 52, 740944: 78, 740950: 60, 740955: 20, 741004: 14, 741030: 90, 741170: 81, 741197: 87, 741302: 7, 741348: 97, 741364: 67, 741516: 76, 741553: 74, 741607: 36, 741828: 59, 741856: 86, 741909: 83, 741929: 9, 742041: 67, 742089: 85, 742095: 12, 742115: 96, 742273: 20, 742378: 39, 742465: 51, 742492: 65, 742499: 85, 742514: 64, 742516: 60, 742519: 63, 742532: 98, 742552: 82, 742666: 45, 742704: 50, 742789: 63, 742813: 1, 742877: 53, 742891: 78, 742918: 51, 742924: 89, 743000: 47, 743001: 57, 743013: 69, 743022: 96, 743057: 62, 743068: 0, 743124: 17, 743180: 28, 743208: 59, 743235: 93, 743269: 38, 743610: 63, 743613: 44, 743616: 34, 743791: 5, 743830: 91, 743874: 71, 743977: 76, 744167: 26, 744237: 4, 744490: 30, 744528: 2, 744558: 24, 744560: 79, 744609: 95, 744659: 36, 744672: 70, 744725: 59, 744781: 56, 744987: 11, 745002: 29, 745078: 90, 745126: 46, 745194: 75, 745203: 6, 745257: 47, 745267: 58, 745274: 35, 745293: 54, 745346: 78, 745432: 64, 745522: 38, 745598: 70, 745641: 12, 745730: 52, 745915: 61, 746033: 82, 746048: 39, 746096: 61, 746255: 57, 746270: 64, 746294: 20, 746422: 94, 746468: 9, 746475: 2, 746509: 98, 746546: 78, 746598: 22, 746716: 39, 746859: 74, 746885: 27, 746988: 71, 746993: 84, 747058: 57, 747104: 45, 747215: 9, 747240: 51, 747279: 37, 747367: 68, 747512: 91, 747514: 37, 747629: 22, 747929: 83, 747992: 29, 748011: 
45, 748043: 23, 748052: 89, 748123: 92, 748150: 19, 748171: 31, 748277: 40, 748390: 2, 748392: 20, 748399: 78, 748402: 71, 748412: 31, 748616: 10, 748635: 63, 748677: 75, 748707: 75, 748733: 96, 748812: 78, 748872: 65, 748876: 19, 748973: 0, 749003: 29, 749012: 38, 749037: 69, 749269: 89, 749334: 70, 749404: 57, 749583: 68, 749679: 19, 749754: 30, 749802: 57, 749822: 8, 749878: 87, 749952: 24, 749994: 91, 750054: 80, 750078: 45, 750214: 14, 750228: 10, 750249: 83, 750283: 11, 750313: 81, 750509: 47, 750608: 17, 750683: 28, 750693: 58, 750700: 96, 750732: 18, 750927: 58, 751007: 23, 751048: 7, 751160: 20, 751196: 89, 751199: 48, 751224: 76, 751425: 76, 751475: 30, 751529: 47, 751662: 68, 751687: 43, 751708: 80, 751770: 42, 751771: 80, 751779: 83, 751870: 18, 752170: 60, 752190: 64, 752271: 3, 752293: 56, 752399: 65, 752571: 56, 752596: 91, 752683: 48, 752742: 38, 752757: 56, 752833: 12, 752947: 83, 752984: 45, 753156: 74, 753201: 76, 753211: 79, 753282: 6, 753307: 64, 753377: 59, 753592: 74, 753792: 7, 753871: 41, 753933: 42, 754298: 35, 754303: 6, 754568: 25, 754624: 18, 754738: 78, 754881: 57, 755024: 91, 755083: 59, 755154: 73, 755156: 2, 755209: 39, 755256: 60, 755360: 1, 755416: 82, 755503: 80, 755549: 53, 755554: 30, 755567: 96, 755584: 82, 755651: 48, 755731: 34, 755841: 64, 755924: 26, 755985: 40, 756040: 31, 756355: 98, 756371: 53, 756435: 94, 756605: 81, 756610: 87, 756632: 74, 756641: 51, 756676: 82, 756679: 91, 756725: 10, 756804: 31, 756898: 28, 757131: 11, 757191: 7, 757198: 55, 757247: 67, 757250: 37, 757418: 75, 757454: 47, 757475: 81, 757528: 94, 757608: 56, 757619: 55, 757620: 69, 757637: 67, 757648: 28, 757689: 26, 757721: 93, 757743: 70, 757769: 44, 757930: 1, 757953: 0, 757968: 38, 758128: 63, 758147: 40, 758176: 25, 758243: 14, 758246: 39, 758266: 89, 758287: 9, 758312: 8, 758402: 39, 758414: 44, 758525: 73, 758607: 83, 758654: 84, 758700: 39, 758739: 23, 758782: 35, 758906: 28, 758908: 76, 758952: 43, 759048: 13, 759085: 50, 759110: 6, 759135: 
41, 759161: 34, 759178: 92, 759266: 34, 759268: 70, 759332: 72, 759355: 4, 759422: 22, 759475: 62, 759538: 49, 759562: 43, 759579: 83, 759588: 26, 759639: 98, 759656: 10, 759762: 11, 759799: 54, 760083: 49, 760156: 24, 760207: 48, 760249: 42, 760310: 67, 760333: 73, 760341: 31, 760348: 19, 760356: 78, 760378: 71, 760425: 89, 760489: 70, 760676: 46, 760682: 95, 760715: 44, 760836: 34, 760944: 34, 761018: 22, 761078: 19, 761155: 42, 761198: 22, 761258: 51, 761286: 62, 761419: 41, 761456: 19, 761499: 66, 761570: 94, 761572: 34, 761587: 21, 761622: 99, 761640: 60, 761749: 24, 761776: 69, 761823: 85, 761842: 18, 761867: 6, 761908: 16, 761947: 48, 762002: 54, 762011: 70, 762047: 69, 762093: 68, 762165: 77, 762300: 86, 762306: 34, 762324: 58, 762368: 94, 762561: 37, 762571: 75, 762682: 91, 762701: 38, 762765: 6, 762776: 46, 762785: 75, 762837: 26, 762843: 29, 762859: 54, 762896: 27, 762930: 59, 763080: 40, 763117: 54, 763143: 3, 763172: 95, 763341: 8, 763342: 54, 763409: 9, 763597: 56, 763632: 86, 763768: 61, 763884: 21, 764017: 31, 764061: 95, 764220: 81, 764339: 1, 764351: 47, 764488: 71, 764538: 39, 764562: 60, 764642: 8, 764724: 55, 764744: 42, 764824: 73, 764863: 24, 764874: 91, 764910: 31, 764933: 7, 764974: 91, 765077: 76, 765118: 86, 765284: 38, 765342: 86, 765429: 73, 765486: 3, 765611: 29, 765666: 25, 765707: 88, 765714: 22, 765735: 60, 765842: 71, 765920: 15, 765996: 78, 766026: 48, 766074: 20, 766079: 43, 766219: 1, 766245: 14, 766247: 35, 766325: 51, 766449: 48, 766492: 16, 766511: 7, 766570: 80, 766592: 91, 766708: 52, 766751: 74, 766763: 69, 766890: 73, 766910: 82, 767025: 90, 767125: 57, 767250: 63, 767268: 95, 767403: 4, 767654: 32, 767685: 58, 767693: 95, 767723: 30, 767826: 36, 767837: 7, 767890: 50, 767918: 8, 767970: 50, 768012: 97, 768066: 78, 768081: 8, 768188: 38, 768276: 24, 768455: 99, 768484: 89, 768596: 75, 768632: 9, 768760: 77, 768769: 20, 768802: 6, 768895: 8, 769003: 69, 769080: 25, 769194: 88, 769218: 72, 769249: 6, 769297: 69, 769420: 18, 
769542: 53, 769551: 86, 769599: 69, 769620: 54, 769622: 87, 769696: 84, 769750: 64, 769790: 29, 769872: 91, 769892: 18, 769895: 29, 769969: 58, 769982: 43, 769993: 63, 769998: 22, 770016: 58, 770056: 16, 770083: 13, 770124: 6, 770298: 68, 770483: 66, 770515: 18, 770541: 99, 770716: 63, 770806: 60, 770831: 84, 770947: 88, 770978: 88, 771025: 58, 771040: 32, 771052: 82, 771074: 6, 771175: 9, 771285: 70, 771438: 20, 771532: 99, 771866: 84, 771893: 54, 771903: 93, 771909: 61, 771944: 30, 771974: 11, 772056: 56, 772348: 98, 772349: 75, 772545: 2, 772563: 56, 772596: 89, 772655: 23, 772781: 25, 772788: 15, 772790: 70, 772851: 52, 772858: 96, 772917: 93, 772947: 27, 772957: 71, 773013: 45, 773030: 49, 773219: 58, 773239: 61, 773240: 3, 773339: 15, 773412: 45, 773441: 43, 773479: 69, 773492: 9, 773514: 47, 773722: 37, 773730: 58, 773766: 47, 773959: 45, 774182: 27, 774199: 1, 774244: 91, 774248: 89, 774327: 64, 774367: 73, 774425: 62, 774483: 33, 774498: 93, 774624: 89, 774686: 3, 774748: 37, 774912: 17, 774990: 53, 775016: 99, 775198: 58, 775233: 25, 775273: 92, 775282: 21, 775471: 29, 775493: 91, 775523: 11, 775528: 20, 775587: 3, 775683: 75, 775724: 23, 775738: 88, 775907: 68, 775912: 60, 775914: 72, 775928: 41, 775969: 26, 776155: 52, 776185: 48, 776195: 7, 776262: 85, 776422: 89, 776428: 55, 776457: 34, 776482: 30, 776485: 67, 776564: 51, 776650: 8, 776684: 65, 776762: 62, 776820: 69, 776976: 26, 777096: 88, 777118: 31, 777149: 35, 777200: 90, 777239: 12, 777257: 2, 777275: 50, 777295: 33, 777359: 3, 777389: 12, 777443: 91, 777448: 77, 777455: 23, 777544: 99, 777740: 98, 777822: 2, 778068: 24, 778283: 25, 778328: 57, 778394: 18, 778798: 70, 778885: 78, 778908: 96, 778932: 59, 779017: 3, 779022: 75, 779042: 93, 779043: 44, 779054: 52, 779133: 7, 779252: 88, 779279: 61, 779328: 15, 779400: 27, 779497: 66, 779623: 40, 779753: 65, 779767: 8, 779787: 97, 779971: 85, 779996: 6, 780054: 73, 780088: 67, 780129: 1, 780146: 25, 780281: 15, 780305: 25, 780331: 28, 780386: 26, 
780416: 52, 780655: 36, 780779: 71, 780866: 40, 780896: 63, 780972: 16, 781025: 16, 781041: 41, 781066: 45, 781148: 73, 781176: 87, 781316: 98, 781363: 96, 781495: 84, 781521: 1, 781565: 65, 781635: 23, 781636: 24, 781701: 50, 781713: 40, 781794: 43, 781813: 7, 781920: 85, 781939: 26, 782013: 70, 782035: 77, 782127: 11, 782142: 34, 782147: 99, 782402: 69, 782464: 68, 782471: 3, 782495: 68, 782513: 99, 782652: 37, 782707: 30, 782745: 62, 782762: 79, 782848: 60, 782893: 63, 782900: 66, 782982: 30, 783015: 50, 783190: 45, 783394: 42, 783438: 72, 783454: 97, 783483: 36, 783510: 5, 783515: 24, 783558: 90, 783562: 78, 783661: 82, 783725: 32, 783746: 38, 783753: 5, 783882: 33, 783886: 75, 783930: 32, 783981: 59, 784133: 10, 784200: 58, 784203: 7, 784214: 49, 784264: 35, 784382: 70, 784438: 43, 784613: 33, 784695: 97, 784745: 73, 784802: 93, 784893: 26, 784948: 17, 784985: 19, 785015: 23, 785029: 73, 785135: 46, 785142: 14, 785296: 75, 785322: 18, 785391: 98, 785436: 92, 785456: 45, 785597: 54, 785605: 23, 785654: 23, 785695: 79, 785699: 12, 785749: 2, 785751: 9, 785835: 17, 785843: 55, 785942: 1, 785951: 99, 785984: 9, 785994: 15, 786015: 91, 786112: 27, 786208: 90, 786216: 36, 786223: 70, 786332: 96, 786462: 28, 786484: 2, 786581: 25, 786639: 29, 786641: 93, 786691: 39, 786740: 16, 786774: 93, 786783: 50, 786888: 94, 786913: 58, 786916: 4, 786950: 15, 786955: 15, 786980: 74, 787086: 56, 787111: 77, 787120: 87, 787121: 2, 787187: 46, 787272: 65, 787298: 81, 787467: 33, 787495: 29, 787531: 44, 787556: 39, 787574: 2, 787604: 34, 787657: 55, 787723: 15, 787875: 25, 787888: 56, 787991: 70, 788007: 55, 788053: 78, 788093: 67, 788116: 69, 788198: 61, 788343: 32, 788477: 40, 788546: 38, 788649: 83, 788650: 16, 788736: 26, 788831: 20, 788938: 97, 789013: 73, 789045: 54, 789123: 48, 789150: 37, 789170: 84, 789550: 20, 789582: 84, 789618: 90, 789660: 2, 789681: 81, 789789: 54, 789814: 43, 789905: 1, 789910: 43, 789945: 17, 789963: 91, 790020: 48, 790048: 38, 790083: 43, 790158: 47, 
790247: 29, 790257: 15, 790289: 35, 790506: 98, 790529: 14, 790594: 66, 790625: 30, 790659: 66, 790706: 12, 790733: 92, 790749: 57, 790821: 90, 790828: 29, 790840: 68, 790931: 82, 791028: 88, 791122: 78, 791190: 16, 791311: 9, 791374: 94, 791398: 31, 791403: 22, 791536: 14, 791638: 17, 791641: 63, 791654: 36, 791720: 58, 791858: 67, 791890: 53, 791949: 57, 792019: 40, 792032: 53, 792116: 99, 792179: 22, 792251: 75, 792402: 8, 792418: 66, 792433: 19, 792670: 13, 792741: 92, 792755: 78, 792794: 44, 792873: 12, 792911: 91, 792969: 10, 793090: 44, 793177: 94, 793190: 35, 793295: 18, 793298: 91, 793308: 38, 793419: 78, 793460: 3, 793505: 9, 793547: 97, 793647: 37, 793675: 74, 793750: 18, 793792: 97, 793837: 77, 793941: 92, 794040: 66, 794093: 0, 794103: 53, 794199: 97, 794220: 38, 794227: 68, 794249: 5, 794280: 22, 794342: 61, 794380: 25, 794490: 28, 794509: 48, 794609: 19, 794690: 8, 794773: 28, 794785: 58, 794794: 54, 794894: 52, 794967: 87, 795069: 77, 795112: 97, 795356: 99, 795413: 85, 795421: 55, 795433: 71, 795589: 60, 795592: 18, 795799: 30, 795835: 35, 795876: 12, 795893: 83, 795960: 71, 796023: 91, 796088: 35, 796146: 61, 796217: 82, 796339: 13, 796349: 29, 796353: 63, 796357: 43, 796368: 18, 796390: 34, 796392: 27, 796429: 65, 796504: 66, 796557: 23, 796627: 8, 796636: 72, 796669: 19, 796713: 8, 796760: 98, 796766: 3, 796803: 90, 796819: 27, 796875: 78, 796973: 62, 797023: 91, 797027: 15, 797049: 93, 797062: 22, 797105: 72, 797219: 85, 797226: 63, 797281: 96, 797325: 77, 797400: 90, 797421: 25, 797566: 35, 797574: 88, 797648: 80, 797673: 18, 797684: 45, 797797: 32, 797807: 77, 797880: 50, 797960: 46, 797982: 30, 798062: 47, 798076: 85, 798148: 82, 798231: 81, 798273: 71, 798287: 22, 798305: 54, 798368: 29, 798399: 36, 798470: 92, 798555: 26, 798621: 66, 798702: 85, 798715: 17, 798759: 4, 798943: 46, 799071: 92, 799182: 28, 799191: 13, 799275: 8, 799319: 65, 799325: 13, 799343: 67, 799360: 90, 799635: 69, 799642: 94, 799662: 19, 799693: 70, 799704: 4, 799803: 
8, 799920: 21, 799922: 19, 800026: 41, 800283: 38, 800396: 39, 800404: 82, 800464: 37, 800652: 7, 800655: 8, 800683: 40, 800687: 30, 800932: 65, 800962: 95, 800972: 16, 801035: 90, 801274: 82, 801280: 36, 801364: 85, 801420: 35, 801428: 58, 801441: 82, 801522: 50, 801580: 62, 801657: 7, 801758: 45, 801793: 50, 801871: 93, 801886: 13, 802004: 12, 802160: 18, 802270: 67, 802271: 3, 802376: 51, 802419: 15, 802421: 32, 802470: 22, 802627: 21, 802681: 0, 802685: 12, 802693: 12, 802710: 47, 802737: 81, 802913: 93, 802968: 82, 803000: 57, 803042: 50, 803085: 22, 803118: 84, 803244: 25, 803245: 43, 803262: 26, 803358: 24, 803485: 99, 803527: 8, 803564: 97, 803612: 62, 803761: 62, 803771: 22, 803850: 39, 803889: 37, 804023: 80, 804078: 1, 804118: 44, 804296: 19, 804312: 96, 804326: 25, 804368: 50, 804414: 24, 804444: 36, 804484: 71, 804514: 49, 804682: 76, 804791: 90, 804884: 18, 804889: 9, 805020: 59, 805046: 70, 805092: 17, 805156: 35, 805190: 57, 805504: 45, 805576: 40, 805592: 90, 805660: 47, 805736: 13, 805766: 75, 805801: 39, 805815: 94, 805818: 2, 805943: 89, 806099: 52, 806134: 55, 806144: 50, 806164: 83, 806263: 73, 806473: 30, 806484: 84, 806504: 13, 806507: 48, 806527: 44, 806603: 82, 806605: 74, 806677: 45, 806702: 94, 806779: 54, 806799: 13, 806972: 12, 807010: 39, 807094: 95, 807107: 63, 807203: 64, 807209: 33, 807407: 93, 807571: 78, 807619: 60, 807636: 72, 807703: 23, 807892: 98, 807959: 77, 808066: 80, 808095: 53, 808113: 60, 808158: 8, 808197: 1, 808246: 92, 808318: 94, 808326: 0, 808337: 69, 808558: 47, 808613: 24, 808614: 58, 808663: 8, 808706: 34, 808829: 67, 808838: 79, 808850: 59, 808867: 64, 808900: 75, 808922: 79, 808945: 16, 809011: 18, 809153: 7, 809196: 88, 809242: 88, 809303: 23, 809316: 30, 809319: 9, 809329: 55, 809383: 74, 809397: 88, 809399: 93, 809483: 11, 809540: 77, 809541: 20, 809609: 75, 809627: 63, 809690: 83, 809799: 83, 809955: 81, 810005: 92, 810041: 53, 810047: 80, 810244: 42, 810389: 8, 810441: 65, 810560: 83, 810683: 30, 810935: 
84, 810941: 46, 810950: 85, 810992: 59, 811237: 7, 811400: 18, 811416: 67, 811425: 26, 811551: 1, 811557: 40, 811563: 38, 811566: 19, 811687: 86, 811757: 50, 811766: 90, 811781: 35, 811782: 14, 811930: 26, 811981: 0, 812016: 40, 812207: 69, 812213: 68, 812279: 72, 812314: 7, 812348: 90, 812434: 64, 812486: 83, 812621: 67, 812700: 96, 812733: 81, 812749: 72, 812764: 24, 812857: 24, 812888: 58, 812941: 44, 812985: 89, 813054: 63, 813117: 64, 813198: 31, 813275: 25, 813310: 39, 813322: 20, 813491: 44, 813523: 93, 813538: 97, 813638: 25, 813668: 32, 813865: 62, 813891: 88, 813903: 51, 814028: 83, 814157: 56, 814260: 53, 814296: 41, 814343: 94, 814386: 84, 814466: 39, 814647: 68, 814684: 63, 814695: 90, 814707: 89, 814916: 47, 814941: 6, 814960: 13, 814981: 33, 815018: 22, 815074: 30, 815121: 38, 815136: 72, 815366: 6, 815390: 97, 815445: 55, 815461: 39, 815499: 93, 815617: 69, 815622: 79, 815668: 2, 815845: 75, 815938: 19, 815984: 29, 815993: 83, 816030: 71, 816149: 24, 816280: 41, 816287: 48, 816348: 63, 816359: 46, 816386: 13, 816459: 84, 816483: 69, 816578: 15, 816672: 74, 816779: 23, 816853: 51, 816931: 47, 817010: 52, 817044: 27, 817093: 54, 817191: 47, 817198: 78, 817212: 86, 817274: 77, 817306: 11, 817345: 31, 817354: 90, 817370: 76, 817385: 11, 817454: 86, 817457: 44, 817463: 1, 817484: 22, 817555: 3, 817561: 88, 817702: 59, 817767: 16, 817873: 59, 818080: 29, 818081: 6, 818114: 74, 818145: 57, 818152: 21, 818173: 59, 818272: 93, 818298: 94, 818452: 13, 818559: 89, 818608: 10, 818621: 18, 818625: 36, 818627: 42, 818658: 35, 818764: 63, 818858: 41, 818860: 22, 819169: 70, 819330: 70, 819371: 35, 819372: 39, 819471: 82, 819541: 3, 819703: 0, 819731: 39, 819736: 55, 819782: 14, 819806: 88, 819868: 8, 819872: 73, 819890: 97, 819912: 79, 819933: 19, 819953: 51, 820027: 65, 820093: 67, 820113: 48, 820238: 45, 820302: 57, 820337: 90, 820378: 98, 820427: 75, 820522: 16, 820601: 12, 820618: 17, 820628: 64, 820781: 75, 820821: 88, 820980: 58, 820983: 7, 821008: 46, 
821034: 27, 821171: 80, 821199: 97, 821274: 38, 821381: 23, 821387: 58, 821408: 18, 821416: 6, 821469: 34, 821591: 89, 821665: 76, 821683: 89, 821727: 31, 821793: 86, 821819: 78, 821858: 50, 821990: 46, 822009: 3, 822103: 40, 822149: 96, 822157: 50, 822189: 54, 822250: 78, 822306: 92, 822346: 50, 822433: 46, 822494: 17, 822501: 24, 822724: 70, 822949: 97, 822976: 40, 822989: 92, 823024: 81, 823056: 49, 823081: 71, 823083: 64, 823150: 53, 823170: 81, 823231: 98, 823235: 21, 823264: 38, 823436: 29, 823437: 86, 823474: 0, 823489: 99, 823624: 21, 823724: 68, 823730: 5, 823911: 90, 824041: 24, 824106: 27, 824115: 20, 824121: 52, 824164: 90, 824209: 99, 824210: 12, 824262: 51, 824285: 60, 824300: 30, 824307: 54, 824470: 7, 824479: 32, 824528: 84, 824702: 16, 824724: 70, 824878: 9, 824954: 27, 824984: 56, 825025: 92, 825043: 88, 825098: 92, 825124: 92, 825128: 35, 825220: 74, 825346: 9, 825373: 41, 825427: 45, 825532: 33, 825596: 5, 825623: 83, 825656: 71, 825696: 50, 825726: 77, 825775: 56, 825818: 67, 825875: 3, 825901: 44, 825967: 50, 826117: 79, 826118: 58, 826149: 68, 826154: 68, 826237: 98, 826239: 36, 826308: 42, 826584: 54, 826594: 54, 826680: 5, 826736: 57, 826754: 94, 826765: 19, 826851: 19, 826902: 5, 826919: 61, 827085: 16, 827127: 52, 827176: 6, 827382: 75, 827387: 98, 827408: 50, 827834: 13, 827924: 65, 828106: 46, 828432: 36, 828446: 72, 828662: 83, 828679: 17, 828710: 27, 828948: 67, 828963: 60, 828964: 1, 829049: 65, 829074: 76, 829119: 40, 829186: 79, 829279: 65, 829335: 62, 829540: 30, 829553: 6, 829731: 78, 829776: 94, 829789: 46, 829812: 33, 829968: 48, 830037: 14, 830116: 69, 830199: 97, 830311: 77, 830378: 51, 830379: 92, 830397: 1, 830440: 38, 830451: 11, 830510: 0, 830610: 92, 830657: 77, 830742: 22, 830743: 41, 830945: 80, 830985: 46, 831031: 29, 831060: 70, 831103: 35, 831113: 48, 831195: 80, 831517: 3, 831805: 47, 831816: 1, 832073: 42, 832127: 62, 832215: 32, 832237: 98, 832344: 10, 832365: 82, 832395: 82, 832403: 45, 832456: 85, 832470: 83, 
832503: 80, 832795: 94, 832975: 90, 833056: 3, 833136: 73, 833142: 69, 833326: 71, 833399: 15, 833517: 97, 833621: 32, 833644: 1, 833659: 87, 833806: 51, 833810: 16, 833926: 96, 833943: 89, 833960: 46, 833992: 70, 834008: 60, 834045: 45, 834124: 10, 834127: 20, 834175: 15, 834254: 39, 834286: 71, 834352: 15, 834404: 3, 834476: 5, 834587: 18, 834657: 22, 834773: 84, 834789: 54, 834821: 34, 834901: 30, 834922: 62, 834950: 84, 835039: 87, 835090: 41, 835202: 92, 835217: 55, 835259: 14, 835412: 49, 835512: 98, 835606: 50, 835678: 21, 835706: 50, 835735: 36, 835764: 94, 835877: 67, 835904: 79, 835915: 78, 835951: 4, 835961: 28, 836011: 7, 836088: 40, 836109: 85, 836137: 80, 836149: 68, 836171: 81, 836173: 2, 836188: 12, 836190: 8, 836201: 47, 836256: 58, 836303: 9, 836598: 99, 836654: 82, 836680: 14, 836750: 62, 836754: 20, 836786: 39, 836800: 26, 836818: 20, 836898: 4, 836942: 74, 836943: 35, 836964: 62, 837031: 16, 837088: 13, 837104: 29, 837105: 79, 837133: 95, 837218: 18, 837240: 84, 837256: 6, 837387: 97, 837414: 44, 837479: 32, 837516: 90, 837696: 53, 837843: 97, 837858: 55, 837860: 76, 837873: 64, 838013: 39, 838071: 41, 838079: 46, 838116: 69, 838153: 60, 838158: 64, 838187: 55, 838343: 83, 838417: 13, 838436: 49, 838451: 54, 838458: 47, 838517: 34, 838561: 52, 838563: 56, 838780: 6, 838806: 41, 838831: 37, 838894: 46, 838945: 86, 838950: 79, 838991: 10, 839001: 59, 839035: 56, 839036: 26, 839229: 17, 839291: 17, 839321: 53, 839333: 20, 839396: 84, 839436: 44, 839523: 79, 839530: 63, 839633: 16, 839644: 35, 839708: 48, 839744: 51, 839786: 55, 839829: 11, 839871: 4, 839887: 54, 839955: 11, 839970: 34, 839972: 72, 840081: 73, 840122: 36, 840246: 43, 840284: 46, 840345: 35, 840384: 77, 840402: 10, 840469: 66, 840494: 86, 840596: 9, 840678: 71, 840764: 55, 840792: 95, 840984: 73, 841031: 11, 841037: 60, 841057: 48, 841075: 19, 841076: 50, 841120: 27, 841127: 54, 841224: 61, 841312: 94, 841314: 45, 841469: 22, 841495: 6, 841586: 30, 841745: 65, 841770: 10, 841884: 
80, 841885: 73, 841986: 0, 841992: 39, 842049: 7, 842344: 13, 842440: 14, 842648: 80, 842703: 13, 842750: 58, 842793: 98, 842878: 57, 842899: 76, 842913: 68, 843028: 92, 843126: 68, 843215: 48, 843381: 47, 843417: 69, 843433: 12, 843548: 46, 843592: 57, 843606: 36, 843617: 83, 843683: 53, 843736: 22, 843787: 80, 843805: 65, 843875: 99, 843886: 50, 844050: 90, 844084: 11, 844107: 58, 844144: 43, 844278: 81, 844327: 1, 844401: 52, 844573: 67, 844590: 11, 844593: 22, 844604: 64, 844612: 14, 844648: 77, 844719: 41, 844734: 64, 844760: 15, 844882: 82, 844898: 95, 844975: 82, 845102: 11, 845179: 1, 845241: 99, 845283: 28, 845396: 0, 845534: 76, 845704: 9, 845720: 11, 845728: 14, 845743: 1, 845766: 74, 845842: 37, 845843: 57, 845892: 31, 845903: 76, 845963: 17, 845995: 25, 846107: 9, 846249: 2, 846255: 46, 846375: 18, 846493: 73, 846505: 22, 846595: 71, 846623: 26, 846626: 42, 846930: 1, 846976: 28, 847088: 79, 847106: 6, 847120: 90, 847137: 26, 847144: 21, 847236: 38, 847361: 65, 847364: 86, 847407: 52, 847413: 19, 847431: 13, 847451: 21, 847466: 18, 847514: 22, 847528: 86, 847685: 39, 847686: 9, 847700: 72, 847778: 58, 847838: 68, 847858: 59, 847915: 97, 848072: 46, 848163: 72, 848184: 29, 848220: 83, 848227: 83, 848244: 81, 848418: 88, 848451: 34, 848521: 92, 848589: 46, 848609: 93, 848722: 13, 848770: 14, 848787: 54, 848796: 93, 848886: 18, 848921: 63, 848951: 33, 848964: 15, 849030: 99, 849132: 14, 849303: 96, 849364: 66, 849458: 23, 849653: 88, 849921: 80, 850121: 86, 850295: 45, 850453: 89, 850570: 94, 850587: 32, 850620: 21, 850644: 2, 850671: 48, 850774: 41, 850843: 76, 850885: 98, 850932: 97, 850967: 3, 850988: 98, 850998: 65, 851081: 87, 851105: 80, 851172: 53, 851189: 19, 851268: 37, 851356: 94, 851360: 37, 851450: 54, 851493: 28, 851580: 94, 851841: 69, 851852: 89, 851864: 2, 851941: 26, 852136: 65, 852188: 88, 852222: 76, 852281: 36, 852455: 2, 852534: 67, 852570: 43, 852602: 13, 852614: 8, 852700: 2, 852763: 61, 852891: 46, 852952: 87, 853062: 73, 853190: 
80, 853220: 41, 853294: 48, 853398: 36, 853515: 35, 853594: 67, 853818: 42, 853852: 54, 853887: 52, 853989: 73, 854049: 29, 854109: 75, 854114: 74, 854323: 14, 854351: 37, 854374: 56, 854377: 77, 854392: 65, 854512: 69, 854517: 96, 854681: 28, 854801: 7, 854976: 11, 855235: 61, 855241: 51, 855302: 38, 855376: 37, 855452: 73, 855456: 6, 855482: 96, 855487: 10, 855501: 89, 855539: 53, 855595: 95, 855640: 68, 855706: 78, 855712: 77, 855719: 85, 855815: 80, 855836: 64, 855932: 96, 856047: 77, 856196: 71, 856204: 78, 856208: 34, 856214: 20, 856226: 64, 856269: 71, 856298: 24, 856334: 66, 856423: 17, 856463: 56, 856571: 8, 856775: 89, 856787: 33, 856888: 95, 856909: 55, 857052: 85, 857068: 19, 857076: 62, 857088: 30, 857097: 63, 857103: 3, 857239: 19, 857322: 13, 857372: 9, 857547: 9, 857609: 67, 857794: 69, 857907: 16, 858067: 71, 858120: 12, 858274: 11, 858276: 27, 858394: 0, 858514: 81, 858563: 90, 858576: 92, 858598: 31, 858601: 27, 858689: 58, 858700: 53, 858768: 44, 858833: 99, 858851: 93, 858901: 23, 858906: 91, 859052: 3, 859065: 85, 859179: 29, 859197: 16, 859262: 74, 859293: 30, 859333: 86, 859479: 46, 859521: 10, 859539: 56, 859595: 68, 859671: 8, 859767: 3, 859796: 2, 859817: 99, 859829: 63, 859942: 88, 859945: 0, 859981: 67, 860100: 67, 860173: 99, 860210: 48, 860218: 53, 860578: 68, 860591: 91, 860628: 7, 860688: 82, 860792: 35, 860930: 16, 860991: 2, 861010: 1, 861237: 75, 861238: 64, 861383: 8, 861390: 6, 861415: 62, 861450: 17, 861458: 78, 861499: 68, 861590: 76, 861603: 9, 861714: 24, 861760: 11, 861828: 67, 861953: 99, 862042: 50, 862070: 4, 862302: 59, 862586: 97, 862660: 48, 862713: 39, 862852: 56, 862928: 66, 862931: 71, 863056: 77, 863113: 46, 863260: 10, 863310: 66, 863362: 34, 863434: 29, 863461: 65, 863464: 23, 863567: 22, 863592: 2, 863611: 78, 863695: 64, 863696: 1, 863795: 69, 863883: 14, 863931: 10, 864082: 85, 864199: 62, 864206: 39, 864225: 39, 864237: 80, 864283: 83, 864285: 28, 864294: 81, 864341: 17, 864539: 46, 864564: 7, 864647: 34, 
864694: 98, 864728: 9, 864809: 81, 864847: 78, 864917: 39, 864991: 93, 865030: 58, 865096: 79, 865190: 2, 865249: 32, 865393: 21, 865576: 16, 865642: 7, 865643: 39, 865658: 28, 865665: 50, 865683: 60, 865698: 28, 865822: 46, 865889: 27, 865939: 25, 866034: 44, 866179: 31, 866371: 0, 866449: 39, 866463: 40, 866517: 47, 866680: 35, 866688: 72, 866702: 42, 866882: 59, 866885: 82, 866890: 27, 866903: 65, 866918: 59, 866937: 68, 867050: 32, 867107: 37, 867110: 98, 867111: 7, 867147: 28, 867175: 31, 867204: 6, 867235: 90, 867260: 97, 867289: 91, 867297: 50, 867312: 84, 867329: 59, 867419: 90, 867426: 73, 867472: 6, 867534: 92, 867597: 34, 867601: 7, 867614: 73, 867766: 64, 867778: 13, 867804: 77, 867818: 54, 867890: 95, 867943: 86, 867948: 97, 868004: 93, 868031: 93, 868262: 40, 868272: 38, 868309: 41, 868399: 72, 868465: 39, 868534: 83, 868631: 22, 868696: 14, 868704: 83, 868731: 82, 868897: 88, 869059: 39, 869344: 27, 869485: 84, 869613: 25, 869623: 20, 869632: 8, 869764: 70, 869785: 58, 869825: 45, 869879: 4, 869983: 74, 870160: 61, 870161: 31, 870271: 20, 870346: 65, 870455: 12, 870554: 69, 870558: 20, 870725: 13, 870788: 69, 870825: 41, 871057: 0, 871200: 99, 871209: 71, 871229: 27, 871230: 70, 871297: 82, 871328: 89, 871352: 24, 871380: 40, 871497: 68, 871541: 21, 871544: 64, 871567: 57, 871574: 30, 871595: 22, 871641: 50, 871645: 15, 871787: 86, 871887: 96, 872073: 13, 872112: 38, 872238: 18, 872249: 72, 872277: 29, 872303: 40, 872481: 69, 872543: 36, 872575: 39, 872621: 15, 872655: 42, 872663: 82, 872677: 3, 872701: 88, 872726: 3, 872759: 1, 872859: 33, 873005: 32, 873153: 91, 873158: 77, 873297: 22, 873340: 18, 873379: 78, 873381: 16, 873508: 48, 873517: 12, 873628: 49, 873654: 53, 873732: 11, 873781: 46, 873787: 45, 873916: 84, 874052: 82, 874097: 1, 874114: 29, 874168: 90, 874185: 70, 874232: 86, 874253: 8, 874367: 82, 874467: 2, 874554: 81, 874555: 73, 874566: 8, 874653: 47, 874683: 68, 874864: 90, 874930: 23, 874975: 14, 874999: 97, 875135: 0, 875148: 2, 
875160: 96, 875193: 55, 875212: 23, 875293: 80, 875347: 11, 875355: 54, 875409: 80, 875415: 12, 875515: 56, 875614: 55, 875671: 75, 875682: 40, 875697: 66, 875747: 51, 875763: 96, 875774: 52, 875788: 22, 875802: 42, 875925: 66, 876043: 50, 876085: 50, 876116: 83, 876187: 54, 876216: 76, 876236: 84, 876319: 77, 876331: 20, 876376: 15, 876393: 44, 876394: 44, 876401: 14, 876593: 33, 876633: 72, 876889: 65, 876895: 32, 876898: 40, 876961: 43, 877006: 25, 877116: 25, 877150: 92, 877320: 47, 877351: 40, 877356: 66, 877680: 8, 877808: 80, 877852: 14, 878000: 74, 878005: 14, 878259: 92, 878328: 22, 878372: 43, 878373: 51, 878497: 85, 878559: 24, 878579: 53, 878605: 40, 878682: 69, 878731: 61, 878773: 81, 878845: 46, 878917: 36, 878960: 35, 878964: 3, 879031: 95, 879137: 6, 879176: 23, 879199: 0, 879259: 15, 879364: 67, 879447: 14, 879452: 43, 879457: 53, 879462: 76, 879488: 49, 879641: 48, 879666: 44, 879667: 12, 879732: 31, 879733: 15, 879768: 2, 879940: 48, 879947: 86, 879995: 30, 880223: 49, 880238: 77, 880256: 39, 880312: 52, 880326: 50, 880396: 56, 880457: 52, 880464: 68, 880574: 5, 880647: 56, 880702: 35, 880744: 46, 880820: 20, 880850: 42, 880860: 67, 880861: 94, 880887: 12, 880929: 5, 880980: 53, 881027: 41, 881038: 90, 881041: 71, 881185: 8, 881196: 90, 881202: 43, 881210: 98, 881227: 38, 881298: 0, 881327: 16, 881352: 51, 881481: 52, 881535: 80, 881557: 62, 881637: 70, 881881: 66, 881885: 45, 881932: 27, 881980: 13, 882012: 8, 882027: 24, 882119: 42, 882158: 81, 882191: 62, 882236: 31, 882377: 77, 882430: 63, 882504: 5, 882713: 42, 882774: 42, 882799: 23, 882821: 96, 882895: 64, 882996: 80, 883051: 8, 883147: 39, 883196: 28, 883257: 66, 883297: 55, 883299: 83, 883313: 96, 883324: 89, 883347: 38, 883380: 91, 883385: 68, 883684: 58, 883751: 3, 883853: 72, 883861: 40, 884168: 24, 884189: 16, 884222: 81, 884282: 79, 884440: 60, 884501: 94, 884521: 9, 884565: 55, 884571: 40, 884594: 62, 884603: 62, 884622: 32, 884686: 93, 884923: 25, 884977: 1, 885002: 60, 885097: 
16, 885103: 55, 885228: 39, 885307: 53, 885344: 59, 885471: 23, 885588: 72, 885601: 51, 885608: 25, 885610: 26, 885633: 64, 885634: 29, 885877: 80, 885884: 73, 885940: 54, 885949: 23, 885958: 55, 886016: 91, 886034: 70, 886113: 52, 886129: 0, 886224: 8, 886293: 1, 886316: 15, 886317: 41, 886412: 56, 886442: 94, 886497: 3, 886703: 3, 886877: 49, 886878: 53, 886922: 86, 886984: 57, 887012: 61, 887043: 80, 887066: 34, 887159: 71, 887170: 14, 887199: 41, 887346: 78, 887377: 56, 887383: 74, 887483: 89, 887511: 52, 887533: 45, 887701: 19, 887840: 44, 887994: 4, 888002: 20, 888096: 54, 888182: 75, 888321: 8, 888322: 52, 888415: 61, 888507: 33, 888586: 39, 888681: 57, 888712: 42, 888794: 75, 888812: 38, 888849: 91, 888856: 70, 888904: 73, 889087: 41, 889165: 46, 889248: 28, 889266: 84, 889284: 47, 889319: 11, 889525: 68, 889769: 94, 889860: 36, 889997: 78, 889998: 96, 890154: 47, 890155: 35, 890248: 5, 890299: 23, 890303: 1, 890385: 60, 890386: 10, 890515: 20, 890532: 35, 890559: 9, 890576: 50, 890730: 11, 890751: 14, 890932: 63, 890971: 1, 891012: 92, 891253: 17, 891255: 27, 891307: 95, 891368: 19, 891502: 55, 891545: 46, 891660: 79, 891745: 42, 891916: 91, 892127: 72, 892177: 41, 892188: 6, 892247: 48, 892290: 14, 892319: 0, 892335: 94, 892443: 87, 892501: 97, 892516: 66, 892535: 71, 892664: 95, 892692: 76, 892706: 41, 892762: 18, 892797: 77, 892855: 5, 892873: 57, 892894: 55, 892898: 87, 892911: 62, 892944: 40, 893042: 51, 893079: 56, 893184: 9, 893194: 4, 893242: 69, 893551: 81, 893667: 40, 893818: 94, 893839: 15, 893903: 60, 893905: 7, 893982: 74, 893983: 3, 894195: 96, 894233: 15, 894367: 77, 894407: 89, 894507: 43, 894557: 19, 894587: 39, 894707: 59, 894745: 49, 894805: 11, 894822: 44, 894836: 23, 894843: 0, 894854: 73, 894980: 86, 895009: 15, 895071: 33, 895266: 64, 895344: 44, 895423: 33, 895431: 8, 895452: 78, 895501: 24, 895515: 35, 895537: 69, 895727: 96, 895735: 62, 895736: 73, 895769: 12, 895790: 39, 895920: 52, 895924: 43, 895934: 78, 896078: 84, 896250: 54, 
896266: 55, 896361: 94, 896450: 95, 896495: 2, 896582: 56, 896774: 29, 896893: 97, 896914: 44, 896929: 61, 896948: 28, 897054: 35, 897089: 95, 897192: 97, 897229: 35, 897299: 96, 897311: 89, 897323: 2, 897336: 35, 897351: 27, 897360: 79, 897398: 33, 897747: 23, 897796: 0, 898093: 65, 898136: 23, 898191: 65, 898193: 41, 898281: 67, 898357: 68, 898360: 97, 898378: 94, 898460: 71, 898494: 20, 898564: 46, 898628: 56, 898681: 39, 898685: 17, 898820: 56, 898953: 50, 899063: 67, 899162: 44, 899371: 21, 899431: 96, 899561: 98, 899593: 56, 899680: 6, 899685: 98, 899709: 19, 899746: 76, 899851: 95, 899853: 83, 900011: 53, 900025: 18, 900056: 53, 900057: 98, 900099: 32, 900162: 31, 900237: 51, 900262: 63, 900316: 47, 900421: 15, 900630: 45, 900723: 42, 900791: 60, 900856: 85, 900925: 12, 901015: 41, 901024: 43, 901026: 69, 901035: 50, 901092: 25, 901102: 60, 901158: 79, 901218: 17, 901220: 78, 901273: 79, 901276: 10, 901295: 76, 901311: 43, 901349: 57, 901357: 4, 901592: 95, 901710: 84, 901805: 74, 901898: 7, 901930: 8, 901942: 19, 902073: 77, 902076: 48, 902196: 21, 902227: 31, 902253: 32, 902296: 23, 902358: 27, 902363: 49, 902396: 34, 902437: 86, 902464: 23, 902552: 11, 902706: 79, 902878: 52, 902885: 51, 903007: 7, 903128: 23, 903164: 50, 903249: 16, 903323: 66, 903448: 18, 903491: 25, 903533: 12, 903588: 51, 903668: 74, 903669: 26, 903722: 58, 903872: 59, 903907: 74, 903947: 68, 903974: 35, 903980: 81, 904112: 39, 904155: 72, 904209: 14, 904255: 27, 904278: 63, 904286: 28, 904496: 84, 904519: 35, 904523: 49, 904538: 70, 904543: 58, 904597: 40, 904604: 6, 904689: 21, 904724: 48, 904775: 93, 904932: 43, 904936: 44, 905019: 35, 905021: 4, 905041: 78, 905044: 84, 905048: 67, 905049: 37, 905052: 90, 905080: 8, 905113: 33, 905121: 54, 905189: 61, 905190: 21, 905196: 53, 905422: 92, 905513: 24, 905590: 37, 905897: 33, 905929: 7, 905968: 8, 905979: 26, 906099: 90, 906285: 58, 906382: 85, 906460: 80, 906474: 55, 906498: 22, 906501: 74, 906532: 43, 906540: 17, 906561: 25, 906636: 
41, 906708: 93, 906747: 74, 906751: 59, 906768: 69, 907013: 29, 907172: 65, 907209: 42, 907251: 78, 907443: 54, 907469: 13, 907538: 30, 907563: 62, 907602: 23, 907614: 81, 907620: 99, 907685: 25, 907922: 16, 907923: 47, 908042: 86, 908057: 22, 908082: 99, 908134: 13, 908311: 28, 908436: 28, 908488: 42, 908584: 95, 908658: 98, 908701: 16, 908708: 31, 908768: 60, 908843: 48, 908855: 33, 908939: 33, 909146: 12, 909160: 56, 909197: 59, 909201: 11, 909424: 13, 909444: 44, 909498: 2, 909758: 47, 909775: 26, 909776: 82, 909850: 10, 909854: 85, 909927: 44, 909980: 10, 910036: 34, 910198: 1, 910263: 85, 910288: 38, 910407: 77, 910454: 13, 910456: 60, 910545: 61, 910630: 38, 910723: 87, 910813: 21, 910868: 55, 910885: 76, 910982: 34, 911001: 48, 911010: 75, 911081: 16, 911093: 54, 911304: 18, 911306: 6, 911398: 21, 911425: 25, 911536: 47, 911548: 42, 911648: 86, 911751: 30, 911882: 85, 911911: 64, 911917: 11, 911997: 31, 912112: 86, 912167: 29, 912246: 23, 912249: 88, 912461: 59, 912479: 21, 912488: 56, 912491: 31, 912579: 5, 912611: 91, 912704: 17, 912748: 75, 912820: 56, 912934: 85, 913009: 85, 913035: 90, 913048: 67, 913144: 70, 913188: 15, 913201: 84, 913264: 53, 913277: 5, 913303: 3, 913304: 22, 913586: 0, 913735: 44, 913986: 18, 914003: 31, 914122: 44, 914125: 13, 914162: 27, 914196: 6, 914223: 58, 914275: 18, 914282: 34, 914355: 35, 914371: 41, 914471: 18, 914516: 35, 914586: 1, 914630: 55, 914671: 19, 914906: 59, 915067: 32, 915069: 64, 915170: 25, 915171: 99, 915191: 48, 915199: 80, 915204: 19, 915325: 9, 915335: 49, 915381: 52, 915477: 65, 915604: 93, 915608: 23, 915750: 58, 915795: 17, 915909: 0, 916155: 14, 916202: 6, 916384: 5, 916410: 78, 916415: 58, 916449: 87, 916474: 23, 916587: 68, 916604: 2, 916743: 75, 916895: 19, 916915: 90, 917039: 51, 917062: 9, 917079: 18, 917104: 74, 917150: 45, 917228: 83, 917270: 32, 917276: 79, 917318: 17, 917353: 74, 917363: 68, 917383: 60, 917412: 39, 917495: 93, 917506: 28, 917650: 17, 917711: 54, 917721: 71, 917777: 57, 
917797: 29, 917964: 8, 917981: 19, 918019: 33, 918139: 82, 918175: 7, 918194: 59, 918399: 9, 918450: 84, 918497: 39, 918530: 38, 918577: 52, 918980: 0, 919066: 93, 919096: 15, 919108: 97, 919198: 68, 919245: 22, 919248: 27, 919442: 59, 919506: 5, 919559: 66, 919594: 80, 919595: 52, 919609: 36, 919668: 53, 919674: 48, 919711: 27, 919996: 24, 920107: 28, 920164: 19, 920189: 67, 920388: 89, 920467: 36, 920575: 35, 920637: 25, 920653: 75, 920725: 57, 920876: 78, 920898: 25, 921020: 69, 921022: 3, 921028: 80, 921062: 49, 921103: 74, 921264: 8, 921267: 74, 921277: 74, 921288: 31, 921550: 6, 921565: 62, 921590: 83, 921602: 50, 921655: 42, 921673: 78, 921713: 98, 921726: 21, 921799: 28, 921828: 28, 921854: 57, 921860: 65, 921890: 81, 921916: 52, 922002: 81, 922004: 15, 922030: 21, 922076: 20, 922106: 8, 922142: 27, 922146: 28, 922157: 63, 922191: 15, 922272: 70, 922288: 69, 922310: 44, 922367: 24, 922463: 30, 922515: 39, 922564: 22, 922586: 73, 922661: 71, 922696: 15, 922788: 32, 922800: 48, 922831: 38, 922940: 78, 922990: 31, 923001: 97, 923002: 55, 923045: 78, 923060: 74, 923117: 12, 923127: 68, 923133: 85, 923239: 81, 923455: 40, 923544: 94, 923576: 64, 923639: 83, 923661: 4, 923685: 12, 923770: 25, 924012: 23, 924040: 59, 924122: 43, 924165: 94, 924218: 3, 924248: 46, 924290: 63, 924297: 88, 924338: 86, 924462: 25, 924479: 7, 924574: 71, 924653: 1, 924676: 13, 924702: 44, 924714: 34, 924756: 63, 924766: 31, 924799: 46, 924845: 99, 924937: 93, 924988: 62, 925047: 36, 925071: 83, 925155: 75, 925452: 91, 925464: 28, 925494: 30, 925509: 84, 925563: 28, 925657: 4, 925719: 9, 925722: 6, 925762: 71, 925915: 22, 925977: 81, 926124: 20, 926292: 46, 926343: 76, 926349: 75, 926355: 9, 926503: 57, 926614: 16, 926626: 70, 926693: 84, 927182: 31, 927412: 83, 927415: 38, 927537: 52, 927563: 81, 927568: 89, 927615: 10, 927647: 96, 927664: 14, 927709: 9, 927830: 17, 927924: 31, 928021: 32, 928191: 86, 928358: 50, 928411: 35, 928426: 47, 928495: 56, 928497: 92, 928504: 19, 928623: 24, 
928679: 25, 928705: 92, 928706: 7, 928713: 73, 928725: 14, 928755: 56, 928776: 68, 929160: 75, 929311: 11, 929339: 27, 929482: 45, 929686: 71, 929695: 8, 929727: 70, 929848: 18, 929972: 64, 929981: 52, 930004: 53, 930036: 29, 930044: 64, 930233: 46, 930393: 72, 930481: 1, 930592: 77, 930628: 62, 930687: 84, 930689: 10, 930741: 65, 930833: 57, 930968: 80, 931197: 87, 931279: 72, 931341: 80, 931402: 32, 931411: 32, 931459: 33, 931565: 1, 931574: 33, 931575: 92, 931729: 50, 931782: 11, 931822: 29, 931838: 12, 931927: 19, 931963: 18, 931983: 29, 932040: 19, 932090: 95, 932110: 49, 932114: 10, 932148: 48, 932216: 55, 932272: 1, 932291: 62, 932380: 76, 932400: 53, 932405: 8, 932433: 16, 932437: 12, 932452: 56, 932457: 51, 932462: 37, 932634: 54, 932675: 81, 932712: 51, 932722: 55, 932723: 30, 932735: 39, 932744: 52, 932798: 95, 933015: 76, 933022: 78, 933043: 54, 933161: 15, 933193: 66, 933267: 15, 933340: 16, 933384: 83, 933412: 1, 933736: 89, 933748: 63, 933776: 72, 933800: 15, 933923: 18, 933983: 77, 934080: 57, 934284: 57, 934307: 50, 934368: 62, 934414: 84, 934436: 59, 934528: 4, 934574: 53, 934600: 50, 934707: 62, 934754: 60, 934793: 46, 934883: 5, 934945: 63, 934946: 93, 934950: 97, 935001: 96, 935055: 61, 935187: 42, 935196: 94, 935264: 42, 935283: 87, 935290: 77, 935449: 94, 935592: 13, 935775: 65, 935825: 38, 935858: 54, 935870: 36, 936010: 74, 936060: 2, 936070: 0, 936141: 39, 936266: 91, 936301: 3, 936321: 62, 936335: 37, 936354: 15, 936364: 86, 936390: 83, 936402: 70, 936520: 15, 936525: 0, 936601: 44, 936611: 46, 936672: 39, 936697: 92, 936753: 79, 936762: 56, 936883: 2, 936954: 1, 937158: 33, 937207: 35, 937232: 99, 937413: 44, 937473: 9, 937757: 91, 937825: 80, 937921: 59, 938085: 20, 938310: 32, 938419: 46, 938462: 56, 938467: 93, 938472: 53, 938531: 67, 938571: 59, 938703: 16, 938808: 72, 938911: 91, 938983: 34, 939008: 28, 939121: 97, 939136: 54, 939189: 56, 939308: 18, 939317: 67, 939328: 41, 939333: 97, 939367: 82, 939426: 29, 939594: 84, 939671: 7, 
939673: 72, 939707: 48, 939786: 19, 939834: 68, 939849: 98, 939981: 18, 940166: 92, 940232: 87, 940263: 76, 940271: 31, 940316: 65, 940419: 39, 940540: 77, 940607: 80, 940690: 68, 940712: 4, 940777: 49, 940831: 71, 940873: 98, 940951: 49, 940986: 17, 940995: 97, 941000: 33, 941107: 82, 941125: 40, 941212: 82, 941344: 77, 941423: 33, 941459: 46, 941758: 2, 941832: 51, 941911: 46, 942002: 35, 942111: 92, 942181: 86, 942221: 31, 942270: 36, 942308: 48, 942391: 69, 942556: 27, 942629: 44, 942634: 36, 942651: 56, 942663: 55, 942720: 62, 942751: 69, 942923: 28, 942928: 15, 942942: 90, 942998: 46, 943013: 25, 943374: 60, 943397: 13, 943465: 73, 943552: 69, 943603: 37, 943672: 17, 943675: 93, 943928: 2, 943968: 94, 943981: 66, 944015: 30, 944280: 22, 944394: 74, 944449: 33, 944655: 15, 944872: 54, 944891: 98, 945024: 35, 945144: 70, 945161: 16, 945293: 73, 945307: 67, 945335: 32, 945401: 21, 945416: 66, 945558: 91, 945594: 74, 945688: 21, 945706: 93, 945786: 63, 945797: 28, 945984: 51, 945993: 96, 946001: 69, 946313: 71, 946345: 57, 946457: 72, 946555: 41, 946605: 30, 946700: 42, 946746: 45, 946908: 30, 946923: 65, 946937: 91, 946940: 87, 946965: 3, 946973: 77, 947001: 56, 947023: 74, 947218: 0, 947293: 11, 947360: 8, 947361: 81, 947403: 71, 947440: 72, 947698: 95, 947793: 27, 947957: 91, 947958: 23, 947963: 32, 948004: 59, 948056: 68, 948125: 82, 948201: 92, 948269: 50, 948277: 82, 948319: 37, 948326: 82, 948336: 71, 948383: 97, 948384: 63, 948431: 73, 948490: 26, 948572: 78, 948601: 19, 948610: 41, 948698: 64, 948723: 3, 949002: 70, 949042: 25, 949049: 2, 949268: 97, 949369: 64, 949373: 73, 949488: 53, 949535: 95, 949561: 33, 949580: 81, 949584: 40, 949616: 29, 949643: 84, 949689: 4, 949704: 24, 949811: 23, 949881: 66, 950066: 28, 950393: 15, 950691: 15, 950695: 80, 950817: 67, 950836: 40, 950910: 86, 950968: 70, 950982: 70, 951145: 67, 951164: 8, 951249: 69, 951278: 99, 951335: 70, 951474: 90, 951741: 7, 951778: 18, 951834: 35, 951867: 60, 951883: 92, 951948: 58, 
951955: 7, 951960: 21, 952132: 29, 952202: 38, 952282: 28, 952541: 10, 952577: 3, 952597: 46, 952809: 34, 952999: 87, 953027: 0, 953057: 33, 953073: 86, 953102: 33, 953246: 88, 953454: 12, 953533: 30, 953577: 48, 953790: 64, 953794: 0, 953828: 67, 953852: 73, 953986: 38, 954014: 10, 954113: 47, 954225: 22, 954249: 9, 954278: 78, 954353: 85, 954415: 10, 954537: 59, 954581: 66, 954612: 73, 954684: 95, 954717: 19, 954781: 71, 954961: 76, 955029: 46, 955032: 95, 955188: 75, 955475: 43, 955500: 45, 955674: 20, 955808: 29, 955858: 39, 955877: 96, 955881: 3, 955921: 44, 955944: 9, 956007: 23, 956079: 63, 956264: 26, 956347: 66, 956372: 37, 956412: 42, 956430: 11, 956456: 31, 956473: 87, 956507: 17, 956636: 89, 956688: 5, 956736: 7, 956819: 40, 956820: 12, 956840: 80, 956852: 53, 957029: 60, 957042: 28, 957170: 94, 957192: 96, 957224: 92, 957308: 20, 957404: 84, 957469: 72, 957523: 53, 957598: 72, 957698: 82, 957733: 96, 957802: 73, 957808: 20, 957811: 15, 957932: 84, 957950: 32, 957981: 34, 958068: 84, 958091: 65, 958105: 87, 958199: 53, 958218: 75, 958326: 68, 958338: 93, 958360: 72, 958366: 83, 958498: 9, 958539: 66, 958658: 41, 958693: 27, 958723: 19, 958748: 84, 958883: 69, 958949: 59, 958966: 7, 959004: 76, 959060: 40, 959081: 56, 959118: 89, 959340: 30, 959415: 36, 959478: 48, 959564: 79, 959580: 98, 959630: 34, 959738: 64, 959884: 93, 959906: 88, 959936: 28, 960025: 36, 960036: 17, 960114: 44, 960136: 28, 960163: 36, 960172: 62, 960219: 31, 960256: 20, 960276: 67, 960331: 24, 960459: 38, 960502: 8, 960506: 30, 960542: 8, 960574: 94, 960599: 65, 960639: 41, 960706: 26, 960761: 89, 960766: 57, 960924: 83, 961008: 97, 961082: 58, 961126: 14, 961129: 67, 961182: 83, 961233: 21, 961302: 65, 961361: 11, 961371: 78, 961372: 90, 961400: 71, 961426: 62, 961440: 6, 961479: 60, 961521: 61, 961573: 57, 961620: 63, 961655: 52, 961658: 97, 961679: 5, 961687: 15, 961705: 80, 961711: 69, 961727: 97, 961752: 98, 961786: 80, 961855: 18, 961912: 15, 962008: 57, 962153: 77, 962188: 
37, 962231: 89, 962250: 60, 962481: 82, 962564: 98, 962650: 60, 962679: 54, 962881: 32, 962991: 25, 963001: 64, 963013: 30, 963104: 32, 963180: 9, 963201: 19, 963285: 48, 963510: 4, 963551: 10, 963561: 10, 963611: 22, 963659: 11, 963666: 1, 963773: 20, 963793: 47, 963839: 2, 963916: 73, 964030: 98, 964037: 64, 964179: 28, 964189: 51, 964219: 75, 964229: 15, 964277: 19, 964418: 95, 964450: 52, 964511: 66, 964513: 12, 964601: 91, 964612: 94, 964676: 91, 964714: 33, 964758: 37, 964929: 36, 965019: 17, 965027: 14, 965067: 65, 965090: 16, 965242: 53, 965264: 90, 965331: 77, 965351: 91, 965353: 74, 965592: 64, 965734: 5, 965753: 56, 965755: 8, 965822: 37, 965831: 67, 965850: 92, 965853: 73, 965886: 56, 965995: 59, 966015: 94, 966016: 44, 966030: 88, 966073: 63, 966196: 3, 966213: 14, 966278: 81, 966409: 49, 966446: 70, 966469: 14, 966547: 17, 966572: 8, 966605: 88, 966623: 44, 966723: 56, 966772: 7, 966777: 40, 966795: 39, 966811: 9, 966868: 80, 966876: 45, 966923: 13, 967133: 29, 967136: 82, 967150: 47, 967357: 65, 967455: 61, 967458: 36, 967530: 68, 967569: 75, 967620: 24, 967672: 0, 967680: 61, 967688: 45, 967721: 80, 967725: 77, 967766: 49, 967952: 26, 968048: 49, 968049: 29, 968072: 22, 968175: 42, 968259: 80, 968284: 19, 968382: 55, 968411: 29, 968466: 3, 968484: 26, 968542: 67, 968545: 68, 968564: 56, 968607: 0, 968651: 63, 968671: 63, 968864: 68, 968878: 60, 968916: 27, 968989: 66, 969120: 90, 969135: 17, 969165: 71, 969225: 44, 969289: 38, 969414: 84, 969501: 46, 969698: 43, 969767: 21, 969776: 87, 969878: 4, 969960: 39, 969991: 59, 970077: 64, 970117: 77, 970155: 86, 970189: 2, 970248: 19, 970250: 48, 970251: 46, 970289: 31, 970330: 62, 970405: 0, 970448: 60, 970465: 45, 970467: 31, 970511: 72, 970544: 92, 970567: 7, 970570: 13, 970615: 39, 970620: 69, 970696: 94, 970712: 71, 970803: 26, 970804: 38, 970821: 19, 970823: 12, 970856: 77, 970936: 34, 970960: 82, 970987: 87, 971039: 92, 971050: 46, 971168: 26, 971187: 71, 971223: 63, 971258: 9, 971468: 14, 971550: 
31, 971607: 1, 971626: 10, 971635: 44, 971684: 59, 971724: 22, 971791: 54, 971858: 58, 971906: 55, 971993: 92, 972110: 58, 972114: 8, 972189: 77, 972199: 30, 972506: 50, 972594: 29, 972668: 81, 972685: 80, 972960: 47, 973090: 2, 973104: 67, 973445: 85, 973488: 18, 973528: 69, 973613: 7, 973669: 21, 973680: 63, 973775: 36, 973809: 76, 973858: 62, 973976: 1, 974023: 78, 974027: 10, 974056: 59, 974070: 4, 974091: 64, 974093: 29, 974098: 63, 974201: 56, 974279: 7, 974358: 96, 974621: 57, 974633: 74, 974642: 17, 974645: 26, 974723: 2, 974826: 74, 974834: 34, 974955: 49, 974957: 29, 975061: 94, 975099: 9, 975105: 15, 975286: 59, 975325: 48, 975345: 14, 975518: 82, 975566: 58, 975592: 14, 975603: 7, 975604: 99, 975615: 95, 975653: 14, 975728: 60, 975758: 13, 975936: 96, 975979: 37, 976142: 85, 976220: 50, 976282: 70, 976309: 23, 976428: 15, 976440: 1, 976469: 18, 976587: 22, 976663: 85, 976718: 55, 976781: 12, 976882: 29, 976987: 22, 977038: 77, 977047: 89, 977219: 17, 977510: 48, 977528: 2, 977558: 75, 977616: 66, 977671: 85, 977799: 24, 977817: 66, 977842: 17, 978113: 59, 978133: 0, 978247: 96, 978294: 80, 978295: 39, 978301: 65, 978383: 37, 978426: 99, 978526: 51, 978559: 5, 978567: 39, 978642: 26, 978660: 88, 978734: 14, 978769: 49, 978858: 14, 978861: 14, 978883: 55, 978885: 67, 979035: 44, 979131: 4, 979331: 52, 979509: 88, 979530: 38, 979602: 61, 979666: 27, 979931: 13, 980054: 44, 980057: 6, 980336: 28, 980455: 36, 980461: 37, 980567: 27, 980579: 61, 980588: 88, 980687: 66, 980710: 69, 980738: 0, 980745: 53, 980775: 0, 980822: 68, 981069: 63, 981089: 95, 981123: 32, 981159: 75, 981201: 83, 981213: 87, 981272: 44, 981380: 70, 981386: 6, 981490: 5, 981544: 67, 981660: 81, 981695: 22, 981765: 97, 982009: 18, 982015: 8, 982196: 59, 982267: 28, 982356: 51, 982374: 91, 982385: 9, 982401: 38, 982406: 9, 982415: 0, 982518: 42, 982537: 67, 982540: 38, 982690: 95, 982732: 89, 982895: 54, 982964: 80, 982995: 51, 983101: 43, 983105: 11, 983159: 74, 983214: 11, 983250: 2, 
983330: 8, 983348: 14, 983391: 53, 983466: 57, 983467: 76, 983484: 70, 983526: 54, 983531: 51, 983586: 72, 983626: 15, 983648: 6, 983849: 37, 983872: 29, 983977: 86, 984204: 72, 984208: 57, 984225: 98, 984230: 24, 984342: 9, 984439: 97, 984568: 53, 984656: 9, 984756: 54, 984764: 85, 984859: 27, 984897: 71, 984930: 29, 985009: 85, 985011: 69, 985111: 62, 985179: 32, 985182: 88, 985275: 20, 985331: 99, 985454: 5, 985460: 40, 985771: 31, 985883: 53, 985945: 84, 985960: 55, 986034: 73, 986089: 78, 986134: 43, 986156: 34, 986209: 50, 986220: 70, 986237: 17, 986240: 53, 986247: 67, 986252: 29, 986313: 99, 986384: 8, 986437: 26, 986466: 28, 986651: 65, 986659: 90, 986667: 5, 986696: 66, 986752: 27, 986763: 39, 986793: 75, 986834: 24, 986854: 92, 986966: 83, 987013: 16, 987035: 36, 987101: 55, 987209: 22, 987231: 99, 987249: 88, 987256: 51, 987269: 94, 987329: 79, 987341: 23, 987386: 98, 987445: 49, 987473: 74, 987713: 77, 987734: 82, 987741: 8, 987757: 22, 987971: 15, 988004: 66, 988066: 93, 988119: 35, 988121: 15, 988122: 36, 988178: 39, 988214: 40, 988234: 57, 988354: 81, 988360: 81, 988400: 99, 988411: 63, 988495: 90, 988620: 92, 988656: 62, 988670: 3, 988766: 34, 988881: 71, 988944: 91, 989027: 50, 989052: 48, 989070: 58, 989173: 94, 989328: 36, 989450: 96, 989468: 27, 989569: 39, 989633: 39, 989664: 9, 989758: 53, 989771: 32, 989826: 37, 989867: 75, 989966: 81, 989996: 33, 990015: 72, 990023: 82, 990099: 69, 990139: 1, 990237: 40, 990370: 67, 990538: 29, 990607: 24, 990613: 81, 990769: 30, 990813: 9, 990838: 41, 990860: 4, 990861: 37, 990901: 75, 990965: 59, 990979: 11, 991082: 43, 991119: 68, 991264: 50, 991282: 2, 991466: 2, 991475: 40, 991525: 84, 991667: 33, 991680: 85, 991703: 83, 991785: 18, 991787: 16, 991803: 15, 991951: 96, 992033: 37, 992174: 99, 992260: 41, 992341: 64, 992366: 10, 992541: 74, 992547: 0, 992558: 53, 992600: 14, 992612: 48, 992619: 86, 992728: 96, 992791: 84, 992862: 6, 992884: 42, 992898: 26, 992907: 95, 992909: 33, 992993: 24, 993000: 78, 
993041: 80, 993084: 52, 993120: 80, 993134: 15, 993139: 69, 993168: 82, 993227: 53, 993257: 96, 993259: 25, 993310: 46, 993414: 10, 993443: 62, 993516: 82, 993594: 58, 993617: 52, 993709: 9, 993770: 20, 993794: 61, 993858: 86, 993926: 78, 993951: 51, 994003: 63, 994088: 39, 994112: 38, 994317: 50, 994380: 57, 994434: 24, 994541: 37, 994549: 27, 994664: 38, 994726: 20, 994742: 20, 994768: 21, 994832: 79, 994925: 5, 994926: 66, 994943: 3, 995006: 27, 995029: 76, 995043: 52, 995089: 56, 995166: 31, 995171: 74, 995180: 46, 995195: 36, 995279: 89, 995488: 82, 995517: 32, 995619: 8, 995716: 70, 995763: 27, 995800: 2, 995823: 25, 995960: 83, 996012: 85, 996014: 97, 996092: 9, 996100: 51, 996174: 72, 996206: 32, 996346: 59, 996406: 61, 996555: 53, 996635: 49, 996678: 95, 997045: 88, 997145: 66, 997225: 86, 997397: 66, 997438: 0, 997452: 67, 997500: 4, 997513: 72, 997589: 61, 997606: 96, 997627: 37, 997631: 45, 997730: 44, 997735: 71, 997751: 24, 997768: 95, 998028: 91, 998084: 86, 998131: 21, 998157: 14, 998167: 25, 998323: 96, 998344: 3, 998346: 38, 998370: 97, 998461: 49, 998521: 27, 998522: 26, 998539: 25, 998625: 52, 998629: 36, 998675: 21, 998800: 91, 998944: 81, 999093: 86, 999132: 53, 999169: 39, 999185: 83, 999193: 41, 999206: 28, 999298: 11, 999316: 9, 999341: 54, 999355: 30, 999575: 42, 999673: 75, 999821: 65, 999835: 1, 999918: 99, 999936: 37} { + control[k].Set(v) + } + + var compacted bool + for compacted, err = b.disk.compactOnce(); err == nil && compacted; compacted, err = b.disk.compactOnce() { + } + require.Nil(t, err) + + verifyBucketRangeAgainstControl(t, b, control) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_set_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_set_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..171aa40833c472c1bb346ba72d53975a9130fb8e --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compaction_set_integration_test.go @@ -0,0 +1,528 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "context" + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func compactionSetStrategy(ctx context.Context, t *testing.T, opts []BucketOption, + expectedMinSize, expectedMaxSize int64, +) { + size := 100 + + type kv struct { + key []byte + values [][]byte + delete bool + } + // this segment is not part of the merge, but might still play a role in + // overall results. For example if one of the later segments has a tombstone + // for it + var previous1 []kv + var previous2 []kv + + var segment1 []kv + var segment2 []kv + var expected []kv + var bucket *Bucket + + dirName := t.TempDir() + + t.Run("create test data", func(t *testing.T) { + // The test data is split into 4 scenarios evenly: + // + // 0.) created in the first segment, never touched again + // 1.) created in the first segment, appended to it in the second + // 2.) created in the first segment, first element deleted in the second + // 3.) created in the first segment, second element deleted in the second + // 4.) not present in the first segment, created in the second + // 5.) present in an unrelated previous segment, deleted in the first + // 6.) present in an unrelated previous segment, deleted in the second + // 7.) 
present in an unrelated previous segment, never touched again + for i := 0; i < size; i++ { + key := []byte(fmt.Sprintf("key-%02d", i)) + + value1 := []byte(fmt.Sprintf("value-%02d-01", i)) + value2 := []byte(fmt.Sprintf("value-%02d-02", i)) + values := [][]byte{value1, value2} + + switch i % 8 { + case 0: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + values: values[:1], + }) + + // leave this element untouched in the second segment + expected = append(expected, kv{ + key: key, + values: values[:1], + }) + + case 1: + // add to segment 1 + segment1 = append(segment1, kv{ + key: key, + values: values[:1], + }) + + // update in the second segment + segment2 = append(segment2, kv{ + key: key, + values: values[1:2], + }) + + expected = append(expected, kv{ + key: key, + values: values, + }) + + case 2: + // add both to segment 1, delete the first + segment1 = append(segment1, kv{ + key: key, + values: values, + }) + + // delete first element in the second segment + segment2 = append(segment2, kv{ + key: key, + values: values[:1], + delete: true, + }) + + // only the 2nd element should be left in the expected + expected = append(expected, kv{ + key: key, + values: values[1:2], + }) + + case 3: + // add both to segment 1, delete the second + segment1 = append(segment1, kv{ + key: key, + values: values, + }) + + // delete second element in the second segment + segment2 = append(segment2, kv{ + key: key, + values: values[1:], + delete: true, + }) + + // only the 1st element should be left in the expected + expected = append(expected, kv{ + key: key, + values: values[:1], + }) + + case 4: + // do not add to segment 1 + + // only add to segment 2 (first entry) + segment2 = append(segment2, kv{ + key: key, + values: values, + }) + + expected = append(expected, kv{ + key: key, + values: values, + }) + + case 5: + // only part of a previous segment, which is not part of the merge + previous1 = append(previous1, kv{ + key: key, + values: values[:1], + }) 
+ previous2 = append(previous2, kv{ + key: key, + values: values[1:], + }) + + // delete in segment 1 + segment1 = append(segment1, kv{ + key: key, + values: values[:1], + delete: true, + }) + segment1 = append(segment1, kv{ + key: key, + values: values[1:], + delete: true, + }) + + // should not have any values in expected at all, not even a key + + case 6: + // only part of a previous segment, which is not part of the merge + previous1 = append(previous1, kv{ + key: key, + values: values[:1], + }) + previous2 = append(previous2, kv{ + key: key, + values: values[1:], + }) + + // delete in segment 2 + segment2 = append(segment2, kv{ + key: key, + values: values[:1], + delete: true, + }) + segment2 = append(segment2, kv{ + key: key, + values: values[1:], + delete: true, + }) + + // should not have any values in expected at all, not even a key + + case 7: + // part of a previous segment + previous1 = append(previous1, kv{ + key: key, + values: values[:1], + }) + previous2 = append(previous2, kv{ + key: key, + values: values[1:], + }) + + expected = append(expected, kv{ + key: key, + values: values, + }) + } + } + }) + + t.Run("shuffle the import order for each segment", func(t *testing.T) { + // this is to make sure we don't accidentally rely on the import order + rand.Shuffle(len(segment1), func(i, j int) { + segment1[i], segment1[j] = segment1[j], segment1[i] + }) + rand.Shuffle(len(segment2), func(i, j int) { + segment2[i], segment2[j] = segment2[j], segment2[i] + }) + }) + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("import and flush previous segments", func(t *testing.T) { + for _, pair := range previous1 { + err := bucket.SetAdd(pair.key, pair.values) + require.Nil(t, err) + } + + require.Nil(t, bucket.FlushAndSwitch()) + + for _, pair := range previous2 { + err := bucket.SetAdd(pair.key, pair.values) + require.Nil(t, err) + } + + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("import segment 1", func(t *testing.T) { + for _, pair := range segment1 { + if !pair.delete { + err := bucket.SetAdd(pair.key, pair.values) + require.Nil(t, err) + } else { + err := bucket.SetDeleteSingle(pair.key, pair.values[0]) + require.Nil(t, err) + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("import segment 2", func(t *testing.T) { + for _, pair := range segment2 { + if !pair.delete { + err := bucket.SetAdd(pair.key, pair.values) + require.Nil(t, err) + } else { + err := bucket.SetDeleteSingle(pair.key, pair.values[0]) + require.Nil(t, err) + } + } + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.SetCursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + retrieved = append(retrieved, kv{ + key: k, + values: v, + }) + } + + assert.Equal(t, expected, retrieved) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + i := 0 + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + if i == 1 { + // segment1 and segment2 merged + // none of them is root segment, so tombstones + // will not be removed regardless of keepTombstones setting + assertSecondSegmentOfSize(t, bucket, 8556, 
8556) + } + i++ + } + require.Nil(t, err) + }) + + t.Run("verify control after compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.SetCursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + retrieved = append(retrieved, kv{ + key: k, + values: v, + }) + } + + assert.Equal(t, expected, retrieved) + assertSingleSegmentOfSize(t, bucket, expectedMinSize, expectedMaxSize) + }) +} + +func compactionSetStrategy_RemoveUnnecessary(ctx context.Context, t *testing.T, opts []BucketOption) { + // in this test each segment reverses the action of the previous segment so + // that in the end a lot of information is present in the individual segments + // which is no longer needed. We then verify that after all compaction this + // information is gone, thus freeing up disk space + size := 100 + + type kv struct { + key []byte + values [][]byte + } + + key := []byte("my-key") + + var bucket *Bucket + dirName := t.TempDir() + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("write segments", func(t *testing.T) { + for i := 0; i < size; i++ { + if i != 0 { + // we can only delete an existing value if this isn't the first write + value := []byte(fmt.Sprintf("value-%05d", i-1)) + err := bucket.SetDeleteSingle(key, value) + require.Nil(t, err) + } + + value := []byte(fmt.Sprintf("value-%05d", i)) + err := bucket.SetAdd(key, [][]byte{value}) + require.Nil(t, err) + + require.Nil(t, bucket.FlushAndSwitch()) + } + }) + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + expected := []kv{ + { + key: key, + values: [][]byte{[]byte(fmt.Sprintf("value-%05d", size-1))}, + }, + } + + c := bucket.SetCursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + retrieved = append(retrieved, kv{ + key: k, + values: v, + }) + } + + assert.Equal(t, expected, retrieved) + }) + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + } + require.Nil(t, err) + }) + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + expected := []kv{ + { + key: key, + values: [][]byte{[]byte(fmt.Sprintf("value-%05d", size-1))}, + }, + } + + c := bucket.SetCursor() + defer c.Close() + + for k, v := c.First(); k != nil; k, v = c.Next() { + retrieved = append(retrieved, kv{ + key: k, + values: v, + }) + } + + assert.Equal(t, expected, retrieved) + }) +} + +func compactionSetStrategy_FrequentPutDeleteOperations(ctx context.Context, t *testing.T, opts []BucketOption) { + // In this test we are testing that the compaction works well for set collection + maxSize := 10 + + for size := 4; size < maxSize; size++ { + t.Run(fmt.Sprintf("compact %v segments", size), func(t *testing.T) { 
+ var bucket *Bucket + + key := []byte("key-original") + value1 := []byte("value-01") + value2 := []byte("value-02") + values := [][]byte{value1, value2} + + dirName := t.TempDir() + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("import and flush segments", func(t *testing.T) { + for i := 0; i < size; i++ { + err := bucket.SetAdd(key, values) + require.Nil(t, err) + + if size == 5 { + // delete all + err := bucket.SetDeleteSingle(key, values[0]) + require.Nil(t, err) + err = bucket.SetDeleteSingle(key, values[1]) + require.Nil(t, err) + } else if size == 6 { + // delete only one value + err := bucket.SetDeleteSingle(key, values[0]) + require.Nil(t, err) + } else if i != size-1 { + // don't delete from the last segment + err := bucket.SetDeleteSingle(key, values[0]) + require.Nil(t, err) + err = bucket.SetDeleteSingle(key, values[1]) + require.Nil(t, err) + } + + require.Nil(t, bucket.FlushAndSwitch()) + } + }) + + t.Run("verify that objects exist before compaction", func(t *testing.T) { + res, err := bucket.SetList(key) + assert.Nil(t, err) + switch size { + case 5: + assert.Len(t, res, 0) + case 6: + assert.Len(t, res, 1) + default: + assert.Len(t, res, 2) + } + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + } + require.Nil(t, err) + }) + + t.Run("verify that objects exist after compaction", func(t *testing.T) { + res, err := bucket.SetList(key) + assert.Nil(t, err) + switch size { + case 5: + assert.Len(t, res, 0) + case 6: + assert.Len(t, res, 1) + default: + assert.Len(t, 
res, 2) + } + }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_inverted.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_inverted.go new file mode 100644 index 0000000000000000000000000000000000000000..91ee34012ac202c7d5347395c1c4b08bb4e36bb7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_inverted.go @@ -0,0 +1,455 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "encoding/binary" + "encoding/gob" + "fmt" + "io" + "maps" + "math" + + "github.com/weaviate/weaviate/usecases/memwatch" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/compactor" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/terms" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/varenc" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type compactorInverted struct { + // c1 is always the older segment, so when there is a conflict c2 wins + // (because of the replace strategy) + c1 *segmentCursorInvertedReusable + c2 *segmentCursorInvertedReusable + + // the level matching those of the cursors + currentLevel uint16 + secondaryIndexCount uint16 + // Tells if tombstones or keys without corresponding values + // can be removed from merged segment. 
+ // (left segment is root (1st) one, keepTombstones is off for bucket) + cleanupTombstones bool + + w io.WriteSeeker + bufw compactor.Writer + mw *compactor.MemoryWriter + + scratchSpacePath string + + offset int + + tombstonesToWrite *sroar.Bitmap + tombstonesToClean *sroar.Bitmap + + propertyLengthsToWrite map[uint64]uint32 + propertyLengthsToClean map[uint64]uint32 + + invertedHeader *segmentindex.HeaderInverted + + docIdEncoder varenc.VarEncEncoder[uint64] + tfEncoder varenc.VarEncEncoder[uint64] + + allocChecker memwatch.AllocChecker + + k1, b, avgPropLen float64 + + enableChecksumValidation bool + + segmentFile *segmentindex.SegmentFile + maxNewFileSize int64 +} + +func newCompactorInverted(w io.WriteSeeker, + c1, c2 *segmentCursorInvertedReusable, level, secondaryIndexCount uint16, + scratchSpacePath string, cleanupTombstones bool, + k1, b, avgPropLen float64, maxNewFileSize int64, + allocChecker memwatch.AllocChecker, enableChecksumValidation bool, +) *compactorInverted { + observeWrite := monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "operation": "compaction", + "strategy": StrategyInverted, + }) + writeCB := func(written int64) { + observeWrite.Observe(float64(written)) + } + meteredW := diskio.NewMeteredWriter(w, writeCB) + writer, mw := compactor.NewWriter(meteredW, maxNewFileSize) + + return &compactorInverted{ + c1: c1, + c2: c2, + w: meteredW, + bufw: writer, + mw: mw, + currentLevel: level, + cleanupTombstones: cleanupTombstones, + secondaryIndexCount: secondaryIndexCount, + scratchSpacePath: scratchSpacePath, + offset: 0, + k1: k1, + b: b, + avgPropLen: avgPropLen, + enableChecksumValidation: enableChecksumValidation, + maxNewFileSize: maxNewFileSize, + allocChecker: allocChecker, + } +} + +func (c *compactorInverted) do() error { + var err error + + if err := c.init(); err != nil { + return errors.Wrap(err, "init") + } + + c.segmentFile = segmentindex.NewSegmentFile( + segmentindex.WithBufferedWriter(c.bufw), + 
segmentindex.WithChecksumsDisabled(!c.enableChecksumValidation), + ) + + c.tombstonesToWrite, err = c.c1.segment.ReadOnlyTombstones() + if err != nil { + return errors.Wrap(err, "get tombstones") + } + + c.tombstonesToClean, err = c.c2.segment.ReadOnlyTombstones() + if err != nil { + return errors.Wrap(err, "get tombstones") + } + + propertyLengthsToWrite, err := c.c1.segment.GetPropertyLengths() + if err != nil { + return errors.Wrap(err, "get property lengths") + } + + propertyLengthsToClean, err := c.c2.segment.GetPropertyLengths() + if err != nil { + return errors.Wrap(err, "get property lengths") + } + + c.propertyLengthsToWrite = make(map[uint64]uint32, len(propertyLengthsToWrite)) + c.propertyLengthsToClean = make(map[uint64]uint32, len(propertyLengthsToClean)) + + maps.Copy(c.propertyLengthsToWrite, propertyLengthsToWrite) + maps.Copy(c.propertyLengthsToClean, propertyLengthsToClean) + + tombstones := c.computeTombstonesAndPropLengths() + + keysOffset := segmentindex.HeaderSize + segmentindex.SegmentInvertedDefaultHeaderSize + len(c.c1.segment.invertedHeader.DataFields) + + kis, err := c.writeKeys() + if err != nil { + return errors.Wrap(err, "write keys") + } + + tombstoneOffset := c.offset + _, err = c.writeTombstones(tombstones) + if err != nil { + return errors.Wrap(err, "write tombstones") + } + + propertyLengthsOffset := c.offset + _, err = c.writePropertyLengths(c.propertyLengthsToWrite) + if err != nil { + return errors.Wrap(err, "write property lengths") + } + treeOffset := uint64(c.offset) + if err := c.writeIndices(kis); err != nil { + return errors.Wrap(err, "write index") + } + + // flush buffered, so we can safely seek on underlying writer + if c.mw == nil { + if err := c.bufw.Flush(); err != nil { + return fmt.Errorf("flush buffered: %w", err) + } + } + + c.invertedHeader.KeysOffset = uint64(keysOffset) + c.invertedHeader.TombstoneOffset = uint64(tombstoneOffset) + c.invertedHeader.PropertyLengthsOffset = uint64(propertyLengthsOffset) + + 
version := segmentindex.ChooseHeaderVersion(c.enableChecksumValidation) + if err := compactor.WriteHeaders(c.mw, c.w, c.bufw, c.segmentFile, c.currentLevel, version, + c.secondaryIndexCount, treeOffset, segmentindex.StrategyInverted, c.invertedHeader); err != nil { + return errors.Wrap(err, "write header") + } + + if _, err := c.segmentFile.WriteChecksum(); err != nil { + return fmt.Errorf("write compactorMap segment checksum: %w", err) + } + + return nil +} + +func (c *compactorInverted) init() error { + // write a dummy header, we don't know the contents of the actual header yet, + // we will seek to the beginning and overwrite the actual header at the very + // end + + if len(c.c1.segment.invertedHeader.DataFields) != len(c.c2.segment.invertedHeader.DataFields) { + return errors.Errorf("inverted header data fields mismatch: %d != %d", + len(c.c1.segment.invertedHeader.DataFields), + len(c.c2.segment.invertedHeader.DataFields)) + } + + if _, err := c.bufw.Write(make([]byte, segmentindex.HeaderSize)); err != nil { + return errors.Wrap(err, "write empty header") + } + if _, err := c.bufw.Write(make([]byte, segmentindex.SegmentInvertedDefaultHeaderSize+len(c.c1.segment.invertedHeader.DataFields))); err != nil { + return errors.Wrap(err, "write empty inverted header") + } + + c.offset = segmentindex.HeaderSize + segmentindex.SegmentInvertedDefaultHeaderSize + len(c.c1.segment.invertedHeader.DataFields) + + c.invertedHeader = &segmentindex.HeaderInverted{ + Version: 0, + KeysOffset: uint64(c.offset), + TombstoneOffset: 0, + PropertyLengthsOffset: 0, + BlockSize: uint8(segmentindex.SegmentInvertedDefaultBlockSize), + DataFieldCount: uint8(len(c.c1.segment.invertedHeader.DataFields)), + DataFields: c.c1.segment.invertedHeader.DataFields, + } + + c.docIdEncoder = varenc.GetVarEncEncoder64(c.invertedHeader.DataFields[0]) + c.docIdEncoder.Init(terms.BLOCK_SIZE) + c.tfEncoder = varenc.GetVarEncEncoder64(c.invertedHeader.DataFields[1]) + c.tfEncoder.Init(terms.BLOCK_SIZE) + 
+ return nil +} + +func (c *compactorInverted) writeTombstones(tombstones *sroar.Bitmap) (int, error) { + tombstonesBuffer := make([]byte, 0) + + if tombstones != nil && !tombstones.IsEmpty() { + tombstonesBuffer = tombstones.ToBuffer() + } + + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, uint64(len(tombstonesBuffer))) + if _, err := c.segmentFile.BodyWriter().Write(buf); err != nil { + return 0, err + } + + if _, err := c.segmentFile.BodyWriter().Write(tombstonesBuffer); err != nil { + return 0, err + } + c.offset += len(tombstonesBuffer) + 8 + return len(tombstonesBuffer) + 8, nil +} + +func (c *compactorInverted) combinePropertyLengths() (uint64, float64) { + count := c.c1.segment.invertedData.avgPropertyLengthsCount + c.c2.segment.invertedData.avgPropertyLengthsCount + average := c.c1.segment.invertedData.avgPropertyLengthsAvg * (float64(c.c1.segment.invertedData.avgPropertyLengthsCount) / float64(count)) + average += c.c2.segment.invertedData.avgPropertyLengthsAvg * (float64(c.c2.segment.invertedData.avgPropertyLengthsCount) / float64(count)) + + return count, average +} + +func (c *compactorInverted) writePropertyLengths(propLengths map[uint64]uint32) (int, error) { + b := new(bytes.Buffer) + + e := gob.NewEncoder(b) + + // Encoding the map + err := e.Encode(propLengths) + if err != nil { + return 0, err + } + + count, average := c.combinePropertyLengths() + + buf := make([]byte, 8) + + binary.LittleEndian.PutUint64(buf, math.Float64bits(average)) + if _, err := c.segmentFile.BodyWriter().Write(buf); err != nil { + return 0, err + } + + binary.LittleEndian.PutUint64(buf, count) + if _, err := c.segmentFile.BodyWriter().Write(buf); err != nil { + return 0, err + } + + binary.LittleEndian.PutUint64(buf, uint64(b.Len())) + if _, err := c.segmentFile.BodyWriter().Write(buf); err != nil { + return 0, err + } + + if _, err := c.segmentFile.BodyWriter().Write(b.Bytes()); err != nil { + return 0, err + } + c.offset += b.Len() + 8 + 8 + 8 + return 
b.Len() + 8 + 8 + 8, nil +} + +func (c *compactorInverted) writeKeys() ([]segmentindex.Key, error) { + key1, value1, _ := c.c1.first() + key2, value2, _ := c.c2.first() + + // the (dummy) header was already written, this is our initial offset + + var kis []segmentindex.Key + sim := newSortedMapMerger() + + for { + if key1 == nil && key2 == nil { + break + } + if bytes.Equal(key1, key2) { + + value1Clean, _ := c.cleanupValues(value1) + + sim.reset([][]MapPair{value1Clean, value2}) + mergedPairs, err := sim. + doKeepTombstonesReusable() + if err != nil { + return nil, err + } + + if len(mergedPairs) == 0 { + // skip key if no values left + key1, value1, _ = c.c1.next() + key2, value2, _ = c.c2.next() + continue + } + + ki, err := c.writeIndividualNode(c.offset, key2, mergedPairs, c.propertyLengthsToWrite) + if err != nil { + return nil, errors.Wrap(err, "write individual node (equal keys)") + } + + c.offset = ki.ValueEnd + kis = append(kis, ki) + + // advance both! + key1, value1, _ = c.c1.next() + key2, value2, _ = c.c2.next() + continue + } + + if (key1 != nil && bytes.Compare(key1, key2) == -1) || key2 == nil { + // key 1 is smaller + if values, skip := c.cleanupValues(value1); !skip { + ki, err := c.writeIndividualNode(c.offset, key1, values, c.propertyLengthsToWrite) + if err != nil { + return nil, errors.Wrap(err, "write individual node (key1 smaller)") + } + + c.offset = ki.ValueEnd + kis = append(kis, ki) + } + key1, value1, _ = c.c1.next() + } else { + // key 2 is smaller + ki, err := c.writeIndividualNode(c.offset, key2, value2, c.propertyLengthsToWrite) + if err != nil { + return nil, errors.Wrap(err, "write individual node (key2 smaller)") + } + + c.offset = ki.ValueEnd + kis = append(kis, ki) + + key2, value2, _ = c.c2.next() + } + } + + return kis, nil +} + +func (c *compactorInverted) writeIndividualNode(offset int, key []byte, + values []MapPair, propertyLengths map[uint64]uint32, +) (segmentindex.Key, error) { + // NOTE: There are no guarantees in 
the cursor logic that any memory is valid + // for more than a single iteration. Every time you call next() to advance + // the cursor, any memory might be reused. + // + // This includes the key buffer which was the cause of + // https://github.com/weaviate/weaviate/issues/3517 + // + // A previous logic created a new assignment in each iteration, but thatwas + // not an explicit guarantee. A change in v1.21 (for pread/mmap) added a + // reusable buffer for the key which surfaced this bug. + keyCopy := make([]byte, len(key)) + copy(keyCopy, key) + + return segmentInvertedNode{ + values: values, + primaryKey: keyCopy, + offset: offset, + propLengths: propertyLengths, + }.KeyIndexAndWriteTo(c.segmentFile.BodyWriter(), c.docIdEncoder, c.tfEncoder, c.k1, c.b, c.avgPropLen) +} + +func (c *compactorInverted) writeIndices(keys []segmentindex.Key) error { + indices := segmentindex.Indexes{ + Keys: keys, + SecondaryIndexCount: c.secondaryIndexCount, + ScratchSpacePath: c.scratchSpacePath, + ObserveWrite: monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "strategy": StrategyInverted, + "operation": "writeIndices", + }), + AllocChecker: c.allocChecker, + } + + _, err := indices.WriteTo(c.segmentFile.BodyWriter(), uint64(c.maxNewFileSize)) + return err +} + +// Removes values with tombstone set from input slice. Output slice may be smaller than input one. +// Returned skip of true means there are no values left (key can be omitted in segment) +// WARN: method can alter input slice by swapping its elements and reducing length (not capacity) +func (c *compactorInverted) cleanupValues(values []MapPair) (vals []MapPair, skip bool) { + // Reuse input slice not to allocate new memory + // Rearrange slice in a way that tombstoned values are moved to the end + // and reduce slice's length. 
+ last := 0 + for i := 0; i < len(values); i++ { + docId := binary.BigEndian.Uint64(values[i].Key) + if !(c.tombstonesToClean != nil && c.tombstonesToClean.Contains(docId)) { + values[last], values[i] = values[i], values[last] + last++ + } + } + + if last == 0 { + return nil, true + } + return values[:last], false +} + +func (c *compactorInverted) computeTombstonesAndPropLengths() *sroar.Bitmap { + maps.Copy(c.propertyLengthsToWrite, c.propertyLengthsToClean) + + if c.cleanupTombstones { // no tombstones to write + return sroar.NewBitmap() + } + if c.tombstonesToWrite == nil { + return c.tombstonesToClean + } + if c.tombstonesToClean == nil { + return c.tombstonesToWrite + } + + return sroar.Or(c.tombstonesToWrite, c.tombstonesToClean) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_inverted_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_inverted_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ca1b5bcd8455a36c1cb32a5e79dab987770de0ed --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_inverted_integration_test.go @@ -0,0 +1,1280 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "math" + "math/rand" + "sort" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted/terms" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/schema" +) + +func NewMapPairFromDocIdAndTf(docId uint64, tf float32, propLength float32, isTombstone bool) MapPair { + key := make([]byte, 8) + binary.BigEndian.PutUint64(key, docId) + + value := make([]byte, 8) + binary.LittleEndian.PutUint32(value[0:4], math.Float32bits(tf)) + binary.LittleEndian.PutUint32(value[4:8], math.Float32bits(propLength)) + + return MapPair{ + Key: key, + Value: value, + Tombstone: isTombstone, + } +} + +func (kv MapPair) UpdateTf(tf float32, propLength float32) { + kv.Value = make([]byte, 8) + binary.LittleEndian.PutUint32(kv.Value[0:4], math.Float32bits(tf)) + binary.LittleEndian.PutUint32(kv.Value[4:8], math.Float32bits(propLength)) +} + +type kv struct { + key []byte + values []MapPair +} + +func validateMapPairListVsBlockMaxSearch(ctx context.Context, bucket *Bucket, expectedMultiKey []kv) error { + for _, termPair := range expectedMultiKey { + expected := termPair.values + mapKey := termPair.key + // get more results, as there may be more results than expected on the result heap + // during intermediate steps of insertions + N := len(expected) * 10 + bm25config := schema.BM25Config{ + K1: 1.2, + B: 0.75, + } + avgPropLen := 1.0 + queries := []string{string(mapKey)} + duplicateTextBoosts := make([]int, 1) + diskTerms, _, release, err := bucket.CreateDiskTerm(float64(N), nil, queries, "", 1, duplicateTextBoosts, bm25config, ctx) + if err != nil { + return fmt.Errorf("failed to create disk 
term: %w", err) + } + defer func() { + release() + }() + + expectedSet := make(map[uint64][]*terms.DocPointerWithScore, len(expected)) + for _, diskTerm := range diskTerms { + topKHeap, err := DoBlockMaxWand(ctx, N, diskTerm, avgPropLen, true, 1, 1, bucket.logger) + if err != nil { + return fmt.Errorf("failed to execute DoBlockMaxWand for diskTerm %v: %w", diskTerm, err) + } + for topKHeap.Len() > 0 { + item := topKHeap.Pop() + expectedSet[item.ID] = item.Value + } + } + + for _, val := range expected { + docId := binary.BigEndian.Uint64(val.Key) + if val.Tombstone { + continue + } + freq := math.Float32frombits(binary.LittleEndian.Uint32(val.Value[0:4])) + if _, ok := expectedSet[docId]; !ok { + return fmt.Errorf("expected docId %v not found in topKHeap: %v", docId, expectedSet) + } + if expectedSet[docId][0].Frequency != freq { + return fmt.Errorf("expected frequency %v but got %v", freq, expectedSet[docId][0].Frequency) + } + + } + } + + return nil +} + +func createTerm(bucket *Bucket, N float64, filterDocIds helpers.AllowList, query string, queryTermIndex int, propertyBoost float32, duplicateTextBoost int, ctx context.Context, bm25Config schema.BM25Config, logger logrus.FieldLogger) (*terms.Term, error) { + termResult := terms.NewTerm(query, queryTermIndex, float32(1.0), bm25Config) + + allMsAndProps := make([][]terms.DocPointerWithScore, 1) + + m, err := bucket.DocPointerWithScoreList(ctx, []byte(query), 1) + if err != nil { + return nil, err + } + + allMsAndProps[0] = m + + largestN := 0 + // remove empty results from allMsAndProps + nonEmptyMsAndProps := make([][]terms.DocPointerWithScore, 0, len(allMsAndProps)) + for _, m := range allMsAndProps { + if len(m) > 0 { + nonEmptyMsAndProps = append(nonEmptyMsAndProps, m) + } + if len(m) > largestN { + largestN = len(m) + } + } + allMsAndProps = nonEmptyMsAndProps + + if len(nonEmptyMsAndProps) == 0 { + return nil, nil + } + + if len(nonEmptyMsAndProps) == 1 { + termResult.Data = allMsAndProps[0] + n := 
float64(len(termResult.Data)) + termResult.SetIdf(math.Log(float64(1)+(N-float64(n)+0.5)/(float64(n)+0.5)) * float64(duplicateTextBoost)) + termResult.SetPosPointer(0) + termResult.SetIdPointer(termResult.Data[0].Id) + return termResult, nil + } + indices := make([]int, len(allMsAndProps)) + var docMapPairs []terms.DocPointerWithScore = nil + + // The indices are needed to combining the results of different properties + // They were previously used to keep track of additional explanations TF and prop len, + // but this is now done when adding terms to the heap in the getTopKHeap function + var docMapPairsIndices map[uint64]int = nil + for { + i := -1 + minId := uint64(0) + for ti, mAndProps := range allMsAndProps { + if indices[ti] >= len(mAndProps) { + continue + } + ki := mAndProps[indices[ti]].Id + if i == -1 || ki < minId { + i = ti + minId = ki + } + } + + if i == -1 { + break + } + + m := allMsAndProps[i] + k := indices[i] + val := m[indices[i]] + + indices[i]++ + + // only create maps/slices if we know how many entries there are + if docMapPairs == nil { + docMapPairs = make([]terms.DocPointerWithScore, 0, largestN) + docMapPairsIndices = make(map[uint64]int, largestN) + + docMapPairs = append(docMapPairs, val) + docMapPairsIndices[val.Id] = k + } else { + key := val.Id + ind, ok := docMapPairsIndices[key] + if ok { + if ind >= len(docMapPairs) { + // the index is not valid anymore, but the key is still in the map + logger.Warnf("Skipping pair in BM25: Index %d is out of range for key %d, length %d.", ind, key, len(docMapPairs)) + continue + } + if ind < len(docMapPairs) && docMapPairs[ind].Id != key { + logger.Warnf("Skipping pair in BM25: id at %d in doc map pairs, %d, differs from current key, %d", ind, docMapPairs[ind].Id, key) + continue + } + + docMapPairs[ind].PropLength += val.PropLength + docMapPairs[ind].Frequency += val.Frequency + } else { + docMapPairs = append(docMapPairs, val) + docMapPairsIndices[val.Id] = len(docMapPairs) - 1 // current last 
entry + } + + } + } + if docMapPairs == nil { + return nil, nil + } + termResult.Data = docMapPairs + + n := float64(len(docMapPairs)) + termResult.SetIdf(math.Log(float64(1)+(N-n+0.5)/(n+0.5)) * float64(duplicateTextBoost)) + + // catch special case where there are no results and would panic termResult.data[0].id + // related to #4125 + if len(termResult.Data) == 0 { + return nil, nil + } + + termResult.SetPosPointer(0) + termResult.SetIdPointer(termResult.Data[0].Id) + return termResult, nil +} + +func validateMapPairListVsWandSearch(ctx context.Context, bucket *Bucket, expectedMultiKey []kv) error { + for _, termPair := range expectedMultiKey { + expected := termPair.values + mapKey := termPair.key + // get more results, as there may be more results than expected on the result heap + // during intermediate steps of insertions + N := len(expected) * 10 + bm25config := schema.BM25Config{ + K1: 1.2, + B: 0.75, + } + avgPropLen := 1.0 + duplicateTextBoosts := make(map[string]float32) + duplicateTextBoosts[string(mapKey)] = 1.0 + term, err := createTerm(bucket, float64(N), nil, string(mapKey), 0, 1.0, 1, ctx, bm25config, bucket.logger) + + countNonTombstones := 0 + for _, val := range expected { + if !val.Tombstone { + countNonTombstones++ + } + } + + // nothing but tombstones and nothing retrieved + if term == nil && err == nil && countNonTombstones == 0 { + continue + } else if term == nil && err == nil { + return fmt.Errorf("expected term to be non-nil") + } else if err != nil { + return fmt.Errorf("failed to create term: %w", err) + } + + expectedSet := make(map[uint64][]*terms.DocPointerWithScore, len(expected)) + terms := &terms.Terms{ + T: []terms.TermInterface{term}, + Count: 1, + } + + topKHeap := DoWand(N, terms, avgPropLen, true, 1) + + for topKHeap.Len() > 0 { + item := topKHeap.Pop() + expectedSet[item.ID] = item.Value + } + + for _, val := range expected { + docId := binary.BigEndian.Uint64(val.Key) + if val.Tombstone { + continue + } + freq := 
math.Float32frombits(binary.LittleEndian.Uint32(val.Value[0:4])) + if _, ok := expectedSet[docId]; !ok { + return fmt.Errorf("expected docId %v not found in topKHeap: %v", docId, expectedSet) + } + if expectedSet[docId][0].Frequency != freq { + return fmt.Errorf("expected frequency %v but got %v", freq, expectedSet[docId][0].Frequency) + } + + } + } + + return nil +} + +// compare expected and actual, but actual can have more keys than expected +func partialCompare(expected, actual kv) error { + a := 0 + for _, e := range expected.values { + if e.Tombstone { + continue + } + // if the key in actual is smaller than the key in expected, move to the next key in actual + for bytes.Compare(actual.values[a].Key, e.Key) < 0 { + a++ + if a >= len(actual.values) { + docId := binary.BigEndian.Uint64(e.Key) + return fmt.Errorf("expected key %v (docId: %v) not found in actual values", e.Key, docId) + } + } + + if !bytes.Equal(actual.values[a].Key, e.Key) { + docId := binary.BigEndian.Uint64(e.Key) + return fmt.Errorf("expected key %v (docId: %v) not found in actual values", e.Key, docId) + } + if !bytes.Equal(actual.values[a].Value, e.Value) { + return fmt.Errorf("expected value %v, got %v", e.Value, actual.values[a].Value) + } + } + + return nil +} + +func compactionInvertedStrategy(ctx context.Context, t *testing.T, opts []BucketOption, + expectedMinSize, expectedMaxSize int64, +) { + size := 100 + + addedDocIds := make(map[uint64]struct{}) + removedDocIds := make(map[uint64]struct{}) + + // this segment is not part of the merge, but might still play a role in + // overall results. For example if one of the later segments has a tombstone + // for it + var previous1 []kv + var previous2 []kv + + var segment1 []kv + var segment2 []kv + var expected []kv + var bucket *Bucket + + dirName := t.TempDir() + + t.Run("create test data", func(t *testing.T) { + // The test data is split into 4 scenarios evenly: + // + // 0.) created in the first segment, never touched again + // 1.) 
created in the first segment, appended to it in the second + // 2.) created in the first segment, first element updated in the second + // 3.) created in the first segment, second element updated in the second + // 4.) created in the first segment, first element deleted in the second + // 5.) created in the first segment, second element deleted in the second + // 6.) not present in the first segment, created in the second + // 7.) present in an unrelated previous segment, deleted in the first + // 8.) present in an unrelated previous segment, deleted in the second + // 9.) present in an unrelated previous segment, never touched again + for i := 0; i < size; i++ { + rowKey := []byte(fmt.Sprintf("row-%03d", i)) + + docId1 := uint64(i) + docId2 := uint64(i + 10000) + + pair1 := NewMapPairFromDocIdAndTf(docId1, float32(i+1), float32(i+2), false) + pair2 := NewMapPairFromDocIdAndTf(docId2, float32(i+1), float32(i+2), false) + + pairs := []MapPair{pair1, pair2} + + switch i % 10 { + case 0: + // add to segment 1 + segment1 = append(segment1, kv{ + key: rowKey, + values: pairs[:1], + }) + + addedDocIds[docId1] = struct{}{} + // leave this element untouched in the second segment + expected = append(expected, kv{ + key: rowKey, + values: pairs[:1], + }) + case 1: + // add to segment 1 + segment1 = append(segment1, kv{ + key: rowKey, + values: pairs[:1], + }) + + // add extra pair in the second segment + segment2 = append(segment2, kv{ + key: rowKey, + values: pairs[1:2], + }) + + addedDocIds[docId1] = struct{}{} + addedDocIds[docId2] = struct{}{} + + expected = append(expected, kv{ + key: rowKey, + values: pairs, + }) + case 2: + // add both to segment 1 + segment1 = append(segment1, kv{ + key: rowKey, + values: pairs, + }) + + // update first key in the second segment + updated := pair1 + updated.UpdateTf(float32(i*1000), float32(i*1000)) + + segment2 = append(segment2, kv{ + key: rowKey, + values: []MapPair{updated}, + }) + + expected = append(expected, kv{ + key: rowKey, 
+ values: []MapPair{pair2, updated}, + }) + + addedDocIds[docId1] = struct{}{} + addedDocIds[docId2] = struct{}{} + + case 3: + // add both to segment 1 + segment1 = append(segment1, kv{ + key: rowKey, + values: pairs, + }) + + // update first key in the second segment + updated := pair2 + updated.UpdateTf(float32(i*1000), float32(i*1000)) + + segment2 = append(segment2, kv{ + key: rowKey, + values: []MapPair{updated}, + }) + + expected = append(expected, kv{ + key: rowKey, + values: []MapPair{pair1, updated}, + }) + + addedDocIds[docId1] = struct{}{} + addedDocIds[docId2] = struct{}{} + + case 4: + // add both to segment 1 + segment1 = append(segment1, kv{ + key: rowKey, + values: pairs, + }) + + // delete first key in the second segment + updated := pair1 + updated.Value = nil + updated.Tombstone = true + + segment2 = append(segment2, kv{ + key: rowKey, + values: []MapPair{updated}, + }) + + expected = append(expected, kv{ + key: rowKey, + values: []MapPair{pair2}, + }) + + removedDocIds[docId1] = struct{}{} + case 5: + // add both to segment 1 + segment1 = append(segment1, kv{ + key: rowKey, + values: pairs, + }) + + // delete second key in the second segment + updated := pair2 + updated.Value = nil + updated.Tombstone = true + + segment2 = append(segment2, kv{ + key: rowKey, + values: []MapPair{updated}, + }) + + expected = append(expected, kv{ + key: rowKey, + values: []MapPair{pair1}, + }) + + removedDocIds[docId2] = struct{}{} + + case 6: + // do not add to segment 2 + + // only add to segment 2 (first entry) + segment2 = append(segment2, kv{ + key: rowKey, + values: pairs, + }) + + expected = append(expected, kv{ + key: rowKey, + values: pairs, + }) + + addedDocIds[docId1] = struct{}{} + addedDocIds[docId2] = struct{}{} + + case 7: + // only part of a previous segment, which is not part of the merge + previous1 = append(previous1, kv{ + key: rowKey, + values: pairs[:1], + }) + previous2 = append(previous2, kv{ + key: rowKey, + values: pairs[1:], + }) + + // 
delete in segment 1 + deleted1 := pair1 + deleted1.Value = nil + deleted1.Tombstone = true + + deleted2 := pair2 + deleted2.Value = nil + deleted2.Tombstone = true + + segment1 = append(segment1, kv{ + key: rowKey, + values: []MapPair{deleted1}, + }) + segment1 = append(segment1, kv{ + key: rowKey, + values: []MapPair{deleted2}, + }) + + removedDocIds[docId1] = struct{}{} + removedDocIds[docId2] = struct{}{} + + // should not have any values in expected at all, not even a key + + case 8: + // only part of a previous segment, which is not part of the merge + previous1 = append(previous1, kv{ + key: rowKey, + values: pairs[:1], + }) + previous2 = append(previous2, kv{ + key: rowKey, + values: pairs[1:], + }) + + // delete in segment 1 + deleted1 := pair1 + deleted1.Value = nil + deleted1.Tombstone = true + + deleted2 := pair2 + deleted2.Value = nil + deleted2.Tombstone = true + + removedDocIds[docId1] = struct{}{} + removedDocIds[docId2] = struct{}{} + + segment2 = append(segment2, kv{ + key: rowKey, + values: []MapPair{deleted1}, + }) + segment2 = append(segment2, kv{ + key: rowKey, + values: []MapPair{deleted2}, + }) + + // should not have any values in expected at all, not even a key + + case 9: + // only part of a previous segment + previous1 = append(previous1, kv{ + key: rowKey, + values: pairs[:1], + }) + previous2 = append(previous2, kv{ + key: rowKey, + values: pairs[1:], + }) + + expected = append(expected, kv{ + key: rowKey, + values: pairs, + }) + + addedDocIds[docId1] = struct{}{} + addedDocIds[docId2] = struct{}{} + } + } + }) + + t.Run("shuffle the import order for each segment", func(t *testing.T) { + // this is to make sure we don't accidentally rely on the import order + rand.Shuffle(len(segment1), func(i, j int) { + segment1[i], segment1[j] = segment1[j], segment1[i] + }) + rand.Shuffle(len(segment2), func(i, j int) { + segment2[i], segment2[j] = segment2[j], segment2[i] + }) + }) + + t.Run("init bucket", func(t *testing.T) { + b, err := 
NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("import previous1 segments", func(t *testing.T) { + for _, kvs := range previous1 { + for _, pair := range kvs.values { + err := bucket.MapSet(kvs.key, pair) + + require.Nil(t, err) + } + } + }) + + t.Run("verify previous1 before flush", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + for k, _ := c.First(ctx); k != nil; k, _ = c.Next(ctx) { + + kvs, err := bucket.MapList(ctx, k) + + assert.Nil(t, err) + + if len(kvs) > 0 { + retrieved = append(retrieved, kv{ + key: k, + values: kvs, + }) + } + + } + for i := range previous1 { + assert.Nil(t, partialCompare(previous1[i], retrieved[i])) + } + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, previous1)) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, previous1)) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("import previous2 segments", func(t *testing.T) { + for _, kvs := range previous2 { + for _, pair := range kvs.values { + err := bucket.MapSet(kvs.key, pair) + require.Nil(t, err) + } + } + }) + + t.Run("verify previous2 before flush", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + for k, _ := c.First(ctx); k != nil; k, _ = c.Next(ctx) { + + kvs, err := bucket.MapList(ctx, k) + + assert.Nil(t, err) + + if len(kvs) > 0 { + retrieved = append(retrieved, kv{ + key: k, + values: kvs, + }) + } + + } + for i := range previous2 { + assert.Nil(t, partialCompare(previous2[i], retrieved[i])) + } + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, previous2)) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, previous2)) + }) + + 
t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("import segment 1", func(t *testing.T) { + for _, kvs := range segment1 { + for _, pair := range kvs.values { + err := bucket.MapSet(kvs.key, pair) + require.Nil(t, err) + } + } + }) + + t.Run("verify segment1 before flush", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + i := 0 + for k, _ := c.First(ctx); k != nil; k, _ = c.Next(ctx) { + + kvs, err := bucket.MapList(ctx, k) + + assert.Nil(t, err) + + if len(kvs) > 0 { + retrieved = append(retrieved, kv{ + key: k, + values: kvs, + }) + } + if bytes.Equal(segment1[i].key, k) { + assert.Nil(t, partialCompare(segment1[i], retrieved[len(retrieved)-1])) + i++ + } + } + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, segment1)) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, segment1)) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("verify segment1 after flush", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + i := 0 + for k, _ := c.First(ctx); k != nil; k, _ = c.Next(ctx) { + + kvs, err := bucket.MapList(ctx, k) + + assert.Nil(t, err) + + if len(kvs) > 0 { + retrieved = append(retrieved, kv{ + key: k, + values: kvs, + }) + } + if bytes.Equal(segment1[i].key, k) { + assert.Nil(t, partialCompare(segment1[i], retrieved[len(retrieved)-1])) + i++ + } + } + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, segment1)) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, segment1)) + }) + + t.Run("import segment 2", func(t *testing.T) { + for _, kvs := range segment2 { + for _, pair := range kvs.values { + err := bucket.MapSet(kvs.key, pair) + require.Nil(t, err) + } + } + }) + + t.Run("verify segment2 before flush", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + i := 0 + for k, _ := 
c.First(ctx); k != nil; k, _ = c.Next(ctx) { + + kvs, err := bucket.MapList(ctx, k) + + assert.Nil(t, err) + + if len(kvs) > 0 { + retrieved = append(retrieved, kv{ + key: k, + values: kvs, + }) + if bytes.Equal(segment2[i].key, k) { + assert.Nil(t, partialCompare(segment2[i], retrieved[len(retrieved)-1])) + i++ + } + } + + } + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, segment2)) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, segment2)) + }) + + t.Run("flush to disk", func(t *testing.T) { + require.Nil(t, bucket.FlushAndSwitch()) + }) + + t.Run("verify segment2 after flush", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + i := 0 + for k, _ := c.First(ctx); k != nil; k, _ = c.Next(ctx) { + + kvs, err := bucket.MapList(ctx, k) + + assert.Nil(t, err) + + if len(kvs) > 0 { + retrieved = append(retrieved, kv{ + key: k, + values: kvs, + }) + if bytes.Equal(segment2[i].key, k) { + assert.Nil(t, partialCompare(segment2[i], retrieved[len(retrieved)-1])) + i++ + } + } + + } + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, segment2)) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, segment2)) + }) + + t.Run("within control make sure map keys are sorted", func(t *testing.T) { + for i := range expected { + sort.Slice(expected[i].values, func(a, b int) bool { + return bytes.Compare(expected[i].values[a].Key, expected[i].values[b].Key) < 0 + }) + } + }) + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + for k, _ := c.First(ctx); k != nil; k, _ = c.Next(ctx) { + + kvs, err := bucket.MapList(ctx, k) + + assert.Nil(t, err) + + if len(kvs) > 0 { + retrieved = append(retrieved, kv{ + key: k, + values: kvs, + }) + } + + } + for i := range expected { + assert.Nil(t, partialCompare(expected[i], retrieved[i])) + } + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, expected)) + 
assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, expected)) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + i := 0 + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + i++ + t.Run("verify control during compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + + for k, _ := c.First(ctx); k != nil; k, _ = c.Next(ctx) { + + kvs, err := bucket.MapList(ctx, k) + + assert.Nil(t, err) + + if len(kvs) > 0 { + retrieved = append(retrieved, kv{ + key: k, + values: kvs, + }) + } + } + + assert.Equal(t, expected, retrieved) + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, expected)) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, expected)) + }) + } + require.Nil(t, err) + }) + + t.Run("verify control after compaction using a cursor", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + + for k, _ := c.First(ctx); k != nil; k, _ = c.Next(ctx) { + + kvs, err := bucket.MapList(ctx, k) + + assert.Nil(t, err) + + if len(kvs) > 0 { + retrieved = append(retrieved, kv{ + key: k, + values: kvs, + }) + } + } + + assert.Equal(t, expected, retrieved) + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, expected)) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, expected)) + assertSingleSegmentOfSize(t, bucket, expectedMinSize, expectedMaxSize) + }) + + t.Run("verify control using individual get (MapList) operations", + func(t *testing.T) { + // Previously the only verification was done using the cursor. That + // guaranteed that all pairs are present in the payload, but it did not + // guarantee the integrity of the index (DiskTree) which is used to access + // _individual_ keys. 
Corrupting this index is exactly what happened in + // https://github.com/weaviate/weaviate/issues/3517 + for _, pair := range expected { + kvs, err := bucket.MapList(ctx, pair.key) + require.NoError(t, err) + + assert.Equal(t, pair.values, kvs) + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, []kv{pair})) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, []kv{pair})) + } + }) +} + +func compactionInvertedStrategy_RemoveUnnecessary(ctx context.Context, t *testing.T, opts []BucketOption) { + // in this test each segment reverses the action of the previous segment so + // that in the end a lot of information is present in the individual segments + // which is no longer needed. We then verify that after all compaction this + // information is gone, thus freeing up disk space + size := 100 + + key := []byte("my-key") + + var bucket *Bucket + dirName := t.TempDir() + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), WithStrategy(StrategyInverted)) + require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("write segments", func(t *testing.T) { + for i := 0; i < size; i++ { + if i != 0 { + // we can only update an existing value if this isn't the first write + pair := NewMapPairFromDocIdAndTf(uint64(i-1), float32(i), float32(i), false) + err := bucket.MapSet(key, pair) + require.Nil(t, err) + } + + if i > 1 { + // we can only delete two back an existing value if this isn't the + // first or second write + pair := NewMapPairFromDocIdAndTf(uint64(i-2), float32(i), float32(i), true) + err := bucket.MapSet(key, pair) + require.Nil(t, err) + } + + pair := NewMapPairFromDocIdAndTf(uint64(i), float32(i), float32(i), false) + err := bucket.MapSet(key, pair) + require.Nil(t, err) + require.Nil(t, 
bucket.FlushAndSwitch()) + } + }) + + expectedPair := NewMapPairFromDocIdAndTf(uint64(size-2), float32(size-1), float32(size-1), false) + + expectedPair2 := NewMapPairFromDocIdAndTf(uint64(size-1), float32(size-1), float32(size-1), false) + + expected := []kv{ + { + key: key, + values: []MapPair{ + expectedPair, + expectedPair2, + }, + }, + } + + t.Run("verify control before compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + + for k, _ := c.First(ctx); k != nil; k, _ = c.Next(ctx) { + + kvs, err := bucket.MapList(ctx, k) + + assert.Nil(t, err) + + retrieved = append(retrieved, kv{ + key: k, + values: kvs, + }) + } + + for i := range expected { + assert.Nil(t, partialCompare(expected[i], retrieved[i])) + } + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, expected)) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, expected)) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + t.Run("verify control during compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + + for k, _ := c.First(ctx); k != nil; k, _ = c.Next(ctx) { + + kvs, err := bucket.MapList(ctx, k) + + assert.Nil(t, err) + + if len(kvs) > 0 { + retrieved = append(retrieved, kv{ + key: k, + values: kvs, + }) + } + } + + assert.Equal(t, expected, retrieved) + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, expected)) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, expected)) + }) + } + require.Nil(t, err) + }) + + t.Run("verify control after compaction", func(t *testing.T) { + var retrieved []kv + + c := bucket.MapCursor() + defer c.Close() + + for k, _ := c.First(ctx); k != nil; k, _ = c.Next(ctx) { + kvs, err := bucket.MapList(ctx, k) + + assert.Nil(t, err) + + retrieved = 
append(retrieved, kv{ + key: k, + values: kvs, + }) + } + + for i := range expected { + assert.Nil(t, partialCompare(expected[i], retrieved[i])) + } + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, expected)) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, expected)) + }) + + t.Run("verify control using individual get (MapList) operations", + func(t *testing.T) { + // Previously the only verification was done using the cursor. That + // guaranteed that all pairs are present in the payload, but it did not + // guarantee the integrity of the index (DiskTree) which is used to access + // _individual_ keys. Corrupting this index is exactly what happened in + // https://github.com/weaviate/weaviate/issues/3517 + for _, pair := range expected { + kvs, err := bucket.MapList(ctx, pair.key) + require.NoError(t, err) + + assert.Nil(t, partialCompare(pair, kv{ + key: pair.key, + values: kvs, + })) + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, []kv{pair})) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, []kv{pair})) + } + }) +} + +func compactionInvertedStrategy_FrequentPutDeleteOperations(ctx context.Context, t *testing.T, opts []BucketOption) { + // In this test we are testing that the compaction works well for map collection + maxSize := 10 + + key := []byte("my-key") + mapKey := make([]byte, 8) + binary.BigEndian.PutUint64(mapKey, 0) + + for size := 4; size < maxSize; size++ { + t.Run(fmt.Sprintf("compact %v segments", size), func(t *testing.T) { + var bucket *Bucket + dirName := t.TempDir() + + t.Run("init bucket", func(t *testing.T) { + b, err := NewBucketCreator().NewBucket(ctx, dirName, dirName, nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), opts...) 
+ require.Nil(t, err) + + // so big it effectively never triggers as part of this test + b.SetMemtableThreshold(1e9) + + bucket = b + }) + + t.Run("write segments", func(t *testing.T) { + for i := 0; i < size; i++ { + pair := NewMapPairFromDocIdAndTf(0, float32(i), float32(i), false) + + err := bucket.MapSet(key, pair) + require.Nil(t, err) + + if size == 5 || size == 6 { + // delete all + err = bucket.MapDeleteKey(key, mapKey) + require.Nil(t, err) + } else if i != size-1 { + // don't delete at the end + err := bucket.MapDeleteKey(key, mapKey) + require.Nil(t, err) + } + + t.Run("check entries before flush", func(t *testing.T) { + pair2 := kv{ + key: key, + values: []MapPair{pair}, + } + + res, err := bucket.MapList(ctx, key) + + if size == 5 || size == 6 || i != size-1 { + assert.Empty(t, res) + pair2.values = []MapPair{} + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, []kv{pair2})) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, []kv{pair2})) + } else { + assert.Len(t, res, 1) + + assert.Nil(t, err) + assert.Nil(t, partialCompare(pair2, kv{ + key: key, + values: res, + })) + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, []kv{pair2})) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, []kv{pair2})) + } + }) + + require.Nil(t, bucket.FlushAndSwitch()) + } + }) + + t.Run("check entries before compaction", func(t *testing.T) { + res, err := bucket.MapList(ctx, key) + assert.Nil(t, err) + + pair2 := kv{ + key: key, + values: []MapPair{NewMapPairFromDocIdAndTf(0, float32(size-1), float32(size-1), false)}, + } + + if size == 5 || size == 6 { + assert.Empty(t, res) + pair2.values = []MapPair{} + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, []kv{pair2})) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, []kv{pair2})) + } else { + assert.Len(t, res, 1) + assert.Equal(t, false, res[0].Tombstone) + } + pair := kv{ + key: key, + values: res, + } + + assert.Nil(t, partialCompare(pair, kv{ + 
key: key, + values: res, + })) + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, []kv{pair})) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, []kv{pair})) + }) + + t.Run("compact until no longer eligible", func(t *testing.T) { + var compacted bool + var err error + for compacted, err = bucket.disk.compactOnce(); err == nil && compacted; compacted, err = bucket.disk.compactOnce() { + t.Run("check entries during compaction", func(t *testing.T) { + res, err := bucket.MapList(ctx, key) + assert.Nil(t, err) + + pair2 := kv{ + key: key, + values: []MapPair{NewMapPairFromDocIdAndTf(0, float32(size-1), float32(size-1), false)}, + } + + if size == 5 || size == 6 { + assert.Empty(t, res) + pair2.values = []MapPair{} + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, []kv{pair2})) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, []kv{pair2})) + } else { + assert.Len(t, res, 1) + assert.Equal(t, false, res[0].Tombstone) + } + + pair := kv{ + key: key, + values: res, + } + + assert.Nil(t, partialCompare(pair, kv{ + key: key, + values: res, + })) + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, []kv{pair})) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, []kv{pair})) + }) + } + require.Nil(t, err) + }) + + t.Run("check entries after compaction", func(t *testing.T) { + res, err := bucket.MapList(ctx, key) + assert.Nil(t, err) + if size == 5 || size == 6 { + assert.Empty(t, res) + } else { + assert.Len(t, res, 1) + assert.Equal(t, false, res[0].Tombstone) + } + pair := kv{ + key: key, + values: res, + } + + assert.Nil(t, partialCompare(pair, kv{ + key: key, + values: res, + })) + assert.Nil(t, validateMapPairListVsBlockMaxSearch(ctx, bucket, []kv{pair})) + assert.Nil(t, validateMapPairListVsWandSearch(ctx, bucket, []kv{pair})) + }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_map.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_map.go new file mode 100644 index 0000000000000000000000000000000000000000..0d89fc6e0a2a84783b86c58847f7acc09883b951 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_map.go @@ -0,0 +1,319 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "fmt" + "io" + "sort" + + "github.com/weaviate/weaviate/usecases/memwatch" + + "github.com/weaviate/weaviate/adapters/repos/db/compactor" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type compactorMap struct { + // c1 is always the older segment, so when there is a conflict c2 wins + // (because of the replace strategy) + c1 *segmentCursorCollectionReusable + c2 *segmentCursorCollectionReusable + + // the level matching those of the cursors + currentLevel uint16 + secondaryIndexCount uint16 + // Tells if tombstones or keys without corresponding values + // can be removed from merged segment. 
+ // (left segment is root (1st) one, keepTombstones is off for bucket) + cleanupTombstones bool + + w io.WriteSeeker + bufw compactor.Writer + mw *compactor.MemoryWriter + + scratchSpacePath string + + // for backward-compatibility with states where the disk state for maps was + // not guaranteed to be sorted yet + requiresSorting bool + + allocChecker memwatch.AllocChecker + + enableChecksumValidation bool + maxNewFileSize int64 +} + +func newCompactorMapCollection(w io.WriteSeeker, + c1, c2 *segmentCursorCollectionReusable, level, secondaryIndexCount uint16, + scratchSpacePath string, requiresSorting bool, cleanupTombstones bool, + enableChecksumValidation bool, maxNewFileSize int64, allocChecker memwatch.AllocChecker, +) *compactorMap { + observeWrite := monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "operation": "compaction", + "strategy": StrategyMapCollection, + }) + writeCB := func(written int64) { + observeWrite.Observe(float64(written)) + } + meteredW := diskio.NewMeteredWriter(w, writeCB) + writer, mw := compactor.NewWriter(meteredW, maxNewFileSize) + + return &compactorMap{ + c1: c1, + c2: c2, + w: meteredW, + bufw: writer, + mw: mw, + currentLevel: level, + cleanupTombstones: cleanupTombstones, + secondaryIndexCount: secondaryIndexCount, + scratchSpacePath: scratchSpacePath, + requiresSorting: requiresSorting, + enableChecksumValidation: enableChecksumValidation, + maxNewFileSize: maxNewFileSize, + allocChecker: allocChecker, + } +} + +func (c *compactorMap) do() error { + if err := c.init(); err != nil { + return errors.Wrap(err, "init") + } + + segmentFile := segmentindex.NewSegmentFile( + segmentindex.WithBufferedWriter(c.bufw), + segmentindex.WithChecksumsDisabled(!c.enableChecksumValidation), + ) + + kis, err := c.writeKeys(segmentFile) + if err != nil { + return errors.Wrap(err, "write keys") + } + + if err := c.writeIndexes(segmentFile, kis); err != nil { + return errors.Wrap(err, "write index") + } + + // flush buffered, so we 
can safely seek on underlying writer + if c.mw == nil { + if err := c.bufw.Flush(); err != nil { + return fmt.Errorf("flush buffered: %w", err) + } + } + + var dataEnd uint64 = segmentindex.HeaderSize + if len(kis) > 0 { + dataEnd = uint64(kis[len(kis)-1].ValueEnd) + } + + version := segmentindex.ChooseHeaderVersion(c.enableChecksumValidation) + if err := compactor.WriteHeader(c.mw, c.w, c.bufw, segmentFile, c.currentLevel, version, + c.secondaryIndexCount, dataEnd, segmentindex.StrategyMapCollection); err != nil { + return errors.Wrap(err, "write header") + } + + if _, err := segmentFile.WriteChecksum(); err != nil { + return fmt.Errorf("write compactorMap segment checksum: %w", err) + } + + return nil +} + +func (c *compactorMap) init() error { + // write a dummy header, we don't know the contents of the actual header yet, + // we will seek to the beginning and overwrite the actual header at the very + // end + + if _, err := c.bufw.Write(make([]byte, segmentindex.HeaderSize)); err != nil { + return errors.Wrap(err, "write empty header") + } + + return nil +} + +func (c *compactorMap) writeKeys(f *segmentindex.SegmentFile) ([]segmentindex.Key, error) { + key1, value1, _ := c.c1.first() + key2, value2, _ := c.c2.first() + + // the (dummy) header was already written, this is our initial offset + offset := segmentindex.HeaderSize + + var kis []segmentindex.Key + pairs := newReusableMapPairs() + me := newMapEncoder() + ssm := newSortedMapMerger() + + for { + if key1 == nil && key2 == nil { + break + } + if bytes.Equal(key1, key2) { + pairs.ResizeLeft(len(value1)) + pairs.ResizeRight(len(value2)) + + for i, v := range value1 { + if err := pairs.left[i].FromBytes(v.value, false); err != nil { + return nil, err + } + pairs.left[i].Tombstone = v.tombstone + } + + for i, v := range value2 { + if err := pairs.right[i].FromBytes(v.value, false); err != nil { + return nil, err + } + pairs.right[i].Tombstone = v.tombstone + } + + if c.requiresSorting { + 
sort.Slice(pairs.left, func(a, b int) bool { + return bytes.Compare(pairs.left[a].Key, pairs.left[b].Key) < 0 + }) + sort.Slice(pairs.right, func(a, b int) bool { + return bytes.Compare(pairs.right[a].Key, pairs.right[b].Key) < 0 + }) + } + + ssm.reset([][]MapPair{pairs.left, pairs.right}) + mergedPairs, err := ssm. + doKeepTombstonesReusable() + if err != nil { + return nil, err + } + + mergedEncoded, err := me.DoMultiReusable(mergedPairs) + if err != nil { + return nil, err + } + + if values, skip := c.cleanupValues(mergedEncoded); !skip { + ki, err := c.writeIndividualNode(f, offset, key2, values) + if err != nil { + return nil, errors.Wrap(err, "write individual node (equal keys)") + } + + offset = ki.ValueEnd + kis = append(kis, ki) + } + // advance both! + key1, value1, _ = c.c1.next() + key2, value2, _ = c.c2.next() + continue + } + + if (key1 != nil && bytes.Compare(key1, key2) == -1) || key2 == nil { + // key 1 is smaller + if values, skip := c.cleanupValues(value1); !skip { + ki, err := c.writeIndividualNode(f, offset, key1, values) + if err != nil { + return nil, errors.Wrap(err, "write individual node (key1 smaller)") + } + + offset = ki.ValueEnd + kis = append(kis, ki) + } + key1, value1, _ = c.c1.next() + } else { + // key 2 is smaller + if values, skip := c.cleanupValues(value2); !skip { + ki, err := c.writeIndividualNode(f, offset, key2, values) + if err != nil { + return nil, errors.Wrap(err, "write individual node (key2 smaller)") + } + + offset = ki.ValueEnd + kis = append(kis, ki) + } + key2, value2, _ = c.c2.next() + } + } + + return kis, nil +} + +func (c *compactorMap) writeIndividualNode(f *segmentindex.SegmentFile, + offset int, key []byte, values []value, +) (segmentindex.Key, error) { + // NOTE: There are no guarantees in the cursor logic that any memory is valid + // for more than a single iteration. Every time you call next() to advance + // the cursor, any memory might be reused. 
+ // + // This includes the key buffer which was the cause of + // https://github.com/weaviate/weaviate/issues/3517 + // + // A previous logic created a new assignment in each iteration, but thatwas + // not an explicit guarantee. A change in v1.21 (for pread/mmap) added a + // reusable buffer for the key which surfaced this bug. + keyCopy := make([]byte, len(key)) + copy(keyCopy, key) + + return segmentCollectionNode{ + values: values, + primaryKey: keyCopy, + offset: offset, + }.KeyIndexAndWriteTo(f.BodyWriter()) +} + +func (c *compactorMap) writeIndexes(f *segmentindex.SegmentFile, + keys []segmentindex.Key, +) error { + indexes := &segmentindex.Indexes{ + Keys: keys, + SecondaryIndexCount: c.secondaryIndexCount, + ScratchSpacePath: c.scratchSpacePath, + ObserveWrite: monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "strategy": StrategyMapCollection, + "operation": "writeIndices", + }), + AllocChecker: c.allocChecker, + } + _, err := f.WriteIndexes(indexes, c.maxNewFileSize) + return err +} + +// Removes values with tombstone set from input slice. Output slice may be smaller than input one. +// Returned skip of true means there are no values left (key can be omitted in segment) +// WARN: method can alter input slice by swapping its elements and reducing length (not capacity) +func (c *compactorMap) cleanupValues(values []value) (vals []value, skip bool) { + if !c.cleanupTombstones { + return values, false + } + + // Reuse input slice not to allocate new memory + // Rearrange slice in a way that tombstoned values are moved to the end + // and reduce slice's length. + last := 0 + for i := 0; i < len(values); i++ { + if !values[i].tombstone { + // Swap both elements instead overwritting `last` by `i`. + // Overwrite would result in `values[last].value` pointing to the same slice + // as `values[i].value`. 
+ // If `values` slice is reused by multiple nodes (as it happens for map cursors + // `segmentCursorCollectionReusable` using `segmentCollectionNode` as buffer) + // populating slice `values[i].value` would overwrite slice `values[last].value`. + // Swaps makes sure `values[i].value` and `values[last].value` point to different slices. + values[last], values[i] = values[i], values[last] + last++ + } + } + + if last == 0 { + return nil, true + } + return values[:last], false +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_map_reusable_pairs.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_map_reusable_pairs.go new file mode 100644 index 0000000000000000000000000000000000000000..a3e768423f7ee3be208205ed293a472419608dc6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_map_reusable_pairs.go @@ -0,0 +1,53 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +// reusableMapPairs is not thread-safe and intended for usage from a single +// thread. The caller is resoponsible for initializing each element themselves, +// the Resize functions will only set the size. If the size is reduced, this +// will only truncate elements, but will not reset values. +type reusableMapPairs struct { + left []MapPair + right []MapPair +} + +func newReusableMapPairs() *reusableMapPairs { + return &reusableMapPairs{} +} + +func (rmp *reusableMapPairs) ResizeLeft(size int) { + if cap(rmp.left) >= size { + rmp.left = rmp.left[:size] + } else { + // The 25% overhead for the capacity was chosen because we saw a lot + // re-allocations during testing with just a few elements more than before. 
+ // This is something that really depends on the user's usage pattern, but + // in the test scenarios based on the + // weaviate-chaos-engineering/apps/importer-no-vector-index test script a + // simple 25% overhead reduced the resizing needs to almost zero. + rmp.left = make([]MapPair, size, int(float64(size)*1.25)) + } +} + +func (rmp *reusableMapPairs) ResizeRight(size int) { + if cap(rmp.right) >= size { + rmp.right = rmp.right[:size] + } else { + // The 25% overhead for the capacity was chosen because we saw a lot + // re-allocations during testing with just a few elements more than before. + // This is something that really depends on the user's usage pattern, but + // in the test scenarios based on the + // weaviate-chaos-engineering/apps/importer-no-vector-index test script a + // simple 25% overhead reduced the resizing needs to almost zero. + rmp.right = make([]MapPair, size, int(float64(size)*1.25)) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_replace.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_replace.go new file mode 100644 index 0000000000000000000000000000000000000000..9033e9945a771f5e4d3892c6723d8f2758f575a7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_replace.go @@ -0,0 +1,234 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "errors" + "fmt" + "io" + + "github.com/weaviate/weaviate/usecases/memwatch" + + "github.com/weaviate/weaviate/adapters/repos/db/compactor" + + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/entities/lsmkv" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type compactorReplace struct { + // c1 is always the older segment, so when there is a conflict c2 wins + // (because of the replace strategy) + c1 *segmentCursorReplace + c2 *segmentCursorReplace + + // the level matching those of the cursors + currentLevel uint16 + // Tells if tombstones or keys without corresponding values + // can be removed from merged segment. + // (left segment is root (1st) one, keepTombstones is off for bucket) + cleanupTombstones bool + secondaryIndexCount uint16 + + w io.WriteSeeker + bufw compactor.Writer + mw *compactor.MemoryWriter + scratchSpacePath string + + allocChecker memwatch.AllocChecker + maxNewFileSize int64 + + enableChecksumValidation bool +} + +func newCompactorReplace(w io.WriteSeeker, + c1, c2 *segmentCursorReplace, level, secondaryIndexCount uint16, + scratchSpacePath string, cleanupTombstones bool, + enableChecksumValidation bool, maxNewFileSize int64, allocChecker memwatch.AllocChecker, +) *compactorReplace { + observeWrite := monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "operation": "compaction", + "strategy": StrategyReplace, + }) + writeCB := func(written int64) { + observeWrite.Observe(float64(written)) + } + meteredW := diskio.NewMeteredWriter(w, writeCB) + writer, mw := compactor.NewWriter(meteredW, maxNewFileSize) + + return &compactorReplace{ + c1: c1, + c2: c2, + w: meteredW, + bufw: writer, + mw: mw, + currentLevel: level, + cleanupTombstones: cleanupTombstones, + secondaryIndexCount: 
secondaryIndexCount, + scratchSpacePath: scratchSpacePath, + enableChecksumValidation: enableChecksumValidation, + allocChecker: allocChecker, + maxNewFileSize: maxNewFileSize, + } +} + +func (c *compactorReplace) do() error { + if err := c.init(); err != nil { + return fmt.Errorf("init: %w", err) + } + + segmentFile := segmentindex.NewSegmentFile( + segmentindex.WithBufferedWriter(c.bufw), + segmentindex.WithChecksumsDisabled(!c.enableChecksumValidation), + ) + + kis, err := c.writeKeys(segmentFile) + if err != nil { + return fmt.Errorf("write keys: %w", err) + } + + if err := c.writeIndexes(segmentFile, kis); err != nil { + return fmt.Errorf("write indices: %w", err) + } + + // flush buffered, so we can safely seek on underlying writer + if c.mw == nil { + if err := c.bufw.Flush(); err != nil { + return fmt.Errorf("flush buffered: %w", err) + } + } + + var dataEnd uint64 = segmentindex.HeaderSize + if len(kis) > 0 { + dataEnd = uint64(kis[len(kis)-1].ValueEnd) + } + + version := segmentindex.ChooseHeaderVersion(c.enableChecksumValidation) + if err := compactor.WriteHeader(c.mw, c.w, c.bufw, segmentFile, c.currentLevel, version, + c.secondaryIndexCount, dataEnd, segmentindex.StrategyReplace); err != nil { + return fmt.Errorf("write header: %w", err) + } + + if _, err := segmentFile.WriteChecksum(); err != nil { + return fmt.Errorf("write compactorReplace segment checksum: %w", err) + } + + return nil +} + +func (c *compactorReplace) init() error { + // write a dummy header, we don't know the contents of the actual header yet, + // we will seek to the beginning and overwrite the actual header at the very + // end + + if _, err := c.bufw.Write(make([]byte, segmentindex.HeaderSize)); err != nil { + return fmt.Errorf("write empty header: %w", err) + } + + return nil +} + +func (c *compactorReplace) writeKeys(f *segmentindex.SegmentFile) ([]segmentindex.Key, error) { + res1, err1 := c.c1.firstWithAllKeys() + res2, err2 := c.c2.firstWithAllKeys() + + // the (dummy) 
header was already written, this is our initial offset
+	offset := segmentindex.HeaderSize
+
+	var kis []segmentindex.Key
+
+	for {
+		if res1.primaryKey == nil && res2.primaryKey == nil {
+			break
+		}
+		if bytes.Equal(res1.primaryKey, res2.primaryKey) {
+			if !(c.cleanupTombstones && errors.Is(err2, lsmkv.Deleted)) {
+				ki, err := c.writeIndividualNode(f, offset, res2.primaryKey, res2.value,
+					res2.secondaryKeys, errors.Is(err2, lsmkv.Deleted))
+				if err != nil {
+					return nil, fmt.Errorf("write individual node (equal keys): %w", err)
+				}
+
+				offset = ki.ValueEnd
+				kis = append(kis, ki)
+			}
+			// advance both!
+			res1, err1 = c.c1.nextWithAllKeys()
+			res2, err2 = c.c2.nextWithAllKeys()
+			continue
+		}
+
+		if (res1.primaryKey != nil && bytes.Compare(res1.primaryKey, res2.primaryKey) == -1) || res2.primaryKey == nil {
+			// key 1 is smaller
+			if !(c.cleanupTombstones && errors.Is(err1, lsmkv.Deleted)) {
+				ki, err := c.writeIndividualNode(f, offset, res1.primaryKey, res1.value,
+					res1.secondaryKeys, errors.Is(err1, lsmkv.Deleted))
+				if err != nil {
+					return nil, fmt.Errorf("write individual node (res1.primaryKey smaller): %w", err)
+				}
+
+				offset = ki.ValueEnd
+				kis = append(kis, ki)
+			}
+			res1, err1 = c.c1.nextWithAllKeys()
+		} else {
+			// key 2 is smaller
+			if !(c.cleanupTombstones && errors.Is(err2, lsmkv.Deleted)) {
+				ki, err := c.writeIndividualNode(f, offset, res2.primaryKey, res2.value,
+					res2.secondaryKeys, errors.Is(err2, lsmkv.Deleted))
+				if err != nil {
+					return nil, fmt.Errorf("write individual node (res2.primaryKey smaller): %w", err)
+				}
+
+				offset = ki.ValueEnd
+				kis = append(kis, ki)
+			}
+			res2, err2 = c.c2.nextWithAllKeys()
+		}
+	}
+
+	return kis, nil
+}
+
+func (c *compactorReplace) writeIndividualNode(f *segmentindex.SegmentFile,
+	offset int, key, value []byte, secondaryKeys [][]byte, tombstone bool,
+) (segmentindex.Key, error) {
+	segNode := segmentReplaceNode{
+		offset:              offset,
+		tombstone:           tombstone,
+		value:               value,
+		primaryKey:          key,
+		secondaryIndexCount: 
c.secondaryIndexCount, + secondaryKeys: secondaryKeys, + } + return segNode.KeyIndexAndWriteTo(f.BodyWriter()) +} + +func (c *compactorReplace) writeIndexes(f *segmentindex.SegmentFile, + keys []segmentindex.Key, +) error { + indexes := &segmentindex.Indexes{ + Keys: keys, + SecondaryIndexCount: c.secondaryIndexCount, + ScratchSpacePath: c.scratchSpacePath, + ObserveWrite: monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "strategy": StrategyReplace, + "operation": "writeIndices", + }), + AllocChecker: c.allocChecker, + } + _, err := f.WriteIndexes(indexes, c.maxNewFileSize) + return err +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_set.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_set.go new file mode 100644 index 0000000000000000000000000000000000000000..9750480a10b0114c09b64aa28965ca0023b3f563 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/compactor_set.go @@ -0,0 +1,261 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "fmt" + "io" + + "github.com/weaviate/weaviate/usecases/memwatch" + + "github.com/weaviate/weaviate/adapters/repos/db/compactor" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type compactorSet struct { + // c1 is always the older segment, so when there is a conflict c2 wins + // (because of the replace strategy) + c1 *segmentCursorCollection + c2 *segmentCursorCollection + + // the level matching those of the cursors + currentLevel uint16 + secondaryIndexCount uint16 + // Tells if tombstones or keys without corresponding values + // can be removed from merged segment. + // (left segment is root (1st) one, keepTombstones is off for bucket) + cleanupTombstones bool + + w io.WriteSeeker + bufw compactor.Writer + mw *compactor.MemoryWriter + + maxNewFileSize int64 + allocChecker memwatch.AllocChecker + + scratchSpacePath string + + enableChecksumValidation bool +} + +func newCompactorSetCollection(w io.WriteSeeker, + c1, c2 *segmentCursorCollection, level, secondaryIndexCount uint16, + scratchSpacePath string, cleanupTombstones bool, + enableChecksumValidation bool, maxNewFileSize int64, allocChecker memwatch.AllocChecker, +) *compactorSet { + observeWrite := monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "operation": "compaction", + "strategy": StrategySetCollection, + }) + writeCB := func(written int64) { + observeWrite.Observe(float64(written)) + } + meteredW := diskio.NewMeteredWriter(w, writeCB) + writer, mw := compactor.NewWriter(meteredW, maxNewFileSize) + + return &compactorSet{ + c1: c1, + c2: c2, + w: meteredW, + bufw: writer, + mw: mw, + currentLevel: level, + cleanupTombstones: cleanupTombstones, + secondaryIndexCount: secondaryIndexCount, + 
scratchSpacePath: scratchSpacePath, + enableChecksumValidation: enableChecksumValidation, + allocChecker: allocChecker, + maxNewFileSize: maxNewFileSize, + } +} + +func (c *compactorSet) do() error { + if err := c.init(); err != nil { + return errors.Wrap(err, "init") + } + + segmentFile := segmentindex.NewSegmentFile( + segmentindex.WithBufferedWriter(c.bufw), + segmentindex.WithChecksumsDisabled(!c.enableChecksumValidation), + ) + + kis, err := c.writeKeys(segmentFile) + if err != nil { + return errors.Wrap(err, "write keys") + } + + if err := c.writeIndexes(segmentFile, kis); err != nil { + return errors.Wrap(err, "write index") + } + + // flush buffered, so we can safely seek on underlying writer + if c.mw == nil { + if err := c.bufw.Flush(); err != nil { + return fmt.Errorf("flush buffered: %w", err) + } + } + + var dataEnd uint64 = segmentindex.HeaderSize + if len(kis) > 0 { + dataEnd = uint64(kis[len(kis)-1].ValueEnd) + } + + version := segmentindex.ChooseHeaderVersion(c.enableChecksumValidation) + if err := compactor.WriteHeader(c.mw, c.w, c.bufw, segmentFile, c.currentLevel, version, + c.secondaryIndexCount, dataEnd, segmentindex.StrategySetCollection); err != nil { + return errors.Wrap(err, "write header") + } + + if _, err := segmentFile.WriteChecksum(); err != nil { + return fmt.Errorf("write compactorSet segment checksum: %w", err) + } + + return nil +} + +func (c *compactorSet) init() error { + // write a dummy header, we don't know the contents of the actual header yet, + // we will seek to the beginning and overwrite the actual header at the very + // end + + if _, err := c.bufw.Write(make([]byte, segmentindex.HeaderSize)); err != nil { + return errors.Wrap(err, "write empty header") + } + + return nil +} + +func (c *compactorSet) writeKeys(f *segmentindex.SegmentFile) ([]segmentindex.Key, error) { + key1, value1, _ := c.c1.first() + key2, value2, _ := c.c2.first() + + // the (dummy) header was already written, this is our initial offset + offset := 
segmentindex.HeaderSize + + var kis []segmentindex.Key + + for { + if key1 == nil && key2 == nil { + break + } + if bytes.Equal(key1, key2) { + values := append(value1, value2...) + valuesMerged := newSetDecoder().DoPartial(values) + if values, skip := c.cleanupValues(valuesMerged); !skip { + ki, err := c.writeIndividualNode(f, offset, key2, values) + if err != nil { + return nil, errors.Wrap(err, "write individual node (equal keys)") + } + + offset = ki.ValueEnd + kis = append(kis, ki) + } + // advance both! + key1, value1, _ = c.c1.next() + key2, value2, _ = c.c2.next() + continue + } + + if (key1 != nil && bytes.Compare(key1, key2) == -1) || key2 == nil { + // key 1 is smaller + if values, skip := c.cleanupValues(value1); !skip { + ki, err := c.writeIndividualNode(f, offset, key1, values) + if err != nil { + return nil, errors.Wrap(err, "write individual node (key1 smaller)") + } + + offset = ki.ValueEnd + kis = append(kis, ki) + } + key1, value1, _ = c.c1.next() + } else { + // key 2 is smaller + if values, skip := c.cleanupValues(value2); !skip { + ki, err := c.writeIndividualNode(f, offset, key2, values) + if err != nil { + return nil, errors.Wrap(err, "write individual node (key2 smaller)") + } + + offset = ki.ValueEnd + kis = append(kis, ki) + } + key2, value2, _ = c.c2.next() + } + } + + return kis, nil +} + +func (c *compactorSet) writeIndividualNode(f *segmentindex.SegmentFile, + offset int, key []byte, values []value, +) (segmentindex.Key, error) { + return (&segmentCollectionNode{ + values: values, + primaryKey: key, + offset: offset, + }).KeyIndexAndWriteTo(f.BodyWriter()) +} + +func (c *compactorSet) writeIndexes(f *segmentindex.SegmentFile, + keys []segmentindex.Key, +) error { + indexes := &segmentindex.Indexes{ + Keys: keys, + SecondaryIndexCount: c.secondaryIndexCount, + ScratchSpacePath: c.scratchSpacePath, + ObserveWrite: monitoring.GetMetrics().FileIOWrites.With(prometheus.Labels{ + "strategy": StrategySetCollection, + "operation": 
"writeIndices", + }), + AllocChecker: c.allocChecker, + } + _, err := f.WriteIndexes(indexes, c.maxNewFileSize) + return err +} + +// Removes values with tombstone set from input slice. Output slice may be smaller than input one. +// Returned skip of true means there are no values left (key can be omitted in segment) +// WARN: method can alter input slice by swapping its elements and reducing length (not capacity) +func (c *compactorSet) cleanupValues(values []value) (vals []value, skip bool) { + if !c.cleanupTombstones { + return values, false + } + + // Reuse input slice not to allocate new memory + // Rearrange slice in a way that tombstoned values are moved to the end + // and reduce slice's length. + last := 0 + for i := 0; i < len(values); i++ { + if !values[i].tombstone { + // Swap both elements instead overwritting `last` by `i`. + // Overwrite would result in `values[last].value` pointing to the same slice + // as `values[i].value`. + // If `values` slice is reused by multiple nodes (as it happens for map cursors + // `segmentCursorCollectionReusable` using `segmentCollectionNode` as buffer) + // populating values[i].value would overwrite values[last].value + // Swaps makes sure values[i].value and values[last].value point to different slices + values[last], values[i] = values[i], values[last] + last++ + } + } + + if last == 0 { + return nil, true + } + return values[:last], false +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/concurrent_reading_benchmark_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/concurrent_reading_benchmark_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1ca907952a93bea731ea8f09818d07b3d5a899e8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/concurrent_reading_benchmark_test.go @@ -0,0 +1,126 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| 
| || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "crypto/rand" + "fmt" + "os" + "sync" + "testing" + "time" + + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +func BenchmarkConcurrentReading(b *testing.B) { + bucket, cleanup := prepareBucket(b) + defer cleanup() + keys := populateBucket(b, bucket) + + b.ReportAllocs() + b.ResetTimer() + + routines := 500 + + for i := 0; i < b.N; i++ { + wg := sync.WaitGroup{} + for r := 0; r < routines; r++ { + wg.Add(1) + go func() { + defer wg.Done() + for _, key := range keys { + _, err := bucket.MapList(context.Background(), key) + assert.Nil(b, err) + } + }() + } + wg.Wait() + } +} + +func prepareBucket(b *testing.B) (bucket *Bucket, cleanup func()) { + dirName := fmt.Sprintf("./generated_testdata/%d", mustRandIntn(10000000)) + os.MkdirAll(dirName, 0o777) + defer func() { + err := os.RemoveAll(dirName) + fmt.Println(err) + }() + + bucket, err := NewBucketCreator().NewBucket(testCtxB(), dirName, "", nullLoggerB(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyMapCollection), + WithMemtableThreshold(5000)) + require.Nil(b, err) + + return bucket, func() { + err := os.RemoveAll(dirName) + fmt.Println(err) + } +} + +func populateBucket(b *testing.B, bucket *Bucket) (keys [][]byte) { + amount := 2000 + valuesPerKey := 4 + sizePerKey := 8 + sizePerValue := 32 + + keys = make([][]byte, amount) + values := make([][]MapPair, amount) + + for i := range keys { + uuid, err := uuid.New().MarshalBinary() + require.Nil(b, err) + keys[i] = uuid + + values[i] = make([]MapPair, valuesPerKey) + for j := range values[i] { + values[i][j] = MapPair{ + 
Key: make([]byte, sizePerKey), + Value: make([]byte, sizePerValue), + } + rand.Read(values[i][j].Key) + rand.Read(values[i][j].Value) + } + } + + wg := sync.WaitGroup{} + for i := range keys { + for j := 0; j < valuesPerKey; j++ { + time.Sleep(50 * time.Microsecond) + wg.Add(1) + go func(rowIndex, valueIndex int) { + defer wg.Done() + err := bucket.MapSet(keys[rowIndex], values[rowIndex][valueIndex]) + assert.Nil(b, err) + }(i, j) + } + } + wg.Wait() + + return +} + +func testCtxB() context.Context { + return context.Background() +} + +func nullLoggerB() logrus.FieldLogger { + log, _ := test.NewNullLogger() + return log +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/concurrent_writing_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/concurrent_writing_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3ec25a0dc861a0e507ff183d2c2b551657dca9bd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/concurrent_writing_integration_test.go @@ -0,0 +1,513 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package lsmkv + +import ( + "bytes" + "context" + crand "crypto/rand" + "math/rand" + "reflect" + "sync" + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/filters" +) + +// This test continuously writes into a bucket with a small memtable threshold, +// so that a lot of flushing is happening while writing. 
This is to ensure that +// there will be no lost writes or other inconsistencies under load +func TestConcurrentWriting_Replace(t *testing.T) { + dirName := t.TempDir() + + amount := 2000 + sizePerValue := 128 + + keys := make([][]byte, amount) + values := make([][]byte, amount) + + bucket, err := NewBucketCreator().NewBucket(testCtx(), dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyReplace), + WithMemtableThreshold(10000)) + require.Nil(t, err) + + t.Run("generate random data", func(t *testing.T) { + for i := range keys { + uuid, err := uuid.New().MarshalBinary() + require.Nil(t, err) + keys[i] = uuid + + values[i] = make([]byte, sizePerValue) + crand.Read(values[i]) + } + }) + + t.Run("import", func(t *testing.T) { + wg := sync.WaitGroup{} + + for i := range keys { + time.Sleep(50 * time.Microsecond) + wg.Add(1) + go func(index int) { + defer wg.Done() + err := bucket.Put(keys[index], values[index]) + assert.Nil(t, err) + }(i) + } + wg.Wait() + }) + + t.Run("verify get", func(t *testing.T) { + correct := 0 + var missingKeys []int + + for i := range keys { + value, err := bucket.Get(keys[i]) + assert.Nil(t, err) + if bytes.Equal(values[i], value) { + correct++ + } else { + missingKeys = append(missingKeys, i) + } + } + + if len(missingKeys) > 0 { + t.Logf("missing keys: %v\n", missingKeys) + } + assert.Equal(t, amount, correct) + }) + + t.Run("verify cursor", func(t *testing.T) { + correct := 0 + // put all key value/pairs in a map so we can access them by key + targets := map[string][]byte{} + + for i := range keys { + targets[string(keys[i])] = values[i] + } + + c := bucket.Cursor() + defer c.Close() + for k, v := c.First(); k != nil; k, v = c.Next() { + control := targets[string(k)] + if bytes.Equal(control, v) { + correct++ + } + } + + assert.Equal(t, amount, correct) + }) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + 
require.Nil(t, bucket.Shutdown(ctx)) +} + +// This test continuously writes into a bucket with a small memtable threshold, +// so that a lot of flushing is happening while writing. This is to ensure that +// there will be no lost writes or other inconsistencies under load +func TestConcurrentWriting_Set(t *testing.T) { + dirName := t.TempDir() + + amount := 2000 + valuesPerKey := 4 + sizePerValue := 32 + + keys := make([][]byte, amount) + values := make([][][]byte, amount) + + flushGroup := cyclemanager.NewCallbackGroup("flush", nullLogger(), 1) + cyclemanager.NewManager( + cyclemanager.NewFixedTicker(5*time.Millisecond), + flushGroup.CycleCallback, + nullLogger()).Start() + bucket, err := NewBucketCreator().NewBucket(testCtx(), dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), flushGroup, + WithStrategy(StrategySetCollection), + WithMemtableThreshold(10000)) + require.Nil(t, err) + + t.Run("generate random data", func(t *testing.T) { + for i := range keys { + uuid, err := uuid.New().MarshalBinary() + require.Nil(t, err) + keys[i] = uuid + + values[i] = make([][]byte, valuesPerKey) + for j := range values[i] { + values[i][j] = make([]byte, sizePerValue) + crand.Read(values[i][j]) + } + } + }) + + t.Run("import", func(t *testing.T) { + wg := sync.WaitGroup{} + + for i := range keys { + time.Sleep(50 * time.Microsecond) + wg.Add(1) + go func(index int) { + defer wg.Done() + err := bucket.SetAdd(keys[index], values[index]) + assert.Nil(t, err) + }(i) + } + wg.Wait() + }) + + t.Run("verify get", func(t *testing.T) { + correct := 0 + + for i := range keys { + value, err := bucket.SetList(keys[i]) + assert.Nil(t, err) + if reflect.DeepEqual(values[i], value) { + correct++ + } + } + + assert.Equal(t, amount, correct) + }) + + t.Run("verify cursor", func(t *testing.T) { + correct := 0 + // put all key value/pairs in a map so we can access them by key + targets := map[string][][]byte{} + + for i := range keys { + targets[string(keys[i])] = values[i] + } 
+
+		c := bucket.SetCursor()
+		defer c.Close()
+		for k, v := c.First(); k != nil; k, v = c.Next() {
+			control := targets[string(k)]
+			if reflect.DeepEqual(control, v) {
+				correct++
+			}
+		}
+
+		assert.Equal(t, amount, correct)
+	})
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
+
+	require.Nil(t, bucket.Shutdown(ctx))
+}
+
+// This test continuously writes into a bucket with a small memtable threshold,
+// so that a lot of flushing is happening while writing. This is to ensure that
+// there will be no lost writes or other inconsistencies under load
+func TestConcurrentWriting_RoaringSet(t *testing.T) {
+	dirName := t.TempDir()
+	r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+	amount := 2000
+	valuesPerKey := 4
+
+	keys := make([][]byte, amount)
+	values := make([][]uint64, amount)
+
+	flushGroup := cyclemanager.NewCallbackGroup("flush", nullLogger(), 1)
+	cyclemanager.NewManager(
+		cyclemanager.NewFixedTicker(5*time.Millisecond),
+		flushGroup.CycleCallback,
+		nullLogger()).Start()
+	bucket, err := NewBucketCreator().NewBucket(testCtx(), dirName, "", nullLogger(), nil,
+		cyclemanager.NewCallbackGroupNoop(), flushGroup,
+		WithStrategy(StrategyRoaringSet),
+		WithBitmapBufPool(roaringset.NewBitmapBufPoolNoop()),
+		WithMemtableThreshold(1000))
+	require.Nil(t, err)
+
+	t.Run("generate random data", func(t *testing.T) {
+		value := uint64(0)
+		for i := range keys {
+			uuid, err := uuid.New().MarshalBinary()
+			require.Nil(t, err)
+			keys[i] = uuid
+
+			values[i] = make([]uint64, valuesPerKey)
+			for j := range values[i] {
+				values[i][j] = value
+				value += uint64(r.Intn(10) + 1)
+			}
+		}
+	})
+
+	t.Run("import", func(t *testing.T) {
+		wg := sync.WaitGroup{}
+
+		for i := range keys {
+			time.Sleep(50 * time.Microsecond)
+			wg.Add(1)
+			go func(index int) {
+				defer wg.Done()
+				err := bucket.RoaringSetAddList(keys[index], values[index])
+				assert.NoError(t, err)
+			}(i)
+		}
+		wg.Wait()
+	})
+
+	t.Run("verify get", func(t *testing.T) {
+		
for i := range keys { + func() { + value, release, err := bucket.RoaringSetGet(keys[i]) + require.NoError(t, err) + defer release() + + assert.ElementsMatch(t, values[i], value.ToArray()) + }() + } + }) + + t.Run("verify cursor", func(t *testing.T) { + // put all key value/pairs in a map so we can access them by key + targets := map[string][]uint64{} + + for i := range keys { + targets[string(keys[i])] = values[i] + } + + c := bucket.CursorRoaringSet() + defer c.Close() + for k, v := c.First(); k != nil; k, v = c.Next() { + control := targets[string(k)] + assert.ElementsMatch(t, control, v.ToArray()) + } + }) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + require.Nil(t, bucket.Shutdown(ctx)) +} + +// This test continuously writes into a bucket with a small memtable threshold, +// so that a lot of flushing is happening while writing. This is to ensure that +// there will be no lost writes or other inconsistencies under load +func TestConcurrentWriting_RoaringSetRange(t *testing.T) { + dirName := t.TempDir() + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + amount := 2000 + valuesPerKey := 4 + + keys := make([]uint64, amount) + values := make([][]uint64, amount) + + flushGroup := cyclemanager.NewCallbackGroup("flush", nullLogger(), 1) + cyclemanager.NewManager( + cyclemanager.NewFixedTicker(5*time.Millisecond), + flushGroup.CycleCallback, + nullLogger()).Start() + bucket, err := NewBucketCreator().NewBucket(testCtx(), dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), flushGroup, + WithStrategy(StrategyRoaringSetRange), + WithMemtableThreshold(1000), + WithUseBloomFilter(false)) + require.Nil(t, err) + + t.Run("generate random data", func(t *testing.T) { + uniques := map[uint64]struct{}{} + uniqueKey := func() uint64 { + val := r.Uint64() + for _, ok := uniques[val]; ok; _, ok = uniques[val] { + val = r.Uint64() + } + uniques[val] = struct{}{} + return val + } + + value := uint64(0) + for 
i := range keys { + keys[i] = uniqueKey() + values[i] = make([]uint64, valuesPerKey) + for j := range values[i] { + values[i][j] = value + value += uint64(r.Intn(10) + 1) + } + } + }) + + t.Run("import", func(t *testing.T) { + wg := sync.WaitGroup{} + + for i := range keys { + time.Sleep(50 * time.Microsecond) + wg.Add(1) + go func(index int) { + defer wg.Done() + err := bucket.RoaringSetRangeAdd(keys[index], values[index]...) + assert.NoError(t, err) + }(i) + } + wg.Wait() + }) + + t.Run("verify reader", func(t *testing.T) { + reader := bucket.ReaderRoaringSetRange() + defer reader.Close() + + for i := range keys { + // verify every 5th key to save time + if i%5 == 0 { + func() { + value, release, err := reader.Read(testCtx(), keys[i], filters.OperatorEqual) + require.NoError(t, err) + defer release() + + assert.ElementsMatch(t, values[i], value.ToArray()) + }() + } + } + }) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + require.Nil(t, bucket.Shutdown(ctx)) +} + +// This test continuously writes into a bucket with a small memtable threshold, +// so that a lot of flushing is happening while writing. 
This is to ensure that +// there will be no lost writes or other inconsistencies under load +func TestConcurrentWriting_Map(t *testing.T) { + dirName := t.TempDir() + + amount := 2000 + valuesPerKey := 4 + sizePerKey := 8 + sizePerValue := 32 + + keys := make([][]byte, amount) + values := make([][]MapPair, amount) + + bucket, err := NewBucketCreator().NewBucket(testCtx(), dirName, "", nullLogger(), nil, + cyclemanager.NewCallbackGroupNoop(), cyclemanager.NewCallbackGroupNoop(), + WithStrategy(StrategyMapCollection), + WithMemtableThreshold(5000)) + require.Nil(t, err) + + t.Run("generate random data", func(t *testing.T) { + for i := range keys { + uuid, err := uuid.New().MarshalBinary() + require.Nil(t, err) + keys[i] = uuid + + values[i] = make([]MapPair, valuesPerKey) + for j := range values[i] { + values[i][j] = MapPair{ + Key: make([]byte, sizePerKey), + Value: make([]byte, sizePerValue), + } + crand.Read(values[i][j].Key) + crand.Read(values[i][j].Value) + } + } + }) + + t.Run("import", func(t *testing.T) { + wg := sync.WaitGroup{} + + for i := range keys { + for j := 0; j < valuesPerKey; j++ { + time.Sleep(50 * time.Microsecond) + wg.Add(1) + go func(rowIndex, valueIndex int) { + defer wg.Done() + err := bucket.MapSet(keys[rowIndex], values[rowIndex][valueIndex]) + assert.Nil(t, err) + }(i, j) + } + } + wg.Wait() + }) + + t.Run("verify cursor", func(t *testing.T) { + correct := 0 + // put all key value/pairs in a map so we can access them by key + targets := map[string][]MapPair{} + + for i := range keys { + targets[string(keys[i])] = values[i] + } + + c := bucket.MapCursor() + defer c.Close() + + ctx := context.Background() + + for k, v := c.First(ctx); k != nil; k, v = c.Next(ctx) { + control := targets[string(k)] + if mapElementsMatch(control, v) { + correct++ + } + } + + assert.Equal(t, amount, correct) + }) + + t.Run("verify get", func(t *testing.T) { + correct := 0 + + for i := range keys { + value, err := bucket.MapList(context.Background(), keys[i]) + 
assert.Nil(t, err) + if mapElementsMatch(values[i], value) { + correct++ + } + } + + assert.Equal(t, amount, correct) + }) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + require.Nil(t, bucket.Shutdown(ctx)) +} + +func mapElementsMatch(a, b []MapPair) bool { + if len(a) != len(b) { + return false + } + + aMap := map[string][]byte{} + + for _, kv := range a { + aMap[string(kv.Key)] = kv.Value + } + + for _, kv := range b { + control := aMap[string(kv.Key)] + if !bytes.Equal(kv.Value, control) { + return false + } + } + + return true +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_map.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_map.go new file mode 100644 index 0000000000000000000000000000000000000000..70464de85173d1a40ffce22510cd7da7dafe2928 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_map.go @@ -0,0 +1,261 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "context" + "errors" + "fmt" + "sort" + + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type CursorMap struct { + innerCursors []innerCursorMap + state []cursorStateMap + unlock func() + listCfg MapListOptionConfig + keyOnly bool +} + +type cursorStateMap struct { + key []byte + value []MapPair + err error +} + +type innerCursorMap interface { + first() ([]byte, []MapPair, error) + next() ([]byte, []MapPair, error) + seek([]byte) ([]byte, []MapPair, error) +} + +func (b *Bucket) MapCursor(cfgs ...MapListOption) *CursorMap { + b.flushLock.RLock() + + c := MapListOptionConfig{} + for _, cfg := range cfgs { + cfg(&c) + } + + innerCursors, unlockSegmentGroup := b.disk.newMapCursors() + + // we have a flush-RLock, so we have the guarantee that the flushing state + // will not change for the lifetime of the cursor, thus there can only be two + // states: either a flushing memtable currently exists - or it doesn't + if b.flushing != nil { + innerCursors = append(innerCursors, b.flushing.newMapCursor()) + } + + innerCursors = append(innerCursors, b.active.newMapCursor()) + + return &CursorMap{ + unlock: func() { + unlockSegmentGroup() + b.flushLock.RUnlock() + }, + // cursor are in order from oldest to newest, with the memtable cursor + // being at the very top + innerCursors: innerCursors, + listCfg: c, + } +} + +func (b *Bucket) MapCursorKeyOnly(cfgs ...MapListOption) *CursorMap { + c := b.MapCursor(cfgs...) 
+ c.keyOnly = true + return c +} + +func (c *CursorMap) Seek(ctx context.Context, key []byte) ([]byte, []MapPair) { + c.seekAll(key) + return c.serveCurrentStateAndAdvance(ctx) +} + +func (c *CursorMap) Next(ctx context.Context) ([]byte, []MapPair) { + return c.serveCurrentStateAndAdvance(ctx) +} + +func (c *CursorMap) First(ctx context.Context) ([]byte, []MapPair) { + c.firstAll() + return c.serveCurrentStateAndAdvance(ctx) +} + +func (c *CursorMap) Close() { + c.unlock() +} + +func (c *CursorMap) seekAll(target []byte) { + state := make([]cursorStateMap, len(c.innerCursors)) + for i, cur := range c.innerCursors { + key, value, err := cur.seek(target) + if errors.Is(err, lsmkv.NotFound) { + state[i].err = err + continue + } + + if err != nil { + panic(fmt.Errorf("unexpected error in seek: %w", err)) + } + + state[i].key = key + if !c.keyOnly { + state[i].value = value + } + } + + c.state = state +} + +func (c *CursorMap) firstAll() { + state := make([]cursorStateMap, len(c.innerCursors)) + for i, cur := range c.innerCursors { + key, value, err := cur.first() + if errors.Is(err, lsmkv.NotFound) { + state[i].err = err + continue + } + + if err != nil { + panic(fmt.Errorf("unexpected error in seek: %w", err)) + } + + state[i].key = key + if !c.keyOnly { + state[i].value = value + } + } + + c.state = state +} + +func (c *CursorMap) serveCurrentStateAndAdvance(ctx context.Context) ([]byte, []MapPair) { + for { + id, err := c.cursorWithLowestKey() + if err != nil { + if errors.Is(err, lsmkv.NotFound) { + return nil, nil + } + } + + ids, _ := c.haveDuplicatesInState(id) + + // take the key from any of the results, we have the guarantee that they're + // all the same + key := c.state[ids[0]].key + + var perSegmentResults [][]MapPair + + for _, id := range ids { + candidates := c.state[id].value + perSegmentResults = append(perSegmentResults, candidates) + + c.advanceInner(id) + } + + if c.listCfg.legacyRequireManualSorting { + for i := range perSegmentResults { + 
sort.Slice(perSegmentResults[i], func(a, b int) bool { + return bytes.Compare(perSegmentResults[i][a].Key, + perSegmentResults[i][b].Key) == -1 + }) + } + } + + merged, err := newSortedMapMerger().do(ctx, perSegmentResults) + if err != nil { + panic(fmt.Errorf("unexpected error decoding map values: %w", err)) + } + if len(merged) == 0 { + // all values deleted, proceed + continue + } + + // TODO remove keyOnly option, not used anyway + if !c.keyOnly { + return key, merged + } + return key, nil + } +} + +func (c *CursorMap) cursorWithLowestKey() (int, error) { + err := lsmkv.NotFound + pos := -1 + var lowest []byte + + for i, res := range c.state { + if errors.Is(res.err, lsmkv.NotFound) { + continue + } + + if lowest == nil || bytes.Compare(res.key, lowest) <= 0 { + pos = i + err = res.err + lowest = res.key + } + } + + if err != nil { + return pos, err + } + + return pos, nil +} + +func (c *CursorMap) haveDuplicatesInState(idWithLowestKey int) ([]int, bool) { + key := c.state[idWithLowestKey].key + + var idsFound []int + + for i, cur := range c.state { + if i == idWithLowestKey { + idsFound = append(idsFound, i) + continue + } + + if bytes.Equal(key, cur.key) { + idsFound = append(idsFound, i) + } + } + + return idsFound, len(idsFound) > 1 +} + +func (c *CursorMap) advanceInner(id int) { + k, v, err := c.innerCursors[id].next() + if errors.Is(err, lsmkv.NotFound) { + c.state[id].err = err + c.state[id].key = nil + c.state[id].value = nil + return + } + + if errors.Is(err, lsmkv.Deleted) { + c.state[id].err = err + c.state[id].key = k + c.state[id].value = nil + return + } + + if err != nil { + panic(fmt.Errorf("unexpected error in advance: %w", err)) + } + + c.state[id].key = k + if !c.keyOnly { + c.state[id].value = v + } + c.state[id].err = nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_map_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_map_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..159fa35947af4865e667e147c4fdd58bbde4afed --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_map_test.go @@ -0,0 +1,63 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "context" + "encoding/binary" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +// Previous implementation of cursor called recursively Next() when empty entry occurred, +// which could lead to stack overflow. This test prevents a regression. +func TestMapCursor_StackOverflow(t *testing.T) { + cursor := &CursorMap{ + unlock: func() {}, + innerCursors: []innerCursorMap{&emptyInnerCursorMap{}}, + listCfg: MapListOptionConfig{}, + keyOnly: false, + } + + k, mp := cursor.First(context.Background()) + assert.Nil(t, k) + assert.Nil(t, mp) +} + +type emptyInnerCursorMap struct { + key uint64 +} + +func (c *emptyInnerCursorMap) first() ([]byte, []MapPair, error) { + c.key = 0 + return c.bytes(), []MapPair{}, nil +} + +func (c *emptyInnerCursorMap) next() ([]byte, []MapPair, error) { + if c.key >= 1<<20 { + return nil, nil, lsmkv.NotFound + } + c.key++ + return c.bytes(), []MapPair{}, nil +} + +func (c *emptyInnerCursorMap) seek(key []byte) ([]byte, []MapPair, error) { + return c.first() +} + +func (c *emptyInnerCursorMap) bytes() []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, c.key) + return b +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_replace.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_replace.go new file mode 100644 index 
0000000000000000000000000000000000000000..243fb2d446aa28375aca91ea731bf2ef7e99339d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_replace.go @@ -0,0 +1,350 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type CursorReplace struct { + innerCursors []innerCursorReplace + state []cursorStateReplace + unlock func() + serveCache cursorStateReplace + + reusableIDList []int +} + +type innerCursorReplace interface { + first() ([]byte, []byte, error) + next() ([]byte, []byte, error) + seek([]byte) ([]byte, []byte, error) +} + +type cursorStateReplace struct { + key []byte + value []byte + err error +} + +// Cursor holds a RLock for the flushing state. 
It needs to be closed using the +// .Close() methods or otherwise the lock will never be released +func (b *Bucket) Cursor() *CursorReplace { + b.flushLock.RLock() + + if b.strategy != StrategyReplace { + panic("Cursor() called on strategy other than 'replace'") + } + + innerCursors, unlockSegmentGroup := b.disk.newCursors() + + // we have a flush-RLock, so we have the guarantee that the flushing state + // will not change for the lifetime of the cursor, thus there can only be two + // states: either a flushing memtable currently exists - or it doesn't + if b.flushing != nil { + innerCursors = append(innerCursors, b.flushing.newCursor()) + } + + innerCursors = append(innerCursors, b.active.newCursor()) + + return &CursorReplace{ + // cursor are in order from oldest to newest, with the memtable cursor + // being at the very top + innerCursors: innerCursors, + unlock: func() { + unlockSegmentGroup() + b.flushLock.RUnlock() + }, + } +} + +// CursorInMemWith returns a cursor which scan over the primary key of entries +// not yet persisted on disk. 
+// Segment creation and compaction will be blocked until the cursor is closed +func (b *Bucket) CursorInMem() *CursorReplace { + b.flushLock.RLock() + + if b.strategy != StrategyReplace { + panic("CursorInMemWith() called on strategy other than 'replace'") + } + + var innerCursors []innerCursorReplace + + // we have a flush-RLock, so we have the guarantee that the flushing state + // will not change for the lifetime of the cursor, thus there can only be two + // states: either a flushing memtable currently exists - or it doesn't + var flushingMemtableCursor innerCursorReplace + var releaseFlushingMemtable func() + + if b.flushing != nil { + flushingMemtableCursor, releaseFlushingMemtable = b.flushing.newBlockingCursor() + innerCursors = append(innerCursors, flushingMemtableCursor) + } + + activeMemtableCursor, releaseActiveMemtable := b.active.newBlockingCursor() + innerCursors = append(innerCursors, activeMemtableCursor) + + return &CursorReplace{ + // cursor are in order from oldest to newest, with the memtable cursor + // being at the very top + innerCursors: innerCursors, + unlock: func() { + if b.flushing != nil { + releaseFlushingMemtable() + } + releaseActiveMemtable() + b.flushLock.RUnlock() + }, + } +} + +// CursorOnDiskWith returns a cursor which scan over the primary key of entries +// already persisted on disk. +// New segments can still be created but compaction will be prevented +// while any cursor remains active +func (b *Bucket) CursorOnDisk() *CursorReplace { + if b.strategy != StrategyReplace { + panic("CursorWith(desiredSecondaryIndexCount) called on strategy other than 'replace'") + } + + innerCursors, unlockSegmentGroup := b.disk.newCursorsWithFlushingSupport() + + return &CursorReplace{ + innerCursors: innerCursors, + unlock: func() { + unlockSegmentGroup() + }, + } +} + +// CursorWithSecondaryIndex holds a RLock for the flushing state. 
It needs to be closed using the +// .Close() methods or otherwise the lock will never be released +func (b *Bucket) CursorWithSecondaryIndex(pos int) *CursorReplace { + b.flushLock.RLock() + + if b.strategy != StrategyReplace { + panic("CursorWithSecondaryIndex() called on strategy other than 'replace'") + } + + if b.secondaryIndices <= uint16(pos) { + panic("CursorWithSecondaryIndex() called on a bucket without enough secondary indexes") + } + + innerCursors, unlockSegmentGroup := b.disk.newCursorsWithSecondaryIndex(pos) + + // we have a flush-RLock, so we have the guarantee that the flushing state + // will not change for the lifetime of the cursor, thus there can only be two + // states: either a flushing memtable currently exists - or it doesn't + if b.flushing != nil { + innerCursors = append(innerCursors, b.flushing.newCursorWithSecondaryIndex(pos)) + } + + innerCursors = append(innerCursors, b.active.newCursorWithSecondaryIndex(pos)) + + return &CursorReplace{ + // cursor are in order from oldest to newest, with the memtable cursor + // being at the very top + innerCursors: innerCursors, + unlock: func() { + unlockSegmentGroup() + b.flushLock.RUnlock() + }, + } +} + +func (c *CursorReplace) Close() { + c.unlock() +} + +func (c *CursorReplace) seekAll(target []byte) { + state := make([]cursorStateReplace, len(c.innerCursors)) + for i, cur := range c.innerCursors { + key, value, err := cur.seek(target) + if errors.Is(err, lsmkv.NotFound) { + state[i].err = err + continue + } + + if errors.Is(err, lsmkv.Deleted) { + state[i].err = err + state[i].key = key + continue + } + + if err != nil { + panic(errors.Wrap(err, "unexpected error in seek (cursor type 'replace')")) + } + + state[i].key = key + state[i].value = value + } + + c.state = state +} + +func (c *CursorReplace) serveCurrentStateAndAdvance() ([]byte, []byte) { + for { + id, err := c.cursorWithLowestKey() + if err != nil { + if errors.Is(err, lsmkv.NotFound) { + return nil, nil + } + } + + ids, _ := 
c.haveDuplicatesInState(id) + + c.copyStateIntoServeCache(ids[len(ids)-1]) + + // with a replace strategy only the highest will be returned, but still all + // need to be advanced - or we would just encounter them again in the next + // round + for _, id := range ids { + c.advanceInner(id) + } + + if errors.Is(c.serveCache.err, lsmkv.Deleted) { + // element was deleted, proceed with next round + continue + } + + return c.serveCache.key, c.serveCache.value + } +} + +func (c *CursorReplace) haveDuplicatesInState(idWithLowestKey int) ([]int, bool) { + key := c.state[idWithLowestKey].key + + c.reusableIDList = c.reusableIDList[:0] + + for i, cur := range c.state { + if i == idWithLowestKey { + c.reusableIDList = append(c.reusableIDList, i) + continue + } + + if bytes.Equal(key, cur.key) { + c.reusableIDList = append(c.reusableIDList, i) + } + } + + return c.reusableIDList, len(c.reusableIDList) > 1 +} + +func (c *CursorReplace) copyStateIntoServeCache(pos int) { + resMut := c.state[pos] + if len(resMut.key) > cap(c.serveCache.key) { + c.serveCache.key = make([]byte, len(resMut.key)) + } else { + c.serveCache.key = c.serveCache.key[:len(resMut.key)] + } + + if len(resMut.value) > cap(c.serveCache.value) { + c.serveCache.value = make([]byte, len(resMut.value)) + } else { + c.serveCache.value = c.serveCache.value[:len(resMut.value)] + } + + copy(c.serveCache.key, resMut.key) + copy(c.serveCache.value, resMut.value) + c.serveCache.err = resMut.err +} + +func (c *CursorReplace) Seek(key []byte) ([]byte, []byte) { + c.seekAll(key) + return c.serveCurrentStateAndAdvance() +} + +func (c *CursorReplace) cursorWithLowestKey() (int, error) { + err := lsmkv.NotFound + pos := -1 + var lowest []byte + + for i, res := range c.state { + if errors.Is(res.err, lsmkv.NotFound) { + continue + } + + if lowest == nil || bytes.Compare(res.key, lowest) <= 0 { + pos = i + err = res.err + lowest = res.key + } + } + + if err != nil { + return pos, err + } + + return pos, nil +} + +func (c 
*CursorReplace) advanceInner(id int) { + k, v, err := c.innerCursors[id].next() + if errors.Is(err, lsmkv.NotFound) { + c.state[id].err = err + c.state[id].key = nil + c.state[id].value = nil + return + } + + if errors.Is(err, lsmkv.Deleted) { + c.state[id].err = err + c.state[id].key = k + c.state[id].value = nil + return + } + + if err != nil { + panic(errors.Wrap(err, "unexpected error in advance")) + } + + c.state[id].key = k + c.state[id].value = v + c.state[id].err = nil +} + +func (c *CursorReplace) Next() ([]byte, []byte) { + return c.serveCurrentStateAndAdvance() +} + +func (c *CursorReplace) firstAll() { + state := make([]cursorStateReplace, len(c.innerCursors)) + for i, cur := range c.innerCursors { + key, value, err := cur.first() + if errors.Is(err, lsmkv.NotFound) { + state[i].err = err + continue + } + if errors.Is(err, lsmkv.Deleted) { + state[i].err = err + state[i].key = key + continue + } + + if err != nil { + panic(errors.Wrap(err, "unexpected error in first (cursor type 'replace')")) + } + + state[i].key = key + state[i].value = value + } + + c.state = state +} + +func (c *CursorReplace) First() ([]byte, []byte) { + c.firstAll() + return c.serveCurrentStateAndAdvance() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_roaring_set.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_roaring_set.go new file mode 100644 index 0000000000000000000000000000000000000000..4c10a6968ced07fe40aea961e4f39c740108d7a2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_roaring_set.go @@ -0,0 +1,79 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "github.com/weaviate/sroar" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" +) + +type CursorRoaringSet interface { + First() ([]byte, *sroar.Bitmap) + Next() ([]byte, *sroar.Bitmap) + Seek([]byte) ([]byte, *sroar.Bitmap) + Close() +} + +type cursorRoaringSet struct { + combinedCursor *roaringset.CombinedCursor + unlock func() +} + +func (c *cursorRoaringSet) First() ([]byte, *sroar.Bitmap) { + return c.combinedCursor.First() +} + +func (c *cursorRoaringSet) Next() ([]byte, *sroar.Bitmap) { + return c.combinedCursor.Next() +} + +func (c *cursorRoaringSet) Seek(key []byte) ([]byte, *sroar.Bitmap) { + return c.combinedCursor.Seek(key) +} + +func (c *cursorRoaringSet) Close() { + c.unlock() +} + +func (b *Bucket) CursorRoaringSet() CursorRoaringSet { + return b.cursorRoaringSet(false) +} + +func (b *Bucket) CursorRoaringSetKeyOnly() CursorRoaringSet { + return b.cursorRoaringSet(true) +} + +func (b *Bucket) cursorRoaringSet(keyOnly bool) CursorRoaringSet { + MustBeExpectedStrategy(b.strategy, StrategyRoaringSet) + + b.flushLock.RLock() + + innerCursors, unlockSegmentGroup := b.disk.newRoaringSetCursors() + + // we have a flush-RLock, so we have the guarantee that the flushing state + // will not change for the lifetime of the cursor, thus there can only be two + // states: either a flushing memtable currently exists - or it doesn't + if b.flushing != nil { + innerCursors = append(innerCursors, b.flushing.newRoaringSetCursor()) + } + innerCursors = append(innerCursors, b.active.newRoaringSetCursor()) + + // cursors are in order from oldest to newest, with the memtable cursor + // being at the very top + return &cursorRoaringSet{ + combinedCursor: roaringset.NewCombinedCursor(innerCursors, keyOnly), + unlock: func() { + unlockSegmentGroup() + b.flushLock.RUnlock() + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_set.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_set.go new file mode 100644 index 0000000000000000000000000000000000000000..7e2cc5873fb1504cf5f6ad751fe6a45c1fc28b65 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_set.go @@ -0,0 +1,251 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "errors" + "fmt" + + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type CursorSet struct { + innerCursors []innerCursorCollection + state []cursorStateCollection + unlock func() + keyOnly bool +} + +type innerCursorCollection interface { + first() ([]byte, []value, error) + next() ([]byte, []value, error) + seek([]byte) ([]byte, []value, error) +} + +type cursorStateCollection struct { + key []byte + value []value + err error +} + +// SetCursor holds a RLock for the flushing state. 
It needs to be closed using the +// .Close() methods or otherwise the lock will never be released +func (b *Bucket) SetCursor() *CursorSet { + b.flushLock.RLock() + + if b.strategy != StrategySetCollection { + panic("SetCursor() called on strategy other than 'set'") + } + + innerCursors, unlockSegmentGroup := b.disk.newCollectionCursors() + + // we have a flush-RLock, so we have the guarantee that the flushing state + // will not change for the lifetime of the cursor, thus there can only be two + // states: either a flushing memtable currently exists - or it doesn't + if b.flushing != nil { + innerCursors = append(innerCursors, b.flushing.newCollectionCursor()) + } + + innerCursors = append(innerCursors, b.active.newCollectionCursor()) + + return &CursorSet{ + unlock: func() { + unlockSegmentGroup() + b.flushLock.RUnlock() + }, + // cursor are in order from oldest to newest, with the memtable cursor + // being at the very top + innerCursors: innerCursors, + } +} + +// SetCursorKeyOnly returns nil for all values. It has no control over the +// underlying "inner" cursors which may still retrieve a value which is then +// discarded. It does however, omit any handling of values, such as decoding, +// making this considerably more efficient if only keys are required. +// +// The same locking rules as for SetCursor apply. 
+func (b *Bucket) SetCursorKeyOnly() *CursorSet { + c := b.SetCursor() + c.keyOnly = true + return c +} + +func (c *CursorSet) Seek(key []byte) ([]byte, [][]byte) { + c.seekAll(key) + return c.serveCurrentStateAndAdvance() +} + +func (c *CursorSet) Next() ([]byte, [][]byte) { + return c.serveCurrentStateAndAdvance() +} + +func (c *CursorSet) First() ([]byte, [][]byte) { + c.firstAll() + return c.serveCurrentStateAndAdvance() +} + +func (c *CursorSet) Close() { + c.unlock() +} + +func (c *CursorSet) seekAll(target []byte) { + state := make([]cursorStateCollection, len(c.innerCursors)) + for i, cur := range c.innerCursors { + key, value, err := cur.seek(target) + if errors.Is(err, lsmkv.NotFound) { + state[i].err = err + continue + } + + if err != nil { + panic(fmt.Errorf("unexpected error in seek: %w", err)) + } + + state[i].key = key + if !c.keyOnly { + state[i].value = value + } + } + + c.state = state +} + +func (c *CursorSet) firstAll() { + state := make([]cursorStateCollection, len(c.innerCursors)) + for i, cur := range c.innerCursors { + key, value, err := cur.first() + if errors.Is(err, lsmkv.NotFound) { + state[i].err = err + continue + } + + if err != nil { + panic(fmt.Errorf("unexpected error in seek: %w", err)) + } + + state[i].key = key + if !c.keyOnly { + state[i].value = value + } + } + + c.state = state +} + +func (c *CursorSet) serveCurrentStateAndAdvance() ([]byte, [][]byte) { + for { + id, err := c.cursorWithLowestKey() + if err != nil { + if errors.Is(err, lsmkv.NotFound) { + return nil, nil + } + } + + ids, _ := c.haveDuplicatesInState(id) + + // take the key from any of the results, we have the guarantee that they're + // all the same + key := c.state[ids[0]].key + + var raw []value + for _, id := range ids { + raw = append(raw, c.state[id].value...) 
+ c.advanceInner(id) + } + + values := newSetDecoder().Do(raw) + if len(values) == 0 { + // all values deleted, proceed + continue + } + + // TODO remove keyOnly option, not used anyway + if !c.keyOnly { + return key, values + } + return key, nil + } +} + +func (c *CursorSet) cursorWithLowestKey() (int, error) { + err := lsmkv.NotFound + pos := -1 + var lowest []byte + + for i, res := range c.state { + if errors.Is(res.err, lsmkv.NotFound) { + continue + } + + if lowest == nil || bytes.Compare(res.key, lowest) <= 0 { + pos = i + err = res.err + lowest = res.key + } + } + + if err != nil { + return pos, err + } + + return pos, nil +} + +func (c *CursorSet) haveDuplicatesInState(idWithLowestKey int) ([]int, bool) { + key := c.state[idWithLowestKey].key + + var idsFound []int + + for i, cur := range c.state { + if i == idWithLowestKey { + idsFound = append(idsFound, i) + continue + } + + if bytes.Equal(key, cur.key) { + idsFound = append(idsFound, i) + } + } + + return idsFound, len(idsFound) > 1 +} + +func (c *CursorSet) advanceInner(id int) { + k, v, err := c.innerCursors[id].next() + if errors.Is(err, lsmkv.NotFound) { + c.state[id].err = err + c.state[id].key = nil + if !c.keyOnly { + c.state[id].value = nil + } + return + } + + if errors.Is(err, lsmkv.Deleted) { + c.state[id].err = err + c.state[id].key = k + c.state[id].value = nil + return + } + + if err != nil { + panic(fmt.Errorf("unexpected error in advance: %w", err)) + } + + c.state[id].key = k + if !c.keyOnly { + c.state[id].value = v + } + c.state[id].err = nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_set_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_set_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7ce8e2af8969f83011c1bec1a8b748d248e6ebcf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_bucket_set_test.go @@ -0,0 +1,61 @@ +// _ _ +// __ _____ __ 
___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "encoding/binary" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/lsmkv" +) + +// Previous implementation of cursor called recursively Next() when empty entry occurred, +// which could lead to stack overflow. This test prevents a regression. +func TestSetCursor_StackOverflow(t *testing.T) { + cursor := &CursorSet{ + unlock: func() {}, + innerCursors: []innerCursorCollection{&emptyInnerCursorSet{}}, + keyOnly: false, + } + + k, vals := cursor.First() + assert.Nil(t, k) + assert.Nil(t, vals) +} + +type emptyInnerCursorSet struct { + key uint64 +} + +func (c *emptyInnerCursorSet) first() ([]byte, []value, error) { + c.key = 0 + return c.bytes(), []value{}, nil +} + +func (c *emptyInnerCursorSet) next() ([]byte, []value, error) { + if c.key >= 1<<22 { + return nil, nil, lsmkv.NotFound + } + c.key++ + return c.bytes(), []value{}, nil +} + +func (c *emptyInnerCursorSet) seek(key []byte) ([]byte, []value, error) { + return c.first() +} + +func (c *emptyInnerCursorSet) bytes() []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, c.key) + return b +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_collection.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_collection.go new file mode 100644 index 0000000000000000000000000000000000000000..3dd091d4416ff58df5b5d8931aaf5f7b7ada94ec --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_collection.go @@ -0,0 +1,99 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ 
\___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type memtableCursorCollection struct { + data []*binarySearchNodeMulti + current int + lock func() + unlock func() +} + +func (m *Memtable) newCollectionCursor() innerCursorCollection { + // This cursor is a really primitive approach, it actually requires + // flattening the entire memtable - even if the cursor were to point to the + // very last element. However, given that the memtable will on average be + // only half it's max capacity and even that is relatively small, we might + // get away with the full-flattening and a linear search. Let's not optimize + // prematurely. + + m.RLock() + defer m.RUnlock() + + data := m.keyMulti.flattenInOrder() + + return &memtableCursorCollection{ + data: data, + lock: m.RLock, + unlock: m.RUnlock, + } +} + +func (c *memtableCursorCollection) first() ([]byte, []value, error) { + c.lock() + defer c.unlock() + + if len(c.data) == 0 { + return nil, nil, lsmkv.NotFound + } + + c.current = 0 + + // there is no key-level tombstone, only individual values can have + // tombstones + return c.data[c.current].key, c.data[c.current].values, nil +} + +func (c *memtableCursorCollection) seek(key []byte) ([]byte, []value, error) { + c.lock() + defer c.unlock() + + pos := c.posLargerThanEqual(key) + if pos == -1 { + return nil, nil, lsmkv.NotFound + } + + c.current = pos + // there is no key-level tombstone, only individual values can have + // tombstones + return c.data[pos].key, c.data[pos].values, nil +} + +func (c *memtableCursorCollection) posLargerThanEqual(key []byte) int { + for i, node := range c.data { + if bytes.Compare(node.key, key) >= 0 { + return i + } + } + + return -1 +} + +func (c *memtableCursorCollection) next() ([]byte, []value, error) { + c.lock() + defer c.unlock() + + c.current++ + if 
c.current >= len(c.data) { + return nil, nil, lsmkv.NotFound + } + + // there is no key-level tombstone, only individual values can have + // tombstones + return c.data[c.current].key, c.data[c.current].values, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_map.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_map.go new file mode 100644 index 0000000000000000000000000000000000000000..033d17bfc5c4afab239e6ebcf0dad056469bda85 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_map.go @@ -0,0 +1,99 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type memtableCursorMap struct { + data []*binarySearchNodeMap + current int + lock func() + unlock func() +} + +func (m *Memtable) newMapCursor() innerCursorMap { + // This cursor is a really primitive approach, it actually requires + // flattening the entire memtable - even if the cursor were to point to the + // very last element. However, given that the memtable will on average be + // only half it's max capacity and even that is relatively small, we might + // get away with the full-flattening and a linear search. Let's not optimize + // prematurely. 
+ + m.RLock() + defer m.RUnlock() + + data := m.keyMap.flattenInOrder() + + return &memtableCursorMap{ + data: data, + lock: m.RLock, + unlock: m.RUnlock, + } +} + +func (c *memtableCursorMap) first() ([]byte, []MapPair, error) { + c.lock() + defer c.unlock() + + if len(c.data) == 0 { + return nil, nil, lsmkv.NotFound + } + + c.current = 0 + + // there is no key-level tombstone, only individual values can have + // tombstones + return c.data[c.current].key, c.data[c.current].values, nil +} + +func (c *memtableCursorMap) seek(key []byte) ([]byte, []MapPair, error) { + c.lock() + defer c.unlock() + + pos := c.posLargerThanEqual(key) + if pos == -1 { + return nil, nil, lsmkv.NotFound + } + + c.current = pos + // there is no key-level tombstone, only individual values can have + // tombstones + return c.data[pos].key, c.data[pos].values, nil +} + +func (c *memtableCursorMap) posLargerThanEqual(key []byte) int { + for i, node := range c.data { + if bytes.Compare(node.key, key) >= 0 { + return i + } + } + + return -1 +} + +func (c *memtableCursorMap) next() ([]byte, []MapPair, error) { + c.lock() + defer c.unlock() + + c.current++ + if c.current >= len(c.data) { + return nil, nil, lsmkv.NotFound + } + + // there is no key-level tombstone, only individual values can have + // tombstones + return c.data[c.current].key, c.data[c.current].values, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_replace.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_replace.go new file mode 100644 index 0000000000000000000000000000000000000000..1c7117b8dc6892472933177b07f96cafefa67a8e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_replace.go @@ -0,0 +1,214 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 
2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "bytes" + "errors" + "fmt" + "sort" + + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type memtableCursor struct { + data []*binarySearchNode + keyFn func(n *binarySearchNode) []byte + current int + lock func() + unlock func() +} + +func (m *Memtable) newCursor() innerCursorReplace { + // This cursor is a really primitive approach, it actually requires + // flattening the entire memtable - even if the cursor were to point to the + // very last element. However, given that the memtable will on average be + // only half it's max capacity and even that is relatively small, we might + // get away with the full-flattening and a linear search. Let's not optimize + // prematurely. + + m.RLock() + defer m.RUnlock() + + data := m.key.flattenInOrder() + + return &memtableCursor{ + data: data, + keyFn: func(n *binarySearchNode) []byte { + return n.key + }, + lock: m.RLock, + unlock: m.RUnlock, + } +} + +func (m *Memtable) newBlockingCursor() (innerCursorReplace, func()) { + // This cursor is a really primitive approach, it actually requires + // flattening the entire memtable - even if the cursor were to point to the + // very last element. However, given that the memtable will on average be + // only half it's max capacity and even that is relatively small, we might + // get away with the full-flattening and a linear search. Let's not optimize + // prematurely. + m.RLock() + + data := m.key.flattenInOrder() + + return &memtableCursor{ + data: data, + keyFn: func(n *binarySearchNode) []byte { + return n.key + }, + lock: func() {}, + unlock: func() {}, + }, m.RUnlock +} + +func (m *Memtable) newCursorWithSecondaryIndex(pos int) innerCursorReplace { + // This cursor is a really primitive approach, it actually requires + // flattening the entire memtable - even if the cursor were to point to the + // very last element. 
However, given that the memtable will on average be
+	// only half its max capacity and even that is relatively small, we might
+	// get away with the full-flattening and a linear search. Let's not optimize
+	// prematurely.
+
+	m.RLock()
+	defer m.RUnlock()
+
+	secondaryToPrimary := m.secondaryToPrimary[pos]
+
+	sortedSecondaryKeys := make([]string, 0, len(secondaryToPrimary))
+
+	for skey := range secondaryToPrimary {
+		if skey == "" {
+			// this special case is to handle the edge case when a secondary
+			// key was not removed together with primary key
+			continue
+		}
+		sortedSecondaryKeys = append(sortedSecondaryKeys, skey)
+	}
+
+	// map keys are unique, so a plain lexicographic sort yields the same
+	// order as the previous sort.SliceStable call. Note that the previous
+	// comparator used "<=", which is not a valid strict less-than function
+	// for the sort package.
+	sort.Strings(sortedSecondaryKeys)
+
+	// cursor data is immutable so provide a point in time iteration
+	// without blocking concurrent actions on the memtable while
+	// the cursor is still allocated
+	data := make([]*binarySearchNode, len(sortedSecondaryKeys))
+
+	for i, skey := range sortedSecondaryKeys {
+		node, err := m.key.getNode(secondaryToPrimary[skey])
+		if err != nil {
+			if errors.Is(err, lsmkv.Deleted) {
+				// represent deleted entries as tombstone nodes so the cursor
+				// can still surface them as lsmkv.Deleted
+				data[i] = &binarySearchNode{
+					key:       []byte(skey),
+					tombstone: true,
+				}
+				continue
+			}
+			panic(fmt.Errorf("secondaryToPrimary[%s] unexpected: %w", skey, err))
+		}
+
+		// deep-copy every byte slice so the cursor remains valid even if the
+		// memtable mutates the underlying node afterwards
+		secondaryKeys := make([][]byte, len(node.secondaryKeys))
+		for j, sk := range node.secondaryKeys {
+			secondaryKeys[j] = cp(sk)
+		}
+
+		data[i] = &binarySearchNode{
+			key:           cp(node.key),
+			value:         cp(node.value),
+			secondaryKeys: secondaryKeys,
+			tombstone:     node.tombstone,
+		}
+	}
+
+	return &memtableCursor{
+		data: data,
+		keyFn: func(n *binarySearchNode) []byte {
+			if pos >= len(n.secondaryKeys) {
+				return nil
+			}
+			return n.secondaryKeys[pos]
+		},
+		// cursor data is immutable thus locks are not needed
+		lock:   func() {},
+		unlock: func() {},
+	}
+}
+
+// cp returns a defensive copy of b; empty or nil input yields nil.
+func cp(b []byte) []byte {
+	if len(b) == 0 {
+		return nil
+	}
+	c := make([]byte, len(b))
+	copy(c, 
b) + return c +} + +func (c *memtableCursor) first() ([]byte, []byte, error) { + c.lock() + defer c.unlock() + + if len(c.data) == 0 { + return nil, nil, lsmkv.NotFound + } + + c.current = 0 + + if c.data[c.current].tombstone { + return c.keyFn(c.data[c.current]), nil, lsmkv.Deleted + } + return c.keyFn(c.data[c.current]), c.data[c.current].value, nil +} + +func (c *memtableCursor) seek(key []byte) ([]byte, []byte, error) { + c.lock() + defer c.unlock() + + pos := c.posLargerThanEqual(key) + if pos == -1 { + return nil, nil, lsmkv.NotFound + } + + c.current = pos + if c.data[c.current].tombstone { + return c.keyFn(c.data[c.current]), nil, lsmkv.Deleted + } + return c.keyFn(c.data[pos]), c.data[pos].value, nil +} + +func (c *memtableCursor) posLargerThanEqual(key []byte) int { + for i, node := range c.data { + if bytes.Compare(c.keyFn(node), key) >= 0 { + return i + } + } + + return -1 +} + +func (c *memtableCursor) next() ([]byte, []byte, error) { + c.lock() + defer c.unlock() + + c.current++ + if c.current >= len(c.data) { + return nil, nil, lsmkv.NotFound + } + + if c.data[c.current].tombstone { + return c.keyFn(c.data[c.current]), nil, lsmkv.Deleted + } + return c.keyFn(c.data[c.current]), c.data[c.current].value, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_roaring_set.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_roaring_set.go new file mode 100644 index 0000000000000000000000000000000000000000..85781f5f05fbce28641365effa7a196438a35790 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_roaring_set.go @@ -0,0 +1,25 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" +) + +func (m *Memtable) newRoaringSetCursor() roaringset.InnerCursor { + m.RLock() + defer m.RUnlock() + + // Since FlattenInOrder makes deep copy of bst's nodes, + // no further memtable's locking in required on cursor's methods + return roaringset.NewBinarySearchTreeCursor(m.roaringSet) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_roaring_set_range.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_roaring_set_range.go new file mode 100644 index 0000000000000000000000000000000000000000..d3429add34aed9d5ae4d1de38aea40e0ebd9f1a9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_memtable_roaring_set_range.go @@ -0,0 +1,23 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange" +) + +func (m *Memtable) newRoaringSetRangeReader() roaringsetrange.InnerReader { + m.RLock() + defer m.RUnlock() + + return roaringsetrange.NewMemtableReader(m.roaringSetRange.Clone()) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_collection.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_collection.go new file mode 100644 index 0000000000000000000000000000000000000000..fdb886f3845548998bd07abbb6edd5862e40d5d1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/lsmkv/cursor_segment_collection.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package lsmkv + +import ( + "github.com/weaviate/weaviate/entities/lsmkv" +) + +type segmentCursorCollection struct { + segment *segment + nextOffset uint64 +} + +func (s *segment) newCollectionCursor() *segmentCursorCollection { + return &segmentCursorCollection{ + segment: s, + } +} + +func (sg *SegmentGroup) newCollectionCursors() ([]innerCursorCollection, func()) { + segments, release := sg.getAndLockSegments() + + out := make([]innerCursorCollection, len(segments)) + + for i, segment := range segments { + out[i] = segment.newCollectionCursor() + } + + return out, release +} + +func (s *segmentCursorCollection) seek(key []byte) ([]byte, []value, error) { + node, err := s.segment.index.Seek(key) + if err != nil { + return nil, nil, err + } + + parsed, err := s.parseCollectionNode(nodeOffset{node.Start, node.End}) + // make sure to set the next offset before checking the error. 
The error + // could be 'entities.Deleted' which would require that the offset is still advanced + // for the next cycle + s.nextOffset = node.End + if err != nil { + return parsed.primaryKey, nil, err + } + + return parsed.primaryKey, parsed.values, nil +} + +func (s *segmentCursorCollection) next() ([]byte, []value, error) { + if s.nextOffset >= s.segment.dataEndPos { + return nil, nil, lsmkv.NotFound + } + + parsed, err := s.parseCollectionNode(nodeOffset{start: s.nextOffset}) + // make sure to set the next offset before checking the error. The error + // could be 'entities.Deleted' which would require that the offset is still advanced + // for the next cycle + s.nextOffset = s.nextOffset + uint64(parsed.offset) + if err != nil { + return parsed.primaryKey, nil, err + } + + return parsed.primaryKey, parsed.values, nil +} + +func (s *segmentCursorCollection) first() ([]byte, []value, error) { + s.nextOffset = s.segment.dataStartPos + parsed, err := s.parseCollectionNode(nodeOffset{start: s.nextOffset}) + // make sure to set the next offset before checking the error. 
The error + // could be 'entities.Deleted' which would require that the offset is still advanced + // for the next cycle + s.nextOffset = s.nextOffset + uint64(parsed.offset) + if err != nil { + return parsed.primaryKey, nil, err + } + + return parsed.primaryKey, parsed.values, nil +} + +func (s *segmentCursorCollection) parseCollectionNode(offset nodeOffset) (segmentCollectionNode, error) { + r, err := s.segment.newNodeReader(offset, "segmentCursorCollection") + if err != nil { + return segmentCollectionNode{}, err + } + defer r.Release() + + return ParseCollectionNode(r) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/merge_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/merge_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..25bf591b8fdfbafcaaa60c9ddf7b7f7c846284ad --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/merge_integration_test.go @@ -0,0 +1,1063 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package db + +import ( + "context" + "fmt" + "testing" + "time" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/monitoring" + "github.com/weaviate/weaviate/usecases/objects" +) + +func Test_MergingObjects(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := 
replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + + sch := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "MergeTestTarget", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + { + Class: "MergeTestSource", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ // tries to have "one of each property type" + { + Name: "string", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "text", + DataType: []string{"text"}, + }, + { + Name: "number", + DataType: []string{"number"}, + }, + { + Name: "int", + DataType: []string{"int"}, + }, + { + Name: "date", + DataType: 
[]string{"date"}, + }, + { + Name: "geo", + DataType: []string{"geoCoordinates"}, + }, + { + Name: "toTarget", + DataType: []string{"MergeTestTarget"}, + }, + }, + }, + { + Class: "MergeTestNoVector", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "foo", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + }, + }, + } + + t.Run("add required classes", func(t *testing.T) { + for _, class := range sch.Objects.Classes { + t.Run(fmt.Sprintf("add %s", class.Class), func(t *testing.T) { + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + }) + } + }) + + schemaGetter.schema = sch + + target1 := strfmt.UUID("897be7cc-1ae1-4b40-89d9-d3ea98037638") + target2 := strfmt.UUID("5cc94aba-93e4-408a-ab19-3d803216a04e") + target3 := strfmt.UUID("81982705-8b1e-4228-b84c-911818d7ee85") + target4 := strfmt.UUID("7f69c263-17f4-4529-a54d-891a7c008ca4") + sourceID := strfmt.UUID("8738ddd5-a0ed-408d-a5d6-6f818fd56be6") + noVecID := strfmt.UUID("b4933761-88b2-4666-856d-298eb1ad0a59") + + t.Run("add objects", func(t *testing.T) { + now := time.Now().UnixNano() / int64(time.Millisecond) + err := repo.PutObject(context.Background(), &models.Object{ + ID: sourceID, + Class: "MergeTestSource", + Properties: map[string]interface{}{ + "string": "only the string prop set", + }, + CreationTimeUnix: now, + LastUpdateTimeUnix: now, + }, []float32{0.5}, nil, nil, nil, 0) + require.Nil(t, err) + + targetDimensionsBefore := getDimensionsFromRepo(context.Background(), repo, "MergeTestTarget") + + targets := []strfmt.UUID{target1, target2, target3, target4} + + for i, target := range targets { + err = repo.PutObject(context.Background(), &models.Object{ + ID: target, + Class: "MergeTestTarget", + Properties: map[string]interface{}{ + "name": fmt.Sprintf("target item %d", i), + }, + }, []float32{0.5}, nil, nil, nil, 0) + 
require.Nil(t, err) + } + + targetDimensionsAfter := getDimensionsFromRepo(context.Background(), repo, "MergeTestTarget") + require.Equal(t, targetDimensionsBefore+4, targetDimensionsAfter) + + err = repo.PutObject(context.Background(), &models.Object{ + ID: noVecID, + Class: "MergeTestNoVector", + Properties: map[string]interface{}{ + "foo": "bar", + }, + CreationTimeUnix: now, + LastUpdateTimeUnix: now, + }, nil, nil, nil, nil, 0) + require.Nil(t, err) + + targetDimensionsAfterNoVec := getDimensionsFromRepo(context.Background(), repo, "MergeTestTarget") + require.Equal(t, targetDimensionsAfter, targetDimensionsAfterNoVec) + }) + + var lastUpdateTimeUnix int64 + + t.Run("fetch original object's update timestamp", func(t *testing.T) { + source, err := repo.ObjectByID(context.Background(), sourceID, nil, additional.Properties{ + LastUpdateTimeUnix: true, + }, "") + require.Nil(t, err) + + lastUpdateTimeUnix = source.Object().LastUpdateTimeUnix + require.NotEmpty(t, lastUpdateTimeUnix) + }) + + t.Run("merge other previously unset properties into it", func(t *testing.T) { + // give the lastUpdateTimeUnix time to be different. 
+ // on some machines this may not be needed, but for + // faster processors, the difference is undetectable + time.Sleep(time.Millisecond) + + md := objects.MergeDocument{ + Class: "MergeTestSource", + ID: sourceID, + PrimitiveSchema: map[string]interface{}{ + "number": 7.0, + "int": int64(9), + "geo": &models.GeoCoordinates{ + Latitude: ptFloat32(30.2), + Longitude: ptFloat32(60.2), + }, + "text": "some text", + }, + UpdateTime: time.Now().UnixNano() / int64(time.Millisecond), + } + + err := repo.Merge(context.Background(), md, nil, "", 0) + assert.Nil(t, err) + }) + + t.Run("compare merge object's update time with original", func(t *testing.T) { + source, err := repo.ObjectByID(context.Background(), sourceID, nil, additional.Properties{ + LastUpdateTimeUnix: true, + }, "") + require.Nil(t, err) + + assert.Greater(t, source.Object().LastUpdateTimeUnix, lastUpdateTimeUnix) + }) + + t.Run("check that the object was successfully merged", func(t *testing.T) { + source, err := repo.ObjectByID(context.Background(), sourceID, nil, additional.Properties{}, "") + require.Nil(t, err) + + sch := source.Object().Properties.(map[string]interface{}) + expectedSchema := map[string]interface{}{ + // from original + "string": "only the string prop set", + + // from merge + "number": 7.0, + "int": float64(9), + "geo": &models.GeoCoordinates{ + Latitude: ptFloat32(30.2), + Longitude: ptFloat32(60.2), + }, + "text": "some text", + } + + assert.Equal(t, expectedSchema, sch) + }) + + t.Run("trying to merge from non-existing index", func(t *testing.T) { + md := objects.MergeDocument{ + Class: "WrongClass", + ID: sourceID, + PrimitiveSchema: map[string]interface{}{ + "number": 7.0, + }, + } + + err := repo.Merge(context.Background(), md, nil, "", 0) + assert.Equal(t, fmt.Errorf( + "merge from non-existing index for WrongClass"), err) + }) + t.Run("add a reference and replace one prop", func(t *testing.T) { + source, err := crossref.ParseSource(fmt.Sprintf( + 
"weaviate://localhost/MergeTestSource/%s/toTarget", sourceID)) + require.Nil(t, err) + targets := []strfmt.UUID{target1} + refs := make(objects.BatchReferences, len(targets)) + for i, target := range targets { + to, err := crossref.Parse(fmt.Sprintf("weaviate://localhost/%s", target)) + require.Nil(t, err) + refs[i] = objects.BatchReference{ + Err: nil, + From: source, + To: to, + } + } + md := objects.MergeDocument{ + Class: "MergeTestSource", + ID: sourceID, + PrimitiveSchema: map[string]interface{}{ + "string": "let's update the string prop", + }, + References: refs, + } + err = repo.Merge(context.Background(), md, nil, "", 0) + assert.Nil(t, err) + }) + + t.Run("check that the object was successfully merged", func(t *testing.T) { + source, err := repo.ObjectByID(context.Background(), sourceID, nil, additional.Properties{}, "") + require.Nil(t, err) + + ref, err := crossref.Parse(fmt.Sprintf("weaviate://localhost/%s", target1)) + require.Nil(t, err) + + sch := source.Object().Properties.(map[string]interface{}) + expectedSchema := map[string]interface{}{ + "string": "let's update the string prop", + "number": 7.0, + "int": float64(9), + "geo": &models.GeoCoordinates{ + Latitude: ptFloat32(30.2), + Longitude: ptFloat32(60.2), + }, + "text": "some text", + "toTarget": models.MultipleRef{ + ref.SingleRef(), + }, + } + + assert.Equal(t, expectedSchema, sch) + }) + + t.Run("add more references in rapid succession", func(t *testing.T) { + // this test case prevents a regression on gh-1016 + source, err := crossref.ParseSource(fmt.Sprintf( + "weaviate://localhost/MergeTestSource/%s/toTarget", sourceID)) + require.Nil(t, err) + targets := []strfmt.UUID{target2, target3, target4} + refs := make(objects.BatchReferences, len(targets)) + for i, target := range targets { + to, err := crossref.Parse(fmt.Sprintf("weaviate://localhost/%s", target)) + require.Nil(t, err) + refs[i] = objects.BatchReference{ + Err: nil, + From: source, + To: to, + } + } + md := 
objects.MergeDocument{ + Class: "MergeTestSource", + ID: sourceID, + References: refs, + } + err = repo.Merge(context.Background(), md, nil, "", 0) + assert.Nil(t, err) + }) + + t.Run("check all references are now present", func(t *testing.T) { + source, err := repo.ObjectByID(context.Background(), sourceID, nil, additional.Properties{}, "") + require.Nil(t, err) + + refs := source.Object().Properties.(map[string]interface{})["toTarget"] + refsSlice, ok := refs.(models.MultipleRef) + require.True(t, ok, fmt.Sprintf("toTarget must be models.MultipleRef, but got %#v", refs)) + + foundBeacons := []string{} + for _, ref := range refsSlice { + foundBeacons = append(foundBeacons, ref.Beacon.String()) + } + expectedBeacons := []string{ + fmt.Sprintf("weaviate://localhost/%s", target1), + fmt.Sprintf("weaviate://localhost/%s", target2), + fmt.Sprintf("weaviate://localhost/%s", target3), + fmt.Sprintf("weaviate://localhost/%s", target4), + } + + assert.ElementsMatch(t, foundBeacons, expectedBeacons) + }) + + t.Run("merge object with no vector", func(t *testing.T) { + err = repo.Merge(context.Background(), objects.MergeDocument{ + Class: "MergeTestNoVector", + ID: noVecID, + PrimitiveSchema: map[string]interface{}{"foo": "baz"}, + }, nil, "", 0) + require.Nil(t, err) + + orig, err := repo.ObjectByID(context.Background(), noVecID, nil, additional.Properties{}, "") + require.Nil(t, err) + + expectedSchema := map[string]interface{}{ + "foo": "baz", + "id": noVecID, + } + + assert.Equal(t, expectedSchema, orig.Schema) + }) +} + +// This prevents a regression on +// https://github.com/weaviate/weaviate/issues/2193 +// +// Prior to the fix it was possible that a prop that was not touched during the +// merge (and therefore only loaded from disk) failed during the +// inverted-indexing for the new doc id. This was then hidden by the fact that +// error handling was broken inside the inverted.Analyzer. 
This test tries to +// make sure that every possible property type stays intact if untouched +// during a Merge operation +// +// To achieve this, every prop in this class exists twice, once with the prefix +// 'touched_' and once with 'untouched_'. In the initial insert both properties +// contain the same value, but then during the patch merge, the 'touched_' +// properties are updated to a different value while the 'untouched_' +// properties are left untouched. Then we try to retrieve the object through a +// filter matching each property. The 'untouched_' properties are matched with +// the original value, the 'touched_' props are matched with the updated ones +func Test_Merge_UntouchedPropsCorrectlyIndexed(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + 
mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + MaxImportGoroutinesFactor: 1, + QueryMaximumResults: 10000, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + hnswConfig := enthnsw.NewDefaultUserConfig() + hnswConfig.Skip = true + sch := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "TestClass", + VectorIndexConfig: hnswConfig, + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ // tries to have "one of each property type" + { + Name: "untouched_string", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "touched_string", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "untouched_string_array", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "touched_string_array", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "untouched_text", Tokenization: "word", + DataType: []string{"text"}, + }, + { + Name: "touched_text", Tokenization: "word", + DataType: []string{"text"}, + }, + { + Name: "untouched_text_array", Tokenization: "word", + DataType: []string{"text[]"}, + }, + { + Name: "touched_text_array", Tokenization: "word", + DataType: []string{"text[]"}, + }, + {Name: "untouched_number", 
DataType: []string{"number"}}, + {Name: "touched_number", DataType: []string{"number"}}, + {Name: "untouched_number_array", DataType: []string{"number[]"}}, + {Name: "touched_number_array", DataType: []string{"number[]"}}, + {Name: "untouched_int", DataType: []string{"int"}}, + {Name: "touched_int", DataType: []string{"int"}}, + {Name: "untouched_int_array", DataType: []string{"int[]"}}, + {Name: "touched_int_array", DataType: []string{"int[]"}}, + {Name: "untouched_date", DataType: []string{"date"}}, + {Name: "touched_date", DataType: []string{"date"}}, + {Name: "untouched_date_array", DataType: []string{"date[]"}}, + {Name: "touched_date_array", DataType: []string{"date[]"}}, + {Name: "untouched_geo", DataType: []string{"geoCoordinates"}}, + {Name: "touched_geo", DataType: []string{"geoCoordinates"}}, + }, + }, + }, + }, + } + + t.Run("add required classes", func(t *testing.T) { + for _, class := range sch.Objects.Classes { + t.Run(fmt.Sprintf("add %s", class.Class), func(t *testing.T) { + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + }) + } + }) + + schemaGetter.schema = sch + + t.Run("add initial object", func(t *testing.T) { + id := 0 + err := repo.PutObject(context.Background(), &models.Object{ + ID: uuidFromInt(id), + Class: "TestClass", + Properties: map[string]interface{}{ + "untouched_number": float64(id), + "untouched_number_array": []interface{}{float64(id)}, + "untouched_int": id, + "untouched_int_array": []interface{}{int64(id)}, + "untouched_string": fmt.Sprintf("%d", id), + "untouched_string_array": []string{fmt.Sprintf("%d", id)}, + "untouched_text": fmt.Sprintf("%d", id), + "untouched_text_array": []string{fmt.Sprintf("%d", id)}, + "untouched_date": time.Unix(0, 0).Add(time.Duration(id) * time.Hour), + "untouched_date_array": []time.Time{time.Unix(0, 0).Add(time.Duration(id) * time.Hour)}, + "untouched_geo": &models.GeoCoordinates{ + ptFloat32(float32(id)), ptFloat32(float32(id)), + }, + + "touched_number": 
float64(id), + "touched_number_array": []interface{}{float64(id)}, + "touched_int": id, + "touched_int_array": []interface{}{int64(id)}, + "touched_string": fmt.Sprintf("%d", id), + "touched_string_array": []string{fmt.Sprintf("%d", id)}, + "touched_text": fmt.Sprintf("%d", id), + "touched_text_array": []string{fmt.Sprintf("%d", id)}, + "touched_date": time.Unix(0, 0).Add(time.Duration(id) * time.Hour), + "touched_date_array": []time.Time{time.Unix(0, 0).Add(time.Duration(id) * time.Hour)}, + "touched_geo": &models.GeoCoordinates{ + ptFloat32(float32(id)), ptFloat32(float32(id)), + }, + }, + CreationTimeUnix: int64(id), + LastUpdateTimeUnix: int64(id), + }, []float32{0.5}, nil, nil, nil, 0) + require.Nil(t, err) + }) + + t.Run("patch half the props (all that contain 'touched')", func(t *testing.T) { + updateID := 28 + md := objects.MergeDocument{ + Class: "TestClass", + ID: uuidFromInt(0), + PrimitiveSchema: map[string]interface{}{ + "touched_number": float64(updateID), + "touched_number_array": []interface{}{float64(updateID)}, + "touched_int": updateID, + "touched_int_array": []interface{}{int64(updateID)}, + "touched_string": fmt.Sprintf("%d", updateID), + "touched_string_array": []string{fmt.Sprintf("%d", updateID)}, + "touched_text": fmt.Sprintf("%d", updateID), + "touched_text_array": []string{fmt.Sprintf("%d", updateID)}, + "touched_date": time.Unix(0, 0).Add(time.Duration(updateID) * time.Hour), + "touched_date_array": []time.Time{time.Unix(0, 0).Add(time.Duration(updateID) * time.Hour)}, + "touched_geo": &models.GeoCoordinates{ + ptFloat32(float32(updateID)), ptFloat32(float32(updateID)), + }, + }, + References: nil, + } + err = repo.Merge(context.Background(), md, nil, "", 0) + assert.Nil(t, err) + }) + + t.Run("retrieve by each individual prop", func(t *testing.T) { + retrieve := func(prefix string, id int) func(t *testing.T) { + return func(t *testing.T) { + type test struct { + name string + filter *filters.LocalFilter + } + + tests := []test{ + { + 
name: "string filter", + filter: buildFilter( + fmt.Sprintf("%s_string", prefix), + fmt.Sprintf("%d", id), + eq, + schema.DataTypeText), + }, + { + name: "string array filter", + filter: buildFilter( + fmt.Sprintf("%s_string_array", prefix), + fmt.Sprintf("%d", id), + eq, + schema.DataTypeText), + }, + { + name: "text filter", + filter: buildFilter( + fmt.Sprintf("%s_text", prefix), + fmt.Sprintf("%d", id), + eq, + dtText), + }, + { + name: "text array filter", + filter: buildFilter( + fmt.Sprintf("%s_text_array", prefix), + fmt.Sprintf("%d", id), + eq, + dtText), + }, + { + name: "int filter", + filter: buildFilter( + fmt.Sprintf("%s_int", prefix), id, eq, dtInt), + }, + { + name: "int array filter", + filter: buildFilter( + fmt.Sprintf("%s_int_array", prefix), id, eq, dtInt), + }, + { + name: "number filter", + filter: buildFilter( + fmt.Sprintf("%s_number", prefix), float64(id), eq, dtNumber), + }, + { + name: "number array filter", + filter: buildFilter( + fmt.Sprintf("%s_number_array", prefix), float64(id), eq, dtNumber), + }, + { + name: "date filter", + filter: buildFilter( + fmt.Sprintf("%s_date", prefix), + time.Unix(0, 0).Add(time.Duration(id)*time.Hour), + eq, dtDate), + }, + { + name: "date array filter", + filter: buildFilter( + fmt.Sprintf("%s_date_array", prefix), + time.Unix(0, 0).Add(time.Duration(id)*time.Hour), + eq, dtDate), + }, + { + name: "geoFilter filter", + filter: buildFilter( + fmt.Sprintf("%s_geo", prefix), + filters.GeoRange{ + GeoCoordinates: &models.GeoCoordinates{ + ptFloat32(float32(id)), ptFloat32(float32(id)), + }, + Distance: 2, + }, + wgr, dtGeoCoordinates), + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + params := dto.GetParams{ + ClassName: "TestClass", + Pagination: &filters.Pagination{Limit: 5}, + Filters: tc.filter, + } + res, err := repo.VectorSearch(context.Background(), params, []string{""}, nil) + require.Nil(t, err) + require.Len(t, res, 1) + + // hard-code the only uuid + 
assert.Equal(t, uuidFromInt(0), res[0].ID) + }) + } + } + } + t.Run("using untouched", retrieve("untouched", 0)) + t.Run("using touched", retrieve("touched", 28)) + }) +} + +func Test_MergeDocIdPreserved_PropsCorrectlyIndexed(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + MaxImportGoroutinesFactor: 1, + QueryMaximumResults: 10000, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, nil, + mockNodeSelector, mockSchemaReader, 
mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + hnswConfig := enthnsw.NewDefaultUserConfig() + hnswConfig.Skip = true + sch := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "TestClass", + VectorIndexConfig: hnswConfig, + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ // tries to have "one of each property type" + { + Name: "untouched_string", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "touched_string", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "untouched_string_array", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "touched_string_array", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "untouched_text", Tokenization: "word", + DataType: []string{"text"}, + }, + { + Name: "touched_text", Tokenization: "word", + DataType: []string{"text"}, + }, + { + Name: "untouched_text_array", Tokenization: "word", + DataType: []string{"text[]"}, + }, + { + Name: "touched_text_array", Tokenization: "word", + DataType: []string{"text[]"}, + }, + {Name: "untouched_number", DataType: []string{"number"}}, + {Name: "touched_number", DataType: []string{"number"}}, + {Name: "untouched_number_array", DataType: []string{"number[]"}}, + {Name: "touched_number_array", DataType: []string{"number[]"}}, + {Name: "untouched_int", DataType: []string{"int"}}, + {Name: "touched_int", DataType: []string{"int"}}, + {Name: "untouched_int_array", DataType: []string{"int[]"}}, + {Name: "touched_int_array", DataType: []string{"int[]"}}, + {Name: "untouched_date", 
DataType: []string{"date"}}, + {Name: "touched_date", DataType: []string{"date"}}, + {Name: "untouched_date_array", DataType: []string{"date[]"}}, + {Name: "touched_date_array", DataType: []string{"date[]"}}, + }, + }, + }, + }, + } + + t.Run("add required classes", func(t *testing.T) { + for _, class := range sch.Objects.Classes { + t.Run(fmt.Sprintf("add %s", class.Class), func(t *testing.T) { + err := migrator.AddClass(context.Background(), class) + require.Nil(t, err) + }) + } + }) + + schemaGetter.schema = sch + + t.Run("add initial object", func(t *testing.T) { + id := 0 + err := repo.PutObject(context.Background(), &models.Object{ + ID: uuidFromInt(id), + Class: "TestClass", + Properties: map[string]interface{}{ + "untouched_number": float64(id), + "untouched_number_array": []interface{}{float64(id)}, + "untouched_int": id, + "untouched_int_array": []interface{}{int64(id)}, + "untouched_string": fmt.Sprintf("%d", id), + "untouched_string_array": []string{fmt.Sprintf("%d", id)}, + "untouched_text": fmt.Sprintf("%d", id), + "untouched_text_array": []string{fmt.Sprintf("%d", id)}, + "untouched_date": time.Unix(0, 0).Add(time.Duration(id) * time.Hour), + "untouched_date_array": []time.Time{time.Unix(0, 0).Add(time.Duration(id) * time.Hour)}, + + "touched_number": float64(id), + "touched_number_array": []interface{}{float64(id)}, + "touched_int": id, + "touched_int_array": []interface{}{int64(id)}, + "touched_string": fmt.Sprintf("%d", id), + "touched_string_array": []string{fmt.Sprintf("%d", id)}, + "touched_text": fmt.Sprintf("%d", id), + "touched_text_array": []string{fmt.Sprintf("%d", id)}, + "touched_date": time.Unix(0, 0).Add(time.Duration(id) * time.Hour), + "touched_date_array": []time.Time{time.Unix(0, 0).Add(time.Duration(id) * time.Hour)}, + }, + CreationTimeUnix: int64(id), + LastUpdateTimeUnix: int64(id), + }, []float32{0.5}, nil, nil, nil, 0) + require.Nil(t, err) + }) + + t.Run("patch half the props (all that contain 'touched')", func(t *testing.T) 
{ + updateID := 28 + md := objects.MergeDocument{ + Class: "TestClass", + ID: uuidFromInt(0), + PrimitiveSchema: map[string]interface{}{ + "touched_number": float64(updateID), + "touched_number_array": []interface{}{float64(updateID)}, + "touched_int": updateID, + "touched_int_array": []interface{}{int64(updateID)}, + "touched_string": fmt.Sprintf("%d", updateID), + "touched_string_array": []string{fmt.Sprintf("%d", updateID)}, + "touched_text": fmt.Sprintf("%d", updateID), + "touched_text_array": []string{fmt.Sprintf("%d", updateID)}, + "touched_date": time.Unix(0, 0).Add(time.Duration(updateID) * time.Hour), + "touched_date_array": []time.Time{time.Unix(0, 0).Add(time.Duration(updateID) * time.Hour)}, + }, + References: nil, + } + err = repo.Merge(context.Background(), md, nil, "", 0) + assert.Nil(t, err) + }) + + t.Run("retrieve by each individual prop", func(t *testing.T) { + retrieve := func(prefix string, id int) func(t *testing.T) { + return func(t *testing.T) { + type test struct { + name string + filter *filters.LocalFilter + } + + tests := []test{ + { + name: "string filter", + filter: buildFilter( + fmt.Sprintf("%s_string", prefix), + fmt.Sprintf("%d", id), + eq, + schema.DataTypeText), + }, + { + name: "string array filter", + filter: buildFilter( + fmt.Sprintf("%s_string_array", prefix), + fmt.Sprintf("%d", id), + eq, + schema.DataTypeText), + }, + { + name: "text filter", + filter: buildFilter( + fmt.Sprintf("%s_text", prefix), + fmt.Sprintf("%d", id), + eq, + dtText), + }, + { + name: "text array filter", + filter: buildFilter( + fmt.Sprintf("%s_text_array", prefix), + fmt.Sprintf("%d", id), + eq, + dtText), + }, + { + name: "int filter", + filter: buildFilter( + fmt.Sprintf("%s_int", prefix), id, eq, dtInt), + }, + { + name: "int array filter", + filter: buildFilter( + fmt.Sprintf("%s_int_array", prefix), id, eq, dtInt), + }, + { + name: "number filter", + filter: buildFilter( + fmt.Sprintf("%s_number", prefix), float64(id), eq, dtNumber), + }, + { 
+ name: "number array filter", + filter: buildFilter( + fmt.Sprintf("%s_number_array", prefix), float64(id), eq, dtNumber), + }, + { + name: "date filter", + filter: buildFilter( + fmt.Sprintf("%s_date", prefix), + time.Unix(0, 0).Add(time.Duration(id)*time.Hour), + eq, dtDate), + }, + { + name: "date array filter", + filter: buildFilter( + fmt.Sprintf("%s_date_array", prefix), + time.Unix(0, 0).Add(time.Duration(id)*time.Hour), + eq, dtDate), + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + params := dto.GetParams{ + ClassName: "TestClass", + Pagination: &filters.Pagination{Limit: 5}, + Filters: tc.filter, + } + res, err := repo.VectorSearch(context.Background(), params, []string{""}, nil) + require.Nil(t, err) + require.Len(t, res, 1) + + // hard-code the only uuid + assert.Equal(t, uuidFromInt(0), res[0].ID) + }) + } + } + } + t.Run("using untouched", retrieve("untouched", 0)) + t.Run("using touched", retrieve("touched", 28)) + }) +} + +func TestMerge_ObjectWithNamedVectors(t *testing.T) { + var ( + ctx = context.Background() + namedVecName = "vec" + multiVecName = "multivec" + class = &models.Class{ + Class: "testclass", + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "text", + DataType: schema.DataTypeText.PropString(), + }, + }, + VectorConfig: map[string]models.VectorConfig{ + namedVecName: { + Vectorizer: noopVectorizerConfig(), + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + }, + multiVecName: { + Vectorizer: noopVectorizerConfig(), + VectorIndexConfig: enthnsw.NewDefaultMultiVectorUserConfig(), + }, + }, + } + objectID = strfmt.UUID("897be7cc-1ae1-4b40-89d9-d3ea98037638") + + db = createTestDatabaseWithClass(t, monitoring.GetMetrics(), class) + ) + + require.NoError(t, db.PutObject(ctx, &models.Object{ + ID: objectID, + Class: class.Class, + Properties: map[string]interface{}{ + "text": "test1", + }, + }, nil, map[string][]float32{ + namedVecName: randVector(10), + }, 
map[string][][]float32{ + multiVecName: { + randVector(10), + randVector(10), + }, + }, nil, 0)) + + newVectors := models.Vectors{ + namedVecName: randVector(10), + multiVecName: [][]float32{ + randVector(10), + randVector(10), + randVector(10), + }, + } + + require.NoError(t, db.Merge(ctx, objects.MergeDocument{ + Class: class.Class, + ID: objectID, + PrimitiveSchema: map[string]interface{}{ + "text": "test2", + "number": 2, + }, + Vectors: newVectors, + }, nil, "", 0)) + + object, err := db.ObjectByID(context.Background(), objectID, nil, additional.Properties{}, "") + require.NoError(t, err) + + require.Equal(t, newVectors, object.Vectors) +} + +func noopVectorizerConfig() any { + return map[string]interface{}{"none": map[string]interface{}{}} +} + +func uuidFromInt(in int) strfmt.UUID { + return strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", in)).String()) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/metrics.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/metrics.go new file mode 100644 index 0000000000000000000000000000000000000000..505f8eb6542d578ec4f325581c1e592319a54529 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/metrics.go @@ -0,0 +1,388 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "errors" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type Metrics struct { + logger logrus.FieldLogger + monitoring bool + batchTime prometheus.ObserverVec + batchDeleteTime prometheus.ObserverVec + batchCount prometheus.Counter + batchCountBytes prometheus.Counter + objectTime prometheus.ObserverVec + startupDurations prometheus.ObserverVec + filteredVectorFilter prometheus.Observer + filteredVectorVector prometheus.Observer + filteredVectorObjects prometheus.Observer + filteredVectorSort prometheus.Observer + grouped bool + baseMetrics *monitoring.PrometheusMetrics + + shardsCount *prometheus.GaugeVec + shardStatusUpdateDurationsSeconds *prometheus.HistogramVec +} + +func NewMetrics( + logger logrus.FieldLogger, prom *monitoring.PrometheusMetrics, + className, shardName string, +) *Metrics { + m := &Metrics{ + logger: logger, + } + + if prom == nil { + return m + } + + m.baseMetrics = prom + + if prom.Group { + className = "n/a" + shardName = "n/a" + m.grouped = true + } + + m.monitoring = true + m.batchTime = prom.BatchTime.MustCurryWith(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + m.batchDeleteTime = prom.BatchDeleteTime.MustCurryWith(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + m.batchCount = prom.BatchCount.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + m.batchCountBytes = prom.BatchCountBytes.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + m.objectTime = prom.ObjectsTime.MustCurryWith(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + m.startupDurations = prom.StartupDurations.MustCurryWith(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + }) + + m.filteredVectorFilter = 
prom.QueriesFilteredVectorDurations.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + "operation": "filter", + }) + + m.filteredVectorVector = prom.QueriesFilteredVectorDurations.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + "operation": "vector", + }) + + m.filteredVectorObjects = prom.QueriesFilteredVectorDurations.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + "operation": "objects", + }) + + m.filteredVectorSort = prom.QueriesFilteredVectorDurations.With(prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + "operation": "sort", + }) + + if prom.Registerer == nil { + prom.Registerer = prometheus.DefaultRegisterer + } + + // TODO: This is a temporary solution to avoid duplicating metrics registered + // in the index package. it shall be removed once the index package metric is refactored + // and to bring the metrics to the db package. + shardsCount := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "weaviate_index_shards_total", + Help: "Total number of shards per index status", + }, []string{"status"}) // status: READONLY, INDEXING, LOADING, READY, SHUTDOWN + + shardStatusUpdateDurationsSeconds := prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "weaviate_index_shard_status_update_duration_seconds", + Help: "Time taken to update shard status in seconds", + }, []string{"status"}) // status: READONLY, INDEXING, LOADING, READY, SHUTDOWN + + // Try to register metrics, reuse existing ones if already registered + if err := prom.Registerer.Register(shardsCount); err != nil { + var are prometheus.AlreadyRegisteredError + if errors.As(err, &are) { + shardsCount = are.ExistingCollector.(*prometheus.GaugeVec) + } + } + + if err := prom.Registerer.Register(shardStatusUpdateDurationsSeconds); err != nil { + var are prometheus.AlreadyRegisteredError + if errors.As(err, &are) { + shardStatusUpdateDurationsSeconds = 
are.ExistingCollector.(*prometheus.HistogramVec) + } + } + + m.shardsCount = shardsCount + m.shardStatusUpdateDurationsSeconds = shardStatusUpdateDurationsSeconds + + return m +} + +func (m *Metrics) UpdateShardStatus(old, new string) { + if m.shardsCount == nil { + return + } + + if old != "" { + m.shardsCount.WithLabelValues(old).Dec() + } + + m.shardsCount.WithLabelValues(new).Inc() +} + +func (m *Metrics) ObserveUpdateShardStatus(status string, duration time.Duration) { + if m.shardStatusUpdateDurationsSeconds == nil { + return + } + + m.shardStatusUpdateDurationsSeconds.With(prometheus.Labels{"status": status}).Observe(float64(duration.Seconds())) +} + +func (m *Metrics) DeleteShardLabels(class, shard string) { + if m.grouped { + // never delete the shared label, only individual ones + return + } + + m.baseMetrics.DeleteShard(class, shard) +} + +func (m *Metrics) BatchObject(start time.Time, size int) { + took := time.Since(start) + m.logger.WithField("action", "batch_objects"). + WithField("batch_size", size). + WithField("took", took). + Tracef("object batch took %s", took) +} + +func (m *Metrics) ObjectStore(start time.Time) { + took := time.Since(start) + m.logger.WithField("action", "store_object_store"). + WithField("took", took). + Tracef("storing objects in KV/inverted store took %s", took) + + if !m.monitoring { + return + } + + m.batchTime.With(prometheus.Labels{"operation": "object_storage"}). + Observe(float64(took / time.Millisecond)) +} + +func (m *Metrics) VectorIndex(start time.Time) { + took := time.Since(start) + m.logger.WithField("action", "store_vector_index"). + WithField("took", took). + Tracef("storing objects vector index took %s", took) + + if !m.monitoring { + return + } + + m.batchTime.With(prometheus.Labels{"operation": "vector_storage"}). 
+ Observe(float64(took / time.Millisecond)) +} + +func (m *Metrics) PutObject(start time.Time) { + took := time.Since(start) + m.logger.WithField("action", "store_object_store_single_object_in_tx"). + WithField("took", took). + Tracef("storing single object (complete) in KV/inverted took %s", took) + + if !m.monitoring { + return + } + + m.objectTime.With(prometheus.Labels{ + "operation": "put", + "step": "total", + }).Observe(float64(took) / float64(time.Millisecond)) +} + +func (m *Metrics) PutObjectDetermineStatus(start time.Time) { + took := time.Since(start) + m.logger.WithField("action", "store_object_store_determine_status"). + WithField("took", took). + Tracef("retrieving previous and determining status in KV took %s", took) + + if !m.monitoring { + return + } + + m.objectTime.With(prometheus.Labels{ + "operation": "put", + "step": "retrieve_previous_determine_status", + }).Observe(float64(took) / float64(time.Millisecond)) +} + +func (m *Metrics) PutObjectUpsertObject(start time.Time) { + took := time.Since(start) + m.logger.WithField("action", "store_object_store_upsert_object_data"). + WithField("took", took). + Tracef("storing object data in KV took %s", took) + + if !m.monitoring { + return + } + + m.objectTime.With(prometheus.Labels{ + "operation": "put", + "step": "upsert_object_store", + }).Observe(float64(took) / float64(time.Millisecond)) +} + +func (m *Metrics) PutObjectUpdateInverted(start time.Time) { + took := time.Since(start) + m.logger.WithField("action", "store_object_store_update_inverted"). + WithField("took", took). + Tracef("updating inverted index for single object took %s", took) + + if !m.monitoring { + return + } + + m.objectTime.With(prometheus.Labels{ + "operation": "put", + "step": "inverted_total", + }).Observe(float64(took) / float64(time.Millisecond)) +} + +func (m *Metrics) InvertedDeleteOld(start time.Time) { + took := time.Since(start) + m.logger.WithField("action", "inverted_delete_old"). + WithField("took", took). 
+ Tracef("deleting old entries from inverted index %s", took) + if !m.monitoring { + return + } + + m.objectTime.With(prometheus.Labels{ + "operation": "put", + "step": "inverted_delete", + }).Observe(float64(took) / float64(time.Millisecond)) +} + +func (m *Metrics) InvertedDeleteDelta(start time.Time) { + took := time.Since(start) + m.logger.WithField("action", "inverted_delete_delta"). + WithField("took", took). + Tracef("deleting delta entries from inverted index %s", took) +} + +func (m *Metrics) InvertedExtend(start time.Time, propCount int) { + took := time.Since(start) + m.logger.WithField("action", "inverted_extend"). + WithField("took", took). + WithField("prop_count", propCount). + Tracef("extending inverted index took %s", took) + + if !m.monitoring { + return + } + + m.objectTime.With(prometheus.Labels{ + "operation": "put", + "step": "inverted_extend", + }).Observe(float64(took) / float64(time.Millisecond)) +} + +func (m *Metrics) ShardStartup(start time.Time) { + if !m.monitoring { + return + } + + took := time.Since(start) + m.startupDurations.With(prometheus.Labels{ + "operation": "shard_total_init", + }).Observe(float64(took) / float64(time.Millisecond)) +} + +func (m *Metrics) BatchDelete(start time.Time, op string) { + if !m.monitoring { + return + } + + took := time.Since(start) + m.batchDeleteTime.With(prometheus.Labels{ + "operation": op, + }).Observe(float64(took) / float64(time.Millisecond)) +} + +func (m *Metrics) BatchCount(size int) { + if !m.monitoring { + return + } + + m.batchCount.Add(float64(size)) +} + +func (m *Metrics) BatchCountBytes(size int64) { + if !m.monitoring { + return + } + + m.batchCountBytes.Add(float64(size)) +} + +func (m *Metrics) FilteredVectorFilter(dur time.Duration) { + if !m.monitoring { + return + } + + m.filteredVectorFilter.Observe(float64(dur) / float64(time.Millisecond)) +} + +func (m *Metrics) FilteredVectorVector(dur time.Duration) { + if !m.monitoring { + return + } + + 
m.filteredVectorVector.Observe(float64(dur) / float64(time.Millisecond)) +} + +func (m *Metrics) FilteredVectorObjects(dur time.Duration) { + if !m.monitoring { + return + } + + m.filteredVectorObjects.Observe(float64(dur) / float64(time.Millisecond)) +} + +func (m *Metrics) FilteredVectorSort(dur time.Duration) { + if !m.monitoring { + return + } + + m.filteredVectorSort.Observe(float64(dur) / float64(time.Millisecond)) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/migrator.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/migrator.go new file mode 100644 index 0000000000000000000000000000000000000000..78c76d96486e7ec307eab07ce8bab417f2a0191c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/migrator.go @@ -0,0 +1,1123 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "slices" + "time" + + "github.com/weaviate/weaviate/usecases/multitenancy" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/vector/dynamic" + "github.com/weaviate/weaviate/adapters/repos/db/vector/flat" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw" + command "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/router" + "github.com/weaviate/weaviate/cluster/types" + "github.com/weaviate/weaviate/entities/errorcompounder" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/schema" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + "github.com/weaviate/weaviate/entities/storobj" + esync "github.com/weaviate/weaviate/entities/sync" + "github.com/weaviate/weaviate/entities/vectorindex" + "github.com/weaviate/weaviate/usecases/replica" + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" +) + +// provider is an interface has to be implemented by modules +// to get the backend (s3, azure blob storage, google cloud storage) module +type provider interface { + // OffloadBackend returns the backend module for (s3, azure blob storage, google cloud storage) + OffloadBackend(backend string) (modulecapabilities.OffloadCloud, bool) +} + +type processor interface { + UpdateTenantsProcess(ctx context.Context, + class string, req *command.TenantProcessRequest) (uint64, error) +} + +type Migrator struct { + db *DB + cloud modulecapabilities.OffloadCloud + logger logrus.FieldLogger + cluster processor + nodeId string + localNodeName string 
+ + classLocks *esync.KeyLocker +} + +func NewMigrator(db *DB, logger logrus.FieldLogger, localNodeName string) *Migrator { + return &Migrator{ + db: db, + logger: logger, + classLocks: esync.NewKeyLocker(), + localNodeName: localNodeName, + } +} + +func (m *Migrator) SetNode(nodeID string) { + m.nodeId = nodeID +} + +func (m *Migrator) SetCluster(c processor) { + m.cluster = c +} + +func (m *Migrator) SetOffloadProvider(provider provider, moduleName string) { + cloud, enabled := provider.OffloadBackend(moduleName) + if !enabled { + m.logger.Debug(fmt.Sprintf("module %s is not enabled", moduleName)) + } + m.cloud = cloud + m.logger.Info(fmt.Sprintf("module %s is enabled", moduleName)) +} + +func (m *Migrator) AddClass(ctx context.Context, class *models.Class) error { + if err := replica.ValidateConfig(class, m.db.config.Replication); err != nil { + return fmt.Errorf("replication config: %w", err) + } + + indexID := indexID(schema.ClassName(class.Class)) + + m.classLocks.Lock(indexID) + defer m.classLocks.Unlock(indexID) + + idx := m.db.GetIndex(schema.ClassName(class.Class)) + if idx != nil { + return fmt.Errorf("index for class %v already found locally", idx.ID()) + } + + collection := schema.ClassName(class.Class).String() + indexRouter := router.NewBuilder( + collection, + multitenancy.IsMultiTenant(class.MultiTenancyConfig), + m.db.nodeSelector, + m.db.schemaGetter, + m.db.schemaReader, + m.db.replicationFSM, + ).Build() + idx, err := NewIndex(ctx, + IndexConfig{ + ClassName: schema.ClassName(class.Class), + RootPath: m.db.config.RootPath, + ResourceUsage: m.db.config.ResourceUsage, + QueryMaximumResults: m.db.config.QueryMaximumResults, + QueryHybridMaximumResults: m.db.config.QueryHybridMaximumResults, + QueryNestedRefLimit: m.db.config.QueryNestedRefLimit, + MemtablesFlushDirtyAfter: m.db.config.MemtablesFlushDirtyAfter, + MemtablesInitialSizeMB: m.db.config.MemtablesInitialSizeMB, + MemtablesMaxSizeMB: m.db.config.MemtablesMaxSizeMB, + 
MemtablesMinActiveSeconds: m.db.config.MemtablesMinActiveSeconds, + MemtablesMaxActiveSeconds: m.db.config.MemtablesMaxActiveSeconds, + MinMMapSize: m.db.config.MinMMapSize, + LazySegmentsDisabled: m.db.config.LazySegmentsDisabled, + SegmentInfoIntoFileNameEnabled: m.db.config.SegmentInfoIntoFileNameEnabled, + WriteMetadataFilesEnabled: m.db.config.WriteMetadataFilesEnabled, + MaxReuseWalSize: m.db.config.MaxReuseWalSize, + SegmentsCleanupIntervalSeconds: m.db.config.SegmentsCleanupIntervalSeconds, + SeparateObjectsCompactions: m.db.config.SeparateObjectsCompactions, + CycleManagerRoutinesFactor: m.db.config.CycleManagerRoutinesFactor, + IndexRangeableInMemory: m.db.config.IndexRangeableInMemory, + MaxSegmentSize: m.db.config.MaxSegmentSize, + TrackVectorDimensions: m.db.config.TrackVectorDimensions, + TrackVectorDimensionsInterval: m.db.config.TrackVectorDimensionsInterval, + UsageEnabled: m.db.config.UsageEnabled, + AvoidMMap: m.db.config.AvoidMMap, + DisableLazyLoadShards: m.db.config.DisableLazyLoadShards, + ForceFullReplicasSearch: m.db.config.ForceFullReplicasSearch, + TransferInactivityTimeout: m.db.config.TransferInactivityTimeout, + LSMEnableSegmentsChecksumValidation: m.db.config.LSMEnableSegmentsChecksumValidation, + ReplicationFactor: class.ReplicationConfig.Factor, + AsyncReplicationEnabled: class.ReplicationConfig.AsyncEnabled, + DeletionStrategy: class.ReplicationConfig.DeletionStrategy, + ShardLoadLimiter: m.db.shardLoadLimiter, + HNSWMaxLogSize: m.db.config.HNSWMaxLogSize, + HNSWDisableSnapshots: m.db.config.HNSWDisableSnapshots, + HNSWSnapshotIntervalSeconds: m.db.config.HNSWSnapshotIntervalSeconds, + HNSWSnapshotOnStartup: m.db.config.HNSWSnapshotOnStartup, + HNSWSnapshotMinDeltaCommitlogsNumber: m.db.config.HNSWSnapshotMinDeltaCommitlogsNumber, + HNSWSnapshotMinDeltaCommitlogsSizePercentage: m.db.config.HNSWSnapshotMinDeltaCommitlogsSizePercentage, + HNSWWaitForCachePrefill: m.db.config.HNSWWaitForCachePrefill, + HNSWFlatSearchConcurrency: 
m.db.config.HNSWFlatSearchConcurrency, + HNSWAcornFilterRatio: m.db.config.HNSWAcornFilterRatio, + VisitedListPoolMaxSize: m.db.config.VisitedListPoolMaxSize, + QuerySlowLogEnabled: m.db.config.QuerySlowLogEnabled, + QuerySlowLogThreshold: m.db.config.QuerySlowLogThreshold, + InvertedSorterDisabled: m.db.config.InvertedSorterDisabled, + MaintenanceModeEnabled: m.db.config.MaintenanceModeEnabled, + }, + // no backward-compatibility check required, since newly added classes will + // always have the field set + inverted.ConfigFromModel(class.InvertedIndexConfig), + convertToVectorIndexConfig(class.VectorIndexConfig), + convertToVectorIndexConfigs(class.VectorConfig), + indexRouter, m.db.schemaGetter, m.db.schemaReader, m.db, m.logger, m.db.nodeResolver, m.db.remoteIndex, + m.db.replicaClient, &m.db.config.Replication, m.db.promMetrics, class, m.db.jobQueueCh, m.db.scheduler, m.db.indexCheckpoints, + m.db.memMonitor, m.db.reindexer, m.db.bitmapBufPool) + if err != nil { + return errors.Wrap(err, "create index") + } + + m.db.indexLock.Lock() + m.db.indices[idx.ID()] = idx + m.db.indexLock.Unlock() + + return nil +} + +func (m *Migrator) DropClass(ctx context.Context, className string, hasFrozen bool) error { + indexID := indexID(schema.ClassName(className)) + + m.classLocks.Lock(indexID) + defer m.classLocks.Unlock(indexID) + + if err := m.db.DeleteIndex(schema.ClassName(className)); err != nil { + return err + } + + if m.cloud != nil && hasFrozen { + return m.cloud.Delete(ctx, className, "", "") + } + + return nil +} + +func (m *Migrator) UpdateClass(ctx context.Context, className string, newClassName *string) error { + if newClassName != nil { + return errors.New("weaviate does not support renaming of classes") + } + + return nil +} + +func (m *Migrator) LoadShard(ctx context.Context, class, shard string) error { + idx := m.db.GetIndex(schema.ClassName(class)) + if idx == nil { + return fmt.Errorf("could not find collection %s", class) + } + return 
idx.LoadLocalShard(ctx, shard, false) +} + +func (m *Migrator) DropShard(ctx context.Context, class, shard string) error { + idx := m.db.GetIndex(schema.ClassName(class)) + if idx == nil { + return fmt.Errorf("could not find collection %s", class) + } + return idx.dropShards([]string{shard}) +} + +func (m *Migrator) ShutdownShard(ctx context.Context, class, shard string) error { + idx := m.db.GetIndex(schema.ClassName(class)) + if idx == nil { + return fmt.Errorf("could not find collection %s", class) + } + + idx.shardCreateLocks.Lock(shard) + defer idx.shardCreateLocks.Unlock(shard) + + shardLike, ok := idx.shards.LoadAndDelete(shard) + if !ok { + return fmt.Errorf("could not find shard %s", shard) + } + if err := shardLike.Shutdown(ctx); err != nil { + if !errors.Is(err, errAlreadyShutdown) { + return errors.Wrapf(err, "shutdown shard %q", shard) + } + idx.logger.WithField("shard", shardLike.Name()).Debug("was already shut or dropped") + } + return nil +} + +// UpdateIndex ensures that the local index is up2date with the latest sharding +// state (shards/tenants) and index properties that may have been added in the +// case that the local node was down during a class update operation. 
+// +// This method is relevant when the local node is a part of a cluster, +// particularly with the introduction of the v2 RAFT-based schema +func (m *Migrator) UpdateIndex(ctx context.Context, incomingClass *models.Class, + incomingSS *sharding.State, +) error { + idx := m.db.GetIndex(schema.ClassName(incomingClass.Class)) + + { // add index if missing + if idx == nil { + if err := m.AddClass(ctx, incomingClass); err != nil { + return fmt.Errorf( + "add missing class %s during update index: %w", + incomingClass.Class, err) + } + return nil + } + } + + { // add/remove missing shards + if incomingSS.PartitioningEnabled { + if err := m.updateIndexTenants(ctx, idx, incomingSS); err != nil { + return err + } + } else { + if err := m.updateIndexShards(ctx, idx, incomingSS); err != nil { + return err + } + } + } + + { // add missing properties + if err := m.updateIndexAddMissingProperties(ctx, idx, incomingClass); err != nil { + return err + } + } + + return nil +} + +func (m *Migrator) updateIndexTenants(ctx context.Context, idx *Index, + incomingSS *sharding.State, +) error { + if err := m.updateIndexTenantsStatus(ctx, idx, incomingSS); err != nil { + return err + } + return m.updateIndexDeleteTenants(ctx, idx, incomingSS) +} + +func (m *Migrator) updateIndexTenantsStatus(ctx context.Context, idx *Index, + incomingSS *sharding.State, +) error { + nodeName := m.db.schemaGetter.NodeName() + for shardName, phys := range incomingSS.Physical { + if !phys.IsLocalShard(nodeName) { + continue + } + + if phys.Status == models.TenantActivityStatusHOT { + // Only load the tenant if activity status == HOT. 
+ if err := idx.LoadLocalShard(ctx, shardName, false); err != nil { + return fmt.Errorf("add missing tenant shard %s during update index: %w", shardName, err) + } + } else { + // Shutdown the tenant if activity status != HOT + if err := idx.UnloadLocalShard(ctx, shardName); err != nil { + return fmt.Errorf("shutdown tenant shard %s during update index: %w", shardName, err) + } + } + } + return nil +} + +func (m *Migrator) updateIndexDeleteTenants(ctx context.Context, + idx *Index, incomingSS *sharding.State, +) error { + var toRemove []string + + idx.ForEachShard(func(name string, _ ShardLike) error { + if _, ok := incomingSS.Physical[name]; !ok { + toRemove = append(toRemove, name) + } + return nil + }) + + if len(toRemove) == 0 { + return nil + } + + if err := idx.dropShards(toRemove); err != nil { + return fmt.Errorf("drop tenant shards %v during update index: %w", toRemove, err) + } + + if m.cloud != nil { + // TODO-offload: currently we send all tenants and if it did find one in the cloud will delete + // better to filter the passed shards and get the frozen only + if err := idx.dropCloudShards(ctx, m.cloud, toRemove, m.nodeId); err != nil { + return fmt.Errorf("drop tenant shards %v during update index: %w", toRemove, err) + } + } + + return nil +} + +func (m *Migrator) updateIndexShards(ctx context.Context, idx *Index, + incomingSS *sharding.State, +) error { + requestedShards := incomingSS.AllLocalPhysicalShards() + existingShards := make(map[string]ShardLike) + + if err := idx.ForEachShard(func(name string, shard ShardLike) error { + existingShards[name] = shard + return nil + }); err != nil { + return fmt.Errorf("failed to iterate over loaded shards: %w", err) + } + + // Initialize missing shards and shutdown unneeded ones + for shardName := range existingShards { + if !slices.Contains(requestedShards, shardName) { + if err := idx.UnloadLocalShard(ctx, shardName); err != nil { + // TODO: an error should be returned but keeping the old behavior for now + 
m.logger.WithField("shard", shardName).Error("shutdown shard during update index: %w", err) + continue + } + } + } + + for _, shardName := range requestedShards { + if _, exists := existingShards[shardName]; !exists { + if err := idx.initLocalShard(ctx, shardName); err != nil { + return fmt.Errorf("add missing shard %s during update index: %w", shardName, err) + } + } + } + + return nil +} + +func (m *Migrator) updateIndexAddMissingProperties(ctx context.Context, idx *Index, + incomingClass *models.Class, +) error { + for _, prop := range incomingClass.Properties { + // Returning an error in idx.ForEachShard stops the range. + // So if one shard is missing the property bucket, we know + // that the property needs to be added to the index, and + // don't need to continue iterating over all shards + errMissingProp := errors.New("missing prop") + // Ensure we iterate over loaded shard to avoid force loading a lazy loaded shard + err := idx.ForEachLoadedShard(func(name string, shard ShardLike) error { + bucket := shard.Store().Bucket(helpers.BucketFromPropNameLSM(prop.Name)) + if bucket == nil { + return errMissingProp + } + return nil + }) + if errors.Is(err, errMissingProp) { + if err := idx.addProperty(ctx, prop); err != nil { + return fmt.Errorf("add missing prop %s during update index: %w", prop.Name, err) + } + } + } + return nil +} + +func (m *Migrator) AddProperty(ctx context.Context, className string, prop ...*models.Property) error { + indexID := indexID(schema.ClassName(className)) + + m.classLocks.Lock(indexID) + defer m.classLocks.Unlock(indexID) + + idx := m.db.GetIndex(schema.ClassName(className)) + if idx == nil { + return errors.Errorf("cannot add property to a non-existing index for %s", className) + } + + return idx.addProperty(ctx, prop...) 
+} + +// DropProperty is ignored, API compliant change +func (m *Migrator) DropProperty(ctx context.Context, className string, propertyName string) error { + // ignore but don't error + return nil +} + +func (m *Migrator) UpdateProperty(ctx context.Context, className string, propName string, newName *string) error { + if newName != nil { + return errors.New("weaviate does not support renaming of properties") + } + + return nil +} + +func (m *Migrator) GetShardsQueueSize(ctx context.Context, className, tenant string) (map[string]int64, error) { + indexID := indexID(schema.ClassName(className)) + + m.classLocks.Lock(indexID) + defer m.classLocks.Unlock(indexID) + + idx := m.db.GetIndex(schema.ClassName(className)) + if idx == nil { + return nil, errors.Errorf("cannot get shards status for a non-existing index for %s", className) + } + + return idx.getShardsQueueSize(ctx, tenant) +} + +func (m *Migrator) GetShardsStatus(ctx context.Context, className, tenant string) (map[string]string, error) { + indexID := indexID(schema.ClassName(className)) + + m.classLocks.Lock(indexID) + defer m.classLocks.Unlock(indexID) + + idx := m.db.GetIndex(schema.ClassName(className)) + if idx == nil { + return nil, errors.Errorf("cannot get shards status for a non-existing index for %s", className) + } + + return idx.getShardsStatus(ctx, tenant) +} + +func (m *Migrator) UpdateShardStatus(ctx context.Context, className, shardName, targetStatus string, schemaVersion uint64) error { + indexID := indexID(schema.ClassName(className)) + + m.classLocks.Lock(indexID) + defer m.classLocks.Unlock(indexID) + + idx := m.db.GetIndex(schema.ClassName(className)) + if idx == nil { + return errors.Errorf("cannot update shard status to a non-existing index for %s", className) + } + + tenantName := "" + if idx.partitioningEnabled { + // If partitioning is enable it means the collection is multi tenant and the shard name must match the tenant name + // otherwise the tenant name is expected to be empty. 
+ tenantName = shardName + } + return idx.updateShardStatus(ctx, tenantName, shardName, targetStatus, schemaVersion) +} + +// NewTenants creates new partitions +func (m *Migrator) NewTenants(ctx context.Context, class *models.Class, creates []*schemaUC.CreateTenantPayload) error { + indexID := indexID(schema.ClassName(class.Class)) + + m.classLocks.Lock(indexID) + defer m.classLocks.Unlock(indexID) + + idx := m.db.GetIndex(schema.ClassName(class.Class)) + if idx == nil { + return fmt.Errorf("cannot find index for %q", class.Class) + } + + ec := errorcompounder.New() + for _, pl := range creates { + if pl.Status != models.TenantActivityStatusHOT { + continue // skip creating inactive shards + } + + err := idx.initLocalShard(ctx, pl.Name) + ec.Add(err) + } + return ec.ToError() +} + +// UpdateTenants activates or deactivates tenant partitions and returns a commit func +// that can be used to either commit or rollback the changes +func (m *Migrator) UpdateTenants(ctx context.Context, class *models.Class, updates []*schemaUC.UpdateTenantPayload, implicitTenantActivation bool) error { + indexID := indexID(schema.ClassName(class.Class)) + + m.classLocks.Lock(indexID) + defer m.classLocks.Unlock(indexID) + + idx := m.db.GetIndex(schema.ClassName(class.Class)) + if idx == nil { + return fmt.Errorf("cannot find index for %q", class.Class) + } + + hot := make([]string, 0, len(updates)) + cold := make([]string, 0, len(updates)) + freezing := make([]string, 0, len(updates)) + frozen := make([]string, 0, len(updates)) + unfreezing := make([]string, 0, len(updates)) + + for _, tenant := range updates { + switch tenant.Status { + case models.TenantActivityStatusHOT: + hot = append(hot, tenant.Name) + case models.TenantActivityStatusCOLD: + cold = append(cold, tenant.Name) + case models.TenantActivityStatusFROZEN: + frozen = append(frozen, tenant.Name) + + case types.TenantActivityStatusFREEZING: // never arrives from user + freezing = append(freezing, tenant.Name) + case 
types.TenantActivityStatusUNFREEZING: // never arrives from user + unfreezing = append(unfreezing, tenant.Name) + } + } + + ec := errorcompounder.NewSafe() + if len(hot) > 0 { + m.logger.WithField("action", "tenants_to_hot").Debug(hot) + idx.shardTransferMutex.RLock() + defer idx.shardTransferMutex.RUnlock() + + eg := enterrors.NewErrorGroupWrapper(m.logger) + eg.SetLimit(_NUMCPU * 2) + + for _, name := range hot { + name := name // prevent loop variable capture + // enterrors.GoWrapper(func() { + // The timeout is rather arbitrary. It's meant to be so high that it can + // never stop a valid tenant activation use case, but low enough to + // prevent a context-leak. + + eg.Go(func() error { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + if err := idx.LoadLocalShard(ctx, name, implicitTenantActivation); err != nil { + ec.Add(err) + idx.logger.WithFields(logrus.Fields{ + "action": "tenant_activation_lazy_load_shard", + "shard": name, + }).WithError(err).Errorf("loading shard %q failed", name) + } + return nil + }) + } + + eg.Wait() + } + + if len(cold) > 0 { + m.logger.WithField("action", "tenants_to_cold").Debug(cold) + idx.shardTransferMutex.RLock() + defer idx.shardTransferMutex.RUnlock() + + eg := enterrors.NewErrorGroupWrapper(m.logger) + eg.SetLimit(_NUMCPU * 2) + + for _, name := range cold { + name := name + + eg.Go(func() error { + idx.closeLock.RLock() + defer idx.closeLock.RUnlock() + + if idx.closed { + m.logger.WithField("index", idx.ID()).Debug("index is already shut down or dropped") + ec.Add(errAlreadyShutdown) + return nil + } + + idx.shardCreateLocks.Lock(name) + defer idx.shardCreateLocks.Unlock(name) + + shard, ok := idx.shards.LoadAndDelete(name) + if !ok { + m.logger.WithField("shard", name).Debug("already shut down or dropped") + return nil // shard already does not exist or inactive + } + + m.logger.WithField("shard", name).Debug("starting shutdown") + + if err := shard.Shutdown(ctx); err != nil { 
+ if errors.Is(err, errAlreadyShutdown) { + m.logger.WithField("shard", shard.Name()).Debug("already shut down or dropped") + } else { + idx.logger. + WithField("action", "shard_shutdown"). + WithField("shard", shard.ID()). + Error(err) + ec.Add(err) + } + } + + return nil + }) + } + + eg.Wait() + } + + if len(frozen) > 0 { + m.logger.WithField("action", "tenants_to_frozen").Debug(frozen) + m.frozen(ctx, idx, frozen, ec) + } + + if len(freezing) > 0 { + m.logger.WithField("action", "tenants_to_freezing").Debug(freezing) + m.freeze(ctx, idx, class.Class, freezing, ec) + } + + if len(unfreezing) > 0 { + m.logger.WithField("action", "tenants_to_unfreezing").Debug(unfreezing) + m.unfreeze(ctx, idx, class.Class, unfreezing, ec) + } + + return ec.ToError() +} + +// DeleteTenants deletes tenant from the database and data from the disk, no matter the current status of the tenant +func (m *Migrator) DeleteTenants(ctx context.Context, class string, tenants []*models.Tenant) error { + indexID := indexID(schema.ClassName(class)) + + m.classLocks.Lock(indexID) + defer m.classLocks.Unlock(indexID) + + idx := m.db.GetIndex(schema.ClassName(class)) + if idx == nil { + return nil + } + + // Collect tenant names and frozen tenant names + allTenantNames := make([]string, 0, len(tenants)) + frozenTenants := make([]string, 0, len(tenants)) + + for _, tenant := range tenants { + allTenantNames = append(allTenantNames, tenant.Name) + if tenant.ActivityStatus == models.TenantActivityStatusFROZEN || + tenant.ActivityStatus == models.TenantActivityStatusFREEZING { + frozenTenants = append(frozenTenants, tenant.Name) + } + } + + if err := idx.dropShards(allTenantNames); err != nil { + return err + } + + if m.cloud != nil && len(frozenTenants) > 0 { + if err := idx.dropCloudShards(ctx, m.cloud, frozenTenants, m.nodeId); err != nil { + return fmt.Errorf("drop tenant shards %v during update index: %w", frozenTenants, err) + } + } + + return nil +} + +func (m *Migrator) 
UpdateVectorIndexConfig(ctx context.Context, + className string, updated schemaConfig.VectorIndexConfig, +) error { + indexID := indexID(schema.ClassName(className)) + + m.classLocks.Lock(indexID) + defer m.classLocks.Unlock(indexID) + + idx := m.db.GetIndex(schema.ClassName(className)) + if idx == nil { + return errors.Errorf("cannot update vector index config of non-existing index for %s", className) + } + + return idx.updateVectorIndexConfig(ctx, updated) +} + +func (m *Migrator) UpdateVectorIndexConfigs(ctx context.Context, + className string, updated map[string]schemaConfig.VectorIndexConfig, +) error { + indexID := indexID(schema.ClassName(className)) + + m.classLocks.Lock(indexID) + defer m.classLocks.Unlock(indexID) + + idx := m.db.GetIndex(schema.ClassName(className)) + if idx == nil { + return errors.Errorf("cannot update vector config of non-existing index for %s", className) + } + + return idx.updateVectorIndexConfigs(ctx, updated) +} + +func (m *Migrator) ValidateVectorIndexConfigUpdate( + old, updated schemaConfig.VectorIndexConfig, +) error { + // hnsw is the only supported vector index type at the moment, so no need + // to check, we can always use that an hnsw-specific validation should be + // used for now. 
+ switch old.IndexType() { + case vectorindex.VectorIndexTypeHNSW: + return hnsw.ValidateUserConfigUpdate(old, updated) + case vectorindex.VectorIndexTypeFLAT: + return flat.ValidateUserConfigUpdate(old, updated) + case vectorindex.VectorIndexTypeDYNAMIC: + return dynamic.ValidateUserConfigUpdate(old, updated) + } + return fmt.Errorf("invalid index type: %s", old.IndexType()) +} + +func (m *Migrator) ValidateVectorIndexConfigsUpdate(old, updated map[string]schemaConfig.VectorIndexConfig, +) error { + for vecName := range old { + if err := m.ValidateVectorIndexConfigUpdate(old[vecName], updated[vecName]); err != nil { + return fmt.Errorf("vector %q", vecName) + } + } + return nil +} + +func (m *Migrator) ValidateInvertedIndexConfigUpdate(old, updated *models.InvertedIndexConfig, +) error { + return inverted.ValidateUserConfigUpdate(old, updated) +} + +func (m *Migrator) UpdateInvertedIndexConfig(ctx context.Context, className string, + updated *models.InvertedIndexConfig, +) error { + indexID := indexID(schema.ClassName(className)) + + m.classLocks.Lock(indexID) + defer m.classLocks.Unlock(indexID) + + idx := m.db.GetIndex(schema.ClassName(className)) + if idx == nil { + return errors.Errorf("cannot update inverted index config of non-existing index for %s", className) + } + + conf := inverted.ConfigFromModel(updated) + + return idx.updateInvertedIndexConfig(ctx, conf) +} + +func (m *Migrator) UpdateReplicationConfig(ctx context.Context, className string, cfg *models.ReplicationConfig) error { + if cfg == nil { + return nil + } + + indexID := indexID(schema.ClassName(className)) + + m.classLocks.Lock(indexID) + defer m.classLocks.Unlock(indexID) + + idx := m.db.GetIndex(schema.ClassName(className)) + if idx == nil { + return errors.Errorf("cannot update replication factor of non-existing index for %s", className) + } + + if err := idx.updateReplicationConfig(ctx, cfg); err != nil { + return fmt.Errorf("update replication config for class %q: %w", className, err) + } 
+ + return nil +} + +func (m *Migrator) RecalculateVectorDimensions(ctx context.Context) error { + count := 0 + m.logger. + WithField("action", "reindex"). + Info("Reindexing dimensions, this may take a while") + + m.db.indexLock.Lock() + defer m.db.indexLock.Unlock() + + // Iterate over all indexes + for _, index := range m.db.indices { + err := index.ForEachShard(func(name string, shard ShardLike) error { + return shard.resetDimensionsLSM(ctx) + }) + if err != nil { + m.logger.WithField("action", "reindex").WithError(err).Warn("could not reset vector dimensions") + return err + } + + // Iterate over all shards + err = index.IterateObjects(ctx, func(index *Index, shard ShardLike, object *storobj.Object) error { + count = count + 1 + return object.IterateThroughVectorDimensions(func(targetVector string, dims int) error { + if err = shard.extendDimensionTrackerLSM(dims, object.DocID, targetVector); err != nil { + return fmt.Errorf("failed to extend dimension tracker for vector %q: %w", targetVector, err) + } + return nil + }) + }) + if err != nil { + m.logger.WithField("action", "reindex").WithError(err).Warn("could not extend vector dimensions") + return err + } + } + f := func() { + for { + m.logger. + WithField("action", "reindex"). + Warnf("Reindexed %v objects. Reindexing dimensions complete. Please remove environment variable REINDEX_VECTOR_DIMENSIONS_AT_STARTUP before next startup", count) + time.Sleep(5 * time.Minute) + } + } + enterrors.GoWrapper(f, m.logger) + + return nil +} + +func (m *Migrator) RecountProperties(ctx context.Context) error { + count := 0 + m.logger. + WithField("action", "recount"). 
+ Info("Recounting properties, this may take a while") + + m.db.indexLock.Lock() + defer m.db.indexLock.Unlock() + // Iterate over all indexes + for _, index := range m.db.indices { + + // Clear the shards before counting + err := index.IterateShards(ctx, func(index *Index, shard ShardLike) error { + shard.GetPropertyLengthTracker().Clear() + return nil + }) + if err != nil { + m.logger.WithField("error", err).Error("could not clear prop lengths") + } + + // Iterate over all shards + err = index.IterateObjects(ctx, func(index *Index, shard ShardLike, object *storobj.Object) error { + count = count + 1 + props, _, err := shard.AnalyzeObject(object) + if err != nil { + m.logger.WithField("error", err).Error("could not analyze object") + return nil + } + + if err := shard.SetPropertyLengths(props); err != nil { + m.logger.WithField("error", err).Error("could not add prop lengths") + return nil + } + + shard.GetPropertyLengthTracker().Flush() + + return nil + }) + if err != nil { + m.logger.WithField("error", err).Error("could not iterate over objects") + } + + // Flush the GetPropertyLengthTracker() to disk + err = index.IterateShards(ctx, func(index *Index, shard ShardLike) error { + return shard.GetPropertyLengthTracker().Flush() + }) + if err != nil { + m.logger.WithField("error", err).Error("could not flush prop lengths") + } + + } + f := func() { + for { + m.logger. + WithField("action", "recount"). + Warnf("Recounted %v objects. Recounting properties complete. 
Please remove environment variable RECOUNT_PROPERTIES_AT_STARTUP before next startup", count) + time.Sleep(5 * time.Minute) + } + } + enterrors.GoWrapper(f, m.logger) + + return nil +} + +func (m *Migrator) InvertedReindex(ctx context.Context, taskNamesWithArgs map[string]any) error { + var errs errorcompounder.ErrorCompounder + errs.Add(m.doInvertedReindex(ctx, taskNamesWithArgs)) + errs.Add(m.doInvertedIndexMissingTextFilterable(ctx, taskNamesWithArgs)) + return errs.ToError() +} + +func (m *Migrator) doInvertedReindex(ctx context.Context, taskNamesWithArgs map[string]any) error { + tasks := map[string]ShardInvertedReindexTask{} + for name, args := range taskNamesWithArgs { + switch name { + case "ShardInvertedReindexTaskSetToRoaringSet": + tasks[name] = &ShardInvertedReindexTaskSetToRoaringSet{} + case "ShardInvertedReindexTask_SpecifiedIndex": + if args == nil { + return fmt.Errorf("no args given for %q reindex task", name) + } + argsMap, ok := args.(map[string][]string) + if !ok { + return fmt.Errorf("invalid args given for %q reindex task", name) + } + classNamesWithPropertyNames := map[string]map[string]struct{}{} + for class, props := range argsMap { + classNamesWithPropertyNames[class] = map[string]struct{}{} + for _, prop := range props { + classNamesWithPropertyNames[class][prop] = struct{}{} + } + } + tasks[name] = &ShardInvertedReindexTask_SpecifiedIndex{ + classNamesWithPropertyNames: classNamesWithPropertyNames, + } + } + } + + if len(tasks) == 0 { + return nil + } + + eg := enterrors.NewErrorGroupWrapper(m.logger) + eg.SetLimit(_NUMCPU) + for _, index := range m.db.indices { + index.ForEachShard(func(name string, shard ShardLike) error { + eg.Go(func() error { + reindexer := NewShardInvertedReindexer(shard, m.logger) + for taskName, task := range tasks { + reindexer.AddTask(task) + m.logInvertedReindexShard(shard). + WithField("task", taskName). 
+ Info("About to start inverted reindexing, this may take a while") + } + if err := reindexer.Do(ctx); err != nil { + m.logInvertedReindexShard(shard). + WithError(err). + Error("failed reindexing") + return errors.Wrapf(err, "failed reindexing shard '%s'", shard.ID()) + } + m.logInvertedReindexShard(shard). + Info("Finished inverted reindexing") + return nil + }, name) + return nil + }) + } + return eg.Wait() +} + +func (m *Migrator) doInvertedIndexMissingTextFilterable(ctx context.Context, taskNamesWithArgs map[string]any) error { + if _, ok := taskNamesWithArgs["ShardInvertedReindexTaskMissingTextFilterable"]; !ok { + return nil + } + + task := newShardInvertedReindexTaskMissingTextFilterable(m) + if err := task.init(); err != nil { + m.logMissingFilterable().WithError(err).Error("failed init missing text filterable task") + return errors.Wrap(err, "failed init missing text filterable task") + } + + if len(task.migrationState.MissingFilterableClass2Props) == 0 { + m.logMissingFilterable().Info("no classes to create filterable index, skipping") + return nil + } + + m.logMissingFilterable().Info("staring missing text filterable task") + + eg := enterrors.NewErrorGroupWrapper(m.logger) + eg.SetLimit(_NUMCPU * 2) + for _, index := range m.db.indices { + index := index + className := index.Config.ClassName.String() + + if _, ok := task.migrationState.MissingFilterableClass2Props[className]; !ok { + continue + } + + eg.Go(func() error { + errgrpShards := enterrors.NewErrorGroupWrapper(m.logger) + index.ForEachShard(func(_ string, shard ShardLike) error { + errgrpShards.Go(func() error { + m.logMissingFilterableShard(shard). + Info("starting filterable indexing on shard, this may take a while") + + reindexer := NewShardInvertedReindexer(shard, m.logger) + reindexer.AddTask(task) + + if err := reindexer.Do(ctx); err != nil { + m.logMissingFilterableShard(shard). + WithError(err). 
+ Error("failed filterable indexing on shard") + return errors.Wrapf(err, "failed filterable indexing for shard '%s' of index '%s'", + shard.ID(), index.ID()) + } + m.logMissingFilterableShard(shard). + Info("finished filterable indexing on shard") + return nil + }, shard.ID()) + return nil + }) + + if err := errgrpShards.Wait(); err != nil { + m.logMissingFilterableIndex(index). + WithError(err). + Error("failed filterable indexing on index") + return errors.Wrapf(err, "failed filterable indexing of index '%s'", index.ID()) + } + + if err := task.updateMigrationStateAndSave(className); err != nil { + m.logMissingFilterableIndex(index). + WithError(err). + Error("failed updating migration state file") + return errors.Wrapf(err, "failed updating migration state file for class '%s'", className) + } + + m.logMissingFilterableIndex(index). + Info("finished filterable indexing on index") + + return nil + }, index.ID()) + } + + if err := eg.Wait(); err != nil { + m.logMissingFilterable(). + WithError(err). + Error("failed missing text filterable task") + return errors.Wrap(err, "failed missing text filterable task") + } + + m.logMissingFilterable().Info("finished missing text filterable task") + return nil +} + +func (m *Migrator) logInvertedReindex() *logrus.Entry { + return m.logger.WithField("action", "inverted_reindex") +} + +func (m *Migrator) logInvertedReindexShard(shard ShardLike) *logrus.Entry { + return m.logInvertedReindex(). + WithField("index", shard.Index().ID()). 
+ WithField("shard", shard.ID()) +} + +func (m *Migrator) logMissingFilterable() *logrus.Entry { + return m.logger.WithField("action", "ii_missing_text_filterable") +} + +func (m *Migrator) logMissingFilterableIndex(index *Index) *logrus.Entry { + return m.logMissingFilterable().WithField("index", index.ID()) +} + +func (m *Migrator) logMissingFilterableShard(shard ShardLike) *logrus.Entry { + return m.logMissingFilterableIndex(shard.Index()).WithField("shard", shard.ID()) +} + +// As of v1.19 property's IndexInverted setting is replaced with IndexFilterable +// and IndexSearchable +// Filterable buckets use roaring set strategy and searchable ones use map strategy +// (therefore are applicable just for text/text[]) +// Since both type of buckets can coexist for text/text[] props they need to be +// distinguished by their name: searchable bucket has "searchable" suffix. +// Up until v1.19 default text/text[]/string/string[] (string/string[] deprecated since v1.19) +// strategy for buckets was map, migrating from pre v1.19 to v1.19 needs to properly +// handle existing text/text[] buckets of map strategy having filterable bucket name. +// +// Enabled InvertedIndex translates in v1.19 to both InvertedFilterable and InvertedSearchable +// enabled, but since only searchable bucket exist (with filterable name), it has to be renamed +// to searchable bucket. +// Though IndexFilterable setting is enabled filterable index does not exists, +// therefore shards are switched into fallback mode, to use searchable buckets instead of +// filterable ones whenever filtered are expected. +// Fallback mode effectively sets IndexFilterable to false, although it stays enabled according +// to schema. +// +// If filterable indexes will be created (that is up to user to decide whether missing indexes +// should be created later on), shards will not be working in fallback mode, and actual filterable index +// will be used when needed. 
+func (m *Migrator) AdjustFilterablePropSettings(ctx context.Context) error { + f2sm := newFilterableToSearchableMigrator(m) + if err := f2sm.migrate(ctx); err != nil { + return err + } + return f2sm.switchShardsToFallbackMode(ctx) +} + +func (m *Migrator) WaitForStartup(ctx context.Context) error { + return m.db.WaitForStartup(ctx) +} + +// Shutdown no-op if db was never loaded +func (m *Migrator) Shutdown(ctx context.Context) error { + if !m.db.StartupComplete() { + return nil + } + m.logger.Info("closing loaded database ...") + return m.db.Shutdown(ctx) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/migrator_shard_status_ops.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/migrator_shard_status_ops.go new file mode 100644 index 0000000000000000000000000000000000000000..f43f9f37470f34caddc02d8af98af1da509b57c4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/migrator_shard_status_ops.go @@ -0,0 +1,321 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "os" + "strings" + "sync" + + "github.com/sirupsen/logrus" + + command "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/entities/errorcompounder" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" +) + +func (m *Migrator) frozen(ctx context.Context, idx *Index, frozen []string, ec *errorcompounder.SafeErrorCompounder) { + if m.cluster == nil { + ec.Add(fmt.Errorf("no cluster exists in the migrator")) + return + } + + idx.shardTransferMutex.RLock() + defer idx.shardTransferMutex.RUnlock() + + eg := enterrors.NewErrorGroupWrapper(m.logger) + eg.SetLimit(_NUMCPU * 2) + + for _, name := range frozen { + eg.Go(func() error { + idx.shardCreateLocks.Lock(name) + defer idx.shardCreateLocks.Unlock(name) + + shard, ok := idx.shards.LoadAndDelete(name) + if !ok { + // shard already does not exist or inactive, so remove local files if exists + // this pass will happen if the shard was COLD for example + if err := os.RemoveAll(fmt.Sprintf("%s/%s", idx.path(), name)); err != nil { + err = fmt.Errorf("attempt to delete local fs for shard %s: %w", name, err) + ec.Add(err) + return err + } + return nil + } + + if err := shard.drop(); err != nil { + ec.Add(err) + } + return nil + }) + } + eg.Wait() +} + +func (m *Migrator) freeze(ctx context.Context, idx *Index, class string, freeze []string, ec *errorcompounder.SafeErrorCompounder) { + if m.cloud == nil { + ec.Add(fmt.Errorf("offload to cloud module is not enabled")) + return + } + + if m.cluster == nil { + ec.Add(fmt.Errorf("no cluster exists in the migrator")) + return + } + + idx.shardTransferMutex.RLock() + defer idx.shardTransferMutex.RUnlock() + + eg := enterrors.NewErrorGroupWrapper(m.logger) + eg.SetLimit(_NUMCPU * 2) + + cmd := command.TenantProcessRequest{ + Node: m.nodeId, + Action: command.TenantProcessRequest_ACTION_FREEZING, + TenantsProcesses: 
make([]*command.TenantsProcess, len(freeze)), + } + + for uidx, name := range freeze { + name := name + uidx := uidx + eg.Go(func() error { + originalStatus := models.TenantActivityStatusHOT + shard, release, err := idx.getOrInitShard(ctx, name) + if err != nil { + m.logger.WithFields(logrus.Fields{ + "action": "get_local_shard_no_shutdown", + "error": err, + "name": class, + "tenant": name, + }).Error("getLocalShardNoShutdown") + cmd.TenantsProcesses[uidx] = &command.TenantsProcess{ + Tenant: &command.Tenant{ + Name: name, + Status: originalStatus, + }, + Op: command.TenantsProcess_OP_ABORT, + } + ec.Add(err) + return nil + } + + defer release() + + if shard == nil { + // shard already does not exist or inactive + originalStatus = models.TenantActivityStatusCOLD + } + + idx.shardCreateLocks.Lock(name) + defer idx.shardCreateLocks.Unlock(name) + + if shard != nil { + if err := shard.HaltForTransfer(ctx, true, 0); err != nil { + m.logger.WithFields(logrus.Fields{ + "action": "halt_for_transfer", + "error": err, + "name": class, + "tenant": name, + }).Error("HaltForTransfer") + cmd.TenantsProcesses[uidx] = &command.TenantsProcess{ + Tenant: &command.Tenant{ + Name: name, + Status: originalStatus, + }, + Op: command.TenantsProcess_OP_ABORT, + } + ec.Add(err) + return fmt.Errorf("attempt to mark begin offloading: %w", err) + } + } + + if err := m.cloud.Upload(ctx, class, name, m.nodeId); err != nil { + m.logger.WithFields(logrus.Fields{ + "action": "upload_tenant_to_cloud", + "error": err, + "name": class, + "tenant": name, + }).Error("uploading") + + ec.Add(fmt.Errorf("uploading error: %w", err)) + cmd.TenantsProcesses[uidx] = &command.TenantsProcess{ + Tenant: &command.Tenant{ + Name: name, + Status: originalStatus, + }, + Op: command.TenantsProcess_OP_ABORT, + } + } else { + cmd.TenantsProcesses[uidx] = &command.TenantsProcess{ + Tenant: &command.Tenant{ + Name: name, + Status: models.TenantActivityStatusFROZEN, + }, + Op: command.TenantsProcess_OP_DONE, + } + } + + 
return nil + }) + } + eg.Wait() + + if len(cmd.TenantsProcesses) == 0 { + m.logger.WithFields(logrus.Fields{ + "errors": ec.ToError().Error(), + "action": "update_tenants_process", + "name": class, + "process": cmd.TenantsProcesses, + }).Error("empty UpdateTenantsProcess") + return + } + + enterrors.GoWrapper(func() { + if _, err := m.cluster.UpdateTenantsProcess(ctx, class, &cmd); err != nil { + m.logger.WithFields(logrus.Fields{ + "action": "update_tenants_process", + "error": err, + "name": class, + "process": cmd.TenantsProcesses, + }).Error("UpdateTenantsProcess") + ec.Add(fmt.Errorf("UpdateTenantsProcess error: %w", err)) + } + }, idx.logger) +} + +func (m *Migrator) unfreeze(ctx context.Context, idx *Index, class string, unfreeze []string, ec *errorcompounder.SafeErrorCompounder) { + if m.cloud == nil { + ec.Add(fmt.Errorf("offload to cloud module is not enabled")) + return + } + + if m.cluster == nil { + ec.Add(fmt.Errorf("no cluster exists in the migrator")) + return + } + + idx.shardTransferMutex.RLock() + defer idx.shardTransferMutex.RUnlock() + + eg := enterrors.NewErrorGroupWrapper(m.logger) + eg.SetLimit(_NUMCPU * 2) + tenantsToBeDeletedFromCloud := sync.Map{} + cmd := command.TenantProcessRequest{ + Node: m.nodeId, + Action: command.TenantProcessRequest_ACTION_UNFREEZING, + TenantsProcesses: make([]*command.TenantsProcess, len(unfreeze)), + } + + for uidx, name := range unfreeze { + name := name + uidx := uidx + eg.Go(func() error { + // # is a delineator shall come from RAFT and it's away e.g. tenant1#node1 + // to identify which node path in the cloud shall we get the data from. 
+ // it's made because nodeID could be changed on download based on new candidates + // when the tenant is unfrozen + split := strings.Split(name, "#") + if len(split) < 2 { + cmd.TenantsProcesses[uidx] = &command.TenantsProcess{ + Tenant: &command.Tenant{ + Name: name, + }, + Op: command.TenantsProcess_OP_ABORT, + } + err := fmt.Errorf("can't detect the old node name") + ec.Add(err) + return err + } + name := split[0] + nodeID := split[1] + idx.shardCreateLocks.Lock(name) + defer idx.shardCreateLocks.Unlock(name) + + if err := m.cloud.Download(ctx, class, name, nodeID); err != nil { + m.logger.WithFields(logrus.Fields{ + "action": "download_tenant_from_cloud", + "error": err, + "name": class, + "tenant": name, + }).Error("downloading") + ec.Add(fmt.Errorf("downloading error: %w", err)) + // one success will be sufficient for changing the status + // no status provided here it will be detected which status + // requested by RAFT processes + cmd.TenantsProcesses[uidx] = &command.TenantsProcess{ + Tenant: &command.Tenant{ + Name: name, + }, + Op: command.TenantsProcess_OP_ABORT, + } + } else { + cmd.TenantsProcesses[uidx] = &command.TenantsProcess{ + Tenant: &command.Tenant{ + Name: name, + }, + Op: command.TenantsProcess_OP_DONE, + } + tenantsToBeDeletedFromCloud.Store(name, nodeID) + } + + return nil + }) + } + eg.Wait() + + if len(cmd.TenantsProcesses) == 0 { + m.logger.WithFields(logrus.Fields{ + "errors": ec.ToError().Error(), + "action": "update_tenants_process", + "name": class, + "process": cmd.TenantsProcesses, + }).Error("empty UpdateTenantsProcess") + return + } + + enterrors.GoWrapper(func() { + if _, err := m.cluster.UpdateTenantsProcess(ctx, class, &cmd); err != nil { + m.logger.WithFields(logrus.Fields{ + "action": "update_tenants_process", + "error": err, + "name": class, + "process": cmd.TenantsProcesses, + }).Error("UpdateTenantsProcess") + ec.Add(fmt.Errorf("UpdateTenantsProcess error: %w", err)) + return + } + }, idx.logger) + + 
tenantsToBeDeletedFromCloud.Range(func(name, nodeID any) bool { + m.logger.WithFields(logrus.Fields{ + "action": "deleting_tenant_from_cloud", + "name": class, + "node": nodeID, + "currentNode": m.nodeId, + "tenant": name, + }).Debug() + + if err := m.cloud.Delete(ctx, class, name.(string), nodeID.(string)); err != nil { + // we just logging in case of we are not able to delete the cloud + m.logger.WithFields(logrus.Fields{ + "action": "deleting_tenant_from_cloud", + "error": err, + "name": class, + "tenant": name, + }).Error("deleting") + } + return true + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/migrator_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/migrator_test.go new file mode 100644 index 0000000000000000000000000000000000000000..df54148de0f30f4d6121a3aa929fef229c74a090 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/migrator_test.go @@ -0,0 +1,427 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "hash/crc32" + "io" + "slices" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/queue" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storagestate" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/monitoring" + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" +) + +func TestUpdateIndexTenants(t *testing.T) { + tests := []struct { + name string + originalStatus string + incomingStatus string + expectedStatus storagestate.Status + getClass bool + }{ + { + name: "when tenant is marked as COLD in incoming state while being HOT in original index", + originalStatus: models.TenantActivityStatusHOT, + incomingStatus: models.TenantActivityStatusCOLD, + expectedStatus: storagestate.StatusShutdown, + }, + { + name: "when tenant is marked as HOT in incoming state while being COLD in original index", + originalStatus: models.TenantActivityStatusCOLD, + incomingStatus: models.TenantActivityStatusHOT, + expectedStatus: storagestate.StatusReady, + getClass: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockSchemaGetter := schemaUC.NewMockSchemaGetter(t) + mockSchemaGetter.On("NodeName").Return("node1") + + class := &models.Class{ + Class: "TestClass", + InvertedIndexConfig: &models.InvertedIndexConfig{}, + } + if tt.getClass { + mockSchemaGetter.On("ReadOnlyClass", "TestClass").Return(class) + 
} + logger := logrus.New() + scheduler := queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + Workers: 1, + }) + + // Create original index state + originalSS := &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + BelongsToNodes: []string{"node1"}, + Status: tt.originalStatus, + }, + }, + PartitioningEnabled: true, + } + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, originalSS) + }).Maybe() + index, err := NewIndex(context.Background(), IndexConfig{ + ClassName: schema.ClassName("TestClass"), + RootPath: t.TempDir(), + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + hnsw.NewDefaultUserConfig(), nil, nil, mockSchemaGetter, mockSchemaReader, nil, logger, nil, nil, nil, nil, nil, class, nil, scheduler, nil, nil, NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.NoError(t, err) + + shard, err := NewShard(context.Background(), nil, "shard1", index, class, nil, scheduler, nil, + NewShardReindexerV3Noop(), false, roaringset.NewBitmapBufPoolNoop()) + require.NoError(t, err) + + index.shards.Store("shard1", shard) + + migrator := &Migrator{ + db: &DB{ + schemaGetter: mockSchemaGetter, + }, + nodeId: "node1", + } + + // Create incoming state + incomingSS := &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + BelongsToNodes: []string{"node1"}, + Status: tt.incomingStatus, + }, + }, + PartitioningEnabled: true, + } + + err = migrator.updateIndexTenants(context.Background(), index, incomingSS) + require.NoError(t, err) + + mockSchemaGetter.AssertExpectations(t) + + // Verify the shard status + require.Equal(t, tt.expectedStatus, shard.GetStatus()) + }) + } +} + +func 
TestUpdateIndexShards(t *testing.T) { + tests := []struct { + name string + initialShards []string + newShards []string + expectedShards []string + mustLoad bool + lazyLoading bool + }{ + { + name: "add new shard with lazy loading", + initialShards: []string{"shard1", "shard2"}, + newShards: []string{"shard1", "shard2", "shard3"}, + expectedShards: []string{"shard1", "shard2", "shard3"}, + mustLoad: false, + lazyLoading: false, + }, + { + name: "remove shard with lazy loading", + initialShards: []string{"shard1", "shard2", "shard3"}, + newShards: []string{"shard1", "shard3"}, + expectedShards: []string{"shard1", "shard3"}, + mustLoad: false, + lazyLoading: false, + }, + { + name: "keep existing shards with lazy loading", + initialShards: []string{"shard1", "shard3"}, + newShards: []string{"shard1", "shard3"}, + expectedShards: []string{"shard1", "shard3"}, + mustLoad: false, + lazyLoading: false, + }, + { + name: "add new shard with immediate loading", + initialShards: []string{"shard1", "shard2"}, + newShards: []string{"shard1", "shard2", "shard3"}, + expectedShards: []string{"shard1", "shard2", "shard3"}, + mustLoad: true, + lazyLoading: false, + }, + { + name: "remove shard with immediate loading", + initialShards: []string{"shard1", "shard2", "shard3"}, + newShards: []string{"shard1", "shard3"}, + expectedShards: []string{"shard1", "shard3"}, + mustLoad: true, + lazyLoading: false, + }, + { + name: "keep existing shards with immediate loading", + initialShards: []string{"shard1", "shard3"}, + newShards: []string{"shard1", "shard3"}, + expectedShards: []string{"shard1", "shard3"}, + mustLoad: true, + lazyLoading: false, + }, + { + name: "add new shard with lazy loading enabled", + initialShards: []string{"shard1", "shard2"}, + newShards: []string{"shard1", "shard2", "shard3"}, + expectedShards: []string{"shard1", "shard2", "shard3"}, + mustLoad: false, + lazyLoading: true, + }, + { + name: "remove shard with lazy loading enabled", + initialShards: 
[]string{"shard1", "shard2", "shard3"}, + newShards: []string{"shard1", "shard3"}, + expectedShards: []string{"shard1", "shard3"}, + mustLoad: false, + lazyLoading: true, + }, + { + name: "keep existing shards with lazy loading enabled", + initialShards: []string{"shard1", "shard3"}, + newShards: []string{"shard1", "shard3"}, + expectedShards: []string{"shard1", "shard3"}, + mustLoad: false, + lazyLoading: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + logger := logrus.New() + + mockSchemaGetter := schemaUC.NewMockSchemaGetter(t) + mockSchemaGetter.On("NodeName").Return("node1") + + // Create a test class + class := &models.Class{ + Class: "TestClass", + InvertedIndexConfig: &models.InvertedIndexConfig{}, + } + mockSchemaGetter.On("ReadOnlyClass", "TestClass").Return(class).Maybe() + + // Create initial sharding state + initialPhysical := make(map[string]sharding.Physical) + for _, shard := range tt.initialShards { + initialPhysical[shard] = sharding.Physical{ + Name: shard, + BelongsToNodes: []string{"node1"}, + } + } + initialState := &sharding.State{ + Physical: initialPhysical, + } + initialState.SetLocalName("node1") + scheduler := queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + Workers: 1, + }) + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, initialState) + }).Maybe() + // Create index with proper configuration + index, err := NewIndex(ctx, IndexConfig{ + ClassName: schema.ClassName("TestClass"), + RootPath: t.TempDir(), + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + DisableLazyLoadShards: !tt.lazyLoading, // Enable lazy loading when lazyLoading is true + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + hnsw.NewDefaultUserConfig(), 
nil, nil, mockSchemaGetter, mockSchemaReader, nil, logger, nil, nil, nil, nil, nil, class, nil, scheduler, nil, memwatch.NewDummyMonitor(), NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.NoError(t, err) + + // Initialize shards + for _, shardName := range tt.initialShards { + err := index.initLocalShardWithForcedLoading(ctx, class, shardName, tt.mustLoad, false) + require.NoError(t, err) + } + + migrator := &Migrator{ + db: &DB{ + schemaGetter: mockSchemaGetter, + }, + nodeId: "node1", + } + + // Create new sharding state + newPhysical := make(map[string]sharding.Physical) + for _, shard := range tt.newShards { + newPhysical[shard] = sharding.Physical{ + Name: shard, + BelongsToNodes: []string{"node1"}, + } + } + newState := &sharding.State{ + Physical: newPhysical, + } + newState.SetLocalName("node1") + + // Update shards + err = migrator.updateIndexShards(ctx, index, newState) + require.NoError(t, err) + + // Verify expected shards exist and are of the correct type and status + for _, expectedShard := range tt.expectedShards { + shard := index.shards.Load(expectedShard) + require.NotNil(t, shard, "shard %s should exist", expectedShard) + + _, isLazy := shard.(*LazyLoadShard) + if tt.lazyLoading { + // If lazyLoading is true, shard should be a LazyLoadShard + require.True(t, isLazy, "shard %s should be a LazyLoadShard when lazyLoading=true", expectedShard) + status := shard.GetStatus() + require.True(t, status == storagestate.StatusLazyLoading, "shard %s should be in lazy loading state", expectedShard) + } else { + require.False(t, isLazy, "shard %s should be a regular Shard when lazyLoading=false", expectedShard) + require.Equal(t, storagestate.StatusReady, shard.GetStatus(), "shard %s should be ready", expectedShard) + } + } + + // Verify removed shards are dropped + for _, initialShard := range tt.initialShards { + if !slices.Contains(tt.newShards, initialShard) { + shard := index.shards.Load(initialShard) + require.Nil(t, shard, "shard 
%s should be dropped", initialShard) + } + } + + mockSchemaGetter.AssertExpectations(t) + }) + } +} + +func TestListAndGetFilesWithIntegrityChecking(t *testing.T) { + mockSchemaGetter := schemaUC.NewMockSchemaGetter(t) + mockSchemaGetter.On("NodeName").Return("node1") + + class := &models.Class{ + Class: "TestClass", + InvertedIndexConfig: &models.InvertedIndexConfig{}, + } + mockSchemaGetter.On("ReadOnlyClass", "TestClass").Return(class).Maybe() + + mockSchemaGetter.On("ShardOwner", "TestClass", "shard1").Return("node1", nil) + + logger := logrus.New() + scheduler := queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + Workers: 1, + }) + + // Create original index state + originalSS := &sharding.State{ + Physical: map[string]sharding.Physical{ + "shard1": { + Name: "shard1", + BelongsToNodes: []string{"node1"}, + Status: models.TenantActivityStatusHOT, + }, + }, + PartitioningEnabled: true, + } + + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + return readFunc(class, originalSS) + }).Maybe() + index, err := NewIndex(context.Background(), IndexConfig{ + ClassName: schema.ClassName("TestClass"), + RootPath: t.TempDir(), + ReplicationFactor: 1, + ShardLoadLimiter: NewShardLoadLimiter(monitoring.NoopRegisterer, 1), + }, inverted.ConfigFromModel(class.InvertedIndexConfig), + hnsw.NewDefaultUserConfig(), nil, nil, mockSchemaGetter, mockSchemaReader, nil, logger, nil, nil, nil, nil, nil, class, nil, scheduler, nil, nil, NewShardReindexerV3Noop(), roaringset.NewBitmapBufPoolNoop()) + require.NoError(t, err) + + shard, err := NewShard(context.Background(), nil, "shard1", index, class, nil, scheduler, nil, + NewShardReindexerV3Noop(), false, roaringset.NewBitmapBufPoolNoop()) + require.NoError(t, err) + + index.shards.Store("shard1", shard) + + ctx := context.Background() + + err = 
index.IncomingPutObject(ctx, "shard1", &storobj.Object{ + MarshallerVersion: 1, + DocID: 0, + Object: models.Object{ + ID: strfmt.UUID("40d3be3e-2ecc-49c8-b37c-d8983164848b"), + Class: "TestClass", + }, + }, 0) + require.NoError(t, err) + + err = index.IncomingPauseFileActivity(ctx, "shard1") + require.NoError(t, err) + + files, err := index.IncomingListFiles(ctx, "shard1") + require.NoError(t, err) + require.NotEmpty(t, files) + + for i, f := range files { + md, err := index.IncomingGetFileMetadata(ctx, "shard1", f) + require.NoError(t, err) + + // object insertion should not affect file copy process + err = index.IncomingPutObject(ctx, "shard1", &storobj.Object{ + MarshallerVersion: 1, + DocID: uint64(i) + 1, + Object: models.Object{ + ID: strfmt.UUID("40d3be3e-2ecc-49c8-b37c-d8983164848b"), + Class: "TestClass", + }, + }, 0) + require.NoError(t, err) + + r, err := index.IncomingGetFile(ctx, "shard1", f) + require.NoError(t, err) + + h := crc32.NewIEEE() + + _, err = io.Copy(h, r) + require.NoError(t, err) + + require.Equal(t, md.CRC32, h.Sum32()) + } + + err = index.IncomingResumeFileActivity(ctx, "shard1") + require.NoError(t, err) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/mock_index_getter.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/mock_index_getter.go new file mode 100644 index 0000000000000000000000000000000000000000..7f337fafe7babba755c1537f4261af98a3133c36 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/mock_index_getter.go @@ -0,0 +1,94 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package db + +import ( + mock "github.com/stretchr/testify/mock" + schema "github.com/weaviate/weaviate/entities/schema" +) + +// MockIndexGetter is an autogenerated mock type for the IndexGetter type +type MockIndexGetter struct { + mock.Mock +} + +type MockIndexGetter_Expecter struct { + mock *mock.Mock +} + +func (_m *MockIndexGetter) EXPECT() *MockIndexGetter_Expecter { + return &MockIndexGetter_Expecter{mock: &_m.Mock} +} + +// GetIndexLike provides a mock function with given fields: className +func (_m *MockIndexGetter) GetIndexLike(className schema.ClassName) IndexLike { + ret := _m.Called(className) + + if len(ret) == 0 { + panic("no return value specified for GetIndexLike") + } + + var r0 IndexLike + if rf, ok := ret.Get(0).(func(schema.ClassName) IndexLike); ok { + r0 = rf(className) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(IndexLike) + } + } + + return r0 +} + +// MockIndexGetter_GetIndexLike_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetIndexLike' +type MockIndexGetter_GetIndexLike_Call struct { + *mock.Call +} + +// GetIndexLike is a helper method to define mock.On call +// - className schema.ClassName +func (_e *MockIndexGetter_Expecter) GetIndexLike(className interface{}) *MockIndexGetter_GetIndexLike_Call { + return &MockIndexGetter_GetIndexLike_Call{Call: _e.mock.On("GetIndexLike", className)} +} + +func (_c *MockIndexGetter_GetIndexLike_Call) Run(run func(className schema.ClassName)) *MockIndexGetter_GetIndexLike_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(schema.ClassName)) + }) + return _c +} + +func (_c *MockIndexGetter_GetIndexLike_Call) Return(_a0 IndexLike) *MockIndexGetter_GetIndexLike_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockIndexGetter_GetIndexLike_Call) RunAndReturn(run func(schema.ClassName) IndexLike) *MockIndexGetter_GetIndexLike_Call { + _c.Call.Return(run) + return _c +} + +// NewMockIndexGetter creates a new instance of 
MockIndexGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockIndexGetter(t interface { + mock.TestingT + Cleanup(func()) +}) *MockIndexGetter { + mock := &MockIndexGetter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/mock_index_like.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/mock_index_like.go new file mode 100644 index 0000000000000000000000000000000000000000..39f34a0d0811c248572135cba51b45b1d0f8373f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/mock_index_like.go @@ -0,0 +1,208 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package db + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + types "github.com/weaviate/weaviate/cluster/usage/types" +) + +// MockIndexLike is an autogenerated mock type for the IndexLike type +type MockIndexLike struct { + mock.Mock +} + +type MockIndexLike_Expecter struct { + mock *mock.Mock +} + +func (_m *MockIndexLike) EXPECT() *MockIndexLike_Expecter { + return &MockIndexLike_Expecter{mock: &_m.Mock} +} + +// CalculateUnloadedObjectsMetrics provides a mock function with given fields: ctx, tenantName +func (_m *MockIndexLike) CalculateUnloadedObjectsMetrics(ctx context.Context, tenantName string) (types.ObjectUsage, error) { + ret := _m.Called(ctx, tenantName) + + if len(ret) == 0 { + panic("no return value specified for CalculateUnloadedObjectsMetrics") + } + + var r0 types.ObjectUsage + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (types.ObjectUsage, error)); ok { + return rf(ctx, tenantName) + } + if rf, ok := ret.Get(0).(func(context.Context, string) types.ObjectUsage); ok { + r0 = rf(ctx, tenantName) + } else { + r0 = ret.Get(0).(types.ObjectUsage) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, tenantName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockIndexLike_CalculateUnloadedObjectsMetrics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CalculateUnloadedObjectsMetrics' +type MockIndexLike_CalculateUnloadedObjectsMetrics_Call struct { + *mock.Call +} + +// CalculateUnloadedObjectsMetrics is a helper method to define mock.On call +// - ctx context.Context +// - tenantName string +func (_e *MockIndexLike_Expecter) CalculateUnloadedObjectsMetrics(ctx interface{}, tenantName interface{}) *MockIndexLike_CalculateUnloadedObjectsMetrics_Call { + return &MockIndexLike_CalculateUnloadedObjectsMetrics_Call{Call: _e.mock.On("CalculateUnloadedObjectsMetrics", ctx, tenantName)} +} + +func (_c 
*MockIndexLike_CalculateUnloadedObjectsMetrics_Call) Run(run func(ctx context.Context, tenantName string)) *MockIndexLike_CalculateUnloadedObjectsMetrics_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockIndexLike_CalculateUnloadedObjectsMetrics_Call) Return(_a0 types.ObjectUsage, _a1 error) *MockIndexLike_CalculateUnloadedObjectsMetrics_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockIndexLike_CalculateUnloadedObjectsMetrics_Call) RunAndReturn(run func(context.Context, string) (types.ObjectUsage, error)) *MockIndexLike_CalculateUnloadedObjectsMetrics_Call { + _c.Call.Return(run) + return _c +} + +// CalculateUnloadedVectorsMetrics provides a mock function with given fields: ctx, tenantName +func (_m *MockIndexLike) CalculateUnloadedVectorsMetrics(ctx context.Context, tenantName string) (int64, error) { + ret := _m.Called(ctx, tenantName) + + if len(ret) == 0 { + panic("no return value specified for CalculateUnloadedVectorsMetrics") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (int64, error)); ok { + return rf(ctx, tenantName) + } + if rf, ok := ret.Get(0).(func(context.Context, string) int64); ok { + r0 = rf(ctx, tenantName) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, tenantName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockIndexLike_CalculateUnloadedVectorsMetrics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CalculateUnloadedVectorsMetrics' +type MockIndexLike_CalculateUnloadedVectorsMetrics_Call struct { + *mock.Call +} + +// CalculateUnloadedVectorsMetrics is a helper method to define mock.On call +// - ctx context.Context +// - tenantName string +func (_e *MockIndexLike_Expecter) CalculateUnloadedVectorsMetrics(ctx interface{}, tenantName interface{}) 
*MockIndexLike_CalculateUnloadedVectorsMetrics_Call { + return &MockIndexLike_CalculateUnloadedVectorsMetrics_Call{Call: _e.mock.On("CalculateUnloadedVectorsMetrics", ctx, tenantName)} +} + +func (_c *MockIndexLike_CalculateUnloadedVectorsMetrics_Call) Run(run func(ctx context.Context, tenantName string)) *MockIndexLike_CalculateUnloadedVectorsMetrics_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockIndexLike_CalculateUnloadedVectorsMetrics_Call) Return(_a0 int64, _a1 error) *MockIndexLike_CalculateUnloadedVectorsMetrics_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockIndexLike_CalculateUnloadedVectorsMetrics_Call) RunAndReturn(run func(context.Context, string) (int64, error)) *MockIndexLike_CalculateUnloadedVectorsMetrics_Call { + _c.Call.Return(run) + return _c +} + +// ForEachShard provides a mock function with given fields: f +func (_m *MockIndexLike) ForEachShard(f func(string, ShardLike) error) error { + ret := _m.Called(f) + + if len(ret) == 0 { + panic("no return value specified for ForEachShard") + } + + var r0 error + if rf, ok := ret.Get(0).(func(func(string, ShardLike) error) error); ok { + r0 = rf(f) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockIndexLike_ForEachShard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ForEachShard' +type MockIndexLike_ForEachShard_Call struct { + *mock.Call +} + +// ForEachShard is a helper method to define mock.On call +// - f func(string , ShardLike) error +func (_e *MockIndexLike_Expecter) ForEachShard(f interface{}) *MockIndexLike_ForEachShard_Call { + return &MockIndexLike_ForEachShard_Call{Call: _e.mock.On("ForEachShard", f)} +} + +func (_c *MockIndexLike_ForEachShard_Call) Run(run func(f func(string, ShardLike) error)) *MockIndexLike_ForEachShard_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func(string, ShardLike) error)) + }) + 
return _c +} + +func (_c *MockIndexLike_ForEachShard_Call) Return(_a0 error) *MockIndexLike_ForEachShard_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockIndexLike_ForEachShard_Call) RunAndReturn(run func(func(string, ShardLike) error) error) *MockIndexLike_ForEachShard_Call { + _c.Call.Return(run) + return _c +} + +// NewMockIndexLike creates a new instance of MockIndexLike. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockIndexLike(t interface { + mock.TestingT + Cleanup(func()) +}) *MockIndexLike { + mock := &MockIndexLike{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/mock_shard_like.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/mock_shard_like.go new file mode 100644 index 0000000000000000000000000000000000000000..610036941049a3ff7e58de0b0105d59792b41f4d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/mock_shard_like.go @@ -0,0 +1,5007 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package db + +import ( + additional "github.com/weaviate/weaviate/entities/additional" + aggregation "github.com/weaviate/weaviate/entities/aggregation" + + backup "github.com/weaviate/weaviate/entities/backup" + + config "github.com/weaviate/weaviate/entities/schema/config" + + context "context" + + dto "github.com/weaviate/weaviate/entities/dto" + + errors "github.com/weaviate/weaviate/entities/errors" + + file "github.com/weaviate/weaviate/usecases/file" + + filters "github.com/weaviate/weaviate/entities/filters" + + hashtree "github.com/weaviate/weaviate/usecases/replica/hashtree" + + indexcounter "github.com/weaviate/weaviate/adapters/repos/db/indexcounter" + + inverted "github.com/weaviate/weaviate/adapters/repos/db/inverted" + + io "io" + + lsmkv "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + + mock "github.com/stretchr/testify/mock" + + models "github.com/weaviate/weaviate/entities/models" + + modules "github.com/weaviate/weaviate/usecases/modules" + + multi "github.com/weaviate/weaviate/entities/multi" + + objects "github.com/weaviate/weaviate/usecases/objects" + + replica "github.com/weaviate/weaviate/usecases/replica" + + routertypes "github.com/weaviate/weaviate/cluster/router/types" + + schema "github.com/weaviate/weaviate/entities/schema" + + search "github.com/weaviate/weaviate/entities/search" + + searchparams "github.com/weaviate/weaviate/entities/searchparams" + + storagestate "github.com/weaviate/weaviate/entities/storagestate" + + storobj "github.com/weaviate/weaviate/entities/storobj" + + strfmt "github.com/go-openapi/strfmt" + + time "time" + + types "github.com/weaviate/weaviate/cluster/usage/types" +) + +// MockShardLike is an autogenerated mock type for the ShardLike type +type MockShardLike struct { + mock.Mock +} + +type MockShardLike_Expecter struct { + mock *mock.Mock +} + +func (_m *MockShardLike) EXPECT() *MockShardLike_Expecter { + return &MockShardLike_Expecter{mock: &_m.Mock} +} + +// Activity provides a mock function 
with no fields +func (_m *MockShardLike) Activity() (int32, int32) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Activity") + } + + var r0 int32 + var r1 int32 + if rf, ok := ret.Get(0).(func() (int32, int32)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() int32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int32) + } + + if rf, ok := ret.Get(1).(func() int32); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(int32) + } + + return r0, r1 +} + +// MockShardLike_Activity_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Activity' +type MockShardLike_Activity_Call struct { + *mock.Call +} + +// Activity is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) Activity() *MockShardLike_Activity_Call { + return &MockShardLike_Activity_Call{Call: _e.mock.On("Activity")} +} + +func (_c *MockShardLike_Activity_Call) Run(run func()) *MockShardLike_Activity_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_Activity_Call) Return(_a0 int32, _a1 int32) *MockShardLike_Activity_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_Activity_Call) RunAndReturn(run func() (int32, int32)) *MockShardLike_Activity_Call { + _c.Call.Return(run) + return _c +} + +// AddReferencesBatch provides a mock function with given fields: ctx, refs +func (_m *MockShardLike) AddReferencesBatch(ctx context.Context, refs objects.BatchReferences) []error { + ret := _m.Called(ctx, refs) + + if len(ret) == 0 { + panic("no return value specified for AddReferencesBatch") + } + + var r0 []error + if rf, ok := ret.Get(0).(func(context.Context, objects.BatchReferences) []error); ok { + r0 = rf(ctx, refs) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]error) + } + } + + return r0 +} + +// MockShardLike_AddReferencesBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'AddReferencesBatch' +type MockShardLike_AddReferencesBatch_Call struct { + *mock.Call +} + +// AddReferencesBatch is a helper method to define mock.On call +// - ctx context.Context +// - refs objects.BatchReferences +func (_e *MockShardLike_Expecter) AddReferencesBatch(ctx interface{}, refs interface{}) *MockShardLike_AddReferencesBatch_Call { + return &MockShardLike_AddReferencesBatch_Call{Call: _e.mock.On("AddReferencesBatch", ctx, refs)} +} + +func (_c *MockShardLike_AddReferencesBatch_Call) Run(run func(ctx context.Context, refs objects.BatchReferences)) *MockShardLike_AddReferencesBatch_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(objects.BatchReferences)) + }) + return _c +} + +func (_c *MockShardLike_AddReferencesBatch_Call) Return(_a0 []error) *MockShardLike_AddReferencesBatch_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_AddReferencesBatch_Call) RunAndReturn(run func(context.Context, objects.BatchReferences) []error) *MockShardLike_AddReferencesBatch_Call { + _c.Call.Return(run) + return _c +} + +// Aggregate provides a mock function with given fields: ctx, params, _a2 +func (_m *MockShardLike) Aggregate(ctx context.Context, params aggregation.Params, _a2 *modules.Provider) (*aggregation.Result, error) { + ret := _m.Called(ctx, params, _a2) + + if len(ret) == 0 { + panic("no return value specified for Aggregate") + } + + var r0 *aggregation.Result + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, aggregation.Params, *modules.Provider) (*aggregation.Result, error)); ok { + return rf(ctx, params, _a2) + } + if rf, ok := ret.Get(0).(func(context.Context, aggregation.Params, *modules.Provider) *aggregation.Result); ok { + r0 = rf(ctx, params, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*aggregation.Result) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, aggregation.Params, *modules.Provider) error); ok { + r1 = rf(ctx, params, _a2) + } else { + r1 
= ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_Aggregate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Aggregate' +type MockShardLike_Aggregate_Call struct { + *mock.Call +} + +// Aggregate is a helper method to define mock.On call +// - ctx context.Context +// - params aggregation.Params +// - _a2 *modules.Provider +func (_e *MockShardLike_Expecter) Aggregate(ctx interface{}, params interface{}, _a2 interface{}) *MockShardLike_Aggregate_Call { + return &MockShardLike_Aggregate_Call{Call: _e.mock.On("Aggregate", ctx, params, _a2)} +} + +func (_c *MockShardLike_Aggregate_Call) Run(run func(ctx context.Context, params aggregation.Params, _a2 *modules.Provider)) *MockShardLike_Aggregate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(aggregation.Params), args[2].(*modules.Provider)) + }) + return _c +} + +func (_c *MockShardLike_Aggregate_Call) Return(_a0 *aggregation.Result, _a1 error) *MockShardLike_Aggregate_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_Aggregate_Call) RunAndReturn(run func(context.Context, aggregation.Params, *modules.Provider) (*aggregation.Result, error)) *MockShardLike_Aggregate_Call { + _c.Call.Return(run) + return _c +} + +// AnalyzeObject provides a mock function with given fields: _a0 +func (_m *MockShardLike) AnalyzeObject(_a0 *storobj.Object) ([]inverted.Property, []inverted.NilProperty, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for AnalyzeObject") + } + + var r0 []inverted.Property + var r1 []inverted.NilProperty + var r2 error + if rf, ok := ret.Get(0).(func(*storobj.Object) ([]inverted.Property, []inverted.NilProperty, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(*storobj.Object) []inverted.Property); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]inverted.Property) + } + } + + if rf, ok := 
ret.Get(1).(func(*storobj.Object) []inverted.NilProperty); ok { + r1 = rf(_a0) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]inverted.NilProperty) + } + } + + if rf, ok := ret.Get(2).(func(*storobj.Object) error); ok { + r2 = rf(_a0) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MockShardLike_AnalyzeObject_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AnalyzeObject' +type MockShardLike_AnalyzeObject_Call struct { + *mock.Call +} + +// AnalyzeObject is a helper method to define mock.On call +// - _a0 *storobj.Object +func (_e *MockShardLike_Expecter) AnalyzeObject(_a0 interface{}) *MockShardLike_AnalyzeObject_Call { + return &MockShardLike_AnalyzeObject_Call{Call: _e.mock.On("AnalyzeObject", _a0)} +} + +func (_c *MockShardLike_AnalyzeObject_Call) Run(run func(_a0 *storobj.Object)) *MockShardLike_AnalyzeObject_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*storobj.Object)) + }) + return _c +} + +func (_c *MockShardLike_AnalyzeObject_Call) Return(_a0 []inverted.Property, _a1 []inverted.NilProperty, _a2 error) *MockShardLike_AnalyzeObject_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *MockShardLike_AnalyzeObject_Call) RunAndReturn(run func(*storobj.Object) ([]inverted.Property, []inverted.NilProperty, error)) *MockShardLike_AnalyzeObject_Call { + _c.Call.Return(run) + return _c +} + +// ConvertQueue provides a mock function with given fields: targetVector +func (_m *MockShardLike) ConvertQueue(targetVector string) error { + ret := _m.Called(targetVector) + + if len(ret) == 0 { + panic("no return value specified for ConvertQueue") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(targetVector) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_ConvertQueue_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ConvertQueue' +type MockShardLike_ConvertQueue_Call 
struct { + *mock.Call +} + +// ConvertQueue is a helper method to define mock.On call +// - targetVector string +func (_e *MockShardLike_Expecter) ConvertQueue(targetVector interface{}) *MockShardLike_ConvertQueue_Call { + return &MockShardLike_ConvertQueue_Call{Call: _e.mock.On("ConvertQueue", targetVector)} +} + +func (_c *MockShardLike_ConvertQueue_Call) Run(run func(targetVector string)) *MockShardLike_ConvertQueue_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockShardLike_ConvertQueue_Call) Return(_a0 error) *MockShardLike_ConvertQueue_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_ConvertQueue_Call) RunAndReturn(run func(string) error) *MockShardLike_ConvertQueue_Call { + _c.Call.Return(run) + return _c +} + +// Counter provides a mock function with no fields +func (_m *MockShardLike) Counter() *indexcounter.Counter { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Counter") + } + + var r0 *indexcounter.Counter + if rf, ok := ret.Get(0).(func() *indexcounter.Counter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*indexcounter.Counter) + } + } + + return r0 +} + +// MockShardLike_Counter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Counter' +type MockShardLike_Counter_Call struct { + *mock.Call +} + +// Counter is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) Counter() *MockShardLike_Counter_Call { + return &MockShardLike_Counter_Call{Call: _e.mock.On("Counter")} +} + +func (_c *MockShardLike_Counter_Call) Run(run func()) *MockShardLike_Counter_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_Counter_Call) Return(_a0 *indexcounter.Counter) *MockShardLike_Counter_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_Counter_Call) RunAndReturn(run func() 
*indexcounter.Counter) *MockShardLike_Counter_Call { + _c.Call.Return(run) + return _c +} + +// DebugResetVectorIndex provides a mock function with given fields: ctx, targetVector +func (_m *MockShardLike) DebugResetVectorIndex(ctx context.Context, targetVector string) error { + ret := _m.Called(ctx, targetVector) + + if len(ret) == 0 { + panic("no return value specified for DebugResetVectorIndex") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, targetVector) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_DebugResetVectorIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DebugResetVectorIndex' +type MockShardLike_DebugResetVectorIndex_Call struct { + *mock.Call +} + +// DebugResetVectorIndex is a helper method to define mock.On call +// - ctx context.Context +// - targetVector string +func (_e *MockShardLike_Expecter) DebugResetVectorIndex(ctx interface{}, targetVector interface{}) *MockShardLike_DebugResetVectorIndex_Call { + return &MockShardLike_DebugResetVectorIndex_Call{Call: _e.mock.On("DebugResetVectorIndex", ctx, targetVector)} +} + +func (_c *MockShardLike_DebugResetVectorIndex_Call) Run(run func(ctx context.Context, targetVector string)) *MockShardLike_DebugResetVectorIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockShardLike_DebugResetVectorIndex_Call) Return(_a0 error) *MockShardLike_DebugResetVectorIndex_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_DebugResetVectorIndex_Call) RunAndReturn(run func(context.Context, string) error) *MockShardLike_DebugResetVectorIndex_Call { + _c.Call.Return(run) + return _c +} + +// DeleteObject provides a mock function with given fields: ctx, id, deletionTime +func (_m *MockShardLike) DeleteObject(ctx context.Context, id strfmt.UUID, deletionTime time.Time) error { + ret := 
_m.Called(ctx, id, deletionTime) + + if len(ret) == 0 { + panic("no return value specified for DeleteObject") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID, time.Time) error); ok { + r0 = rf(ctx, id, deletionTime) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_DeleteObject_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteObject' +type MockShardLike_DeleteObject_Call struct { + *mock.Call +} + +// DeleteObject is a helper method to define mock.On call +// - ctx context.Context +// - id strfmt.UUID +// - deletionTime time.Time +func (_e *MockShardLike_Expecter) DeleteObject(ctx interface{}, id interface{}, deletionTime interface{}) *MockShardLike_DeleteObject_Call { + return &MockShardLike_DeleteObject_Call{Call: _e.mock.On("DeleteObject", ctx, id, deletionTime)} +} + +func (_c *MockShardLike_DeleteObject_Call) Run(run func(ctx context.Context, id strfmt.UUID, deletionTime time.Time)) *MockShardLike_DeleteObject_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(strfmt.UUID), args[2].(time.Time)) + }) + return _c +} + +func (_c *MockShardLike_DeleteObject_Call) Return(_a0 error) *MockShardLike_DeleteObject_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_DeleteObject_Call) RunAndReturn(run func(context.Context, strfmt.UUID, time.Time) error) *MockShardLike_DeleteObject_Call { + _c.Call.Return(run) + return _c +} + +// DeleteObjectBatch provides a mock function with given fields: ctx, ids, deletionTime, dryRun +func (_m *MockShardLike) DeleteObjectBatch(ctx context.Context, ids []strfmt.UUID, deletionTime time.Time, dryRun bool) objects.BatchSimpleObjects { + ret := _m.Called(ctx, ids, deletionTime, dryRun) + + if len(ret) == 0 { + panic("no return value specified for DeleteObjectBatch") + } + + var r0 objects.BatchSimpleObjects + if rf, ok := ret.Get(0).(func(context.Context, []strfmt.UUID, 
time.Time, bool) objects.BatchSimpleObjects); ok { + r0 = rf(ctx, ids, deletionTime, dryRun) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(objects.BatchSimpleObjects) + } + } + + return r0 +} + +// MockShardLike_DeleteObjectBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteObjectBatch' +type MockShardLike_DeleteObjectBatch_Call struct { + *mock.Call +} + +// DeleteObjectBatch is a helper method to define mock.On call +// - ctx context.Context +// - ids []strfmt.UUID +// - deletionTime time.Time +// - dryRun bool +func (_e *MockShardLike_Expecter) DeleteObjectBatch(ctx interface{}, ids interface{}, deletionTime interface{}, dryRun interface{}) *MockShardLike_DeleteObjectBatch_Call { + return &MockShardLike_DeleteObjectBatch_Call{Call: _e.mock.On("DeleteObjectBatch", ctx, ids, deletionTime, dryRun)} +} + +func (_c *MockShardLike_DeleteObjectBatch_Call) Run(run func(ctx context.Context, ids []strfmt.UUID, deletionTime time.Time, dryRun bool)) *MockShardLike_DeleteObjectBatch_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]strfmt.UUID), args[2].(time.Time), args[3].(bool)) + }) + return _c +} + +func (_c *MockShardLike_DeleteObjectBatch_Call) Return(_a0 objects.BatchSimpleObjects) *MockShardLike_DeleteObjectBatch_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_DeleteObjectBatch_Call) RunAndReturn(run func(context.Context, []strfmt.UUID, time.Time, bool) objects.BatchSimpleObjects) *MockShardLike_DeleteObjectBatch_Call { + _c.Call.Return(run) + return _c +} + +// Dimensions provides a mock function with given fields: ctx, targetVector +func (_m *MockShardLike) Dimensions(ctx context.Context, targetVector string) (int, error) { + ret := _m.Called(ctx, targetVector) + + if len(ret) == 0 { + panic("no return value specified for Dimensions") + } + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (int, error)); 
ok { + return rf(ctx, targetVector) + } + if rf, ok := ret.Get(0).(func(context.Context, string) int); ok { + r0 = rf(ctx, targetVector) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, targetVector) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_Dimensions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Dimensions' +type MockShardLike_Dimensions_Call struct { + *mock.Call +} + +// Dimensions is a helper method to define mock.On call +// - ctx context.Context +// - targetVector string +func (_e *MockShardLike_Expecter) Dimensions(ctx interface{}, targetVector interface{}) *MockShardLike_Dimensions_Call { + return &MockShardLike_Dimensions_Call{Call: _e.mock.On("Dimensions", ctx, targetVector)} +} + +func (_c *MockShardLike_Dimensions_Call) Run(run func(ctx context.Context, targetVector string)) *MockShardLike_Dimensions_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockShardLike_Dimensions_Call) Return(_a0 int, _a1 error) *MockShardLike_Dimensions_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_Dimensions_Call) RunAndReturn(run func(context.Context, string) (int, error)) *MockShardLike_Dimensions_Call { + _c.Call.Return(run) + return _c +} + +// DimensionsUsage provides a mock function with given fields: ctx, targetVector +func (_m *MockShardLike) DimensionsUsage(ctx context.Context, targetVector string) (types.Dimensionality, error) { + ret := _m.Called(ctx, targetVector) + + if len(ret) == 0 { + panic("no return value specified for DimensionsUsage") + } + + var r0 types.Dimensionality + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (types.Dimensionality, error)); ok { + return rf(ctx, targetVector) + } + if rf, ok := ret.Get(0).(func(context.Context, string) 
types.Dimensionality); ok { + r0 = rf(ctx, targetVector) + } else { + r0 = ret.Get(0).(types.Dimensionality) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, targetVector) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_DimensionsUsage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DimensionsUsage' +type MockShardLike_DimensionsUsage_Call struct { + *mock.Call +} + +// DimensionsUsage is a helper method to define mock.On call +// - ctx context.Context +// - targetVector string +func (_e *MockShardLike_Expecter) DimensionsUsage(ctx interface{}, targetVector interface{}) *MockShardLike_DimensionsUsage_Call { + return &MockShardLike_DimensionsUsage_Call{Call: _e.mock.On("DimensionsUsage", ctx, targetVector)} +} + +func (_c *MockShardLike_DimensionsUsage_Call) Run(run func(ctx context.Context, targetVector string)) *MockShardLike_DimensionsUsage_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockShardLike_DimensionsUsage_Call) Return(_a0 types.Dimensionality, _a1 error) *MockShardLike_DimensionsUsage_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_DimensionsUsage_Call) RunAndReturn(run func(context.Context, string) (types.Dimensionality, error)) *MockShardLike_DimensionsUsage_Call { + _c.Call.Return(run) + return _c +} + +// Exists provides a mock function with given fields: ctx, id +func (_m *MockShardLike) Exists(ctx context.Context, id strfmt.UUID) (bool, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for Exists") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID) (bool, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID) bool); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(bool) + } + + if 
rf, ok := ret.Get(1).(func(context.Context, strfmt.UUID) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_Exists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Exists' +type MockShardLike_Exists_Call struct { + *mock.Call +} + +// Exists is a helper method to define mock.On call +// - ctx context.Context +// - id strfmt.UUID +func (_e *MockShardLike_Expecter) Exists(ctx interface{}, id interface{}) *MockShardLike_Exists_Call { + return &MockShardLike_Exists_Call{Call: _e.mock.On("Exists", ctx, id)} +} + +func (_c *MockShardLike_Exists_Call) Run(run func(ctx context.Context, id strfmt.UUID)) *MockShardLike_Exists_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(strfmt.UUID)) + }) + return _c +} + +func (_c *MockShardLike_Exists_Call) Return(_a0 bool, _a1 error) *MockShardLike_Exists_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_Exists_Call) RunAndReturn(run func(context.Context, strfmt.UUID) (bool, error)) *MockShardLike_Exists_Call { + _c.Call.Return(run) + return _c +} + +// FillQueue provides a mock function with given fields: targetVector, from +func (_m *MockShardLike) FillQueue(targetVector string, from uint64) error { + ret := _m.Called(targetVector, from) + + if len(ret) == 0 { + panic("no return value specified for FillQueue") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, uint64) error); ok { + r0 = rf(targetVector, from) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_FillQueue_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FillQueue' +type MockShardLike_FillQueue_Call struct { + *mock.Call +} + +// FillQueue is a helper method to define mock.On call +// - targetVector string +// - from uint64 +func (_e *MockShardLike_Expecter) FillQueue(targetVector interface{}, from interface{}) 
*MockShardLike_FillQueue_Call { + return &MockShardLike_FillQueue_Call{Call: _e.mock.On("FillQueue", targetVector, from)} +} + +func (_c *MockShardLike_FillQueue_Call) Run(run func(targetVector string, from uint64)) *MockShardLike_FillQueue_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(uint64)) + }) + return _c +} + +func (_c *MockShardLike_FillQueue_Call) Return(_a0 error) *MockShardLike_FillQueue_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_FillQueue_Call) RunAndReturn(run func(string, uint64) error) *MockShardLike_FillQueue_Call { + _c.Call.Return(run) + return _c +} + +// FindUUIDs provides a mock function with given fields: ctx, _a1 +func (_m *MockShardLike) FindUUIDs(ctx context.Context, _a1 *filters.LocalFilter) ([]strfmt.UUID, error) { + ret := _m.Called(ctx, _a1) + + if len(ret) == 0 { + panic("no return value specified for FindUUIDs") + } + + var r0 []strfmt.UUID + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *filters.LocalFilter) ([]strfmt.UUID, error)); ok { + return rf(ctx, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *filters.LocalFilter) []strfmt.UUID); ok { + r0 = rf(ctx, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]strfmt.UUID) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *filters.LocalFilter) error); ok { + r1 = rf(ctx, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_FindUUIDs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FindUUIDs' +type MockShardLike_FindUUIDs_Call struct { + *mock.Call +} + +// FindUUIDs is a helper method to define mock.On call +// - ctx context.Context +// - _a1 *filters.LocalFilter +func (_e *MockShardLike_Expecter) FindUUIDs(ctx interface{}, _a1 interface{}) *MockShardLike_FindUUIDs_Call { + return &MockShardLike_FindUUIDs_Call{Call: _e.mock.On("FindUUIDs", ctx, _a1)} +} + +func (_c *MockShardLike_FindUUIDs_Call) Run(run 
func(ctx context.Context, _a1 *filters.LocalFilter)) *MockShardLike_FindUUIDs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*filters.LocalFilter)) + }) + return _c +} + +func (_c *MockShardLike_FindUUIDs_Call) Return(_a0 []strfmt.UUID, _a1 error) *MockShardLike_FindUUIDs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_FindUUIDs_Call) RunAndReturn(run func(context.Context, *filters.LocalFilter) ([]strfmt.UUID, error)) *MockShardLike_FindUUIDs_Call { + _c.Call.Return(run) + return _c +} + +// ForEachVectorIndex provides a mock function with given fields: f +func (_m *MockShardLike) ForEachVectorIndex(f func(string, VectorIndex) error) error { + ret := _m.Called(f) + + if len(ret) == 0 { + panic("no return value specified for ForEachVectorIndex") + } + + var r0 error + if rf, ok := ret.Get(0).(func(func(string, VectorIndex) error) error); ok { + r0 = rf(f) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_ForEachVectorIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ForEachVectorIndex' +type MockShardLike_ForEachVectorIndex_Call struct { + *mock.Call +} + +// ForEachVectorIndex is a helper method to define mock.On call +// - f func(string , VectorIndex) error +func (_e *MockShardLike_Expecter) ForEachVectorIndex(f interface{}) *MockShardLike_ForEachVectorIndex_Call { + return &MockShardLike_ForEachVectorIndex_Call{Call: _e.mock.On("ForEachVectorIndex", f)} +} + +func (_c *MockShardLike_ForEachVectorIndex_Call) Run(run func(f func(string, VectorIndex) error)) *MockShardLike_ForEachVectorIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func(string, VectorIndex) error)) + }) + return _c +} + +func (_c *MockShardLike_ForEachVectorIndex_Call) Return(_a0 error) *MockShardLike_ForEachVectorIndex_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_ForEachVectorIndex_Call) RunAndReturn(run 
func(func(string, VectorIndex) error) error) *MockShardLike_ForEachVectorIndex_Call { + _c.Call.Return(run) + return _c +} + +// ForEachVectorQueue provides a mock function with given fields: f +func (_m *MockShardLike) ForEachVectorQueue(f func(string, *VectorIndexQueue) error) error { + ret := _m.Called(f) + + if len(ret) == 0 { + panic("no return value specified for ForEachVectorQueue") + } + + var r0 error + if rf, ok := ret.Get(0).(func(func(string, *VectorIndexQueue) error) error); ok { + r0 = rf(f) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_ForEachVectorQueue_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ForEachVectorQueue' +type MockShardLike_ForEachVectorQueue_Call struct { + *mock.Call +} + +// ForEachVectorQueue is a helper method to define mock.On call +// - f func(string , *VectorIndexQueue) error +func (_e *MockShardLike_Expecter) ForEachVectorQueue(f interface{}) *MockShardLike_ForEachVectorQueue_Call { + return &MockShardLike_ForEachVectorQueue_Call{Call: _e.mock.On("ForEachVectorQueue", f)} +} + +func (_c *MockShardLike_ForEachVectorQueue_Call) Run(run func(f func(string, *VectorIndexQueue) error)) *MockShardLike_ForEachVectorQueue_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func(string, *VectorIndexQueue) error)) + }) + return _c +} + +func (_c *MockShardLike_ForEachVectorQueue_Call) Return(_a0 error) *MockShardLike_ForEachVectorQueue_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_ForEachVectorQueue_Call) RunAndReturn(run func(func(string, *VectorIndexQueue) error) error) *MockShardLike_ForEachVectorQueue_Call { + _c.Call.Return(run) + return _c +} + +// GetFile provides a mock function with given fields: ctx, relativeFilePath +func (_m *MockShardLike) GetFile(ctx context.Context, relativeFilePath string) (io.ReadCloser, error) { + ret := _m.Called(ctx, relativeFilePath) + + if len(ret) == 0 { + panic("no return value specified 
for GetFile") + } + + var r0 io.ReadCloser + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (io.ReadCloser, error)); ok { + return rf(ctx, relativeFilePath) + } + if rf, ok := ret.Get(0).(func(context.Context, string) io.ReadCloser); ok { + r0 = rf(ctx, relativeFilePath) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, relativeFilePath) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_GetFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFile' +type MockShardLike_GetFile_Call struct { + *mock.Call +} + +// GetFile is a helper method to define mock.On call +// - ctx context.Context +// - relativeFilePath string +func (_e *MockShardLike_Expecter) GetFile(ctx interface{}, relativeFilePath interface{}) *MockShardLike_GetFile_Call { + return &MockShardLike_GetFile_Call{Call: _e.mock.On("GetFile", ctx, relativeFilePath)} +} + +func (_c *MockShardLike_GetFile_Call) Run(run func(ctx context.Context, relativeFilePath string)) *MockShardLike_GetFile_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockShardLike_GetFile_Call) Return(_a0 io.ReadCloser, _a1 error) *MockShardLike_GetFile_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_GetFile_Call) RunAndReturn(run func(context.Context, string) (io.ReadCloser, error)) *MockShardLike_GetFile_Call { + _c.Call.Return(run) + return _c +} + +// GetFileMetadata provides a mock function with given fields: ctx, relativeFilePath +func (_m *MockShardLike) GetFileMetadata(ctx context.Context, relativeFilePath string) (file.FileMetadata, error) { + ret := _m.Called(ctx, relativeFilePath) + + if len(ret) == 0 { + panic("no return value specified for GetFileMetadata") + } + + var r0 file.FileMetadata + var r1 error + if 
rf, ok := ret.Get(0).(func(context.Context, string) (file.FileMetadata, error)); ok { + return rf(ctx, relativeFilePath) + } + if rf, ok := ret.Get(0).(func(context.Context, string) file.FileMetadata); ok { + r0 = rf(ctx, relativeFilePath) + } else { + r0 = ret.Get(0).(file.FileMetadata) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, relativeFilePath) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_GetFileMetadata_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFileMetadata' +type MockShardLike_GetFileMetadata_Call struct { + *mock.Call +} + +// GetFileMetadata is a helper method to define mock.On call +// - ctx context.Context +// - relativeFilePath string +func (_e *MockShardLike_Expecter) GetFileMetadata(ctx interface{}, relativeFilePath interface{}) *MockShardLike_GetFileMetadata_Call { + return &MockShardLike_GetFileMetadata_Call{Call: _e.mock.On("GetFileMetadata", ctx, relativeFilePath)} +} + +func (_c *MockShardLike_GetFileMetadata_Call) Run(run func(ctx context.Context, relativeFilePath string)) *MockShardLike_GetFileMetadata_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockShardLike_GetFileMetadata_Call) Return(_a0 file.FileMetadata, _a1 error) *MockShardLike_GetFileMetadata_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_GetFileMetadata_Call) RunAndReturn(run func(context.Context, string) (file.FileMetadata, error)) *MockShardLike_GetFileMetadata_Call { + _c.Call.Return(run) + return _c +} + +// GetPropertyLengthTracker provides a mock function with no fields +func (_m *MockShardLike) GetPropertyLengthTracker() *inverted.JsonShardMetaData { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetPropertyLengthTracker") + } + + var r0 *inverted.JsonShardMetaData + if rf, ok := ret.Get(0).(func() 
*inverted.JsonShardMetaData); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*inverted.JsonShardMetaData) + } + } + + return r0 +} + +// MockShardLike_GetPropertyLengthTracker_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPropertyLengthTracker' +type MockShardLike_GetPropertyLengthTracker_Call struct { + *mock.Call +} + +// GetPropertyLengthTracker is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) GetPropertyLengthTracker() *MockShardLike_GetPropertyLengthTracker_Call { + return &MockShardLike_GetPropertyLengthTracker_Call{Call: _e.mock.On("GetPropertyLengthTracker")} +} + +func (_c *MockShardLike_GetPropertyLengthTracker_Call) Run(run func()) *MockShardLike_GetPropertyLengthTracker_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_GetPropertyLengthTracker_Call) Return(_a0 *inverted.JsonShardMetaData) *MockShardLike_GetPropertyLengthTracker_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_GetPropertyLengthTracker_Call) RunAndReturn(run func() *inverted.JsonShardMetaData) *MockShardLike_GetPropertyLengthTracker_Call { + _c.Call.Return(run) + return _c +} + +// GetStatus provides a mock function with no fields +func (_m *MockShardLike) GetStatus() storagestate.Status { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetStatus") + } + + var r0 storagestate.Status + if rf, ok := ret.Get(0).(func() storagestate.Status); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storagestate.Status) + } + + return r0 +} + +// MockShardLike_GetStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStatus' +type MockShardLike_GetStatus_Call struct { + *mock.Call +} + +// GetStatus is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) GetStatus() *MockShardLike_GetStatus_Call { + return 
&MockShardLike_GetStatus_Call{Call: _e.mock.On("GetStatus")} +} + +func (_c *MockShardLike_GetStatus_Call) Run(run func()) *MockShardLike_GetStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_GetStatus_Call) Return(_a0 storagestate.Status) *MockShardLike_GetStatus_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_GetStatus_Call) RunAndReturn(run func() storagestate.Status) *MockShardLike_GetStatus_Call { + _c.Call.Return(run) + return _c +} + +// GetVectorIndex provides a mock function with given fields: targetVector +func (_m *MockShardLike) GetVectorIndex(targetVector string) (VectorIndex, bool) { + ret := _m.Called(targetVector) + + if len(ret) == 0 { + panic("no return value specified for GetVectorIndex") + } + + var r0 VectorIndex + var r1 bool + if rf, ok := ret.Get(0).(func(string) (VectorIndex, bool)); ok { + return rf(targetVector) + } + if rf, ok := ret.Get(0).(func(string) VectorIndex); ok { + r0 = rf(targetVector) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(VectorIndex) + } + } + + if rf, ok := ret.Get(1).(func(string) bool); ok { + r1 = rf(targetVector) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// MockShardLike_GetVectorIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetVectorIndex' +type MockShardLike_GetVectorIndex_Call struct { + *mock.Call +} + +// GetVectorIndex is a helper method to define mock.On call +// - targetVector string +func (_e *MockShardLike_Expecter) GetVectorIndex(targetVector interface{}) *MockShardLike_GetVectorIndex_Call { + return &MockShardLike_GetVectorIndex_Call{Call: _e.mock.On("GetVectorIndex", targetVector)} +} + +func (_c *MockShardLike_GetVectorIndex_Call) Run(run func(targetVector string)) *MockShardLike_GetVectorIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockShardLike_GetVectorIndex_Call) 
Return(_a0 VectorIndex, _a1 bool) *MockShardLike_GetVectorIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_GetVectorIndex_Call) RunAndReturn(run func(string) (VectorIndex, bool)) *MockShardLike_GetVectorIndex_Call { + _c.Call.Return(run) + return _c +} + +// GetVectorIndexQueue provides a mock function with given fields: targetVector +func (_m *MockShardLike) GetVectorIndexQueue(targetVector string) (*VectorIndexQueue, bool) { + ret := _m.Called(targetVector) + + if len(ret) == 0 { + panic("no return value specified for GetVectorIndexQueue") + } + + var r0 *VectorIndexQueue + var r1 bool + if rf, ok := ret.Get(0).(func(string) (*VectorIndexQueue, bool)); ok { + return rf(targetVector) + } + if rf, ok := ret.Get(0).(func(string) *VectorIndexQueue); ok { + r0 = rf(targetVector) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*VectorIndexQueue) + } + } + + if rf, ok := ret.Get(1).(func(string) bool); ok { + r1 = rf(targetVector) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// MockShardLike_GetVectorIndexQueue_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetVectorIndexQueue' +type MockShardLike_GetVectorIndexQueue_Call struct { + *mock.Call +} + +// GetVectorIndexQueue is a helper method to define mock.On call +// - targetVector string +func (_e *MockShardLike_Expecter) GetVectorIndexQueue(targetVector interface{}) *MockShardLike_GetVectorIndexQueue_Call { + return &MockShardLike_GetVectorIndexQueue_Call{Call: _e.mock.On("GetVectorIndexQueue", targetVector)} +} + +func (_c *MockShardLike_GetVectorIndexQueue_Call) Run(run func(targetVector string)) *MockShardLike_GetVectorIndexQueue_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockShardLike_GetVectorIndexQueue_Call) Return(_a0 *VectorIndexQueue, _a1 bool) *MockShardLike_GetVectorIndexQueue_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c 
*MockShardLike_GetVectorIndexQueue_Call) RunAndReturn(run func(string) (*VectorIndexQueue, bool)) *MockShardLike_GetVectorIndexQueue_Call { + _c.Call.Return(run) + return _c +} + +// HaltForTransfer provides a mock function with given fields: ctx, offloading, inactivityTimeout +func (_m *MockShardLike) HaltForTransfer(ctx context.Context, offloading bool, inactivityTimeout time.Duration) error { + ret := _m.Called(ctx, offloading, inactivityTimeout) + + if len(ret) == 0 { + panic("no return value specified for HaltForTransfer") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, bool, time.Duration) error); ok { + r0 = rf(ctx, offloading, inactivityTimeout) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_HaltForTransfer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HaltForTransfer' +type MockShardLike_HaltForTransfer_Call struct { + *mock.Call +} + +// HaltForTransfer is a helper method to define mock.On call +// - ctx context.Context +// - offloading bool +// - inactivityTimeout time.Duration +func (_e *MockShardLike_Expecter) HaltForTransfer(ctx interface{}, offloading interface{}, inactivityTimeout interface{}) *MockShardLike_HaltForTransfer_Call { + return &MockShardLike_HaltForTransfer_Call{Call: _e.mock.On("HaltForTransfer", ctx, offloading, inactivityTimeout)} +} + +func (_c *MockShardLike_HaltForTransfer_Call) Run(run func(ctx context.Context, offloading bool, inactivityTimeout time.Duration)) *MockShardLike_HaltForTransfer_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(bool), args[2].(time.Duration)) + }) + return _c +} + +func (_c *MockShardLike_HaltForTransfer_Call) Return(_a0 error) *MockShardLike_HaltForTransfer_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_HaltForTransfer_Call) RunAndReturn(run func(context.Context, bool, time.Duration) error) *MockShardLike_HaltForTransfer_Call { + 
_c.Call.Return(run) + return _c +} + +// HashTreeLevel provides a mock function with given fields: ctx, level, discriminant +func (_m *MockShardLike) HashTreeLevel(ctx context.Context, level int, discriminant *hashtree.Bitset) ([]hashtree.Digest, error) { + ret := _m.Called(ctx, level, discriminant) + + if len(ret) == 0 { + panic("no return value specified for HashTreeLevel") + } + + var r0 []hashtree.Digest + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int, *hashtree.Bitset) ([]hashtree.Digest, error)); ok { + return rf(ctx, level, discriminant) + } + if rf, ok := ret.Get(0).(func(context.Context, int, *hashtree.Bitset) []hashtree.Digest); ok { + r0 = rf(ctx, level, discriminant) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]hashtree.Digest) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, int, *hashtree.Bitset) error); ok { + r1 = rf(ctx, level, discriminant) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_HashTreeLevel_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HashTreeLevel' +type MockShardLike_HashTreeLevel_Call struct { + *mock.Call +} + +// HashTreeLevel is a helper method to define mock.On call +// - ctx context.Context +// - level int +// - discriminant *hashtree.Bitset +func (_e *MockShardLike_Expecter) HashTreeLevel(ctx interface{}, level interface{}, discriminant interface{}) *MockShardLike_HashTreeLevel_Call { + return &MockShardLike_HashTreeLevel_Call{Call: _e.mock.On("HashTreeLevel", ctx, level, discriminant)} +} + +func (_c *MockShardLike_HashTreeLevel_Call) Run(run func(ctx context.Context, level int, discriminant *hashtree.Bitset)) *MockShardLike_HashTreeLevel_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int), args[2].(*hashtree.Bitset)) + }) + return _c +} + +func (_c *MockShardLike_HashTreeLevel_Call) Return(digests []hashtree.Digest, err error) *MockShardLike_HashTreeLevel_Call { + 
_c.Call.Return(digests, err) + return _c +} + +func (_c *MockShardLike_HashTreeLevel_Call) RunAndReturn(run func(context.Context, int, *hashtree.Bitset) ([]hashtree.Digest, error)) *MockShardLike_HashTreeLevel_Call { + _c.Call.Return(run) + return _c +} + +// ID provides a mock function with no fields +func (_m *MockShardLike) ID() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// MockShardLike_ID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ID' +type MockShardLike_ID_Call struct { + *mock.Call +} + +// ID is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) ID() *MockShardLike_ID_Call { + return &MockShardLike_ID_Call{Call: _e.mock.On("ID")} +} + +func (_c *MockShardLike_ID_Call) Run(run func()) *MockShardLike_ID_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_ID_Call) Return(_a0 string) *MockShardLike_ID_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_ID_Call) RunAndReturn(run func() string) *MockShardLike_ID_Call { + _c.Call.Return(run) + return _c +} + +// Index provides a mock function with no fields +func (_m *MockShardLike) Index() *Index { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Index") + } + + var r0 *Index + if rf, ok := ret.Get(0).(func() *Index); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*Index) + } + } + + return r0 +} + +// MockShardLike_Index_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Index' +type MockShardLike_Index_Call struct { + *mock.Call +} + +// Index is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) Index() *MockShardLike_Index_Call { + return 
&MockShardLike_Index_Call{Call: _e.mock.On("Index")} +} + +func (_c *MockShardLike_Index_Call) Run(run func()) *MockShardLike_Index_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_Index_Call) Return(_a0 *Index) *MockShardLike_Index_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_Index_Call) RunAndReturn(run func() *Index) *MockShardLike_Index_Call { + _c.Call.Return(run) + return _c +} + +// ListBackupFiles provides a mock function with given fields: ctx, ret +func (_m *MockShardLike) ListBackupFiles(ctx context.Context, ret *backup.ShardDescriptor) error { + ret_2 := _m.Called(ctx, ret) + + if len(ret_2) == 0 { + panic("no return value specified for ListBackupFiles") + } + + var r0 error + if rf, ok := ret_2.Get(0).(func(context.Context, *backup.ShardDescriptor) error); ok { + r0 = rf(ctx, ret) + } else { + r0 = ret_2.Error(0) + } + + return r0 +} + +// MockShardLike_ListBackupFiles_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListBackupFiles' +type MockShardLike_ListBackupFiles_Call struct { + *mock.Call +} + +// ListBackupFiles is a helper method to define mock.On call +// - ctx context.Context +// - ret *backup.ShardDescriptor +func (_e *MockShardLike_Expecter) ListBackupFiles(ctx interface{}, ret interface{}) *MockShardLike_ListBackupFiles_Call { + return &MockShardLike_ListBackupFiles_Call{Call: _e.mock.On("ListBackupFiles", ctx, ret)} +} + +func (_c *MockShardLike_ListBackupFiles_Call) Run(run func(ctx context.Context, ret *backup.ShardDescriptor)) *MockShardLike_ListBackupFiles_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*backup.ShardDescriptor)) + }) + return _c +} + +func (_c *MockShardLike_ListBackupFiles_Call) Return(_a0 error) *MockShardLike_ListBackupFiles_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_ListBackupFiles_Call) RunAndReturn(run 
func(context.Context, *backup.ShardDescriptor) error) *MockShardLike_ListBackupFiles_Call { + _c.Call.Return(run) + return _c +} + +// MergeObject provides a mock function with given fields: ctx, object +func (_m *MockShardLike) MergeObject(ctx context.Context, object objects.MergeDocument) error { + ret := _m.Called(ctx, object) + + if len(ret) == 0 { + panic("no return value specified for MergeObject") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, objects.MergeDocument) error); ok { + r0 = rf(ctx, object) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_MergeObject_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MergeObject' +type MockShardLike_MergeObject_Call struct { + *mock.Call +} + +// MergeObject is a helper method to define mock.On call +// - ctx context.Context +// - object objects.MergeDocument +func (_e *MockShardLike_Expecter) MergeObject(ctx interface{}, object interface{}) *MockShardLike_MergeObject_Call { + return &MockShardLike_MergeObject_Call{Call: _e.mock.On("MergeObject", ctx, object)} +} + +func (_c *MockShardLike_MergeObject_Call) Run(run func(ctx context.Context, object objects.MergeDocument)) *MockShardLike_MergeObject_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(objects.MergeDocument)) + }) + return _c +} + +func (_c *MockShardLike_MergeObject_Call) Return(_a0 error) *MockShardLike_MergeObject_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_MergeObject_Call) RunAndReturn(run func(context.Context, objects.MergeDocument) error) *MockShardLike_MergeObject_Call { + _c.Call.Return(run) + return _c +} + +// Metrics provides a mock function with no fields +func (_m *MockShardLike) Metrics() *Metrics { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Metrics") + } + + var r0 *Metrics + if rf, ok := ret.Get(0).(func() *Metrics); ok { + r0 = rf() + } else { 
+ if ret.Get(0) != nil { + r0 = ret.Get(0).(*Metrics) + } + } + + return r0 +} + +// MockShardLike_Metrics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Metrics' +type MockShardLike_Metrics_Call struct { + *mock.Call +} + +// Metrics is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) Metrics() *MockShardLike_Metrics_Call { + return &MockShardLike_Metrics_Call{Call: _e.mock.On("Metrics")} +} + +func (_c *MockShardLike_Metrics_Call) Run(run func()) *MockShardLike_Metrics_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_Metrics_Call) Return(_a0 *Metrics) *MockShardLike_Metrics_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_Metrics_Call) RunAndReturn(run func() *Metrics) *MockShardLike_Metrics_Call { + _c.Call.Return(run) + return _c +} + +// MultiObjectByID provides a mock function with given fields: ctx, query +func (_m *MockShardLike) MultiObjectByID(ctx context.Context, query []multi.Identifier) ([]*storobj.Object, error) { + ret := _m.Called(ctx, query) + + if len(ret) == 0 { + panic("no return value specified for MultiObjectByID") + } + + var r0 []*storobj.Object + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []multi.Identifier) ([]*storobj.Object, error)); ok { + return rf(ctx, query) + } + if rf, ok := ret.Get(0).(func(context.Context, []multi.Identifier) []*storobj.Object); ok { + r0 = rf(ctx, query) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*storobj.Object) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []multi.Identifier) error); ok { + r1 = rf(ctx, query) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_MultiObjectByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MultiObjectByID' +type MockShardLike_MultiObjectByID_Call struct { + *mock.Call +} + +// MultiObjectByID is a helper method to 
define mock.On call +// - ctx context.Context +// - query []multi.Identifier +func (_e *MockShardLike_Expecter) MultiObjectByID(ctx interface{}, query interface{}) *MockShardLike_MultiObjectByID_Call { + return &MockShardLike_MultiObjectByID_Call{Call: _e.mock.On("MultiObjectByID", ctx, query)} +} + +func (_c *MockShardLike_MultiObjectByID_Call) Run(run func(ctx context.Context, query []multi.Identifier)) *MockShardLike_MultiObjectByID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]multi.Identifier)) + }) + return _c +} + +func (_c *MockShardLike_MultiObjectByID_Call) Return(_a0 []*storobj.Object, _a1 error) *MockShardLike_MultiObjectByID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_MultiObjectByID_Call) RunAndReturn(run func(context.Context, []multi.Identifier) ([]*storobj.Object, error)) *MockShardLike_MultiObjectByID_Call { + _c.Call.Return(run) + return _c +} + +// Name provides a mock function with no fields +func (_m *MockShardLike) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// MockShardLike_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name' +type MockShardLike_Name_Call struct { + *mock.Call +} + +// Name is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) Name() *MockShardLike_Name_Call { + return &MockShardLike_Name_Call{Call: _e.mock.On("Name")} +} + +func (_c *MockShardLike_Name_Call) Run(run func()) *MockShardLike_Name_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_Name_Call) Return(_a0 string) *MockShardLike_Name_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_Name_Call) RunAndReturn(run func() string) 
*MockShardLike_Name_Call { + _c.Call.Return(run) + return _c +} + +// NotifyReady provides a mock function with no fields +func (_m *MockShardLike) NotifyReady() { + _m.Called() +} + +// MockShardLike_NotifyReady_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NotifyReady' +type MockShardLike_NotifyReady_Call struct { + *mock.Call +} + +// NotifyReady is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) NotifyReady() *MockShardLike_NotifyReady_Call { + return &MockShardLike_NotifyReady_Call{Call: _e.mock.On("NotifyReady")} +} + +func (_c *MockShardLike_NotifyReady_Call) Run(run func()) *MockShardLike_NotifyReady_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_NotifyReady_Call) Return() *MockShardLike_NotifyReady_Call { + _c.Call.Return() + return _c +} + +func (_c *MockShardLike_NotifyReady_Call) RunAndReturn(run func()) *MockShardLike_NotifyReady_Call { + _c.Run(run) + return _c +} + +// ObjectByID provides a mock function with given fields: ctx, id, props, _a3 +func (_m *MockShardLike) ObjectByID(ctx context.Context, id strfmt.UUID, props search.SelectProperties, _a3 additional.Properties) (*storobj.Object, error) { + ret := _m.Called(ctx, id, props, _a3) + + if len(ret) == 0 { + panic("no return value specified for ObjectByID") + } + + var r0 *storobj.Object + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID, search.SelectProperties, additional.Properties) (*storobj.Object, error)); ok { + return rf(ctx, id, props, _a3) + } + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID, search.SelectProperties, additional.Properties) *storobj.Object); ok { + r0 = rf(ctx, id, props, _a3) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*storobj.Object) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, strfmt.UUID, search.SelectProperties, additional.Properties) error); ok { + r1 = rf(ctx, id, props, _a3) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_ObjectByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ObjectByID' +type MockShardLike_ObjectByID_Call struct { + *mock.Call +} + +// ObjectByID is a helper method to define mock.On call +// - ctx context.Context +// - id strfmt.UUID +// - props search.SelectProperties +// - _a3 additional.Properties +func (_e *MockShardLike_Expecter) ObjectByID(ctx interface{}, id interface{}, props interface{}, _a3 interface{}) *MockShardLike_ObjectByID_Call { + return &MockShardLike_ObjectByID_Call{Call: _e.mock.On("ObjectByID", ctx, id, props, _a3)} +} + +func (_c *MockShardLike_ObjectByID_Call) Run(run func(ctx context.Context, id strfmt.UUID, props search.SelectProperties, _a3 additional.Properties)) *MockShardLike_ObjectByID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(strfmt.UUID), args[2].(search.SelectProperties), args[3].(additional.Properties)) + }) + return _c +} + +func (_c *MockShardLike_ObjectByID_Call) Return(_a0 *storobj.Object, _a1 error) *MockShardLike_ObjectByID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_ObjectByID_Call) RunAndReturn(run func(context.Context, strfmt.UUID, search.SelectProperties, additional.Properties) (*storobj.Object, error)) *MockShardLike_ObjectByID_Call { + _c.Call.Return(run) + return _c +} + +// ObjectByIDErrDeleted provides a mock function with given fields: ctx, id, props, _a3 +func (_m *MockShardLike) ObjectByIDErrDeleted(ctx context.Context, id strfmt.UUID, props search.SelectProperties, _a3 additional.Properties) (*storobj.Object, error) { + ret := _m.Called(ctx, id, props, _a3) + + if len(ret) == 0 { + panic("no return value specified for ObjectByIDErrDeleted") + } + + var r0 *storobj.Object + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID, search.SelectProperties, additional.Properties) (*storobj.Object, 
error)); ok { + return rf(ctx, id, props, _a3) + } + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID, search.SelectProperties, additional.Properties) *storobj.Object); ok { + r0 = rf(ctx, id, props, _a3) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*storobj.Object) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, strfmt.UUID, search.SelectProperties, additional.Properties) error); ok { + r1 = rf(ctx, id, props, _a3) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_ObjectByIDErrDeleted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ObjectByIDErrDeleted' +type MockShardLike_ObjectByIDErrDeleted_Call struct { + *mock.Call +} + +// ObjectByIDErrDeleted is a helper method to define mock.On call +// - ctx context.Context +// - id strfmt.UUID +// - props search.SelectProperties +// - _a3 additional.Properties +func (_e *MockShardLike_Expecter) ObjectByIDErrDeleted(ctx interface{}, id interface{}, props interface{}, _a3 interface{}) *MockShardLike_ObjectByIDErrDeleted_Call { + return &MockShardLike_ObjectByIDErrDeleted_Call{Call: _e.mock.On("ObjectByIDErrDeleted", ctx, id, props, _a3)} +} + +func (_c *MockShardLike_ObjectByIDErrDeleted_Call) Run(run func(ctx context.Context, id strfmt.UUID, props search.SelectProperties, _a3 additional.Properties)) *MockShardLike_ObjectByIDErrDeleted_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(strfmt.UUID), args[2].(search.SelectProperties), args[3].(additional.Properties)) + }) + return _c +} + +func (_c *MockShardLike_ObjectByIDErrDeleted_Call) Return(_a0 *storobj.Object, _a1 error) *MockShardLike_ObjectByIDErrDeleted_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_ObjectByIDErrDeleted_Call) RunAndReturn(run func(context.Context, strfmt.UUID, search.SelectProperties, additional.Properties) (*storobj.Object, error)) *MockShardLike_ObjectByIDErrDeleted_Call { + 
_c.Call.Return(run) + return _c +} + +// ObjectCount provides a mock function with no fields +func (_m *MockShardLike) ObjectCount() int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ObjectCount") + } + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// MockShardLike_ObjectCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ObjectCount' +type MockShardLike_ObjectCount_Call struct { + *mock.Call +} + +// ObjectCount is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) ObjectCount() *MockShardLike_ObjectCount_Call { + return &MockShardLike_ObjectCount_Call{Call: _e.mock.On("ObjectCount")} +} + +func (_c *MockShardLike_ObjectCount_Call) Run(run func()) *MockShardLike_ObjectCount_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_ObjectCount_Call) Return(_a0 int) *MockShardLike_ObjectCount_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_ObjectCount_Call) RunAndReturn(run func() int) *MockShardLike_ObjectCount_Call { + _c.Call.Return(run) + return _c +} + +// ObjectCountAsync provides a mock function with given fields: ctx +func (_m *MockShardLike) ObjectCountAsync(ctx context.Context) (int64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ObjectCountAsync") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (int64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) int64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_ObjectCountAsync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'ObjectCountAsync' +type MockShardLike_ObjectCountAsync_Call struct { + *mock.Call +} + +// ObjectCountAsync is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockShardLike_Expecter) ObjectCountAsync(ctx interface{}) *MockShardLike_ObjectCountAsync_Call { + return &MockShardLike_ObjectCountAsync_Call{Call: _e.mock.On("ObjectCountAsync", ctx)} +} + +func (_c *MockShardLike_ObjectCountAsync_Call) Run(run func(ctx context.Context)) *MockShardLike_ObjectCountAsync_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockShardLike_ObjectCountAsync_Call) Return(_a0 int64, _a1 error) *MockShardLike_ObjectCountAsync_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_ObjectCountAsync_Call) RunAndReturn(run func(context.Context) (int64, error)) *MockShardLike_ObjectCountAsync_Call { + _c.Call.Return(run) + return _c +} + +// ObjectDigestsInRange provides a mock function with given fields: ctx, initialUUID, finalUUID, limit +func (_m *MockShardLike) ObjectDigestsInRange(ctx context.Context, initialUUID strfmt.UUID, finalUUID strfmt.UUID, limit int) ([]routertypes.RepairResponse, error) { + ret := _m.Called(ctx, initialUUID, finalUUID, limit) + + if len(ret) == 0 { + panic("no return value specified for ObjectDigestsInRange") + } + + var r0 []routertypes.RepairResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID, strfmt.UUID, int) ([]routertypes.RepairResponse, error)); ok { + return rf(ctx, initialUUID, finalUUID, limit) + } + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID, strfmt.UUID, int) []routertypes.RepairResponse); ok { + r0 = rf(ctx, initialUUID, finalUUID, limit) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]routertypes.RepairResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, strfmt.UUID, strfmt.UUID, int) error); ok { + r1 = rf(ctx, initialUUID, finalUUID, limit) + } else { + r1 = 
ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_ObjectDigestsInRange_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ObjectDigestsInRange' +type MockShardLike_ObjectDigestsInRange_Call struct { + *mock.Call +} + +// ObjectDigestsInRange is a helper method to define mock.On call +// - ctx context.Context +// - initialUUID strfmt.UUID +// - finalUUID strfmt.UUID +// - limit int +func (_e *MockShardLike_Expecter) ObjectDigestsInRange(ctx interface{}, initialUUID interface{}, finalUUID interface{}, limit interface{}) *MockShardLike_ObjectDigestsInRange_Call { + return &MockShardLike_ObjectDigestsInRange_Call{Call: _e.mock.On("ObjectDigestsInRange", ctx, initialUUID, finalUUID, limit)} +} + +func (_c *MockShardLike_ObjectDigestsInRange_Call) Run(run func(ctx context.Context, initialUUID strfmt.UUID, finalUUID strfmt.UUID, limit int)) *MockShardLike_ObjectDigestsInRange_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(strfmt.UUID), args[2].(strfmt.UUID), args[3].(int)) + }) + return _c +} + +func (_c *MockShardLike_ObjectDigestsInRange_Call) Return(objs []routertypes.RepairResponse, err error) *MockShardLike_ObjectDigestsInRange_Call { + _c.Call.Return(objs, err) + return _c +} + +func (_c *MockShardLike_ObjectDigestsInRange_Call) RunAndReturn(run func(context.Context, strfmt.UUID, strfmt.UUID, int) ([]routertypes.RepairResponse, error)) *MockShardLike_ObjectDigestsInRange_Call { + _c.Call.Return(run) + return _c +} + +// ObjectList provides a mock function with given fields: ctx, limit, sort, cursor, _a4, className +func (_m *MockShardLike) ObjectList(ctx context.Context, limit int, sort []filters.Sort, cursor *filters.Cursor, _a4 additional.Properties, className schema.ClassName) ([]*storobj.Object, error) { + ret := _m.Called(ctx, limit, sort, cursor, _a4, className) + + if len(ret) == 0 { + panic("no return value specified for ObjectList") + } + + var r0 []*storobj.Object 
+ var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int, []filters.Sort, *filters.Cursor, additional.Properties, schema.ClassName) ([]*storobj.Object, error)); ok { + return rf(ctx, limit, sort, cursor, _a4, className) + } + if rf, ok := ret.Get(0).(func(context.Context, int, []filters.Sort, *filters.Cursor, additional.Properties, schema.ClassName) []*storobj.Object); ok { + r0 = rf(ctx, limit, sort, cursor, _a4, className) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*storobj.Object) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, int, []filters.Sort, *filters.Cursor, additional.Properties, schema.ClassName) error); ok { + r1 = rf(ctx, limit, sort, cursor, _a4, className) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_ObjectList_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ObjectList' +type MockShardLike_ObjectList_Call struct { + *mock.Call +} + +// ObjectList is a helper method to define mock.On call +// - ctx context.Context +// - limit int +// - sort []filters.Sort +// - cursor *filters.Cursor +// - _a4 additional.Properties +// - className schema.ClassName +func (_e *MockShardLike_Expecter) ObjectList(ctx interface{}, limit interface{}, sort interface{}, cursor interface{}, _a4 interface{}, className interface{}) *MockShardLike_ObjectList_Call { + return &MockShardLike_ObjectList_Call{Call: _e.mock.On("ObjectList", ctx, limit, sort, cursor, _a4, className)} +} + +func (_c *MockShardLike_ObjectList_Call) Run(run func(ctx context.Context, limit int, sort []filters.Sort, cursor *filters.Cursor, _a4 additional.Properties, className schema.ClassName)) *MockShardLike_ObjectList_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int), args[2].([]filters.Sort), args[3].(*filters.Cursor), args[4].(additional.Properties), args[5].(schema.ClassName)) + }) + return _c +} + +func (_c *MockShardLike_ObjectList_Call) 
Return(_a0 []*storobj.Object, _a1 error) *MockShardLike_ObjectList_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_ObjectList_Call) RunAndReturn(run func(context.Context, int, []filters.Sort, *filters.Cursor, additional.Properties, schema.ClassName) ([]*storobj.Object, error)) *MockShardLike_ObjectList_Call { + _c.Call.Return(run) + return _c +} + +// ObjectSearch provides a mock function with given fields: ctx, limit, _a2, keywordRanking, sort, cursor, _a6, properties +func (_m *MockShardLike) ObjectSearch(ctx context.Context, limit int, _a2 *filters.LocalFilter, keywordRanking *searchparams.KeywordRanking, sort []filters.Sort, cursor *filters.Cursor, _a6 additional.Properties, properties []string) ([]*storobj.Object, []float32, error) { + ret := _m.Called(ctx, limit, _a2, keywordRanking, sort, cursor, _a6, properties) + + if len(ret) == 0 { + panic("no return value specified for ObjectSearch") + } + + var r0 []*storobj.Object + var r1 []float32 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, int, *filters.LocalFilter, *searchparams.KeywordRanking, []filters.Sort, *filters.Cursor, additional.Properties, []string) ([]*storobj.Object, []float32, error)); ok { + return rf(ctx, limit, _a2, keywordRanking, sort, cursor, _a6, properties) + } + if rf, ok := ret.Get(0).(func(context.Context, int, *filters.LocalFilter, *searchparams.KeywordRanking, []filters.Sort, *filters.Cursor, additional.Properties, []string) []*storobj.Object); ok { + r0 = rf(ctx, limit, _a2, keywordRanking, sort, cursor, _a6, properties) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*storobj.Object) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, int, *filters.LocalFilter, *searchparams.KeywordRanking, []filters.Sort, *filters.Cursor, additional.Properties, []string) []float32); ok { + r1 = rf(ctx, limit, _a2, keywordRanking, sort, cursor, _a6, properties) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]float32) + } + } + + if 
rf, ok := ret.Get(2).(func(context.Context, int, *filters.LocalFilter, *searchparams.KeywordRanking, []filters.Sort, *filters.Cursor, additional.Properties, []string) error); ok { + r2 = rf(ctx, limit, _a2, keywordRanking, sort, cursor, _a6, properties) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MockShardLike_ObjectSearch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ObjectSearch' +type MockShardLike_ObjectSearch_Call struct { + *mock.Call +} + +// ObjectSearch is a helper method to define mock.On call +// - ctx context.Context +// - limit int +// - _a2 *filters.LocalFilter +// - keywordRanking *searchparams.KeywordRanking +// - sort []filters.Sort +// - cursor *filters.Cursor +// - _a6 additional.Properties +// - properties []string +func (_e *MockShardLike_Expecter) ObjectSearch(ctx interface{}, limit interface{}, _a2 interface{}, keywordRanking interface{}, sort interface{}, cursor interface{}, _a6 interface{}, properties interface{}) *MockShardLike_ObjectSearch_Call { + return &MockShardLike_ObjectSearch_Call{Call: _e.mock.On("ObjectSearch", ctx, limit, _a2, keywordRanking, sort, cursor, _a6, properties)} +} + +func (_c *MockShardLike_ObjectSearch_Call) Run(run func(ctx context.Context, limit int, _a2 *filters.LocalFilter, keywordRanking *searchparams.KeywordRanking, sort []filters.Sort, cursor *filters.Cursor, _a6 additional.Properties, properties []string)) *MockShardLike_ObjectSearch_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int), args[2].(*filters.LocalFilter), args[3].(*searchparams.KeywordRanking), args[4].([]filters.Sort), args[5].(*filters.Cursor), args[6].(additional.Properties), args[7].([]string)) + }) + return _c +} + +func (_c *MockShardLike_ObjectSearch_Call) Return(_a0 []*storobj.Object, _a1 []float32, _a2 error) *MockShardLike_ObjectSearch_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c 
*MockShardLike_ObjectSearch_Call) RunAndReturn(run func(context.Context, int, *filters.LocalFilter, *searchparams.KeywordRanking, []filters.Sort, *filters.Cursor, additional.Properties, []string) ([]*storobj.Object, []float32, error)) *MockShardLike_ObjectSearch_Call { + _c.Call.Return(run) + return _c +} + +// ObjectStorageSize provides a mock function with given fields: ctx +func (_m *MockShardLike) ObjectStorageSize(ctx context.Context) (int64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ObjectStorageSize") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (int64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) int64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_ObjectStorageSize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ObjectStorageSize' +type MockShardLike_ObjectStorageSize_Call struct { + *mock.Call +} + +// ObjectStorageSize is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockShardLike_Expecter) ObjectStorageSize(ctx interface{}) *MockShardLike_ObjectStorageSize_Call { + return &MockShardLike_ObjectStorageSize_Call{Call: _e.mock.On("ObjectStorageSize", ctx)} +} + +func (_c *MockShardLike_ObjectStorageSize_Call) Run(run func(ctx context.Context)) *MockShardLike_ObjectStorageSize_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockShardLike_ObjectStorageSize_Call) Return(_a0 int64, _a1 error) *MockShardLike_ObjectStorageSize_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_ObjectStorageSize_Call) RunAndReturn(run func(context.Context) (int64, error)) *MockShardLike_ObjectStorageSize_Call { + 
_c.Call.Return(run) + return _c +} + +// ObjectVectorSearch provides a mock function with given fields: ctx, searchVectors, targetVectors, targetDist, limit, _a5, sort, groupBy, _a8, targetCombination, properties +func (_m *MockShardLike) ObjectVectorSearch(ctx context.Context, searchVectors []models.Vector, targetVectors []string, targetDist float32, limit int, _a5 *filters.LocalFilter, sort []filters.Sort, groupBy *searchparams.GroupBy, _a8 additional.Properties, targetCombination *dto.TargetCombination, properties []string) ([]*storobj.Object, []float32, error) { + ret := _m.Called(ctx, searchVectors, targetVectors, targetDist, limit, _a5, sort, groupBy, _a8, targetCombination, properties) + + if len(ret) == 0 { + panic("no return value specified for ObjectVectorSearch") + } + + var r0 []*storobj.Object + var r1 []float32 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []models.Vector, []string, float32, int, *filters.LocalFilter, []filters.Sort, *searchparams.GroupBy, additional.Properties, *dto.TargetCombination, []string) ([]*storobj.Object, []float32, error)); ok { + return rf(ctx, searchVectors, targetVectors, targetDist, limit, _a5, sort, groupBy, _a8, targetCombination, properties) + } + if rf, ok := ret.Get(0).(func(context.Context, []models.Vector, []string, float32, int, *filters.LocalFilter, []filters.Sort, *searchparams.GroupBy, additional.Properties, *dto.TargetCombination, []string) []*storobj.Object); ok { + r0 = rf(ctx, searchVectors, targetVectors, targetDist, limit, _a5, sort, groupBy, _a8, targetCombination, properties) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*storobj.Object) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []models.Vector, []string, float32, int, *filters.LocalFilter, []filters.Sort, *searchparams.GroupBy, additional.Properties, *dto.TargetCombination, []string) []float32); ok { + r1 = rf(ctx, searchVectors, targetVectors, targetDist, limit, _a5, sort, groupBy, _a8, 
targetCombination, properties) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]float32) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, []models.Vector, []string, float32, int, *filters.LocalFilter, []filters.Sort, *searchparams.GroupBy, additional.Properties, *dto.TargetCombination, []string) error); ok { + r2 = rf(ctx, searchVectors, targetVectors, targetDist, limit, _a5, sort, groupBy, _a8, targetCombination, properties) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MockShardLike_ObjectVectorSearch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ObjectVectorSearch' +type MockShardLike_ObjectVectorSearch_Call struct { + *mock.Call +} + +// ObjectVectorSearch is a helper method to define mock.On call +// - ctx context.Context +// - searchVectors []models.Vector +// - targetVectors []string +// - targetDist float32 +// - limit int +// - _a5 *filters.LocalFilter +// - sort []filters.Sort +// - groupBy *searchparams.GroupBy +// - _a8 additional.Properties +// - targetCombination *dto.TargetCombination +// - properties []string +func (_e *MockShardLike_Expecter) ObjectVectorSearch(ctx interface{}, searchVectors interface{}, targetVectors interface{}, targetDist interface{}, limit interface{}, _a5 interface{}, sort interface{}, groupBy interface{}, _a8 interface{}, targetCombination interface{}, properties interface{}) *MockShardLike_ObjectVectorSearch_Call { + return &MockShardLike_ObjectVectorSearch_Call{Call: _e.mock.On("ObjectVectorSearch", ctx, searchVectors, targetVectors, targetDist, limit, _a5, sort, groupBy, _a8, targetCombination, properties)} +} + +func (_c *MockShardLike_ObjectVectorSearch_Call) Run(run func(ctx context.Context, searchVectors []models.Vector, targetVectors []string, targetDist float32, limit int, _a5 *filters.LocalFilter, sort []filters.Sort, groupBy *searchparams.GroupBy, _a8 additional.Properties, targetCombination *dto.TargetCombination, properties 
[]string)) *MockShardLike_ObjectVectorSearch_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]models.Vector), args[2].([]string), args[3].(float32), args[4].(int), args[5].(*filters.LocalFilter), args[6].([]filters.Sort), args[7].(*searchparams.GroupBy), args[8].(additional.Properties), args[9].(*dto.TargetCombination), args[10].([]string)) + }) + return _c +} + +func (_c *MockShardLike_ObjectVectorSearch_Call) Return(_a0 []*storobj.Object, _a1 []float32, _a2 error) *MockShardLike_ObjectVectorSearch_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *MockShardLike_ObjectVectorSearch_Call) RunAndReturn(run func(context.Context, []models.Vector, []string, float32, int, *filters.LocalFilter, []filters.Sort, *searchparams.GroupBy, additional.Properties, *dto.TargetCombination, []string) ([]*storobj.Object, []float32, error)) *MockShardLike_ObjectVectorSearch_Call { + _c.Call.Return(run) + return _c +} + +// PutObject provides a mock function with given fields: _a0, _a1 +func (_m *MockShardLike) PutObject(_a0 context.Context, _a1 *storobj.Object) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for PutObject") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *storobj.Object) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_PutObject_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PutObject' +type MockShardLike_PutObject_Call struct { + *mock.Call +} + +// PutObject is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *storobj.Object +func (_e *MockShardLike_Expecter) PutObject(_a0 interface{}, _a1 interface{}) *MockShardLike_PutObject_Call { + return &MockShardLike_PutObject_Call{Call: _e.mock.On("PutObject", _a0, _a1)} +} + +func (_c *MockShardLike_PutObject_Call) Run(run func(_a0 context.Context, _a1 *storobj.Object)) 
*MockShardLike_PutObject_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*storobj.Object)) + }) + return _c +} + +func (_c *MockShardLike_PutObject_Call) Return(_a0 error) *MockShardLike_PutObject_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_PutObject_Call) RunAndReturn(run func(context.Context, *storobj.Object) error) *MockShardLike_PutObject_Call { + _c.Call.Return(run) + return _c +} + +// PutObjectBatch provides a mock function with given fields: _a0, _a1 +func (_m *MockShardLike) PutObjectBatch(_a0 context.Context, _a1 []*storobj.Object) []error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for PutObjectBatch") + } + + var r0 []error + if rf, ok := ret.Get(0).(func(context.Context, []*storobj.Object) []error); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]error) + } + } + + return r0 +} + +// MockShardLike_PutObjectBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PutObjectBatch' +type MockShardLike_PutObjectBatch_Call struct { + *mock.Call +} + +// PutObjectBatch is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 []*storobj.Object +func (_e *MockShardLike_Expecter) PutObjectBatch(_a0 interface{}, _a1 interface{}) *MockShardLike_PutObjectBatch_Call { + return &MockShardLike_PutObjectBatch_Call{Call: _e.mock.On("PutObjectBatch", _a0, _a1)} +} + +func (_c *MockShardLike_PutObjectBatch_Call) Run(run func(_a0 context.Context, _a1 []*storobj.Object)) *MockShardLike_PutObjectBatch_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]*storobj.Object)) + }) + return _c +} + +func (_c *MockShardLike_PutObjectBatch_Call) Return(_a0 []error) *MockShardLike_PutObjectBatch_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_PutObjectBatch_Call) RunAndReturn(run func(context.Context, 
[]*storobj.Object) []error) *MockShardLike_PutObjectBatch_Call { + _c.Call.Return(run) + return _c +} + +// QuantizedDimensions provides a mock function with given fields: ctx, targetVector, segments +func (_m *MockShardLike) QuantizedDimensions(ctx context.Context, targetVector string, segments int) int { + ret := _m.Called(ctx, targetVector, segments) + + if len(ret) == 0 { + panic("no return value specified for QuantizedDimensions") + } + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context, string, int) int); ok { + r0 = rf(ctx, targetVector, segments) + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// MockShardLike_QuantizedDimensions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QuantizedDimensions' +type MockShardLike_QuantizedDimensions_Call struct { + *mock.Call +} + +// QuantizedDimensions is a helper method to define mock.On call +// - ctx context.Context +// - targetVector string +// - segments int +func (_e *MockShardLike_Expecter) QuantizedDimensions(ctx interface{}, targetVector interface{}, segments interface{}) *MockShardLike_QuantizedDimensions_Call { + return &MockShardLike_QuantizedDimensions_Call{Call: _e.mock.On("QuantizedDimensions", ctx, targetVector, segments)} +} + +func (_c *MockShardLike_QuantizedDimensions_Call) Run(run func(ctx context.Context, targetVector string, segments int)) *MockShardLike_QuantizedDimensions_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(int)) + }) + return _c +} + +func (_c *MockShardLike_QuantizedDimensions_Call) Return(_a0 int) *MockShardLike_QuantizedDimensions_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_QuantizedDimensions_Call) RunAndReturn(run func(context.Context, string, int) int) *MockShardLike_QuantizedDimensions_Call { + _c.Call.Return(run) + return _c +} + +// RepairIndex provides a mock function with given fields: ctx, targetVector +func (_m 
*MockShardLike) RepairIndex(ctx context.Context, targetVector string) error { + ret := _m.Called(ctx, targetVector) + + if len(ret) == 0 { + panic("no return value specified for RepairIndex") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, targetVector) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_RepairIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RepairIndex' +type MockShardLike_RepairIndex_Call struct { + *mock.Call +} + +// RepairIndex is a helper method to define mock.On call +// - ctx context.Context +// - targetVector string +func (_e *MockShardLike_Expecter) RepairIndex(ctx interface{}, targetVector interface{}) *MockShardLike_RepairIndex_Call { + return &MockShardLike_RepairIndex_Call{Call: _e.mock.On("RepairIndex", ctx, targetVector)} +} + +func (_c *MockShardLike_RepairIndex_Call) Run(run func(ctx context.Context, targetVector string)) *MockShardLike_RepairIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockShardLike_RepairIndex_Call) Return(_a0 error) *MockShardLike_RepairIndex_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_RepairIndex_Call) RunAndReturn(run func(context.Context, string) error) *MockShardLike_RepairIndex_Call { + _c.Call.Return(run) + return _c +} + +// SetAsyncReplicationEnabled provides a mock function with given fields: ctx, enabled +func (_m *MockShardLike) SetAsyncReplicationEnabled(ctx context.Context, enabled bool) error { + ret := _m.Called(ctx, enabled) + + if len(ret) == 0 { + panic("no return value specified for SetAsyncReplicationEnabled") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, bool) error); ok { + r0 = rf(ctx, enabled) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_SetAsyncReplicationEnabled_Call is a *mock.Call that 
shadows Run/Return methods with type explicit version for method 'SetAsyncReplicationEnabled' +type MockShardLike_SetAsyncReplicationEnabled_Call struct { + *mock.Call +} + +// SetAsyncReplicationEnabled is a helper method to define mock.On call +// - ctx context.Context +// - enabled bool +func (_e *MockShardLike_Expecter) SetAsyncReplicationEnabled(ctx interface{}, enabled interface{}) *MockShardLike_SetAsyncReplicationEnabled_Call { + return &MockShardLike_SetAsyncReplicationEnabled_Call{Call: _e.mock.On("SetAsyncReplicationEnabled", ctx, enabled)} +} + +func (_c *MockShardLike_SetAsyncReplicationEnabled_Call) Run(run func(ctx context.Context, enabled bool)) *MockShardLike_SetAsyncReplicationEnabled_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(bool)) + }) + return _c +} + +func (_c *MockShardLike_SetAsyncReplicationEnabled_Call) Return(_a0 error) *MockShardLike_SetAsyncReplicationEnabled_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_SetAsyncReplicationEnabled_Call) RunAndReturn(run func(context.Context, bool) error) *MockShardLike_SetAsyncReplicationEnabled_Call { + _c.Call.Return(run) + return _c +} + +// SetPropertyLengths provides a mock function with given fields: props +func (_m *MockShardLike) SetPropertyLengths(props []inverted.Property) error { + ret := _m.Called(props) + + if len(ret) == 0 { + panic("no return value specified for SetPropertyLengths") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]inverted.Property) error); ok { + r0 = rf(props) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_SetPropertyLengths_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetPropertyLengths' +type MockShardLike_SetPropertyLengths_Call struct { + *mock.Call +} + +// SetPropertyLengths is a helper method to define mock.On call +// - props []inverted.Property +func (_e *MockShardLike_Expecter) SetPropertyLengths(props 
interface{}) *MockShardLike_SetPropertyLengths_Call { + return &MockShardLike_SetPropertyLengths_Call{Call: _e.mock.On("SetPropertyLengths", props)} +} + +func (_c *MockShardLike_SetPropertyLengths_Call) Run(run func(props []inverted.Property)) *MockShardLike_SetPropertyLengths_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].([]inverted.Property)) + }) + return _c +} + +func (_c *MockShardLike_SetPropertyLengths_Call) Return(_a0 error) *MockShardLike_SetPropertyLengths_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_SetPropertyLengths_Call) RunAndReturn(run func([]inverted.Property) error) *MockShardLike_SetPropertyLengths_Call { + _c.Call.Return(run) + return _c +} + +// SetStatusReadonly provides a mock function with given fields: reason +func (_m *MockShardLike) SetStatusReadonly(reason string) error { + ret := _m.Called(reason) + + if len(ret) == 0 { + panic("no return value specified for SetStatusReadonly") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(reason) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_SetStatusReadonly_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetStatusReadonly' +type MockShardLike_SetStatusReadonly_Call struct { + *mock.Call +} + +// SetStatusReadonly is a helper method to define mock.On call +// - reason string +func (_e *MockShardLike_Expecter) SetStatusReadonly(reason interface{}) *MockShardLike_SetStatusReadonly_Call { + return &MockShardLike_SetStatusReadonly_Call{Call: _e.mock.On("SetStatusReadonly", reason)} +} + +func (_c *MockShardLike_SetStatusReadonly_Call) Run(run func(reason string)) *MockShardLike_SetStatusReadonly_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockShardLike_SetStatusReadonly_Call) Return(_a0 error) *MockShardLike_SetStatusReadonly_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c 
*MockShardLike_SetStatusReadonly_Call) RunAndReturn(run func(string) error) *MockShardLike_SetStatusReadonly_Call { + _c.Call.Return(run) + return _c +} + +// Shutdown provides a mock function with given fields: _a0 +func (_m *MockShardLike) Shutdown(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Shutdown") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_Shutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Shutdown' +type MockShardLike_Shutdown_Call struct { + *mock.Call +} + +// Shutdown is a helper method to define mock.On call +// - _a0 context.Context +func (_e *MockShardLike_Expecter) Shutdown(_a0 interface{}) *MockShardLike_Shutdown_Call { + return &MockShardLike_Shutdown_Call{Call: _e.mock.On("Shutdown", _a0)} +} + +func (_c *MockShardLike_Shutdown_Call) Run(run func(_a0 context.Context)) *MockShardLike_Shutdown_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockShardLike_Shutdown_Call) Return(_a0 error) *MockShardLike_Shutdown_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_Shutdown_Call) RunAndReturn(run func(context.Context) error) *MockShardLike_Shutdown_Call { + _c.Call.Return(run) + return _c +} + +// Store provides a mock function with no fields +func (_m *MockShardLike) Store() *lsmkv.Store { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Store") + } + + var r0 *lsmkv.Store + if rf, ok := ret.Get(0).(func() *lsmkv.Store); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*lsmkv.Store) + } + } + + return r0 +} + +// MockShardLike_Store_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Store' +type MockShardLike_Store_Call struct { 
+ *mock.Call +} + +// Store is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) Store() *MockShardLike_Store_Call { + return &MockShardLike_Store_Call{Call: _e.mock.On("Store")} +} + +func (_c *MockShardLike_Store_Call) Run(run func()) *MockShardLike_Store_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_Store_Call) Return(_a0 *lsmkv.Store) *MockShardLike_Store_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_Store_Call) RunAndReturn(run func() *lsmkv.Store) *MockShardLike_Store_Call { + _c.Call.Return(run) + return _c +} + +// UpdateStatus provides a mock function with given fields: status, reason +func (_m *MockShardLike) UpdateStatus(status string, reason string) error { + ret := _m.Called(status, reason) + + if len(ret) == 0 { + panic("no return value specified for UpdateStatus") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, string) error); ok { + r0 = rf(status, reason) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_UpdateStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateStatus' +type MockShardLike_UpdateStatus_Call struct { + *mock.Call +} + +// UpdateStatus is a helper method to define mock.On call +// - status string +// - reason string +func (_e *MockShardLike_Expecter) UpdateStatus(status interface{}, reason interface{}) *MockShardLike_UpdateStatus_Call { + return &MockShardLike_UpdateStatus_Call{Call: _e.mock.On("UpdateStatus", status, reason)} +} + +func (_c *MockShardLike_UpdateStatus_Call) Run(run func(status string, reason string)) *MockShardLike_UpdateStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string)) + }) + return _c +} + +func (_c *MockShardLike_UpdateStatus_Call) Return(_a0 error) *MockShardLike_UpdateStatus_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_UpdateStatus_Call) 
RunAndReturn(run func(string, string) error) *MockShardLike_UpdateStatus_Call { + _c.Call.Return(run) + return _c +} + +// UpdateVectorIndexConfig provides a mock function with given fields: ctx, updated +func (_m *MockShardLike) UpdateVectorIndexConfig(ctx context.Context, updated config.VectorIndexConfig) error { + ret := _m.Called(ctx, updated) + + if len(ret) == 0 { + panic("no return value specified for UpdateVectorIndexConfig") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, config.VectorIndexConfig) error); ok { + r0 = rf(ctx, updated) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_UpdateVectorIndexConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateVectorIndexConfig' +type MockShardLike_UpdateVectorIndexConfig_Call struct { + *mock.Call +} + +// UpdateVectorIndexConfig is a helper method to define mock.On call +// - ctx context.Context +// - updated config.VectorIndexConfig +func (_e *MockShardLike_Expecter) UpdateVectorIndexConfig(ctx interface{}, updated interface{}) *MockShardLike_UpdateVectorIndexConfig_Call { + return &MockShardLike_UpdateVectorIndexConfig_Call{Call: _e.mock.On("UpdateVectorIndexConfig", ctx, updated)} +} + +func (_c *MockShardLike_UpdateVectorIndexConfig_Call) Run(run func(ctx context.Context, updated config.VectorIndexConfig)) *MockShardLike_UpdateVectorIndexConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(config.VectorIndexConfig)) + }) + return _c +} + +func (_c *MockShardLike_UpdateVectorIndexConfig_Call) Return(_a0 error) *MockShardLike_UpdateVectorIndexConfig_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_UpdateVectorIndexConfig_Call) RunAndReturn(run func(context.Context, config.VectorIndexConfig) error) *MockShardLike_UpdateVectorIndexConfig_Call { + _c.Call.Return(run) + return _c +} + +// UpdateVectorIndexConfigs provides a mock function with given 
fields: ctx, updated +func (_m *MockShardLike) UpdateVectorIndexConfigs(ctx context.Context, updated map[string]config.VectorIndexConfig) error { + ret := _m.Called(ctx, updated) + + if len(ret) == 0 { + panic("no return value specified for UpdateVectorIndexConfigs") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, map[string]config.VectorIndexConfig) error); ok { + r0 = rf(ctx, updated) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_UpdateVectorIndexConfigs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateVectorIndexConfigs' +type MockShardLike_UpdateVectorIndexConfigs_Call struct { + *mock.Call +} + +// UpdateVectorIndexConfigs is a helper method to define mock.On call +// - ctx context.Context +// - updated map[string]config.VectorIndexConfig +func (_e *MockShardLike_Expecter) UpdateVectorIndexConfigs(ctx interface{}, updated interface{}) *MockShardLike_UpdateVectorIndexConfigs_Call { + return &MockShardLike_UpdateVectorIndexConfigs_Call{Call: _e.mock.On("UpdateVectorIndexConfigs", ctx, updated)} +} + +func (_c *MockShardLike_UpdateVectorIndexConfigs_Call) Run(run func(ctx context.Context, updated map[string]config.VectorIndexConfig)) *MockShardLike_UpdateVectorIndexConfigs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(map[string]config.VectorIndexConfig)) + }) + return _c +} + +func (_c *MockShardLike_UpdateVectorIndexConfigs_Call) Return(_a0 error) *MockShardLike_UpdateVectorIndexConfigs_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_UpdateVectorIndexConfigs_Call) RunAndReturn(run func(context.Context, map[string]config.VectorIndexConfig) error) *MockShardLike_UpdateVectorIndexConfigs_Call { + _c.Call.Return(run) + return _c +} + +// VectorDistanceForQuery provides a mock function with given fields: ctx, id, searchVectors, targets +func (_m *MockShardLike) VectorDistanceForQuery(ctx context.Context, 
id uint64, searchVectors []models.Vector, targets []string) ([]float32, error) { + ret := _m.Called(ctx, id, searchVectors, targets) + + if len(ret) == 0 { + panic("no return value specified for VectorDistanceForQuery") + } + + var r0 []float32 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, []models.Vector, []string) ([]float32, error)); ok { + return rf(ctx, id, searchVectors, targets) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, []models.Vector, []string) []float32); ok { + r0 = rf(ctx, id, searchVectors, targets) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]float32) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, []models.Vector, []string) error); ok { + r1 = rf(ctx, id, searchVectors, targets) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_VectorDistanceForQuery_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'VectorDistanceForQuery' +type MockShardLike_VectorDistanceForQuery_Call struct { + *mock.Call +} + +// VectorDistanceForQuery is a helper method to define mock.On call +// - ctx context.Context +// - id uint64 +// - searchVectors []models.Vector +// - targets []string +func (_e *MockShardLike_Expecter) VectorDistanceForQuery(ctx interface{}, id interface{}, searchVectors interface{}, targets interface{}) *MockShardLike_VectorDistanceForQuery_Call { + return &MockShardLike_VectorDistanceForQuery_Call{Call: _e.mock.On("VectorDistanceForQuery", ctx, id, searchVectors, targets)} +} + +func (_c *MockShardLike_VectorDistanceForQuery_Call) Run(run func(ctx context.Context, id uint64, searchVectors []models.Vector, targets []string)) *MockShardLike_VectorDistanceForQuery_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].([]models.Vector), args[3].([]string)) + }) + return _c +} + +func (_c *MockShardLike_VectorDistanceForQuery_Call) Return(_a0 []float32, _a1 error) 
*MockShardLike_VectorDistanceForQuery_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_VectorDistanceForQuery_Call) RunAndReturn(run func(context.Context, uint64, []models.Vector, []string) ([]float32, error)) *MockShardLike_VectorDistanceForQuery_Call { + _c.Call.Return(run) + return _c +} + +// VectorStorageSize provides a mock function with given fields: ctx +func (_m *MockShardLike) VectorStorageSize(ctx context.Context) (int64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for VectorStorageSize") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (int64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) int64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_VectorStorageSize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'VectorStorageSize' +type MockShardLike_VectorStorageSize_Call struct { + *mock.Call +} + +// VectorStorageSize is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockShardLike_Expecter) VectorStorageSize(ctx interface{}) *MockShardLike_VectorStorageSize_Call { + return &MockShardLike_VectorStorageSize_Call{Call: _e.mock.On("VectorStorageSize", ctx)} +} + +func (_c *MockShardLike_VectorStorageSize_Call) Run(run func(ctx context.Context)) *MockShardLike_VectorStorageSize_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockShardLike_VectorStorageSize_Call) Return(_a0 int64, _a1 error) *MockShardLike_VectorStorageSize_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_VectorStorageSize_Call) RunAndReturn(run func(context.Context) (int64, error)) *MockShardLike_VectorStorageSize_Call 
{ + _c.Call.Return(run) + return _c +} + +// Versioner provides a mock function with no fields +func (_m *MockShardLike) Versioner() *shardVersioner { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Versioner") + } + + var r0 *shardVersioner + if rf, ok := ret.Get(0).(func() *shardVersioner); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*shardVersioner) + } + } + + return r0 +} + +// MockShardLike_Versioner_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Versioner' +type MockShardLike_Versioner_Call struct { + *mock.Call +} + +// Versioner is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) Versioner() *MockShardLike_Versioner_Call { + return &MockShardLike_Versioner_Call{Call: _e.mock.On("Versioner")} +} + +func (_c *MockShardLike_Versioner_Call) Run(run func()) *MockShardLike_Versioner_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_Versioner_Call) Return(_a0 *shardVersioner) *MockShardLike_Versioner_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_Versioner_Call) RunAndReturn(run func() *shardVersioner) *MockShardLike_Versioner_Call { + _c.Call.Return(run) + return _c +} + +// WasDeleted provides a mock function with given fields: ctx, id +func (_m *MockShardLike) WasDeleted(ctx context.Context, id strfmt.UUID) (bool, time.Time, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for WasDeleted") + } + + var r0 bool + var r1 time.Time + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID) (bool, time.Time, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID) bool); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, strfmt.UUID) time.Time); ok { + r1 = rf(ctx, id) + } else { + r1 = 
ret.Get(1).(time.Time) + } + + if rf, ok := ret.Get(2).(func(context.Context, strfmt.UUID) error); ok { + r2 = rf(ctx, id) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MockShardLike_WasDeleted_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WasDeleted' +type MockShardLike_WasDeleted_Call struct { + *mock.Call +} + +// WasDeleted is a helper method to define mock.On call +// - ctx context.Context +// - id strfmt.UUID +func (_e *MockShardLike_Expecter) WasDeleted(ctx interface{}, id interface{}) *MockShardLike_WasDeleted_Call { + return &MockShardLike_WasDeleted_Call{Call: _e.mock.On("WasDeleted", ctx, id)} +} + +func (_c *MockShardLike_WasDeleted_Call) Run(run func(ctx context.Context, id strfmt.UUID)) *MockShardLike_WasDeleted_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(strfmt.UUID)) + }) + return _c +} + +func (_c *MockShardLike_WasDeleted_Call) Return(_a0 bool, _a1 time.Time, _a2 error) *MockShardLike_WasDeleted_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *MockShardLike_WasDeleted_Call) RunAndReturn(run func(context.Context, strfmt.UUID) (bool, time.Time, error)) *MockShardLike_WasDeleted_Call { + _c.Call.Return(run) + return _c +} + +// abortReplication provides a mock function with given fields: _a0, _a1 +func (_m *MockShardLike) abortReplication(_a0 context.Context, _a1 string) replica.SimpleResponse { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for abortReplication") + } + + var r0 replica.SimpleResponse + if rf, ok := ret.Get(0).(func(context.Context, string) replica.SimpleResponse); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(replica.SimpleResponse) + } + + return r0 +} + +// MockShardLike_abortReplication_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'abortReplication' +type MockShardLike_abortReplication_Call struct { + 
*mock.Call +} + +// abortReplication is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *MockShardLike_Expecter) abortReplication(_a0 interface{}, _a1 interface{}) *MockShardLike_abortReplication_Call { + return &MockShardLike_abortReplication_Call{Call: _e.mock.On("abortReplication", _a0, _a1)} +} + +func (_c *MockShardLike_abortReplication_Call) Run(run func(_a0 context.Context, _a1 string)) *MockShardLike_abortReplication_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockShardLike_abortReplication_Call) Return(_a0 replica.SimpleResponse) *MockShardLike_abortReplication_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_abortReplication_Call) RunAndReturn(run func(context.Context, string) replica.SimpleResponse) *MockShardLike_abortReplication_Call { + _c.Call.Return(run) + return _c +} + +// addJobToQueue provides a mock function with given fields: job0 +func (_m *MockShardLike) addJobToQueue(job0 job) { + _m.Called(job0) +} + +// MockShardLike_addJobToQueue_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'addJobToQueue' +type MockShardLike_addJobToQueue_Call struct { + *mock.Call +} + +// addJobToQueue is a helper method to define mock.On call +// - job0 job +func (_e *MockShardLike_Expecter) addJobToQueue(job0 interface{}) *MockShardLike_addJobToQueue_Call { + return &MockShardLike_addJobToQueue_Call{Call: _e.mock.On("addJobToQueue", job0)} +} + +func (_c *MockShardLike_addJobToQueue_Call) Run(run func(job0 job)) *MockShardLike_addJobToQueue_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(job)) + }) + return _c +} + +func (_c *MockShardLike_addJobToQueue_Call) Return() *MockShardLike_addJobToQueue_Call { + _c.Call.Return() + return _c +} + +func (_c *MockShardLike_addJobToQueue_Call) RunAndReturn(run func(job)) *MockShardLike_addJobToQueue_Call { + 
_c.Run(run) + return _c +} + +// addTargetNodeOverride provides a mock function with given fields: ctx, targetNodeOverride +func (_m *MockShardLike) addTargetNodeOverride(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride) error { + ret := _m.Called(ctx, targetNodeOverride) + + if len(ret) == 0 { + panic("no return value specified for addTargetNodeOverride") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, additional.AsyncReplicationTargetNodeOverride) error); ok { + r0 = rf(ctx, targetNodeOverride) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_addTargetNodeOverride_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'addTargetNodeOverride' +type MockShardLike_addTargetNodeOverride_Call struct { + *mock.Call +} + +// addTargetNodeOverride is a helper method to define mock.On call +// - ctx context.Context +// - targetNodeOverride additional.AsyncReplicationTargetNodeOverride +func (_e *MockShardLike_Expecter) addTargetNodeOverride(ctx interface{}, targetNodeOverride interface{}) *MockShardLike_addTargetNodeOverride_Call { + return &MockShardLike_addTargetNodeOverride_Call{Call: _e.mock.On("addTargetNodeOverride", ctx, targetNodeOverride)} +} + +func (_c *MockShardLike_addTargetNodeOverride_Call) Run(run func(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride)) *MockShardLike_addTargetNodeOverride_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(additional.AsyncReplicationTargetNodeOverride)) + }) + return _c +} + +func (_c *MockShardLike_addTargetNodeOverride_Call) Return(_a0 error) *MockShardLike_addTargetNodeOverride_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_addTargetNodeOverride_Call) RunAndReturn(run func(context.Context, additional.AsyncReplicationTargetNodeOverride) error) *MockShardLike_addTargetNodeOverride_Call { + 
_c.Call.Return(run) + return _c +} + +// addToPropertyMapBucket provides a mock function with given fields: bucket, pair, key +func (_m *MockShardLike) addToPropertyMapBucket(bucket *lsmkv.Bucket, pair lsmkv.MapPair, key []byte) error { + ret := _m.Called(bucket, pair, key) + + if len(ret) == 0 { + panic("no return value specified for addToPropertyMapBucket") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*lsmkv.Bucket, lsmkv.MapPair, []byte) error); ok { + r0 = rf(bucket, pair, key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_addToPropertyMapBucket_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'addToPropertyMapBucket' +type MockShardLike_addToPropertyMapBucket_Call struct { + *mock.Call +} + +// addToPropertyMapBucket is a helper method to define mock.On call +// - bucket *lsmkv.Bucket +// - pair lsmkv.MapPair +// - key []byte +func (_e *MockShardLike_Expecter) addToPropertyMapBucket(bucket interface{}, pair interface{}, key interface{}) *MockShardLike_addToPropertyMapBucket_Call { + return &MockShardLike_addToPropertyMapBucket_Call{Call: _e.mock.On("addToPropertyMapBucket", bucket, pair, key)} +} + +func (_c *MockShardLike_addToPropertyMapBucket_Call) Run(run func(bucket *lsmkv.Bucket, pair lsmkv.MapPair, key []byte)) *MockShardLike_addToPropertyMapBucket_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*lsmkv.Bucket), args[1].(lsmkv.MapPair), args[2].([]byte)) + }) + return _c +} + +func (_c *MockShardLike_addToPropertyMapBucket_Call) Return(_a0 error) *MockShardLike_addToPropertyMapBucket_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_addToPropertyMapBucket_Call) RunAndReturn(run func(*lsmkv.Bucket, lsmkv.MapPair, []byte) error) *MockShardLike_addToPropertyMapBucket_Call { + _c.Call.Return(run) + return _c +} + +// addToPropertyRangeBucket provides a mock function with given fields: bucket, docID, key +func (_m *MockShardLike) 
addToPropertyRangeBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error { + ret := _m.Called(bucket, docID, key) + + if len(ret) == 0 { + panic("no return value specified for addToPropertyRangeBucket") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*lsmkv.Bucket, uint64, []byte) error); ok { + r0 = rf(bucket, docID, key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_addToPropertyRangeBucket_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'addToPropertyRangeBucket' +type MockShardLike_addToPropertyRangeBucket_Call struct { + *mock.Call +} + +// addToPropertyRangeBucket is a helper method to define mock.On call +// - bucket *lsmkv.Bucket +// - docID uint64 +// - key []byte +func (_e *MockShardLike_Expecter) addToPropertyRangeBucket(bucket interface{}, docID interface{}, key interface{}) *MockShardLike_addToPropertyRangeBucket_Call { + return &MockShardLike_addToPropertyRangeBucket_Call{Call: _e.mock.On("addToPropertyRangeBucket", bucket, docID, key)} +} + +func (_c *MockShardLike_addToPropertyRangeBucket_Call) Run(run func(bucket *lsmkv.Bucket, docID uint64, key []byte)) *MockShardLike_addToPropertyRangeBucket_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*lsmkv.Bucket), args[1].(uint64), args[2].([]byte)) + }) + return _c +} + +func (_c *MockShardLike_addToPropertyRangeBucket_Call) Return(_a0 error) *MockShardLike_addToPropertyRangeBucket_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_addToPropertyRangeBucket_Call) RunAndReturn(run func(*lsmkv.Bucket, uint64, []byte) error) *MockShardLike_addToPropertyRangeBucket_Call { + _c.Call.Return(run) + return _c +} + +// addToPropertySetBucket provides a mock function with given fields: bucket, docID, key +func (_m *MockShardLike) addToPropertySetBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error { + ret := _m.Called(bucket, docID, key) + + if len(ret) == 0 { + panic("no return value specified 
for addToPropertySetBucket") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*lsmkv.Bucket, uint64, []byte) error); ok { + r0 = rf(bucket, docID, key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_addToPropertySetBucket_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'addToPropertySetBucket' +type MockShardLike_addToPropertySetBucket_Call struct { + *mock.Call +} + +// addToPropertySetBucket is a helper method to define mock.On call +// - bucket *lsmkv.Bucket +// - docID uint64 +// - key []byte +func (_e *MockShardLike_Expecter) addToPropertySetBucket(bucket interface{}, docID interface{}, key interface{}) *MockShardLike_addToPropertySetBucket_Call { + return &MockShardLike_addToPropertySetBucket_Call{Call: _e.mock.On("addToPropertySetBucket", bucket, docID, key)} +} + +func (_c *MockShardLike_addToPropertySetBucket_Call) Run(run func(bucket *lsmkv.Bucket, docID uint64, key []byte)) *MockShardLike_addToPropertySetBucket_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*lsmkv.Bucket), args[1].(uint64), args[2].([]byte)) + }) + return _c +} + +func (_c *MockShardLike_addToPropertySetBucket_Call) Return(_a0 error) *MockShardLike_addToPropertySetBucket_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_addToPropertySetBucket_Call) RunAndReturn(run func(*lsmkv.Bucket, uint64, []byte) error) *MockShardLike_addToPropertySetBucket_Call { + _c.Call.Return(run) + return _c +} + +// batchDeleteObject provides a mock function with given fields: ctx, id, deletionTime +func (_m *MockShardLike) batchDeleteObject(ctx context.Context, id strfmt.UUID, deletionTime time.Time) error { + ret := _m.Called(ctx, id, deletionTime) + + if len(ret) == 0 { + panic("no return value specified for batchDeleteObject") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, strfmt.UUID, time.Time) error); ok { + r0 = rf(ctx, id, deletionTime) + } else { + r0 = ret.Error(0) + } 
+ + return r0 +} + +// MockShardLike_batchDeleteObject_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'batchDeleteObject' +type MockShardLike_batchDeleteObject_Call struct { + *mock.Call +} + +// batchDeleteObject is a helper method to define mock.On call +// - ctx context.Context +// - id strfmt.UUID +// - deletionTime time.Time +func (_e *MockShardLike_Expecter) batchDeleteObject(ctx interface{}, id interface{}, deletionTime interface{}) *MockShardLike_batchDeleteObject_Call { + return &MockShardLike_batchDeleteObject_Call{Call: _e.mock.On("batchDeleteObject", ctx, id, deletionTime)} +} + +func (_c *MockShardLike_batchDeleteObject_Call) Run(run func(ctx context.Context, id strfmt.UUID, deletionTime time.Time)) *MockShardLike_batchDeleteObject_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(strfmt.UUID), args[2].(time.Time)) + }) + return _c +} + +func (_c *MockShardLike_batchDeleteObject_Call) Return(_a0 error) *MockShardLike_batchDeleteObject_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_batchDeleteObject_Call) RunAndReturn(run func(context.Context, strfmt.UUID, time.Time) error) *MockShardLike_batchDeleteObject_Call { + _c.Call.Return(run) + return _c +} + +// batchExtendInvertedIndexItemsLSMNoFrequency provides a mock function with given fields: b, item +func (_m *MockShardLike) batchExtendInvertedIndexItemsLSMNoFrequency(b *lsmkv.Bucket, item inverted.MergeItem) error { + ret := _m.Called(b, item) + + if len(ret) == 0 { + panic("no return value specified for batchExtendInvertedIndexItemsLSMNoFrequency") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*lsmkv.Bucket, inverted.MergeItem) error); ok { + r0 = rf(b, item) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_batchExtendInvertedIndexItemsLSMNoFrequency_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'batchExtendInvertedIndexItemsLSMNoFrequency' +type MockShardLike_batchExtendInvertedIndexItemsLSMNoFrequency_Call struct { + *mock.Call +} + +// batchExtendInvertedIndexItemsLSMNoFrequency is a helper method to define mock.On call +// - b *lsmkv.Bucket +// - item inverted.MergeItem +func (_e *MockShardLike_Expecter) batchExtendInvertedIndexItemsLSMNoFrequency(b interface{}, item interface{}) *MockShardLike_batchExtendInvertedIndexItemsLSMNoFrequency_Call { + return &MockShardLike_batchExtendInvertedIndexItemsLSMNoFrequency_Call{Call: _e.mock.On("batchExtendInvertedIndexItemsLSMNoFrequency", b, item)} +} + +func (_c *MockShardLike_batchExtendInvertedIndexItemsLSMNoFrequency_Call) Run(run func(b *lsmkv.Bucket, item inverted.MergeItem)) *MockShardLike_batchExtendInvertedIndexItemsLSMNoFrequency_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*lsmkv.Bucket), args[1].(inverted.MergeItem)) + }) + return _c +} + +func (_c *MockShardLike_batchExtendInvertedIndexItemsLSMNoFrequency_Call) Return(_a0 error) *MockShardLike_batchExtendInvertedIndexItemsLSMNoFrequency_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_batchExtendInvertedIndexItemsLSMNoFrequency_Call) RunAndReturn(run func(*lsmkv.Bucket, inverted.MergeItem) error) *MockShardLike_batchExtendInvertedIndexItemsLSMNoFrequency_Call { + _c.Call.Return(run) + return _c +} + +// commitReplication provides a mock function with given fields: _a0, _a1, _a2 +func (_m *MockShardLike) commitReplication(_a0 context.Context, _a1 string, _a2 *shardTransfer) interface{} { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for commitReplication") + } + + var r0 interface{} + if rf, ok := ret.Get(0).(func(context.Context, string, *shardTransfer) interface{}); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + return r0 +} + +// MockShardLike_commitReplication_Call is a *mock.Call that shadows 
Run/Return methods with type explicit version for method 'commitReplication' +type MockShardLike_commitReplication_Call struct { + *mock.Call +} + +// commitReplication is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +// - _a2 *shardTransfer +func (_e *MockShardLike_Expecter) commitReplication(_a0 interface{}, _a1 interface{}, _a2 interface{}) *MockShardLike_commitReplication_Call { + return &MockShardLike_commitReplication_Call{Call: _e.mock.On("commitReplication", _a0, _a1, _a2)} +} + +func (_c *MockShardLike_commitReplication_Call) Run(run func(_a0 context.Context, _a1 string, _a2 *shardTransfer)) *MockShardLike_commitReplication_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(*shardTransfer)) + }) + return _c +} + +func (_c *MockShardLike_commitReplication_Call) Return(_a0 interface{}) *MockShardLike_commitReplication_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_commitReplication_Call) RunAndReturn(run func(context.Context, string, *shardTransfer) interface{}) *MockShardLike_commitReplication_Call { + _c.Call.Return(run) + return _c +} + +// deleteFromPropertyRangeBucket provides a mock function with given fields: bucket, docID, key +func (_m *MockShardLike) deleteFromPropertyRangeBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error { + ret := _m.Called(bucket, docID, key) + + if len(ret) == 0 { + panic("no return value specified for deleteFromPropertyRangeBucket") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*lsmkv.Bucket, uint64, []byte) error); ok { + r0 = rf(bucket, docID, key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_deleteFromPropertyRangeBucket_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'deleteFromPropertyRangeBucket' +type MockShardLike_deleteFromPropertyRangeBucket_Call struct { + *mock.Call +} + +// deleteFromPropertyRangeBucket is a 
helper method to define mock.On call +// - bucket *lsmkv.Bucket +// - docID uint64 +// - key []byte +func (_e *MockShardLike_Expecter) deleteFromPropertyRangeBucket(bucket interface{}, docID interface{}, key interface{}) *MockShardLike_deleteFromPropertyRangeBucket_Call { + return &MockShardLike_deleteFromPropertyRangeBucket_Call{Call: _e.mock.On("deleteFromPropertyRangeBucket", bucket, docID, key)} +} + +func (_c *MockShardLike_deleteFromPropertyRangeBucket_Call) Run(run func(bucket *lsmkv.Bucket, docID uint64, key []byte)) *MockShardLike_deleteFromPropertyRangeBucket_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*lsmkv.Bucket), args[1].(uint64), args[2].([]byte)) + }) + return _c +} + +func (_c *MockShardLike_deleteFromPropertyRangeBucket_Call) Return(_a0 error) *MockShardLike_deleteFromPropertyRangeBucket_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_deleteFromPropertyRangeBucket_Call) RunAndReturn(run func(*lsmkv.Bucket, uint64, []byte) error) *MockShardLike_deleteFromPropertyRangeBucket_Call { + _c.Call.Return(run) + return _c +} + +// deleteFromPropertySetBucket provides a mock function with given fields: bucket, docID, key +func (_m *MockShardLike) deleteFromPropertySetBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error { + ret := _m.Called(bucket, docID, key) + + if len(ret) == 0 { + panic("no return value specified for deleteFromPropertySetBucket") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*lsmkv.Bucket, uint64, []byte) error); ok { + r0 = rf(bucket, docID, key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_deleteFromPropertySetBucket_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'deleteFromPropertySetBucket' +type MockShardLike_deleteFromPropertySetBucket_Call struct { + *mock.Call +} + +// deleteFromPropertySetBucket is a helper method to define mock.On call +// - bucket *lsmkv.Bucket +// - docID uint64 +// - key []byte 
+func (_e *MockShardLike_Expecter) deleteFromPropertySetBucket(bucket interface{}, docID interface{}, key interface{}) *MockShardLike_deleteFromPropertySetBucket_Call { + return &MockShardLike_deleteFromPropertySetBucket_Call{Call: _e.mock.On("deleteFromPropertySetBucket", bucket, docID, key)} +} + +func (_c *MockShardLike_deleteFromPropertySetBucket_Call) Run(run func(bucket *lsmkv.Bucket, docID uint64, key []byte)) *MockShardLike_deleteFromPropertySetBucket_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*lsmkv.Bucket), args[1].(uint64), args[2].([]byte)) + }) + return _c +} + +func (_c *MockShardLike_deleteFromPropertySetBucket_Call) Return(_a0 error) *MockShardLike_deleteFromPropertySetBucket_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_deleteFromPropertySetBucket_Call) RunAndReturn(run func(*lsmkv.Bucket, uint64, []byte) error) *MockShardLike_deleteFromPropertySetBucket_Call { + _c.Call.Return(run) + return _c +} + +// drop provides a mock function with no fields +func (_m *MockShardLike) drop() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for drop") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_drop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'drop' +type MockShardLike_drop_Call struct { + *mock.Call +} + +// drop is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) drop() *MockShardLike_drop_Call { + return &MockShardLike_drop_Call{Call: _e.mock.On("drop")} +} + +func (_c *MockShardLike_drop_Call) Run(run func()) *MockShardLike_drop_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_drop_Call) Return(_a0 error) *MockShardLike_drop_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_drop_Call) RunAndReturn(run func() error) 
*MockShardLike_drop_Call { + _c.Call.Return(run) + return _c +} + +// extendDimensionTrackerLSM provides a mock function with given fields: dimLength, docID, targetVector +func (_m *MockShardLike) extendDimensionTrackerLSM(dimLength int, docID uint64, targetVector string) error { + ret := _m.Called(dimLength, docID, targetVector) + + if len(ret) == 0 { + panic("no return value specified for extendDimensionTrackerLSM") + } + + var r0 error + if rf, ok := ret.Get(0).(func(int, uint64, string) error); ok { + r0 = rf(dimLength, docID, targetVector) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_extendDimensionTrackerLSM_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'extendDimensionTrackerLSM' +type MockShardLike_extendDimensionTrackerLSM_Call struct { + *mock.Call +} + +// extendDimensionTrackerLSM is a helper method to define mock.On call +// - dimLength int +// - docID uint64 +// - targetVector string +func (_e *MockShardLike_Expecter) extendDimensionTrackerLSM(dimLength interface{}, docID interface{}, targetVector interface{}) *MockShardLike_extendDimensionTrackerLSM_Call { + return &MockShardLike_extendDimensionTrackerLSM_Call{Call: _e.mock.On("extendDimensionTrackerLSM", dimLength, docID, targetVector)} +} + +func (_c *MockShardLike_extendDimensionTrackerLSM_Call) Run(run func(dimLength int, docID uint64, targetVector string)) *MockShardLike_extendDimensionTrackerLSM_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(int), args[1].(uint64), args[2].(string)) + }) + return _c +} + +func (_c *MockShardLike_extendDimensionTrackerLSM_Call) Return(_a0 error) *MockShardLike_extendDimensionTrackerLSM_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_extendDimensionTrackerLSM_Call) RunAndReturn(run func(int, uint64, string) error) *MockShardLike_extendDimensionTrackerLSM_Call { + _c.Call.Return(run) + return _c +} + +// filePutter provides a mock function with given 
fields: _a0, _a1 +func (_m *MockShardLike) filePutter(_a0 context.Context, _a1 string) (io.WriteCloser, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for filePutter") + } + + var r0 io.WriteCloser + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (io.WriteCloser, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, string) io.WriteCloser); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.WriteCloser) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_filePutter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'filePutter' +type MockShardLike_filePutter_Call struct { + *mock.Call +} + +// filePutter is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *MockShardLike_Expecter) filePutter(_a0 interface{}, _a1 interface{}) *MockShardLike_filePutter_Call { + return &MockShardLike_filePutter_Call{Call: _e.mock.On("filePutter", _a0, _a1)} +} + +func (_c *MockShardLike_filePutter_Call) Run(run func(_a0 context.Context, _a1 string)) *MockShardLike_filePutter_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockShardLike_filePutter_Call) Return(_a0 io.WriteCloser, _a1 error) *MockShardLike_filePutter_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_filePutter_Call) RunAndReturn(run func(context.Context, string) (io.WriteCloser, error)) *MockShardLike_filePutter_Call { + _c.Call.Return(run) + return _c +} + +// getAsyncReplicationStats provides a mock function with given fields: ctx +func (_m *MockShardLike) getAsyncReplicationStats(ctx context.Context) []*models.AsyncReplicationStatus { + ret := _m.Called(ctx) + + if 
len(ret) == 0 { + panic("no return value specified for getAsyncReplicationStats") + } + + var r0 []*models.AsyncReplicationStatus + if rf, ok := ret.Get(0).(func(context.Context) []*models.AsyncReplicationStatus); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.AsyncReplicationStatus) + } + } + + return r0 +} + +// MockShardLike_getAsyncReplicationStats_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'getAsyncReplicationStats' +type MockShardLike_getAsyncReplicationStats_Call struct { + *mock.Call +} + +// getAsyncReplicationStats is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockShardLike_Expecter) getAsyncReplicationStats(ctx interface{}) *MockShardLike_getAsyncReplicationStats_Call { + return &MockShardLike_getAsyncReplicationStats_Call{Call: _e.mock.On("getAsyncReplicationStats", ctx)} +} + +func (_c *MockShardLike_getAsyncReplicationStats_Call) Run(run func(ctx context.Context)) *MockShardLike_getAsyncReplicationStats_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockShardLike_getAsyncReplicationStats_Call) Return(_a0 []*models.AsyncReplicationStatus) *MockShardLike_getAsyncReplicationStats_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_getAsyncReplicationStats_Call) RunAndReturn(run func(context.Context) []*models.AsyncReplicationStatus) *MockShardLike_getAsyncReplicationStats_Call { + _c.Call.Return(run) + return _c +} + +// hasGeoIndex provides a mock function with no fields +func (_m *MockShardLike) hasGeoIndex() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for hasGeoIndex") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// MockShardLike_hasGeoIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit 
version for method 'hasGeoIndex' +type MockShardLike_hasGeoIndex_Call struct { + *mock.Call +} + +// hasGeoIndex is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) hasGeoIndex() *MockShardLike_hasGeoIndex_Call { + return &MockShardLike_hasGeoIndex_Call{Call: _e.mock.On("hasGeoIndex")} +} + +func (_c *MockShardLike_hasGeoIndex_Call) Run(run func()) *MockShardLike_hasGeoIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_hasGeoIndex_Call) Return(_a0 bool) *MockShardLike_hasGeoIndex_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_hasGeoIndex_Call) RunAndReturn(run func() bool) *MockShardLike_hasGeoIndex_Call { + _c.Call.Return(run) + return _c +} + +// initPropertyBuckets provides a mock function with given fields: ctx, eg, lazyLoadSegments, props +func (_m *MockShardLike) initPropertyBuckets(ctx context.Context, eg *errors.ErrorGroupWrapper, lazyLoadSegments bool, props ...*models.Property) { + _va := make([]interface{}, len(props)) + for _i := range props { + _va[_i] = props[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, eg, lazyLoadSegments) + _ca = append(_ca, _va...) + _m.Called(_ca...) 
+} + +// MockShardLike_initPropertyBuckets_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'initPropertyBuckets' +type MockShardLike_initPropertyBuckets_Call struct { + *mock.Call +} + +// initPropertyBuckets is a helper method to define mock.On call +// - ctx context.Context +// - eg *errors.ErrorGroupWrapper +// - lazyLoadSegments bool +// - props ...*models.Property +func (_e *MockShardLike_Expecter) initPropertyBuckets(ctx interface{}, eg interface{}, lazyLoadSegments interface{}, props ...interface{}) *MockShardLike_initPropertyBuckets_Call { + return &MockShardLike_initPropertyBuckets_Call{Call: _e.mock.On("initPropertyBuckets", + append([]interface{}{ctx, eg, lazyLoadSegments}, props...)...)} +} + +func (_c *MockShardLike_initPropertyBuckets_Call) Run(run func(ctx context.Context, eg *errors.ErrorGroupWrapper, lazyLoadSegments bool, props ...*models.Property)) *MockShardLike_initPropertyBuckets_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]*models.Property, len(args)-3) + for i, a := range args[3:] { + if a != nil { + variadicArgs[i] = a.(*models.Property) + } + } + run(args[0].(context.Context), args[1].(*errors.ErrorGroupWrapper), args[2].(bool), variadicArgs...) 
+ }) + return _c +} + +func (_c *MockShardLike_initPropertyBuckets_Call) Return() *MockShardLike_initPropertyBuckets_Call { + _c.Call.Return() + return _c +} + +func (_c *MockShardLike_initPropertyBuckets_Call) RunAndReturn(run func(context.Context, *errors.ErrorGroupWrapper, bool, ...*models.Property)) *MockShardLike_initPropertyBuckets_Call { + _c.Run(run) + return _c +} + +// isReadOnly provides a mock function with no fields +func (_m *MockShardLike) isReadOnly() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for isReadOnly") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_isReadOnly_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'isReadOnly' +type MockShardLike_isReadOnly_Call struct { + *mock.Call +} + +// isReadOnly is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) isReadOnly() *MockShardLike_isReadOnly_Call { + return &MockShardLike_isReadOnly_Call{Call: _e.mock.On("isReadOnly")} +} + +func (_c *MockShardLike_isReadOnly_Call) Run(run func()) *MockShardLike_isReadOnly_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_isReadOnly_Call) Return(_a0 error) *MockShardLike_isReadOnly_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_isReadOnly_Call) RunAndReturn(run func() error) *MockShardLike_isReadOnly_Call { + _c.Call.Return(run) + return _c +} + +// mayUpsertObjectHashTree provides a mock function with given fields: object, idBytes, status +func (_m *MockShardLike) mayUpsertObjectHashTree(object *storobj.Object, idBytes []byte, status objectInsertStatus) error { + ret := _m.Called(object, idBytes, status) + + if len(ret) == 0 { + panic("no return value specified for mayUpsertObjectHashTree") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*storobj.Object, []byte, 
objectInsertStatus) error); ok { + r0 = rf(object, idBytes, status) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_mayUpsertObjectHashTree_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'mayUpsertObjectHashTree' +type MockShardLike_mayUpsertObjectHashTree_Call struct { + *mock.Call +} + +// mayUpsertObjectHashTree is a helper method to define mock.On call +// - object *storobj.Object +// - idBytes []byte +// - status objectInsertStatus +func (_e *MockShardLike_Expecter) mayUpsertObjectHashTree(object interface{}, idBytes interface{}, status interface{}) *MockShardLike_mayUpsertObjectHashTree_Call { + return &MockShardLike_mayUpsertObjectHashTree_Call{Call: _e.mock.On("mayUpsertObjectHashTree", object, idBytes, status)} +} + +func (_c *MockShardLike_mayUpsertObjectHashTree_Call) Run(run func(object *storobj.Object, idBytes []byte, status objectInsertStatus)) *MockShardLike_mayUpsertObjectHashTree_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*storobj.Object), args[1].([]byte), args[2].(objectInsertStatus)) + }) + return _c +} + +func (_c *MockShardLike_mayUpsertObjectHashTree_Call) Return(_a0 error) *MockShardLike_mayUpsertObjectHashTree_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_mayUpsertObjectHashTree_Call) RunAndReturn(run func(*storobj.Object, []byte, objectInsertStatus) error) *MockShardLike_mayUpsertObjectHashTree_Call { + _c.Call.Return(run) + return _c +} + +// mutableMergeObjectLSM provides a mock function with given fields: merge, idBytes +func (_m *MockShardLike) mutableMergeObjectLSM(merge objects.MergeDocument, idBytes []byte) (mutableMergeResult, error) { + ret := _m.Called(merge, idBytes) + + if len(ret) == 0 { + panic("no return value specified for mutableMergeObjectLSM") + } + + var r0 mutableMergeResult + var r1 error + if rf, ok := ret.Get(0).(func(objects.MergeDocument, []byte) (mutableMergeResult, error)); ok { + return rf(merge, 
idBytes) + } + if rf, ok := ret.Get(0).(func(objects.MergeDocument, []byte) mutableMergeResult); ok { + r0 = rf(merge, idBytes) + } else { + r0 = ret.Get(0).(mutableMergeResult) + } + + if rf, ok := ret.Get(1).(func(objects.MergeDocument, []byte) error); ok { + r1 = rf(merge, idBytes) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_mutableMergeObjectLSM_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'mutableMergeObjectLSM' +type MockShardLike_mutableMergeObjectLSM_Call struct { + *mock.Call +} + +// mutableMergeObjectLSM is a helper method to define mock.On call +// - merge objects.MergeDocument +// - idBytes []byte +func (_e *MockShardLike_Expecter) mutableMergeObjectLSM(merge interface{}, idBytes interface{}) *MockShardLike_mutableMergeObjectLSM_Call { + return &MockShardLike_mutableMergeObjectLSM_Call{Call: _e.mock.On("mutableMergeObjectLSM", merge, idBytes)} +} + +func (_c *MockShardLike_mutableMergeObjectLSM_Call) Run(run func(merge objects.MergeDocument, idBytes []byte)) *MockShardLike_mutableMergeObjectLSM_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(objects.MergeDocument), args[1].([]byte)) + }) + return _c +} + +func (_c *MockShardLike_mutableMergeObjectLSM_Call) Return(_a0 mutableMergeResult, _a1 error) *MockShardLike_mutableMergeObjectLSM_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_mutableMergeObjectLSM_Call) RunAndReturn(run func(objects.MergeDocument, []byte) (mutableMergeResult, error)) *MockShardLike_mutableMergeObjectLSM_Call { + _c.Call.Return(run) + return _c +} + +// pairPropertyWithFrequency provides a mock function with given fields: docID, freq, propLen +func (_m *MockShardLike) pairPropertyWithFrequency(docID uint64, freq float32, propLen float32) lsmkv.MapPair { + ret := _m.Called(docID, freq, propLen) + + if len(ret) == 0 { + panic("no return value specified for pairPropertyWithFrequency") + } + + var r0 lsmkv.MapPair + 
if rf, ok := ret.Get(0).(func(uint64, float32, float32) lsmkv.MapPair); ok { + r0 = rf(docID, freq, propLen) + } else { + r0 = ret.Get(0).(lsmkv.MapPair) + } + + return r0 +} + +// MockShardLike_pairPropertyWithFrequency_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'pairPropertyWithFrequency' +type MockShardLike_pairPropertyWithFrequency_Call struct { + *mock.Call +} + +// pairPropertyWithFrequency is a helper method to define mock.On call +// - docID uint64 +// - freq float32 +// - propLen float32 +func (_e *MockShardLike_Expecter) pairPropertyWithFrequency(docID interface{}, freq interface{}, propLen interface{}) *MockShardLike_pairPropertyWithFrequency_Call { + return &MockShardLike_pairPropertyWithFrequency_Call{Call: _e.mock.On("pairPropertyWithFrequency", docID, freq, propLen)} +} + +func (_c *MockShardLike_pairPropertyWithFrequency_Call) Run(run func(docID uint64, freq float32, propLen float32)) *MockShardLike_pairPropertyWithFrequency_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint64), args[1].(float32), args[2].(float32)) + }) + return _c +} + +func (_c *MockShardLike_pairPropertyWithFrequency_Call) Return(_a0 lsmkv.MapPair) *MockShardLike_pairPropertyWithFrequency_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_pairPropertyWithFrequency_Call) RunAndReturn(run func(uint64, float32, float32) lsmkv.MapPair) *MockShardLike_pairPropertyWithFrequency_Call { + _c.Call.Return(run) + return _c +} + +// pathLSM provides a mock function with no fields +func (_m *MockShardLike) pathLSM() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for pathLSM") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// MockShardLike_pathLSM_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'pathLSM' +type 
MockShardLike_pathLSM_Call struct { + *mock.Call +} + +// pathLSM is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) pathLSM() *MockShardLike_pathLSM_Call { + return &MockShardLike_pathLSM_Call{Call: _e.mock.On("pathLSM")} +} + +func (_c *MockShardLike_pathLSM_Call) Run(run func()) *MockShardLike_pathLSM_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_pathLSM_Call) Return(_a0 string) *MockShardLike_pathLSM_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_pathLSM_Call) RunAndReturn(run func() string) *MockShardLike_pathLSM_Call { + _c.Call.Return(run) + return _c +} + +// prepareAddReferences provides a mock function with given fields: _a0, _a1, _a2 +func (_m *MockShardLike) prepareAddReferences(_a0 context.Context, _a1 string, _a2 []objects.BatchReference) replica.SimpleResponse { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for prepareAddReferences") + } + + var r0 replica.SimpleResponse + if rf, ok := ret.Get(0).(func(context.Context, string, []objects.BatchReference) replica.SimpleResponse); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Get(0).(replica.SimpleResponse) + } + + return r0 +} + +// MockShardLike_prepareAddReferences_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'prepareAddReferences' +type MockShardLike_prepareAddReferences_Call struct { + *mock.Call +} + +// prepareAddReferences is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +// - _a2 []objects.BatchReference +func (_e *MockShardLike_Expecter) prepareAddReferences(_a0 interface{}, _a1 interface{}, _a2 interface{}) *MockShardLike_prepareAddReferences_Call { + return &MockShardLike_prepareAddReferences_Call{Call: _e.mock.On("prepareAddReferences", _a0, _a1, _a2)} +} + +func (_c *MockShardLike_prepareAddReferences_Call) Run(run func(_a0 context.Context, _a1 string, 
_a2 []objects.BatchReference)) *MockShardLike_prepareAddReferences_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].([]objects.BatchReference)) + }) + return _c +} + +func (_c *MockShardLike_prepareAddReferences_Call) Return(_a0 replica.SimpleResponse) *MockShardLike_prepareAddReferences_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_prepareAddReferences_Call) RunAndReturn(run func(context.Context, string, []objects.BatchReference) replica.SimpleResponse) *MockShardLike_prepareAddReferences_Call { + _c.Call.Return(run) + return _c +} + +// prepareDeleteObject provides a mock function with given fields: _a0, _a1, _a2, _a3 +func (_m *MockShardLike) prepareDeleteObject(_a0 context.Context, _a1 string, _a2 strfmt.UUID, _a3 time.Time) replica.SimpleResponse { + ret := _m.Called(_a0, _a1, _a2, _a3) + + if len(ret) == 0 { + panic("no return value specified for prepareDeleteObject") + } + + var r0 replica.SimpleResponse + if rf, ok := ret.Get(0).(func(context.Context, string, strfmt.UUID, time.Time) replica.SimpleResponse); ok { + r0 = rf(_a0, _a1, _a2, _a3) + } else { + r0 = ret.Get(0).(replica.SimpleResponse) + } + + return r0 +} + +// MockShardLike_prepareDeleteObject_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'prepareDeleteObject' +type MockShardLike_prepareDeleteObject_Call struct { + *mock.Call +} + +// prepareDeleteObject is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +// - _a2 strfmt.UUID +// - _a3 time.Time +func (_e *MockShardLike_Expecter) prepareDeleteObject(_a0 interface{}, _a1 interface{}, _a2 interface{}, _a3 interface{}) *MockShardLike_prepareDeleteObject_Call { + return &MockShardLike_prepareDeleteObject_Call{Call: _e.mock.On("prepareDeleteObject", _a0, _a1, _a2, _a3)} +} + +func (_c *MockShardLike_prepareDeleteObject_Call) Run(run func(_a0 context.Context, _a1 string, _a2 strfmt.UUID, _a3 
time.Time)) *MockShardLike_prepareDeleteObject_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(strfmt.UUID), args[3].(time.Time)) + }) + return _c +} + +func (_c *MockShardLike_prepareDeleteObject_Call) Return(_a0 replica.SimpleResponse) *MockShardLike_prepareDeleteObject_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_prepareDeleteObject_Call) RunAndReturn(run func(context.Context, string, strfmt.UUID, time.Time) replica.SimpleResponse) *MockShardLike_prepareDeleteObject_Call { + _c.Call.Return(run) + return _c +} + +// prepareDeleteObjects provides a mock function with given fields: _a0, _a1, _a2, _a3, _a4 +func (_m *MockShardLike) prepareDeleteObjects(_a0 context.Context, _a1 string, _a2 []strfmt.UUID, _a3 time.Time, _a4 bool) replica.SimpleResponse { + ret := _m.Called(_a0, _a1, _a2, _a3, _a4) + + if len(ret) == 0 { + panic("no return value specified for prepareDeleteObjects") + } + + var r0 replica.SimpleResponse + if rf, ok := ret.Get(0).(func(context.Context, string, []strfmt.UUID, time.Time, bool) replica.SimpleResponse); ok { + r0 = rf(_a0, _a1, _a2, _a3, _a4) + } else { + r0 = ret.Get(0).(replica.SimpleResponse) + } + + return r0 +} + +// MockShardLike_prepareDeleteObjects_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'prepareDeleteObjects' +type MockShardLike_prepareDeleteObjects_Call struct { + *mock.Call +} + +// prepareDeleteObjects is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +// - _a2 []strfmt.UUID +// - _a3 time.Time +// - _a4 bool +func (_e *MockShardLike_Expecter) prepareDeleteObjects(_a0 interface{}, _a1 interface{}, _a2 interface{}, _a3 interface{}, _a4 interface{}) *MockShardLike_prepareDeleteObjects_Call { + return &MockShardLike_prepareDeleteObjects_Call{Call: _e.mock.On("prepareDeleteObjects", _a0, _a1, _a2, _a3, _a4)} +} + +func (_c *MockShardLike_prepareDeleteObjects_Call) 
Run(run func(_a0 context.Context, _a1 string, _a2 []strfmt.UUID, _a3 time.Time, _a4 bool)) *MockShardLike_prepareDeleteObjects_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].([]strfmt.UUID), args[3].(time.Time), args[4].(bool)) + }) + return _c +} + +func (_c *MockShardLike_prepareDeleteObjects_Call) Return(_a0 replica.SimpleResponse) *MockShardLike_prepareDeleteObjects_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_prepareDeleteObjects_Call) RunAndReturn(run func(context.Context, string, []strfmt.UUID, time.Time, bool) replica.SimpleResponse) *MockShardLike_prepareDeleteObjects_Call { + _c.Call.Return(run) + return _c +} + +// prepareMergeObject provides a mock function with given fields: _a0, _a1, _a2 +func (_m *MockShardLike) prepareMergeObject(_a0 context.Context, _a1 string, _a2 *objects.MergeDocument) replica.SimpleResponse { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for prepareMergeObject") + } + + var r0 replica.SimpleResponse + if rf, ok := ret.Get(0).(func(context.Context, string, *objects.MergeDocument) replica.SimpleResponse); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Get(0).(replica.SimpleResponse) + } + + return r0 +} + +// MockShardLike_prepareMergeObject_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'prepareMergeObject' +type MockShardLike_prepareMergeObject_Call struct { + *mock.Call +} + +// prepareMergeObject is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +// - _a2 *objects.MergeDocument +func (_e *MockShardLike_Expecter) prepareMergeObject(_a0 interface{}, _a1 interface{}, _a2 interface{}) *MockShardLike_prepareMergeObject_Call { + return &MockShardLike_prepareMergeObject_Call{Call: _e.mock.On("prepareMergeObject", _a0, _a1, _a2)} +} + +func (_c *MockShardLike_prepareMergeObject_Call) Run(run func(_a0 context.Context, _a1 
string, _a2 *objects.MergeDocument)) *MockShardLike_prepareMergeObject_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(*objects.MergeDocument)) + }) + return _c +} + +func (_c *MockShardLike_prepareMergeObject_Call) Return(_a0 replica.SimpleResponse) *MockShardLike_prepareMergeObject_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_prepareMergeObject_Call) RunAndReturn(run func(context.Context, string, *objects.MergeDocument) replica.SimpleResponse) *MockShardLike_prepareMergeObject_Call { + _c.Call.Return(run) + return _c +} + +// preparePutObject provides a mock function with given fields: _a0, _a1, _a2 +func (_m *MockShardLike) preparePutObject(_a0 context.Context, _a1 string, _a2 *storobj.Object) replica.SimpleResponse { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for preparePutObject") + } + + var r0 replica.SimpleResponse + if rf, ok := ret.Get(0).(func(context.Context, string, *storobj.Object) replica.SimpleResponse); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Get(0).(replica.SimpleResponse) + } + + return r0 +} + +// MockShardLike_preparePutObject_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'preparePutObject' +type MockShardLike_preparePutObject_Call struct { + *mock.Call +} + +// preparePutObject is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +// - _a2 *storobj.Object +func (_e *MockShardLike_Expecter) preparePutObject(_a0 interface{}, _a1 interface{}, _a2 interface{}) *MockShardLike_preparePutObject_Call { + return &MockShardLike_preparePutObject_Call{Call: _e.mock.On("preparePutObject", _a0, _a1, _a2)} +} + +func (_c *MockShardLike_preparePutObject_Call) Run(run func(_a0 context.Context, _a1 string, _a2 *storobj.Object)) *MockShardLike_preparePutObject_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), 
args[1].(string), args[2].(*storobj.Object)) + }) + return _c +} + +func (_c *MockShardLike_preparePutObject_Call) Return(_a0 replica.SimpleResponse) *MockShardLike_preparePutObject_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_preparePutObject_Call) RunAndReturn(run func(context.Context, string, *storobj.Object) replica.SimpleResponse) *MockShardLike_preparePutObject_Call { + _c.Call.Return(run) + return _c +} + +// preparePutObjects provides a mock function with given fields: _a0, _a1, _a2 +func (_m *MockShardLike) preparePutObjects(_a0 context.Context, _a1 string, _a2 []*storobj.Object) replica.SimpleResponse { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for preparePutObjects") + } + + var r0 replica.SimpleResponse + if rf, ok := ret.Get(0).(func(context.Context, string, []*storobj.Object) replica.SimpleResponse); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Get(0).(replica.SimpleResponse) + } + + return r0 +} + +// MockShardLike_preparePutObjects_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'preparePutObjects' +type MockShardLike_preparePutObjects_Call struct { + *mock.Call +} + +// preparePutObjects is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +// - _a2 []*storobj.Object +func (_e *MockShardLike_Expecter) preparePutObjects(_a0 interface{}, _a1 interface{}, _a2 interface{}) *MockShardLike_preparePutObjects_Call { + return &MockShardLike_preparePutObjects_Call{Call: _e.mock.On("preparePutObjects", _a0, _a1, _a2)} +} + +func (_c *MockShardLike_preparePutObjects_Call) Run(run func(_a0 context.Context, _a1 string, _a2 []*storobj.Object)) *MockShardLike_preparePutObjects_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].([]*storobj.Object)) + }) + return _c +} + +func (_c *MockShardLike_preparePutObjects_Call) Return(_a0 replica.SimpleResponse) 
*MockShardLike_preparePutObjects_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_preparePutObjects_Call) RunAndReturn(run func(context.Context, string, []*storobj.Object) replica.SimpleResponse) *MockShardLike_preparePutObjects_Call { + _c.Call.Return(run) + return _c +} + +// preventShutdown provides a mock function with no fields +func (_m *MockShardLike) preventShutdown() (func(), error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for preventShutdown") + } + + var r0 func() + var r1 error + if rf, ok := ret.Get(0).(func() (func(), error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() func()); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(func()) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_preventShutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'preventShutdown' +type MockShardLike_preventShutdown_Call struct { + *mock.Call +} + +// preventShutdown is a helper method to define mock.On call +func (_e *MockShardLike_Expecter) preventShutdown() *MockShardLike_preventShutdown_Call { + return &MockShardLike_preventShutdown_Call{Call: _e.mock.On("preventShutdown")} +} + +func (_c *MockShardLike_preventShutdown_Call) Run(run func()) *MockShardLike_preventShutdown_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockShardLike_preventShutdown_Call) Return(release func(), err error) *MockShardLike_preventShutdown_Call { + _c.Call.Return(release, err) + return _c +} + +func (_c *MockShardLike_preventShutdown_Call) RunAndReturn(run func() (func(), error)) *MockShardLike_preventShutdown_Call { + _c.Call.Return(run) + return _c +} + +// putObjectLSM provides a mock function with given fields: object, idBytes +func (_m *MockShardLike) putObjectLSM(object *storobj.Object, idBytes []byte) 
(objectInsertStatus, error) { + ret := _m.Called(object, idBytes) + + if len(ret) == 0 { + panic("no return value specified for putObjectLSM") + } + + var r0 objectInsertStatus + var r1 error + if rf, ok := ret.Get(0).(func(*storobj.Object, []byte) (objectInsertStatus, error)); ok { + return rf(object, idBytes) + } + if rf, ok := ret.Get(0).(func(*storobj.Object, []byte) objectInsertStatus); ok { + r0 = rf(object, idBytes) + } else { + r0 = ret.Get(0).(objectInsertStatus) + } + + if rf, ok := ret.Get(1).(func(*storobj.Object, []byte) error); ok { + r1 = rf(object, idBytes) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_putObjectLSM_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'putObjectLSM' +type MockShardLike_putObjectLSM_Call struct { + *mock.Call +} + +// putObjectLSM is a helper method to define mock.On call +// - object *storobj.Object +// - idBytes []byte +func (_e *MockShardLike_Expecter) putObjectLSM(object interface{}, idBytes interface{}) *MockShardLike_putObjectLSM_Call { + return &MockShardLike_putObjectLSM_Call{Call: _e.mock.On("putObjectLSM", object, idBytes)} +} + +func (_c *MockShardLike_putObjectLSM_Call) Run(run func(object *storobj.Object, idBytes []byte)) *MockShardLike_putObjectLSM_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*storobj.Object), args[1].([]byte)) + }) + return _c +} + +func (_c *MockShardLike_putObjectLSM_Call) Return(_a0 objectInsertStatus, _a1 error) *MockShardLike_putObjectLSM_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_putObjectLSM_Call) RunAndReturn(run func(*storobj.Object, []byte) (objectInsertStatus, error)) *MockShardLike_putObjectLSM_Call { + _c.Call.Return(run) + return _c +} + +// removeAllTargetNodeOverrides provides a mock function with given fields: ctx +func (_m *MockShardLike) removeAllTargetNodeOverrides(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + 
panic("no return value specified for removeAllTargetNodeOverrides") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_removeAllTargetNodeOverrides_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'removeAllTargetNodeOverrides' +type MockShardLike_removeAllTargetNodeOverrides_Call struct { + *mock.Call +} + +// removeAllTargetNodeOverrides is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockShardLike_Expecter) removeAllTargetNodeOverrides(ctx interface{}) *MockShardLike_removeAllTargetNodeOverrides_Call { + return &MockShardLike_removeAllTargetNodeOverrides_Call{Call: _e.mock.On("removeAllTargetNodeOverrides", ctx)} +} + +func (_c *MockShardLike_removeAllTargetNodeOverrides_Call) Run(run func(ctx context.Context)) *MockShardLike_removeAllTargetNodeOverrides_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockShardLike_removeAllTargetNodeOverrides_Call) Return(_a0 error) *MockShardLike_removeAllTargetNodeOverrides_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_removeAllTargetNodeOverrides_Call) RunAndReturn(run func(context.Context) error) *MockShardLike_removeAllTargetNodeOverrides_Call { + _c.Call.Return(run) + return _c +} + +// removeTargetNodeOverride provides a mock function with given fields: ctx, targetNodeOverride +func (_m *MockShardLike) removeTargetNodeOverride(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride) error { + ret := _m.Called(ctx, targetNodeOverride) + + if len(ret) == 0 { + panic("no return value specified for removeTargetNodeOverride") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, additional.AsyncReplicationTargetNodeOverride) error); ok { + r0 = rf(ctx, targetNodeOverride) + } else { + r0 = ret.Error(0) 
+ } + + return r0 +} + +// MockShardLike_removeTargetNodeOverride_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'removeTargetNodeOverride' +type MockShardLike_removeTargetNodeOverride_Call struct { + *mock.Call +} + +// removeTargetNodeOverride is a helper method to define mock.On call +// - ctx context.Context +// - targetNodeOverride additional.AsyncReplicationTargetNodeOverride +func (_e *MockShardLike_Expecter) removeTargetNodeOverride(ctx interface{}, targetNodeOverride interface{}) *MockShardLike_removeTargetNodeOverride_Call { + return &MockShardLike_removeTargetNodeOverride_Call{Call: _e.mock.On("removeTargetNodeOverride", ctx, targetNodeOverride)} +} + +func (_c *MockShardLike_removeTargetNodeOverride_Call) Run(run func(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride)) *MockShardLike_removeTargetNodeOverride_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(additional.AsyncReplicationTargetNodeOverride)) + }) + return _c +} + +func (_c *MockShardLike_removeTargetNodeOverride_Call) Return(_a0 error) *MockShardLike_removeTargetNodeOverride_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_removeTargetNodeOverride_Call) RunAndReturn(run func(context.Context, additional.AsyncReplicationTargetNodeOverride) error) *MockShardLike_removeTargetNodeOverride_Call { + _c.Call.Return(run) + return _c +} + +// resetDimensionsLSM provides a mock function with given fields: ctx +func (_m *MockShardLike) resetDimensionsLSM(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for resetDimensionsLSM") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_resetDimensionsLSM_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'resetDimensionsLSM' +type MockShardLike_resetDimensionsLSM_Call struct { + *mock.Call +} + +// resetDimensionsLSM is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockShardLike_Expecter) resetDimensionsLSM(ctx interface{}) *MockShardLike_resetDimensionsLSM_Call { + return &MockShardLike_resetDimensionsLSM_Call{Call: _e.mock.On("resetDimensionsLSM", ctx)} +} + +func (_c *MockShardLike_resetDimensionsLSM_Call) Run(run func(ctx context.Context)) *MockShardLike_resetDimensionsLSM_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockShardLike_resetDimensionsLSM_Call) Return(_a0 error) *MockShardLike_resetDimensionsLSM_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_resetDimensionsLSM_Call) RunAndReturn(run func(context.Context) error) *MockShardLike_resetDimensionsLSM_Call { + _c.Call.Return(run) + return _c +} + +// resumeMaintenanceCycles provides a mock function with given fields: ctx +func (_m *MockShardLike) resumeMaintenanceCycles(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for resumeMaintenanceCycles") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_resumeMaintenanceCycles_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'resumeMaintenanceCycles' +type MockShardLike_resumeMaintenanceCycles_Call struct { + *mock.Call +} + +// resumeMaintenanceCycles is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockShardLike_Expecter) resumeMaintenanceCycles(ctx interface{}) *MockShardLike_resumeMaintenanceCycles_Call { + return &MockShardLike_resumeMaintenanceCycles_Call{Call: _e.mock.On("resumeMaintenanceCycles", ctx)} +} + +func (_c *MockShardLike_resumeMaintenanceCycles_Call) Run(run func(ctx 
context.Context)) *MockShardLike_resumeMaintenanceCycles_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockShardLike_resumeMaintenanceCycles_Call) Return(_a0 error) *MockShardLike_resumeMaintenanceCycles_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_resumeMaintenanceCycles_Call) RunAndReturn(run func(context.Context) error) *MockShardLike_resumeMaintenanceCycles_Call { + _c.Call.Return(run) + return _c +} + +// setFallbackToSearchable provides a mock function with given fields: fallback +func (_m *MockShardLike) setFallbackToSearchable(fallback bool) { + _m.Called(fallback) +} + +// MockShardLike_setFallbackToSearchable_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'setFallbackToSearchable' +type MockShardLike_setFallbackToSearchable_Call struct { + *mock.Call +} + +// setFallbackToSearchable is a helper method to define mock.On call +// - fallback bool +func (_e *MockShardLike_Expecter) setFallbackToSearchable(fallback interface{}) *MockShardLike_setFallbackToSearchable_Call { + return &MockShardLike_setFallbackToSearchable_Call{Call: _e.mock.On("setFallbackToSearchable", fallback)} +} + +func (_c *MockShardLike_setFallbackToSearchable_Call) Run(run func(fallback bool)) *MockShardLike_setFallbackToSearchable_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(bool)) + }) + return _c +} + +func (_c *MockShardLike_setFallbackToSearchable_Call) Return() *MockShardLike_setFallbackToSearchable_Call { + _c.Call.Return() + return _c +} + +func (_c *MockShardLike_setFallbackToSearchable_Call) RunAndReturn(run func(bool)) *MockShardLike_setFallbackToSearchable_Call { + _c.Run(run) + return _c +} + +// updateMultiVectorIndexesIgnoreDelete provides a mock function with given fields: ctx, multiVectors, status +func (_m *MockShardLike) updateMultiVectorIndexesIgnoreDelete(ctx context.Context, multiVectors map[string][][]float32, 
status objectInsertStatus) error { + ret := _m.Called(ctx, multiVectors, status) + + if len(ret) == 0 { + panic("no return value specified for updateMultiVectorIndexesIgnoreDelete") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, map[string][][]float32, objectInsertStatus) error); ok { + r0 = rf(ctx, multiVectors, status) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_updateMultiVectorIndexesIgnoreDelete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'updateMultiVectorIndexesIgnoreDelete' +type MockShardLike_updateMultiVectorIndexesIgnoreDelete_Call struct { + *mock.Call +} + +// updateMultiVectorIndexesIgnoreDelete is a helper method to define mock.On call +// - ctx context.Context +// - multiVectors map[string][][]float32 +// - status objectInsertStatus +func (_e *MockShardLike_Expecter) updateMultiVectorIndexesIgnoreDelete(ctx interface{}, multiVectors interface{}, status interface{}) *MockShardLike_updateMultiVectorIndexesIgnoreDelete_Call { + return &MockShardLike_updateMultiVectorIndexesIgnoreDelete_Call{Call: _e.mock.On("updateMultiVectorIndexesIgnoreDelete", ctx, multiVectors, status)} +} + +func (_c *MockShardLike_updateMultiVectorIndexesIgnoreDelete_Call) Run(run func(ctx context.Context, multiVectors map[string][][]float32, status objectInsertStatus)) *MockShardLike_updateMultiVectorIndexesIgnoreDelete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(map[string][][]float32), args[2].(objectInsertStatus)) + }) + return _c +} + +func (_c *MockShardLike_updateMultiVectorIndexesIgnoreDelete_Call) Return(_a0 error) *MockShardLike_updateMultiVectorIndexesIgnoreDelete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_updateMultiVectorIndexesIgnoreDelete_Call) RunAndReturn(run func(context.Context, map[string][][]float32, objectInsertStatus) error) *MockShardLike_updateMultiVectorIndexesIgnoreDelete_Call { + 
_c.Call.Return(run) + return _c +} + +// updatePropertySpecificIndices provides a mock function with given fields: ctx, object, status +func (_m *MockShardLike) updatePropertySpecificIndices(ctx context.Context, object *storobj.Object, status objectInsertStatus) error { + ret := _m.Called(ctx, object, status) + + if len(ret) == 0 { + panic("no return value specified for updatePropertySpecificIndices") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *storobj.Object, objectInsertStatus) error); ok { + r0 = rf(ctx, object, status) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_updatePropertySpecificIndices_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'updatePropertySpecificIndices' +type MockShardLike_updatePropertySpecificIndices_Call struct { + *mock.Call +} + +// updatePropertySpecificIndices is a helper method to define mock.On call +// - ctx context.Context +// - object *storobj.Object +// - status objectInsertStatus +func (_e *MockShardLike_Expecter) updatePropertySpecificIndices(ctx interface{}, object interface{}, status interface{}) *MockShardLike_updatePropertySpecificIndices_Call { + return &MockShardLike_updatePropertySpecificIndices_Call{Call: _e.mock.On("updatePropertySpecificIndices", ctx, object, status)} +} + +func (_c *MockShardLike_updatePropertySpecificIndices_Call) Run(run func(ctx context.Context, object *storobj.Object, status objectInsertStatus)) *MockShardLike_updatePropertySpecificIndices_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*storobj.Object), args[2].(objectInsertStatus)) + }) + return _c +} + +func (_c *MockShardLike_updatePropertySpecificIndices_Call) Return(_a0 error) *MockShardLike_updatePropertySpecificIndices_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_updatePropertySpecificIndices_Call) RunAndReturn(run func(context.Context, *storobj.Object, objectInsertStatus) 
error) *MockShardLike_updatePropertySpecificIndices_Call { + _c.Call.Return(run) + return _c +} + +// updateVectorIndexIgnoreDelete provides a mock function with given fields: ctx, vector, status +func (_m *MockShardLike) updateVectorIndexIgnoreDelete(ctx context.Context, vector []float32, status objectInsertStatus) error { + ret := _m.Called(ctx, vector, status) + + if len(ret) == 0 { + panic("no return value specified for updateVectorIndexIgnoreDelete") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []float32, objectInsertStatus) error); ok { + r0 = rf(ctx, vector, status) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_updateVectorIndexIgnoreDelete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'updateVectorIndexIgnoreDelete' +type MockShardLike_updateVectorIndexIgnoreDelete_Call struct { + *mock.Call +} + +// updateVectorIndexIgnoreDelete is a helper method to define mock.On call +// - ctx context.Context +// - vector []float32 +// - status objectInsertStatus +func (_e *MockShardLike_Expecter) updateVectorIndexIgnoreDelete(ctx interface{}, vector interface{}, status interface{}) *MockShardLike_updateVectorIndexIgnoreDelete_Call { + return &MockShardLike_updateVectorIndexIgnoreDelete_Call{Call: _e.mock.On("updateVectorIndexIgnoreDelete", ctx, vector, status)} +} + +func (_c *MockShardLike_updateVectorIndexIgnoreDelete_Call) Run(run func(ctx context.Context, vector []float32, status objectInsertStatus)) *MockShardLike_updateVectorIndexIgnoreDelete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]float32), args[2].(objectInsertStatus)) + }) + return _c +} + +func (_c *MockShardLike_updateVectorIndexIgnoreDelete_Call) Return(_a0 error) *MockShardLike_updateVectorIndexIgnoreDelete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockShardLike_updateVectorIndexIgnoreDelete_Call) RunAndReturn(run func(context.Context, 
[]float32, objectInsertStatus) error) *MockShardLike_updateVectorIndexIgnoreDelete_Call { + _c.Call.Return(run) + return _c +} + +// updateVectorIndexesIgnoreDelete provides a mock function with given fields: ctx, vectors, status +func (_m *MockShardLike) updateVectorIndexesIgnoreDelete(ctx context.Context, vectors map[string][]float32, status objectInsertStatus) error { + ret := _m.Called(ctx, vectors, status) + + if len(ret) == 0 { + panic("no return value specified for updateVectorIndexesIgnoreDelete") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, map[string][]float32, objectInsertStatus) error); ok { + r0 = rf(ctx, vectors, status) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockShardLike_updateVectorIndexesIgnoreDelete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'updateVectorIndexesIgnoreDelete' +type MockShardLike_updateVectorIndexesIgnoreDelete_Call struct { + *mock.Call +} + +// updateVectorIndexesIgnoreDelete is a helper method to define mock.On call +// - ctx context.Context +// - vectors map[string][]float32 +// - status objectInsertStatus +func (_e *MockShardLike_Expecter) updateVectorIndexesIgnoreDelete(ctx interface{}, vectors interface{}, status interface{}) *MockShardLike_updateVectorIndexesIgnoreDelete_Call { + return &MockShardLike_updateVectorIndexesIgnoreDelete_Call{Call: _e.mock.On("updateVectorIndexesIgnoreDelete", ctx, vectors, status)} +} + +func (_c *MockShardLike_updateVectorIndexesIgnoreDelete_Call) Run(run func(ctx context.Context, vectors map[string][]float32, status objectInsertStatus)) *MockShardLike_updateVectorIndexesIgnoreDelete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(map[string][]float32), args[2].(objectInsertStatus)) + }) + return _c +} + +func (_c *MockShardLike_updateVectorIndexesIgnoreDelete_Call) Return(_a0 error) *MockShardLike_updateVectorIndexesIgnoreDelete_Call { + _c.Call.Return(_a0) + 
return _c +} + +func (_c *MockShardLike_updateVectorIndexesIgnoreDelete_Call) RunAndReturn(run func(context.Context, map[string][]float32, objectInsertStatus) error) *MockShardLike_updateVectorIndexesIgnoreDelete_Call { + _c.Call.Return(run) + return _c +} + +// uuidFromDocID provides a mock function with given fields: docID +func (_m *MockShardLike) uuidFromDocID(docID uint64) (strfmt.UUID, error) { + ret := _m.Called(docID) + + if len(ret) == 0 { + panic("no return value specified for uuidFromDocID") + } + + var r0 strfmt.UUID + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (strfmt.UUID, error)); ok { + return rf(docID) + } + if rf, ok := ret.Get(0).(func(uint64) strfmt.UUID); ok { + r0 = rf(docID) + } else { + r0 = ret.Get(0).(strfmt.UUID) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(docID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockShardLike_uuidFromDocID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'uuidFromDocID' +type MockShardLike_uuidFromDocID_Call struct { + *mock.Call +} + +// uuidFromDocID is a helper method to define mock.On call +// - docID uint64 +func (_e *MockShardLike_Expecter) uuidFromDocID(docID interface{}) *MockShardLike_uuidFromDocID_Call { + return &MockShardLike_uuidFromDocID_Call{Call: _e.mock.On("uuidFromDocID", docID)} +} + +func (_c *MockShardLike_uuidFromDocID_Call) Run(run func(docID uint64)) *MockShardLike_uuidFromDocID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint64)) + }) + return _c +} + +func (_c *MockShardLike_uuidFromDocID_Call) Return(_a0 strfmt.UUID, _a1 error) *MockShardLike_uuidFromDocID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockShardLike_uuidFromDocID_Call) RunAndReturn(run func(uint64) (strfmt.UUID, error)) *MockShardLike_uuidFromDocID_Call { + _c.Call.Return(run) + return _c +} + +// NewMockShardLike creates a new instance of MockShardLike. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockShardLike(t interface { + mock.TestingT + Cleanup(func()) +}) *MockShardLike { + mock := &MockShardLike{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/mock_vector_index.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/mock_vector_index.go new file mode 100644 index 0000000000000000000000000000000000000000..2d414afe32b6960bc4c1be1b5af47e7b133e478e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/mock_vector_index.go @@ -0,0 +1,1024 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package db + +import ( + common "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + compressionhelpers "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + + config "github.com/weaviate/weaviate/entities/schema/config" + + context "context" + + helpers "github.com/weaviate/weaviate/adapters/repos/db/helpers" + + mock "github.com/stretchr/testify/mock" +) + +// MockVectorIndex is an autogenerated mock type for the VectorIndex type +type MockVectorIndex struct { + mock.Mock +} + +type MockVectorIndex_Expecter struct { + mock *mock.Mock +} + +func (_m *MockVectorIndex) EXPECT() *MockVectorIndex_Expecter { + return &MockVectorIndex_Expecter{mock: &_m.Mock} +} + +// Add provides a mock function with given fields: ctx, id, vector +func (_m *MockVectorIndex) Add(ctx context.Context, id uint64, vector []float32) error { + ret := _m.Called(ctx, id, vector) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, []float32) error); ok { + r0 = rf(ctx, id, vector) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_Add_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Add' +type MockVectorIndex_Add_Call struct { + *mock.Call +} + +// Add is a helper method to define mock.On call +// - ctx context.Context +// - id uint64 +// - vector []float32 +func (_e *MockVectorIndex_Expecter) Add(ctx interface{}, id interface{}, vector interface{}) *MockVectorIndex_Add_Call { + return &MockVectorIndex_Add_Call{Call: _e.mock.On("Add", ctx, id, vector)} +} + +func (_c *MockVectorIndex_Add_Call) Run(run func(ctx context.Context, id uint64, vector []float32)) *MockVectorIndex_Add_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].([]float32)) + }) + return _c +} + +func (_c *MockVectorIndex_Add_Call) Return(_a0 error) *MockVectorIndex_Add_Call { + 
_c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Add_Call) RunAndReturn(run func(context.Context, uint64, []float32) error) *MockVectorIndex_Add_Call { + _c.Call.Return(run) + return _c +} + +// AddBatch provides a mock function with given fields: ctx, ids, vector +func (_m *MockVectorIndex) AddBatch(ctx context.Context, ids []uint64, vector [][]float32) error { + ret := _m.Called(ctx, ids, vector) + + if len(ret) == 0 { + panic("no return value specified for AddBatch") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []uint64, [][]float32) error); ok { + r0 = rf(ctx, ids, vector) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_AddBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddBatch' +type MockVectorIndex_AddBatch_Call struct { + *mock.Call +} + +// AddBatch is a helper method to define mock.On call +// - ctx context.Context +// - ids []uint64 +// - vector [][]float32 +func (_e *MockVectorIndex_Expecter) AddBatch(ctx interface{}, ids interface{}, vector interface{}) *MockVectorIndex_AddBatch_Call { + return &MockVectorIndex_AddBatch_Call{Call: _e.mock.On("AddBatch", ctx, ids, vector)} +} + +func (_c *MockVectorIndex_AddBatch_Call) Run(run func(ctx context.Context, ids []uint64, vector [][]float32)) *MockVectorIndex_AddBatch_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]uint64), args[2].([][]float32)) + }) + return _c +} + +func (_c *MockVectorIndex_AddBatch_Call) Return(_a0 error) *MockVectorIndex_AddBatch_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_AddBatch_Call) RunAndReturn(run func(context.Context, []uint64, [][]float32) error) *MockVectorIndex_AddBatch_Call { + _c.Call.Return(run) + return _c +} + +// Compressed provides a mock function with no fields +func (_m *MockVectorIndex) Compressed() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value 
specified for Compressed") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// MockVectorIndex_Compressed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Compressed' +type MockVectorIndex_Compressed_Call struct { + *mock.Call +} + +// Compressed is a helper method to define mock.On call +func (_e *MockVectorIndex_Expecter) Compressed() *MockVectorIndex_Compressed_Call { + return &MockVectorIndex_Compressed_Call{Call: _e.mock.On("Compressed")} +} + +func (_c *MockVectorIndex_Compressed_Call) Run(run func()) *MockVectorIndex_Compressed_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockVectorIndex_Compressed_Call) Return(_a0 bool) *MockVectorIndex_Compressed_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Compressed_Call) RunAndReturn(run func() bool) *MockVectorIndex_Compressed_Call { + _c.Call.Return(run) + return _c +} + +// CompressionStats provides a mock function with no fields +func (_m *MockVectorIndex) CompressionStats() compressionhelpers.CompressionStats { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CompressionStats") + } + + var r0 compressionhelpers.CompressionStats + if rf, ok := ret.Get(0).(func() compressionhelpers.CompressionStats); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(compressionhelpers.CompressionStats) + } + } + + return r0 +} + +// MockVectorIndex_CompressionStats_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CompressionStats' +type MockVectorIndex_CompressionStats_Call struct { + *mock.Call +} + +// CompressionStats is a helper method to define mock.On call +func (_e *MockVectorIndex_Expecter) CompressionStats() *MockVectorIndex_CompressionStats_Call { + return &MockVectorIndex_CompressionStats_Call{Call: 
_e.mock.On("CompressionStats")} +} + +func (_c *MockVectorIndex_CompressionStats_Call) Run(run func()) *MockVectorIndex_CompressionStats_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockVectorIndex_CompressionStats_Call) Return(_a0 compressionhelpers.CompressionStats) *MockVectorIndex_CompressionStats_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_CompressionStats_Call) RunAndReturn(run func() compressionhelpers.CompressionStats) *MockVectorIndex_CompressionStats_Call { + _c.Call.Return(run) + return _c +} + +// ContainsDoc provides a mock function with given fields: docID +func (_m *MockVectorIndex) ContainsDoc(docID uint64) bool { + ret := _m.Called(docID) + + if len(ret) == 0 { + panic("no return value specified for ContainsDoc") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(uint64) bool); ok { + r0 = rf(docID) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// MockVectorIndex_ContainsDoc_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainsDoc' +type MockVectorIndex_ContainsDoc_Call struct { + *mock.Call +} + +// ContainsDoc is a helper method to define mock.On call +// - docID uint64 +func (_e *MockVectorIndex_Expecter) ContainsDoc(docID interface{}) *MockVectorIndex_ContainsDoc_Call { + return &MockVectorIndex_ContainsDoc_Call{Call: _e.mock.On("ContainsDoc", docID)} +} + +func (_c *MockVectorIndex_ContainsDoc_Call) Run(run func(docID uint64)) *MockVectorIndex_ContainsDoc_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint64)) + }) + return _c +} + +func (_c *MockVectorIndex_ContainsDoc_Call) Return(_a0 bool) *MockVectorIndex_ContainsDoc_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_ContainsDoc_Call) RunAndReturn(run func(uint64) bool) *MockVectorIndex_ContainsDoc_Call { + _c.Call.Return(run) + return _c +} + +// Delete provides a mock function with given fields: id +func (_m 
*MockVectorIndex) Delete(id ...uint64) error { + _va := make([]interface{}, len(id)) + for _i := range id { + _va[_i] = id[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 error + if rf, ok := ret.Get(0).(func(...uint64) error); ok { + r0 = rf(id...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type MockVectorIndex_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// - id ...uint64 +func (_e *MockVectorIndex_Expecter) Delete(id ...interface{}) *MockVectorIndex_Delete_Call { + return &MockVectorIndex_Delete_Call{Call: _e.mock.On("Delete", + append([]interface{}{}, id...)...)} +} + +func (_c *MockVectorIndex_Delete_Call) Run(run func(id ...uint64)) *MockVectorIndex_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]uint64, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(uint64) + } + } + run(variadicArgs...) 
+ }) + return _c +} + +func (_c *MockVectorIndex_Delete_Call) Return(_a0 error) *MockVectorIndex_Delete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Delete_Call) RunAndReturn(run func(...uint64) error) *MockVectorIndex_Delete_Call { + _c.Call.Return(run) + return _c +} + +// Drop provides a mock function with given fields: ctx +func (_m *MockVectorIndex) Drop(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Drop") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_Drop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Drop' +type MockVectorIndex_Drop_Call struct { + *mock.Call +} + +// Drop is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockVectorIndex_Expecter) Drop(ctx interface{}) *MockVectorIndex_Drop_Call { + return &MockVectorIndex_Drop_Call{Call: _e.mock.On("Drop", ctx)} +} + +func (_c *MockVectorIndex_Drop_Call) Run(run func(ctx context.Context)) *MockVectorIndex_Drop_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockVectorIndex_Drop_Call) Return(_a0 error) *MockVectorIndex_Drop_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Drop_Call) RunAndReturn(run func(context.Context) error) *MockVectorIndex_Drop_Call { + _c.Call.Return(run) + return _c +} + +// Flush provides a mock function with no fields +func (_m *MockVectorIndex) Flush() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Flush") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_Flush_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'Flush' +type MockVectorIndex_Flush_Call struct { + *mock.Call +} + +// Flush is a helper method to define mock.On call +func (_e *MockVectorIndex_Expecter) Flush() *MockVectorIndex_Flush_Call { + return &MockVectorIndex_Flush_Call{Call: _e.mock.On("Flush")} +} + +func (_c *MockVectorIndex_Flush_Call) Run(run func()) *MockVectorIndex_Flush_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockVectorIndex_Flush_Call) Return(_a0 error) *MockVectorIndex_Flush_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Flush_Call) RunAndReturn(run func() error) *MockVectorIndex_Flush_Call { + _c.Call.Return(run) + return _c +} + +// Iterate provides a mock function with given fields: fn +func (_m *MockVectorIndex) Iterate(fn func(uint64) bool) { + _m.Called(fn) +} + +// MockVectorIndex_Iterate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Iterate' +type MockVectorIndex_Iterate_Call struct { + *mock.Call +} + +// Iterate is a helper method to define mock.On call +// - fn func(uint64) bool +func (_e *MockVectorIndex_Expecter) Iterate(fn interface{}) *MockVectorIndex_Iterate_Call { + return &MockVectorIndex_Iterate_Call{Call: _e.mock.On("Iterate", fn)} +} + +func (_c *MockVectorIndex_Iterate_Call) Run(run func(fn func(uint64) bool)) *MockVectorIndex_Iterate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func(uint64) bool)) + }) + return _c +} + +func (_c *MockVectorIndex_Iterate_Call) Return() *MockVectorIndex_Iterate_Call { + _c.Call.Return() + return _c +} + +func (_c *MockVectorIndex_Iterate_Call) RunAndReturn(run func(func(uint64) bool)) *MockVectorIndex_Iterate_Call { + _c.Run(run) + return _c +} + +// ListFiles provides a mock function with given fields: ctx, basePath +func (_m *MockVectorIndex) ListFiles(ctx context.Context, basePath string) ([]string, error) { + ret := _m.Called(ctx, basePath) + + if len(ret) == 0 { + panic("no return value specified 
for ListFiles") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]string, error)); ok { + return rf(ctx, basePath) + } + if rf, ok := ret.Get(0).(func(context.Context, string) []string); ok { + r0 = rf(ctx, basePath) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, basePath) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockVectorIndex_ListFiles_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListFiles' +type MockVectorIndex_ListFiles_Call struct { + *mock.Call +} + +// ListFiles is a helper method to define mock.On call +// - ctx context.Context +// - basePath string +func (_e *MockVectorIndex_Expecter) ListFiles(ctx interface{}, basePath interface{}) *MockVectorIndex_ListFiles_Call { + return &MockVectorIndex_ListFiles_Call{Call: _e.mock.On("ListFiles", ctx, basePath)} +} + +func (_c *MockVectorIndex_ListFiles_Call) Run(run func(ctx context.Context, basePath string)) *MockVectorIndex_ListFiles_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockVectorIndex_ListFiles_Call) Return(_a0 []string, _a1 error) *MockVectorIndex_ListFiles_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockVectorIndex_ListFiles_Call) RunAndReturn(run func(context.Context, string) ([]string, error)) *MockVectorIndex_ListFiles_Call { + _c.Call.Return(run) + return _c +} + +// Multivector provides a mock function with no fields +func (_m *MockVectorIndex) Multivector() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Multivector") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// MockVectorIndex_Multivector_Call is a *mock.Call that shadows 
Run/Return methods with type explicit version for method 'Multivector' +type MockVectorIndex_Multivector_Call struct { + *mock.Call +} + +// Multivector is a helper method to define mock.On call +func (_e *MockVectorIndex_Expecter) Multivector() *MockVectorIndex_Multivector_Call { + return &MockVectorIndex_Multivector_Call{Call: _e.mock.On("Multivector")} +} + +func (_c *MockVectorIndex_Multivector_Call) Run(run func()) *MockVectorIndex_Multivector_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockVectorIndex_Multivector_Call) Return(_a0 bool) *MockVectorIndex_Multivector_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Multivector_Call) RunAndReturn(run func() bool) *MockVectorIndex_Multivector_Call { + _c.Call.Return(run) + return _c +} + +// PostStartup provides a mock function with no fields +func (_m *MockVectorIndex) PostStartup() { + _m.Called() +} + +// MockVectorIndex_PostStartup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PostStartup' +type MockVectorIndex_PostStartup_Call struct { + *mock.Call +} + +// PostStartup is a helper method to define mock.On call +func (_e *MockVectorIndex_Expecter) PostStartup() *MockVectorIndex_PostStartup_Call { + return &MockVectorIndex_PostStartup_Call{Call: _e.mock.On("PostStartup")} +} + +func (_c *MockVectorIndex_PostStartup_Call) Run(run func()) *MockVectorIndex_PostStartup_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockVectorIndex_PostStartup_Call) Return() *MockVectorIndex_PostStartup_Call { + _c.Call.Return() + return _c +} + +func (_c *MockVectorIndex_PostStartup_Call) RunAndReturn(run func()) *MockVectorIndex_PostStartup_Call { + _c.Run(run) + return _c +} + +// QueryVectorDistancer provides a mock function with given fields: queryVector +func (_m *MockVectorIndex) QueryVectorDistancer(queryVector []float32) common.QueryVectorDistancer { + ret := 
_m.Called(queryVector) + + if len(ret) == 0 { + panic("no return value specified for QueryVectorDistancer") + } + + var r0 common.QueryVectorDistancer + if rf, ok := ret.Get(0).(func([]float32) common.QueryVectorDistancer); ok { + r0 = rf(queryVector) + } else { + r0 = ret.Get(0).(common.QueryVectorDistancer) + } + + return r0 +} + +// MockVectorIndex_QueryVectorDistancer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryVectorDistancer' +type MockVectorIndex_QueryVectorDistancer_Call struct { + *mock.Call +} + +// QueryVectorDistancer is a helper method to define mock.On call +// - queryVector []float32 +func (_e *MockVectorIndex_Expecter) QueryVectorDistancer(queryVector interface{}) *MockVectorIndex_QueryVectorDistancer_Call { + return &MockVectorIndex_QueryVectorDistancer_Call{Call: _e.mock.On("QueryVectorDistancer", queryVector)} +} + +func (_c *MockVectorIndex_QueryVectorDistancer_Call) Run(run func(queryVector []float32)) *MockVectorIndex_QueryVectorDistancer_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].([]float32)) + }) + return _c +} + +func (_c *MockVectorIndex_QueryVectorDistancer_Call) Return(_a0 common.QueryVectorDistancer) *MockVectorIndex_QueryVectorDistancer_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_QueryVectorDistancer_Call) RunAndReturn(run func([]float32) common.QueryVectorDistancer) *MockVectorIndex_QueryVectorDistancer_Call { + _c.Call.Return(run) + return _c +} + +// SearchByVector provides a mock function with given fields: ctx, vector, k, allow +func (_m *MockVectorIndex) SearchByVector(ctx context.Context, vector []float32, k int, allow helpers.AllowList) ([]uint64, []float32, error) { + ret := _m.Called(ctx, vector, k, allow) + + if len(ret) == 0 { + panic("no return value specified for SearchByVector") + } + + var r0 []uint64 + var r1 []float32 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []float32, int, helpers.AllowList) 
([]uint64, []float32, error)); ok { + return rf(ctx, vector, k, allow) + } + if rf, ok := ret.Get(0).(func(context.Context, []float32, int, helpers.AllowList) []uint64); ok { + r0 = rf(ctx, vector, k, allow) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]uint64) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []float32, int, helpers.AllowList) []float32); ok { + r1 = rf(ctx, vector, k, allow) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]float32) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, []float32, int, helpers.AllowList) error); ok { + r2 = rf(ctx, vector, k, allow) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MockVectorIndex_SearchByVector_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SearchByVector' +type MockVectorIndex_SearchByVector_Call struct { + *mock.Call +} + +// SearchByVector is a helper method to define mock.On call +// - ctx context.Context +// - vector []float32 +// - k int +// - allow helpers.AllowList +func (_e *MockVectorIndex_Expecter) SearchByVector(ctx interface{}, vector interface{}, k interface{}, allow interface{}) *MockVectorIndex_SearchByVector_Call { + return &MockVectorIndex_SearchByVector_Call{Call: _e.mock.On("SearchByVector", ctx, vector, k, allow)} +} + +func (_c *MockVectorIndex_SearchByVector_Call) Run(run func(ctx context.Context, vector []float32, k int, allow helpers.AllowList)) *MockVectorIndex_SearchByVector_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]float32), args[2].(int), args[3].(helpers.AllowList)) + }) + return _c +} + +func (_c *MockVectorIndex_SearchByVector_Call) Return(_a0 []uint64, _a1 []float32, _a2 error) *MockVectorIndex_SearchByVector_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *MockVectorIndex_SearchByVector_Call) RunAndReturn(run func(context.Context, []float32, int, helpers.AllowList) ([]uint64, []float32, error)) 
*MockVectorIndex_SearchByVector_Call { + _c.Call.Return(run) + return _c +} + +// SearchByVectorDistance provides a mock function with given fields: ctx, vector, dist, maxLimit, allow +func (_m *MockVectorIndex) SearchByVectorDistance(ctx context.Context, vector []float32, dist float32, maxLimit int64, allow helpers.AllowList) ([]uint64, []float32, error) { + ret := _m.Called(ctx, vector, dist, maxLimit, allow) + + if len(ret) == 0 { + panic("no return value specified for SearchByVectorDistance") + } + + var r0 []uint64 + var r1 []float32 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []float32, float32, int64, helpers.AllowList) ([]uint64, []float32, error)); ok { + return rf(ctx, vector, dist, maxLimit, allow) + } + if rf, ok := ret.Get(0).(func(context.Context, []float32, float32, int64, helpers.AllowList) []uint64); ok { + r0 = rf(ctx, vector, dist, maxLimit, allow) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]uint64) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []float32, float32, int64, helpers.AllowList) []float32); ok { + r1 = rf(ctx, vector, dist, maxLimit, allow) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]float32) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, []float32, float32, int64, helpers.AllowList) error); ok { + r2 = rf(ctx, vector, dist, maxLimit, allow) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MockVectorIndex_SearchByVectorDistance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SearchByVectorDistance' +type MockVectorIndex_SearchByVectorDistance_Call struct { + *mock.Call +} + +// SearchByVectorDistance is a helper method to define mock.On call +// - ctx context.Context +// - vector []float32 +// - dist float32 +// - maxLimit int64 +// - allow helpers.AllowList +func (_e *MockVectorIndex_Expecter) SearchByVectorDistance(ctx interface{}, vector interface{}, dist interface{}, maxLimit interface{}, allow 
interface{}) *MockVectorIndex_SearchByVectorDistance_Call { + return &MockVectorIndex_SearchByVectorDistance_Call{Call: _e.mock.On("SearchByVectorDistance", ctx, vector, dist, maxLimit, allow)} +} + +func (_c *MockVectorIndex_SearchByVectorDistance_Call) Run(run func(ctx context.Context, vector []float32, dist float32, maxLimit int64, allow helpers.AllowList)) *MockVectorIndex_SearchByVectorDistance_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]float32), args[2].(float32), args[3].(int64), args[4].(helpers.AllowList)) + }) + return _c +} + +func (_c *MockVectorIndex_SearchByVectorDistance_Call) Return(_a0 []uint64, _a1 []float32, _a2 error) *MockVectorIndex_SearchByVectorDistance_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *MockVectorIndex_SearchByVectorDistance_Call) RunAndReturn(run func(context.Context, []float32, float32, int64, helpers.AllowList) ([]uint64, []float32, error)) *MockVectorIndex_SearchByVectorDistance_Call { + _c.Call.Return(run) + return _c +} + +// Shutdown provides a mock function with given fields: ctx +func (_m *MockVectorIndex) Shutdown(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Shutdown") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_Shutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Shutdown' +type MockVectorIndex_Shutdown_Call struct { + *mock.Call +} + +// Shutdown is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockVectorIndex_Expecter) Shutdown(ctx interface{}) *MockVectorIndex_Shutdown_Call { + return &MockVectorIndex_Shutdown_Call{Call: _e.mock.On("Shutdown", ctx)} +} + +func (_c *MockVectorIndex_Shutdown_Call) Run(run func(ctx context.Context)) *MockVectorIndex_Shutdown_Call { + 
_c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockVectorIndex_Shutdown_Call) Return(_a0 error) *MockVectorIndex_Shutdown_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Shutdown_Call) RunAndReturn(run func(context.Context) error) *MockVectorIndex_Shutdown_Call { + _c.Call.Return(run) + return _c +} + +// SwitchCommitLogs provides a mock function with given fields: ctx +func (_m *MockVectorIndex) SwitchCommitLogs(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SwitchCommitLogs") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_SwitchCommitLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SwitchCommitLogs' +type MockVectorIndex_SwitchCommitLogs_Call struct { + *mock.Call +} + +// SwitchCommitLogs is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockVectorIndex_Expecter) SwitchCommitLogs(ctx interface{}) *MockVectorIndex_SwitchCommitLogs_Call { + return &MockVectorIndex_SwitchCommitLogs_Call{Call: _e.mock.On("SwitchCommitLogs", ctx)} +} + +func (_c *MockVectorIndex_SwitchCommitLogs_Call) Run(run func(ctx context.Context)) *MockVectorIndex_SwitchCommitLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockVectorIndex_SwitchCommitLogs_Call) Return(_a0 error) *MockVectorIndex_SwitchCommitLogs_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_SwitchCommitLogs_Call) RunAndReturn(run func(context.Context) error) *MockVectorIndex_SwitchCommitLogs_Call { + _c.Call.Return(run) + return _c +} + +// Type provides a mock function with no fields +func (_m *MockVectorIndex) Type() common.IndexType { + ret := _m.Called() + + if len(ret) == 0 { + 
panic("no return value specified for Type") + } + + var r0 common.IndexType + if rf, ok := ret.Get(0).(func() common.IndexType); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(common.IndexType) + } + + return r0 +} + +// MockVectorIndex_Type_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Type' +type MockVectorIndex_Type_Call struct { + *mock.Call +} + +// Type is a helper method to define mock.On call +func (_e *MockVectorIndex_Expecter) Type() *MockVectorIndex_Type_Call { + return &MockVectorIndex_Type_Call{Call: _e.mock.On("Type")} +} + +func (_c *MockVectorIndex_Type_Call) Run(run func()) *MockVectorIndex_Type_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockVectorIndex_Type_Call) Return(_a0 common.IndexType) *MockVectorIndex_Type_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_Type_Call) RunAndReturn(run func() common.IndexType) *MockVectorIndex_Type_Call { + _c.Call.Return(run) + return _c +} + +// UpdateUserConfig provides a mock function with given fields: updated, callback +func (_m *MockVectorIndex) UpdateUserConfig(updated config.VectorIndexConfig, callback func()) error { + ret := _m.Called(updated, callback) + + if len(ret) == 0 { + panic("no return value specified for UpdateUserConfig") + } + + var r0 error + if rf, ok := ret.Get(0).(func(config.VectorIndexConfig, func()) error); ok { + r0 = rf(updated, callback) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_UpdateUserConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateUserConfig' +type MockVectorIndex_UpdateUserConfig_Call struct { + *mock.Call +} + +// UpdateUserConfig is a helper method to define mock.On call +// - updated config.VectorIndexConfig +// - callback func() +func (_e *MockVectorIndex_Expecter) UpdateUserConfig(updated interface{}, callback interface{}) *MockVectorIndex_UpdateUserConfig_Call { + 
return &MockVectorIndex_UpdateUserConfig_Call{Call: _e.mock.On("UpdateUserConfig", updated, callback)} +} + +func (_c *MockVectorIndex_UpdateUserConfig_Call) Run(run func(updated config.VectorIndexConfig, callback func())) *MockVectorIndex_UpdateUserConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(config.VectorIndexConfig), args[1].(func())) + }) + return _c +} + +func (_c *MockVectorIndex_UpdateUserConfig_Call) Return(_a0 error) *MockVectorIndex_UpdateUserConfig_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_UpdateUserConfig_Call) RunAndReturn(run func(config.VectorIndexConfig, func()) error) *MockVectorIndex_UpdateUserConfig_Call { + _c.Call.Return(run) + return _c +} + +// ValidateBeforeInsert provides a mock function with given fields: vector +func (_m *MockVectorIndex) ValidateBeforeInsert(vector []float32) error { + ret := _m.Called(vector) + + if len(ret) == 0 { + panic("no return value specified for ValidateBeforeInsert") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]float32) error); ok { + r0 = rf(vector) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockVectorIndex_ValidateBeforeInsert_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ValidateBeforeInsert' +type MockVectorIndex_ValidateBeforeInsert_Call struct { + *mock.Call +} + +// ValidateBeforeInsert is a helper method to define mock.On call +// - vector []float32 +func (_e *MockVectorIndex_Expecter) ValidateBeforeInsert(vector interface{}) *MockVectorIndex_ValidateBeforeInsert_Call { + return &MockVectorIndex_ValidateBeforeInsert_Call{Call: _e.mock.On("ValidateBeforeInsert", vector)} +} + +func (_c *MockVectorIndex_ValidateBeforeInsert_Call) Run(run func(vector []float32)) *MockVectorIndex_ValidateBeforeInsert_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].([]float32)) + }) + return _c +} + +func (_c *MockVectorIndex_ValidateBeforeInsert_Call) Return(_a0 error) 
*MockVectorIndex_ValidateBeforeInsert_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockVectorIndex_ValidateBeforeInsert_Call) RunAndReturn(run func([]float32) error) *MockVectorIndex_ValidateBeforeInsert_Call { + _c.Call.Return(run) + return _c +} + +// NewMockVectorIndex creates a new instance of MockVectorIndex. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockVectorIndex(t interface { + mock.TestingT + Cleanup(func()) +}) *MockVectorIndex { + mock := &MockVectorIndex{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/multi_shard_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/multi_shard_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c8611e079a8b7193aae8835b3561d2f2a4d7d6cf --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/multi_shard_integration_test.go @@ -0,0 +1,941 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package db + +import ( + "context" + "fmt" + "math" + "math/rand" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/entities/verbosity" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/objects" + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" +) + +func Test_MultiShardJourneys_IndividualImports(t *testing.T) { + r := getRandomSeed() + repo, logger, mockSchemaReader := setupMultiShardTest(t) + defer func() { + repo.Shutdown(context.Background()) + }() + + t.Run("prepare", makeTestMultiShardSchema(repo, logger, mockSchemaReader, false, testClassesForImporting()...)) + + data := multiShardTestData(r) + queryVec := exampleQueryVec(r) + groundTruth := bruteForceObjectsByQuery(data, queryVec) + refData := multiShardRefClassData(r, data) + + t.Run("import all individually", func(t *testing.T) { + for _, obj := range data { + require.Nil(t, 
repo.PutObject(context.Background(), obj, obj.Vector, nil, nil, nil, 0)) + } + }) + + t.Run("nodes api", testNodesAPI(repo)) + + t.Run("sorting objects", makeTestSortingClass(repo)) + + t.Run("verify objects", makeTestRetrievingBaseClass(repo, data, queryVec, + groundTruth)) + + t.Run("import refs individually", func(t *testing.T) { + for _, obj := range refData { + require.Nil(t, repo.PutObject(context.Background(), obj, obj.Vector, nil, nil, nil, 0)) + } + }) + + t.Run("verify refs", makeTestRetrieveRefClass(repo, data, refData)) + + t.Run("batch delete", makeTestBatchDeleteAllObjects(repo)) +} + +func Test_MultiShardJourneys_BatchedImports(t *testing.T) { + r := getRandomSeed() + repo, logger, mockSchemaReader := setupMultiShardTest(t) + defer func() { + repo.Shutdown(context.Background()) + }() + + t.Run("prepare", makeTestMultiShardSchema(repo, logger, mockSchemaReader, false, testClassesForImporting()...)) + + data := multiShardTestData(r) + queryVec := exampleQueryVec(r) + groundTruth := bruteForceObjectsByQuery(data, queryVec) + refData := multiShardRefClassData(r, data) + + t.Run("import in a batch", func(t *testing.T) { + batch := make(objects.BatchObjects, len(data)) + for i, obj := range data { + batch[i] = objects.BatchObject{ + OriginalIndex: i, + Object: obj, + UUID: obj.ID, + } + } + + _, err := repo.BatchPutObjects(context.Background(), batch, nil, 0) + require.Nil(t, err) + }) + + t.Run("nodes api", testNodesAPI(repo)) + + t.Run("verify objects", makeTestRetrievingBaseClass(repo, data, queryVec, + groundTruth)) + + t.Run("import refs in large batch", func(t *testing.T) { + // first strip the refs from the objects, so we can import them in a second + // step as batch ref + + for _, obj := range refData { + withoutRef := &models.Object{ + ID: obj.ID, + Class: obj.Class, + Vector: obj.Vector, + Properties: map[string]interface{}{}, // empty so we remove the ref + } + + require.Nil(t, repo.PutObject(context.Background(), withoutRef, withoutRef.Vector, 
nil, nil, nil, 0)) + } + + index := 0 + refBatch := make(objects.BatchReferences, len(refData)*len(data)) + for _, obj := range refData { + for _, ref := range obj.Properties.(map[string]interface{})["toOther"].(models.MultipleRef) { + to, _ := crossref.ParseSingleRef(ref) + refBatch[index] = objects.BatchReference{ + OriginalIndex: index, + To: to, + From: crossref.NewSource(schema.ClassName(obj.Class), "toOther", obj.ID), + } + index++ + } + } + + _, err := repo.AddBatchReferences(context.Background(), refBatch, nil, 0) + require.Nil(t, err) + }) + + t.Run("verify refs", makeTestRetrieveRefClass(repo, data, refData)) + + t.Run("batch delete", makeTestBatchDeleteAllObjects(repo)) +} + +func Test_MultiShardJourneys_BM25_Search(t *testing.T) { + repo, logger, mockSchemaReader := setupMultiShardTest(t) + defer func() { + repo.Shutdown(context.Background()) + }() + + className := "RacecarPosts" + + t.Run("prepare", func(t *testing.T) { + class := &models.Class{ + Class: className, + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: &models.InvertedIndexConfig{ + CleanupIntervalSeconds: 60, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + Properties: []*models.Property{ + { + Name: "contents", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "textArrayProp", + DataType: []string{string(schema.DataTypeTextArray)}, + }, + }, + } + + t.Run("prepare", makeTestMultiShardSchema(repo, logger, mockSchemaReader, true, class)) + }) + + t.Run("insert search data", func(t *testing.T) { + objs := objects.BatchObjects{ + { + UUID: "c39751ed-ddc2-4c9f-a45b-8b5732ddde56", + Object: &models.Object{ + ID: "c39751ed-ddc2-4c9f-a45b-8b5732ddde56", + Class: className, + Properties: map[string]interface{}{ + "contents": "Team Lotus was a domineering force in the 
early 90s", + }, + }, + }, + { + UUID: "5d034311-06e1-476e-b446-1306db91d906", + Object: &models.Object{ + ID: "5d034311-06e1-476e-b446-1306db91d906", + Class: className, + Properties: map[string]interface{}{ + "contents": "When a car becomes unserviceable, the driver must retire early from the race", + }, + }, + }, + { + UUID: "01989a8c-e37f-471d-89ca-9a787dbbf5f2", + Object: &models.Object{ + ID: "01989a8c-e37f-471d-89ca-9a787dbbf5f2", + Class: className, + Properties: map[string]interface{}{ + "contents": "A young driver is better than an old driver", + }, + }, + }, + { + UUID: "392614c5-4ca4-4630-a014-61fe868a20fd", + Object: &models.Object{ + ID: "392614c5-4ca4-4630-a014-61fe868a20fd", + Class: className, + Properties: map[string]interface{}{ + "contents": "an old driver doesn't retire early", + }, + }, + }, + } + + _, err := repo.BatchPutObjects(context.Background(), objs, nil, 0) + require.Nil(t, err) + }) + + t.Run("ranked keyword search", func(t *testing.T) { + type testcase struct { + expectedResults []string + rankingParams *searchparams.KeywordRanking + } + + tests := []testcase{ + { + rankingParams: &searchparams.KeywordRanking{ + Query: "driver", + Properties: []string{"contents"}, + }, + expectedResults: []string{ + "01989a8c-e37f-471d-89ca-9a787dbbf5f2", + "392614c5-4ca4-4630-a014-61fe868a20fd", + "5d034311-06e1-476e-b446-1306db91d906", + }, + }, + } + + for _, test := range tests { + res, err := repo.Search(context.Background(), dto.GetParams{ + ClassName: className, + Pagination: &filters.Pagination{Limit: 10}, + KeywordRanking: test.rankingParams, + }) + require.Nil(t, err) + require.Equal(t, len(test.expectedResults), len(res)) + for i := range res { + assert.Equal(t, test.expectedResults[i], res[i].ID.String()) + } + t.Logf("res: %+v", res) + } + }) +} + +func setupMultiShardTest(t *testing.T) (*DB, *logrus.Logger, *schemaUC.MockSchemaReader) { + dirName := t.TempDir() + logger, _ := test.NewNullLogger() + mockSchemaReader := 
schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil) + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + ServerVersion: "server-version", + GitHash: "git-hash", + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + return repo, logger, mockSchemaReader +} + +func makeTestMultiShardSchema(repo *DB, logger logrus.FieldLogger, mockSchemaReader *schemaUC.MockSchemaReader, fixedShardState bool, classes ...*models.Class) func(t *testing.T) { + return func(t *testing.T) { + var shardState *sharding.State + if fixedShardState { + shardState = fixedMultiShardState() + } else { + shardState = multiShardState() + } + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + 
class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + repo.SetSchemaGetter(schemaGetter) + err := repo.WaitForStartup(testCtx()) + require.Nil(t, err) + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the class", func(t *testing.T) { + for _, class := range classes { + require.Nil(t, migrator.AddClass(context.Background(), class)) + } + }) + + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: classes, + }, + } + } +} + +func makeTestRetrievingBaseClass(repo *DB, data []*models.Object, + queryVec []float32, groundTruth []*models.Object, +) func(t *testing.T) { + return func(t *testing.T) { + t.Run("retrieve all individually", func(t *testing.T) { + for _, desired := range data { + res, err := repo.ObjectByID(context.Background(), desired.ID, search.SelectProperties{}, additional.Properties{}, "") + assert.Nil(t, err) + + require.NotNil(t, res) + assert.Equal(t, desired.Properties.(map[string]interface{})["boolProp"].(bool), + res.Object().Properties.(map[string]interface{})["boolProp"].(bool)) + assert.Equal(t, desired.ID, res.Object().ID) + } + }) + + t.Run("retrieve through filter (object search)", func(t *testing.T) { + do := func(limit, expected int) { + filters := &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + Value: &filters.Value{ + Value: true, + Type: schema.DataTypeBoolean, + }, + On: &filters.Path{ + Property: "boolProp", + }, + }, + } + res, err := repo.ObjectSearch(context.Background(), 0, limit, filters, nil, + additional.Properties{}, "") + assert.Nil(t, err) + + assert.Len(t, res, expected) + for _, obj := range res { + assert.Equal(t, true, obj.Schema.(map[string]interface{})["boolProp"].(bool)) + } + } + + t.Run("with high limit", func(t *testing.T) { + do(100, 10) + }) + + t.Run("with low limit", func(t *testing.T) { + do(3, 3) + }) + }) + + t.Run("retrieve through filter 
(class search)", func(t *testing.T) { + do := func(limit, expected int) { + filter := &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + Value: &filters.Value{ + Value: true, + Type: schema.DataTypeBoolean, + }, + On: &filters.Path{ + Property: "boolProp", + }, + }, + } + res, err := repo.Search(context.Background(), dto.GetParams{ + Filters: filter, + Pagination: &filters.Pagination{ + Limit: limit, + }, + ClassName: "TestClass", + Properties: search.SelectProperties{{Name: "boolProp"}}, + }) + assert.Nil(t, err) + + assert.Len(t, res, expected) + for _, obj := range res { + assert.Equal(t, true, obj.Schema.(map[string]interface{})["boolProp"].(bool)) + } + } + + t.Run("with high limit", func(t *testing.T) { + do(100, 10) + }) + + t.Run("with low limit", func(t *testing.T) { + do(3, 3) + }) + }) + + t.Run("retrieve through class-level vector search", func(t *testing.T) { + do := func(t *testing.T, limit, expected int) { + res, err := repo.VectorSearch(context.Background(), dto.GetParams{ + Pagination: &filters.Pagination{ + Limit: limit, + }, + ClassName: "TestClass", + }, []string{""}, []models.Vector{queryVec}) + assert.Nil(t, err) + assert.Len(t, res, expected) + for i, obj := range res { + assert.Equal(t, groundTruth[i].ID, obj.ID) + } + } + + t.Run("with high limit", func(t *testing.T) { + do(t, 100, 20) + }) + + t.Run("with low limit", func(t *testing.T) { + do(t, 3, 3) + }) + }) + + t.Run("retrieve through inter-class vector search", func(t *testing.T) { + do := func(t *testing.T, limit, expected int) { + res, err := repo.CrossClassVectorSearch(context.Background(), queryVec, "", 0, limit, nil) + assert.Nil(t, err) + assert.Len(t, res, expected) + for i, obj := range res { + assert.Equal(t, groundTruth[i].ID, obj.ID) + } + } + + t.Run("with high limit", func(t *testing.T) { + do(t, 100, 20) + }) + + t.Run("with low limit", func(t *testing.T) { + do(t, 3, 3) + }) + }) + } +} + +func makeTestRetrieveRefClass(repo *DB, data, 
refData []*models.Object) func(t *testing.T) { + return func(t *testing.T) { + t.Run("retrieve ref data individually with select props", func(t *testing.T) { + for _, desired := range refData { + res, err := repo.ObjectByID(context.Background(), desired.ID, search.SelectProperties{ + search.SelectProperty{ + IsPrimitive: false, + Name: "toOther", + Refs: []search.SelectClass{{ + ClassName: "TestClass", + RefProperties: search.SelectProperties{{ + Name: "index", + IsPrimitive: true, + }}, + }}, + }, + }, additional.Properties{}, "") + assert.Nil(t, err) + refs := res.Schema.(map[string]interface{})["toOther"].([]interface{}) + assert.Len(t, refs, len(data)) + for i, ref := range refs { + indexField := ref.(search.LocalRef).Fields["index"].(float64) + assert.Equal(t, i, int(indexField)) + } + } + }) + } +} + +func makeTestSortingClass(repo *DB) func(t *testing.T) { + return func(t *testing.T) { + t.Run("sort by property", func(t *testing.T) { + getIndex := func(res search.Result) float64 { + if prop := res.Object().Properties.(map[string]interface{})["index"]; prop != nil { + return prop.(float64) + } + return -1 + } + getBoolProp := func(res search.Result) bool { + if prop := res.Object().Properties.(map[string]interface{})["boolProp"]; prop != nil { + return prop.(bool) + } + return false + } + getStringProp := func(res search.Result) string { + if prop := res.Object().Properties.(map[string]interface{})["stringProp"]; prop != nil { + return prop.(string) + } + return "" + } + getTextArrayProp := func(res search.Result) []string { + if prop := res.Object().Properties.(map[string]interface{})["textArrayProp"]; prop != nil { + return prop.([]string) + } + return nil + } + type test struct { + name string + sort []filters.Sort + expectedIndexes []float64 + expectedBoolProps []bool + expectedStringProps []string + expectedTextArrayProps [][]string + constainsErrorMsgs []string + } + tests := []test{ + { + name: "indexProp desc", + sort: []filters.Sort{{Path: 
[]string{"indexProp"}, Order: "desc"}}, + expectedIndexes: []float64{19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, + }, + { + name: "indexProp asc", + sort: []filters.Sort{{Path: []string{"indexProp"}, Order: "asc"}}, + expectedIndexes: []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}, + }, + { + name: "stringProp desc", + sort: []filters.Sort{{Path: []string{"stringProp"}, Order: "desc"}}, + expectedStringProps: []string{"s19", "s18", "s17", "s16", "s15", "s14", "s13", "s12", "s11", "s10", "s09", "s08", "s07", "s06", "s05", "s04", "s03", "s02", "s01", "s00"}, + }, + { + name: "stringProp asc", + sort: []filters.Sort{{Path: []string{"stringProp"}, Order: "asc"}}, + expectedStringProps: []string{"s00", "s01", "s02", "s03", "s04", "s05", "s06", "s07", "s08", "s09", "s10", "s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19"}, + }, + { + name: "textArrayProp desc", + sort: []filters.Sort{{Path: []string{"textArrayProp"}, Order: "desc"}}, + expectedTextArrayProps: [][]string{{"s19", "19"}, {"s18", "18"}, {"s17", "17"}, {"s16", "16"}, {"s15", "15"}, {"s14", "14"}, {"s13", "13"}, {"s12", "12"}, {"s11", "11"}, {"s10", "10"}, {"s09", "09"}, {"s08", "08"}, {"s07", "07"}, {"s06", "06"}, {"s05", "05"}, {"s04", "04"}, {"s03", "03"}, {"s02", "02"}, {"s01", "01"}, {"s00", "00"}}, + }, + { + name: "textArrayProp asc", + sort: []filters.Sort{{Path: []string{"textArrayProp"}, Order: "asc"}}, + expectedTextArrayProps: [][]string{{"s00", "00"}, {"s01", "01"}, {"s02", "02"}, {"s03", "03"}, {"s04", "04"}, {"s05", "05"}, {"s06", "06"}, {"s07", "07"}, {"s08", "08"}, {"s09", "09"}, {"s10", "10"}, {"s11", "11"}, {"s12", "12"}, {"s13", "13"}, {"s14", "14"}, {"s15", "15"}, {"s16", "16"}, {"s17", "17"}, {"s18", "18"}, {"s19", "19"}}, + }, + { + name: "boolProp desc", + sort: []filters.Sort{{Path: []string{"boolProp"}, Order: "desc"}}, + expectedBoolProps: []bool{true, true, true, true, true, true, true, true, true, true, false, 
false, false, false, false, false, false, false, false, false}, + }, + { + name: "boolProp asc", + sort: []filters.Sort{{Path: []string{"boolProp"}, Order: "asc"}}, + expectedBoolProps: []bool{false, false, false, false, false, false, false, false, false, false, true, true, true, true, true, true, true, true, true, true}, + }, + { + name: "boolProp asc stringProp asc", + sort: []filters.Sort{{Path: []string{"boolProp"}, Order: "asc"}, {Path: []string{"stringProp"}, Order: "asc"}}, + expectedBoolProps: []bool{false, false, false, false, false, false, false, false, false, false, true, true, true, true, true, true, true, true, true, true}, + expectedStringProps: []string{"s01", "s03", "s05", "s07", "s09", "s11", "s13", "s15", "s17", "s19", "s00", "s02", "s04", "s06", "s08", "s10", "s12", "s14", "s16", "s18"}, + }, + { + name: "boolProp desc stringProp asc", + sort: []filters.Sort{{Path: []string{"boolProp"}, Order: "desc"}, {Path: []string{"stringProp"}, Order: "asc"}}, + expectedBoolProps: []bool{true, true, true, true, true, true, true, true, true, true, false, false, false, false, false, false, false, false, false, false}, + expectedStringProps: []string{"s00", "s02", "s04", "s06", "s08", "s10", "s12", "s14", "s16", "s18", "s01", "s03", "s05", "s07", "s09", "s11", "s13", "s15", "s17", "s19"}, + }, + { + name: "boolProp asc indexProp asc", + sort: []filters.Sort{{Path: []string{"boolProp"}, Order: "asc"}, {Path: []string{"indexProp"}, Order: "asc"}}, + expectedBoolProps: []bool{false, false, false, false, false, false, false, false, false, false, true, true, true, true, true, true, true, true, true, true}, + expectedIndexes: []float64{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18}, + }, + { + name: "boolProp asc indexProp desc", + sort: []filters.Sort{{Path: []string{"boolProp"}, Order: "asc"}, {Path: []string{"indexProp"}, Order: "desc"}}, + expectedBoolProps: []bool{false, false, false, false, false, false, false, false, false, false, true, 
true, true, true, true, true, true, true, true, true}, + expectedIndexes: []float64{19, 17, 15, 13, 11, 9, 7, 5, 3, 1, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0}, + }, + { + name: "index property doesn't exist in testrefclass", + sort: []filters.Sort{{Path: []string{"index"}, Order: "desc"}}, + expectedIndexes: nil, + constainsErrorMsgs: []string{ + "no such prop with name 'index' found in class 'TestRefClass' in the schema. " + + "Check your schema files for which properties in this class are available", + }, + }, + { + name: "non existent property in all classes", + sort: []filters.Sort{{Path: []string{"nonexistentproperty"}, Order: "desc"}}, + expectedIndexes: nil, + constainsErrorMsgs: []string{ + "no such prop with name 'nonexistentproperty' found in class 'TestClass' in the schema. " + + "Check your schema files for which properties in this class are available", + "no such prop with name 'nonexistentproperty' found in class 'TestRefClass' in the schema. " + + "Check your schema files for which properties in this class are available", + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res, err := repo.ObjectSearch(context.Background(), 0, 1000, nil, test.sort, + additional.Properties{}, "") + if len(test.constainsErrorMsgs) > 0 { + require.NotNil(t, err) + for _, errorMsg := range test.constainsErrorMsgs { + assert.Contains(t, err.Error(), errorMsg) + } + } else { + require.Nil(t, err) + if len(test.expectedIndexes) > 0 { + for i := range res { + assert.Equal(t, test.expectedIndexes[i], getIndex(res[i])) + } + } + if len(test.expectedBoolProps) > 0 { + for i := range res { + assert.Equal(t, test.expectedBoolProps[i], getBoolProp(res[i])) + } + } + if len(test.expectedStringProps) > 0 { + for i := range res { + assert.Equal(t, test.expectedStringProps[i], getStringProp(res[i])) + } + } + if len(test.expectedTextArrayProps) > 0 { + for i := range res { + assert.EqualValues(t, test.expectedTextArrayProps[i], getTextArrayProp(res[i])) + 
} + } + } + }) + } + }) + } +} + +func testNodesAPI(repo *DB) func(t *testing.T) { + return func(t *testing.T) { + nodeStatues, err := repo.GetNodeStatus(context.Background(), "", "", verbosity.OutputVerbose) + require.Nil(t, err) + require.NotNil(t, nodeStatues) + + require.Len(t, nodeStatues, 1) + nodeStatus := nodeStatues[0] + assert.NotNil(t, nodeStatus) + assert.Equal(t, "node1", nodeStatus.Name) + assert.Equal(t, "server-version", nodeStatus.Version) + assert.Equal(t, "git-hash", nodeStatus.GitHash) + assert.Len(t, nodeStatus.Shards, 6) + var testClassShardsCount, testClassObjectsCount int64 + var testRefClassShardsCount, testRefClassObjectsCount int64 + for _, status := range nodeStatus.Shards { + if status.Class == "TestClass" { + testClassShardsCount += 1 + testClassObjectsCount += status.ObjectCount + } + if status.Class == "TestRefClass" { + testRefClassShardsCount += 1 + testRefClassObjectsCount += status.ObjectCount + } + } + assert.Equal(t, int64(3), testClassShardsCount) + // a previous version of this test made assertions on object counts, + // however with object count becoming async, we can no longer make exact + // assertions here. See https://github.com/weaviate/weaviate/issues/4193 + // for details. 
+ assert.Equal(t, int64(3), testRefClassShardsCount) + assert.Equal(t, int64(6), nodeStatus.Stats.ShardCount) + } +} + +func makeTestBatchDeleteAllObjects(repo *DB) func(t *testing.T) { + return func(t *testing.T) { + performDelete := func(t *testing.T, className string) { + getParams := func(className string, dryRun bool) objects.BatchDeleteParams { + return objects.BatchDeleteParams{ + ClassName: schema.ClassName(className), + Filters: &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorLike, + Value: &filters.Value{ + Value: "*", + Type: schema.DataTypeText, + }, + On: &filters.Path{ + Property: "id", + }, + }, + }, + DryRun: dryRun, + Output: "verbose", + } + } + performClassSearch := func(className string) ([]search.Result, error) { + return repo.Search(context.Background(), dto.GetParams{ + ClassName: className, + Pagination: &filters.Pagination{Limit: 10000}, + }) + } + // get the initial count of the objects + res, err := performClassSearch(className) + require.Nil(t, err) + beforeDelete := len(res) + require.True(t, beforeDelete > 0) + // dryRun == true + batchDeleteRes, err := repo.BatchDeleteObjects(context.Background(), getParams(className, true), time.Now(), nil, "", 0) + require.Nil(t, err) + require.Equal(t, int64(beforeDelete), batchDeleteRes.Matches) + require.Equal(t, beforeDelete, len(batchDeleteRes.Objects)) + for _, batchRes := range batchDeleteRes.Objects { + require.Nil(t, batchRes.Err) + } + // check that every object is preserved (not deleted) + res, err = performClassSearch(className) + require.Nil(t, err) + require.Equal(t, beforeDelete, len(res)) + // dryRun == false, perform actual delete + batchDeleteRes, err = repo.BatchDeleteObjects(context.Background(), getParams(className, false), time.Now(), nil, "", 0) + require.Nil(t, err) + require.Equal(t, int64(beforeDelete), batchDeleteRes.Matches) + require.Equal(t, beforeDelete, len(batchDeleteRes.Objects)) + for _, batchRes := range batchDeleteRes.Objects { + 
require.Nil(t, batchRes.Err) + } + // check that every object is deleted + res, err = performClassSearch(className) + require.Nil(t, err) + require.Equal(t, 0, len(res)) + } + t.Run("batch delete TestRefClass", func(t *testing.T) { + performDelete(t, "TestRefClass") + }) + t.Run("batch delete TestClass", func(t *testing.T) { + performDelete(t, "TestClass") + }) + } +} + +func exampleQueryVec(r *rand.Rand) []float32 { + dim := 10 + vec := make([]float32, dim) + for j := range vec { + vec[j] = r.Float32() + } + return vec +} + +func multiShardTestData(r *rand.Rand) []*models.Object { + size := 20 + dim := 10 + out := make([]*models.Object, size) + for i := range out { + vec := make([]float32, dim) + for j := range vec { + vec[j] = r.Float32() + } + + out[i] = &models.Object{ + ID: strfmt.UUID(uuid.New().String()), + Class: "TestClass", + Vector: vec, + Properties: map[string]interface{}{ + "boolProp": i%2 == 0, + "index": i, + "indexProp": i, + "stringProp": fmt.Sprintf("s%02d", i), + "textArrayProp": []string{fmt.Sprintf("s%02d", i), fmt.Sprintf("%02d", i)}, + }, + } + } + + return out +} + +func multiShardRefClassData(r *rand.Rand, targets []*models.Object) []*models.Object { + // each class will link to all possible targets, so that we can be sure that + // we hit cross-shard links + targetLinks := make(models.MultipleRef, len(targets)) + for i, obj := range targets { + targetLinks[i] = &models.SingleRef{ + Beacon: strfmt.URI(crossref.NewLocalhost("", obj.ID).String()), + } + } + + size := 20 + dim := 10 + out := make([]*models.Object, size) + for i := range out { + vec := make([]float32, dim) + for j := range vec { + vec[j] = r.Float32() + } + + out[i] = &models.Object{ + ID: strfmt.UUID(uuid.New().String()), + Class: "TestRefClass", + Vector: vec, + Properties: map[string]interface{}{ + "toOther": targetLinks, + }, + } + } + + return out +} + +func bruteForceObjectsByQuery(objs []*models.Object, + query []float32, +) []*models.Object { + type distanceAndObj 
struct { + distance float32 + obj *models.Object + } + + distProv := distancer.NewDotProductProvider() + distances := make([]distanceAndObj, len(objs)) + + for i := range objs { + dist, _ := distProv.SingleDist(normalize(query), normalize(objs[i].Vector)) + distances[i] = distanceAndObj{ + distance: dist, + obj: objs[i], + } + } + + sort.Slice(distances, func(a, b int) bool { + return distances[a].distance < distances[b].distance + }) + + out := make([]*models.Object, len(objs)) + for i := range out { + out[i] = distances[i].obj + } + + return out +} + +func testClassesForImporting() []*models.Class { + return []*models.Class{ + { + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Class: "TestClass", + Properties: []*models.Property{ + { + Name: "boolProp", + DataType: []string{string(schema.DataTypeBoolean)}, + }, + { + Name: "index", + DataType: []string{string(schema.DataTypeInt)}, + }, + { + Name: "indexProp", + DataType: []string{string(schema.DataTypeInt)}, + }, + { + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "textArrayProp", + DataType: []string{string(schema.DataTypeTextArray)}, + }, + }, + }, + { + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Class: "TestRefClass", + Properties: []*models.Property{ + { + Name: "boolProp", + DataType: []string{string(schema.DataTypeBoolean)}, + }, + { + Name: "toOther", + DataType: []string{"TestClass"}, + }, + { + Name: "indexProp", + DataType: []string{string(schema.DataTypeInt)}, + }, + { + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "textArrayProp", + DataType: []string{string(schema.DataTypeTextArray)}, + }, + }, + }, + } +} + +func normalize(v []float32) []float32 { + var norm float32 + for i := range v { + norm += v[i] * v[i] + } + + norm 
= float32(math.Sqrt(float64(norm))) + for i := range v { + v[i] = v[i] / norm + } + + return v +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/node_wide_metrics.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/node_wide_metrics.go new file mode 100644 index 0000000000000000000000000000000000000000..115077e7fa357cd9ae4b1b0fea566ad6616ba8a4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/node_wide_metrics.go @@ -0,0 +1,459 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "maps" + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + + enterrors "github.com/weaviate/weaviate/entities/errors" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + "github.com/weaviate/weaviate/entities/tenantactivity" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/config" +) + +type nodeWideMetricsObserver struct { + db *DB + + // Goroutines spawned by nodeWideMetricsObserver must exit after receiving on this channel. 
+ shutdown chan struct{} + + activityLock sync.Mutex + activityTracker activityByCollection + lastTenantUsage tenantactivity.ByCollection + lastTenantUsageReads tenantactivity.ByCollection + lastTenantUsageWrites tenantactivity.ByCollection +} + +// internal types used for tenant activity aggregation, not exposed to the user +type ( + activityByCollection map[string]activityByTenant + activityByTenant map[string]activity + activity struct { + read int32 + write int32 + } +) + +func newNodeWideMetricsObserver(db *DB) *nodeWideMetricsObserver { + return &nodeWideMetricsObserver{db: db, shutdown: make(chan struct{})} +} + +// Start goroutines for periodically polling node-wide metrics. +// Shard read/write activity and objects_count are only collected +// if metric aggregation (PROMETHEUS_MONITORING_GROUP) is enabled. +// Only start this service if DB has Prometheus enabled. +func (o *nodeWideMetricsObserver) Start() { + if o.db.config.TrackVectorDimensions { + enterrors.GoWrapper(o.observeDimensionMetrics, o.db.logger) + } + + if o.db.promMetrics.Group { + enterrors.GoWrapper(o.observeShards, o.db.logger) + } +} + +func (o *nodeWideMetricsObserver) Shutdown() { + close(o.shutdown) +} + +func (o *nodeWideMetricsObserver) observeShards() { + // make sure we start with a warm state, otherwise we delay the initial + // update. This only applies to tenant activity, other metrics wait + // for shard-readiness anyway. + o.observeActivity() + + t30 := time.NewTicker(30 * time.Second) + defer t30.Stop() + + t10 := time.NewTicker(10 * time.Second) + defer t10.Stop() + + for { + select { + case <-o.shutdown: + return + case <-t10.C: + o.observeActivity() + case <-t30.C: + o.observeObjectCount() + } + } +} + +// Collect and publish aggregated object_count metric iff all indices report allShardsReady=true. 
+func (o *nodeWideMetricsObserver) observeObjectCount() { + o.db.indexLock.RLock() + defer o.db.indexLock.RUnlock() + + for _, index := range o.db.indices { + if !index.allShardsReady.Load() { + o.db.logger.WithFields(logrus.Fields{ + "action": "skip_observe_node_wide_metrics", + }).Debugf("skip node-wide metrics, not all shards ready") + return + } + } + + start := time.Now() + + totalObjectCount := int64(0) + for _, index := range o.db.indices { + index.ForEachShard(func(name string, shard ShardLike) error { + objectCount, err := shard.ObjectCountAsync(context.Background()) + if err != nil { + o.db.logger.Warnf("error while getting object count for shard %s: %w", shard.Name(), err) + } + totalObjectCount += objectCount + return nil + }) + } + + o.db.promMetrics.ObjectCount.With(prometheus.Labels{ + "class_name": "n/a", + "shard_name": "n/a", + }).Set(float64(totalObjectCount)) + + took := time.Since(start) + o.db.logger.WithFields(logrus.Fields{ + "action": "observe_node_wide_metrics", + "took": took, + "object_count": totalObjectCount, + }).Debug("observed node wide metrics") +} + +// NOTE(dyma): should this also chech that all indices report allShardsReady == true? +// Otherwise getCurrentActivity may end up loading lazy-loaded shards just to check +// their activity, which is redundant on a cold shard? 
// observeActivity snapshots the per-tenant read/write counters across all
// partitioned indices and derives the externally visible usage maps
// (total / reads-only / writes-only) from the delta against the previous
// snapshot, all under activityLock.
func (o *nodeWideMetricsObserver) observeActivity() {
	start := time.Now()
	current := o.getCurrentActivity()

	o.activityLock.Lock()
	defer o.activityLock.Unlock()

	o.lastTenantUsage, o.lastTenantUsageReads, o.lastTenantUsageWrites = o.analyzeActivityDelta(current)
	o.activityTracker = current

	took := time.Since(start)
	o.db.logger.WithFields(logrus.Fields{
		"action": "observe_tenantactivity",
		"took":   took,
	}).Debug("observed tenant activity stats")
}

// logActivity emits a single structured log line for a tenant activity
// change. The log level is configurable per activity type ("read"/"write"
// via db config); any other activity type, or an unparsable configured
// level, falls back to debug.
func (o *nodeWideMetricsObserver) logActivity(col, tenant, activityType string, value int32) {
	logBase := o.db.logger.WithFields(logrus.Fields{
		"action":             "tenant_activity_change",
		"collection":         col,
		"tenant":             tenant,
		"activity_type":      activityType,
		"last_counter_value": value,
	})

	var lvlStr string
	switch activityType {
	case "read":
		lvlStr = o.db.config.TenantActivityReadLogLevel.Get()
	case "write":
		lvlStr = o.db.config.TenantActivityWriteLogLevel.Get()
	default:
		lvlStr = "debug" // fall-back for any unknown activityType
	}

	level, err := logrus.ParseLevel(strings.ToLower(lvlStr))
	if err != nil {
		level = logrus.DebugLevel
		logBase.WithField("invalid_level", lvlStr).
			Warn("unknown tenant activity log level, defaulting to debug")
	}

	logBase.Logf(level, "tenant %s activity change: %s", tenant, activityType)
}

// analyzeActivityDelta compares the fresh counter snapshot against the
// previous one (o.activityTracker) and returns rebuilt usage maps.
// Building new maps, rather than mutating the old ones, automatically drops
// tenants that no longer appear in the current snapshot. Called with
// o.activityLock held (see observeActivity).
func (o *nodeWideMetricsObserver) analyzeActivityDelta(currentActivity activityByCollection) (total, reads, writes tenantactivity.ByCollection) {
	previousActivity := o.activityTracker
	if previousActivity == nil {
		previousActivity = make(activityByCollection)
	}

	now := time.Now()

	// create a new map, this way we will automatically drop anything that
	// doesn't appear in the new list anymore
	newUsageTotal := make(tenantactivity.ByCollection)
	newUsageReads := make(tenantactivity.ByCollection)
	newUsageWrites := make(tenantactivity.ByCollection)

	for class, current := range currentActivity {
		newUsageTotal[class] = make(tenantactivity.ByTenant)
		newUsageReads[class] = make(tenantactivity.ByTenant)
		newUsageWrites[class] = make(tenantactivity.ByTenant)

		for tenant, act := range current {
			if _, ok := previousActivity[class]; !ok {
				previousActivity[class] = make(activityByTenant)
			}

			previous, ok := previousActivity[class][tenant]
			if !ok {
				// this tenant didn't appear on the previous list, so we need to consider
				// it recently active
				newUsageTotal[class][tenant] = now

				// only track detailed value if the value is greater than the initial
				// value, otherwise we consider it just an activation without any user
				// activity
				if act.read > 1 {
					newUsageReads[class][tenant] = now
					o.logActivity(class, tenant, "read", act.read)
				}
				if act.write > 1 {
					newUsageWrites[class][tenant] = now
					o.logActivity(class, tenant, "write", act.write)
				}

				if act.read == 1 && act.write == 1 {
					// no specific activity, just an activation
					o.logActivity(class, tenant, "activation", 1)
				}
				continue
			}

			if act.read == previous.read && act.write == previous.write {
				// unchanged, we can copy the current state
				newUsageTotal[class][tenant] = o.lastTenantUsage[class][tenant]

				// only copy previous reads+writes if they existed before
				if lastRead, ok := o.lastTenantUsageReads[class][tenant]; ok {
					newUsageReads[class][tenant] = lastRead
				}
				if lastWrite, ok := o.lastTenantUsageWrites[class][tenant]; ok {
					newUsageWrites[class][tenant] = lastWrite
				}
			} else {
				// activity changed we need to update it
				newUsageTotal[class][tenant] = now
				if act.read > previous.read {
					newUsageReads[class][tenant] = now
					o.logActivity(class, tenant, "read", act.read)
				} else if lastRead, ok := o.lastTenantUsageReads[class][tenant]; ok {
					newUsageReads[class][tenant] = lastRead
				}

				if act.write > previous.write {
					newUsageWrites[class][tenant] = now
					o.logActivity(class, tenant, "write", act.write)
				} else if lastWrite, ok := o.lastTenantUsageWrites[class][tenant]; ok {
					newUsageWrites[class][tenant] = lastWrite
				}

			}
		}
	}

	return newUsageTotal, newUsageReads, newUsageWrites
}

// getCurrentActivity reads the live read/write activity counters of every
// shard of every partitioned (multi-tenant) index, holding the index
// read-lock for the duration of the walk. Non-partitioned indices are
// skipped entirely.
func (o *nodeWideMetricsObserver) getCurrentActivity() activityByCollection {
	o.db.indexLock.RLock()
	defer o.db.indexLock.RUnlock()

	current := make(activityByCollection)
	for _, index := range o.db.indices {
		if !index.partitioningEnabled {
			continue
		}
		cn := index.Config.ClassName.String()
		current[cn] = make(activityByTenant)
		index.ForEachShard(func(name string, shard ShardLike) error {
			act := activity{}
			act.read, act.write = shard.Activity()
			current[cn][name] = act
			return nil
		})
	}

	return current
}

// Usage returns the last computed tenant-usage map for the given filter.
// Safe to call on a nil receiver (returns an empty map), since requests can
// arrive before the DB — and this observer — are initialized.
func (o *nodeWideMetricsObserver) Usage(filter tenantactivity.UsageFilter) tenantactivity.ByCollection {
	if o == nil {
		// not loaded yet, requests could come in before the db is initialized yet
		// don't attempt to lock, as that would lead to a nil-pointer issue
		return tenantactivity.ByCollection{}
	}

	o.activityLock.Lock()
	defer o.activityLock.Unlock()

	switch filter {

	case tenantactivity.UsageFilterOnlyReads:
		return o.lastTenantUsageReads
	case tenantactivity.UsageFilterOnlyWrites:
		return o.lastTenantUsageWrites
	case tenantactivity.UsageFilterAll:
		return o.lastTenantUsage
	default:
		return o.lastTenantUsage
	}
}

// ----------------------------------------------------------------------------
// Vector dimensions tracking
// ----------------------------------------------------------------------------

// observeDimensionMetrics periodically collects vector dimension/segment
// metrics from the shards and publishes them, until o.shutdown is closed.
// It runs in its own goroutine, spawned by Start() only when
// TrackVectorDimensions is enabled; it is not started otherwise.
func (o *nodeWideMetricsObserver) observeDimensionMetrics() {
	interval := config.DefaultTrackVectorDimensionsInterval
	if o.db.config.TrackVectorDimensionsInterval > 0 { // duration must be > 0, or time.Timer will panic
		interval = o.db.config.TrackVectorDimensionsInterval
	}

	// This is a low-priority background process, which is not time-sensitive.
	// Some downstream calls require a context, so we create one, but we needn't
	// manage it beyond making sure it doesn't leak.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	o.publishVectorMetrics(ctx)

	tick := time.NewTicker(interval)
	defer tick.Stop()

	for {
		select {
		case <-o.shutdown:
			return
		case <-tick.C:
			o.publishVectorMetrics(ctx)
		}
	}
}

// publishVectorMetrics walks the loaded shards of every open index, sums
// their dimension metrics, and publishes them either per shard or as one
// node-wide aggregate, depending on whether metric grouping is enabled.
func (o *nodeWideMetricsObserver) publishVectorMetrics(ctx context.Context) {
	// We're a low-priority process, copy the index map to avoid blocking others.
	// No new indices can be added while we're holding the lock anyways.
	o.db.indexLock.RLock()
	indices := make(map[string]*Index, len(o.db.indices))
	maps.Copy(indices, o.db.indices)
	o.db.indexLock.RUnlock()

	var total DimensionMetrics

	start := time.Now()
	defer func() {
		took := time.Since(start)
		o.db.logger.WithFields(logrus.Fields{
			"action":           "observe_node_wide_metrics",
			"took":             took,
			"total_dimensions": total.Uncompressed,
			"total_segments":   total.Compressed,
			"publish_grouped":  o.db.promMetrics.Group,
		}).Debug("published vector metrics")
	}()

	for _, index := range indices {
		index.closeLock.RLock()
		closed := index.closed
		index.closeLock.RUnlock()
		if closed {
			continue
		}

		className := index.Config.ClassName.String()

		// Avoid loading cold shards, as it may create I/O spikes.
		index.ForEachLoadedShard(func(shardName string, sl ShardLike) error {
			dim := calculateShardDimensionMetrics(ctx, sl)
			total = total.Add(dim)

			// Report metrics per-shard if grouping is disabled.
			if !o.db.promMetrics.Group {
				o.sendVectorDimensions(className, shardName, dim)
			}
			return nil
		})
	}

	// Report aggregate metrics for the node if grouping is enabled.
	if o.db.promMetrics.Group {
		o.sendVectorDimensions("n/a", "n/a", total)
	}
}

// Set vector_dimensions=DimensionMetrics.Uncompressed and vector_segments=DimensionMetrics.Compressed gauges.
// Errors from GetMetricWithLabelValues are deliberately ignored: a failed
// lookup just means this sample is skipped.
func (o *nodeWideMetricsObserver) sendVectorDimensions(className, shardName string, dm DimensionMetrics) {
	if g, err := o.db.promMetrics.VectorDimensionsSum.GetMetricWithLabelValues(className, shardName); err == nil {
		g.Set(float64(dm.Uncompressed))
	}

	if g, err := o.db.promMetrics.VectorSegmentsSum.GetMetricWithLabelValues(className, shardName); err == nil {
		g.Set(float64(dm.Compressed))
	}
}

// Calculate total vector dimensions for all vector indices in the shard's parent Index.
+func calculateShardDimensionMetrics(ctx context.Context, sl ShardLike) DimensionMetrics { + var total DimensionMetrics + for name, config := range sl.Index().GetVectorIndexConfigs() { + dim := calcVectorDimensionMetrics(ctx, sl, name, config) + total = total.Add(dim) + } + return total +} + +// Calculate vector dimensions for a vector index in a shard. +func calcVectorDimensionMetrics(ctx context.Context, sl ShardLike, vecName string, vecCfg schemaConfig.VectorIndexConfig) DimensionMetrics { + switch category, segments := GetDimensionCategory(vecCfg); category { + case DimensionCategoryPQ: + return DimensionMetrics{Uncompressed: 0, Compressed: sl.QuantizedDimensions(ctx, vecName, segments)} + case DimensionCategoryBQ: + // BQ: 1 bit per dimension, packed into uint64 blocks (8 bytes per 64 dimensions) + // [1..64] dimensions -> 8 bytes, [65..128] dimensions -> 16 bytes, etc. + // Roundup is required because BQ packs bits into uint64 blocks - you can't have + // a partial uint64 block. Even 1 dimension needs a full 8-byte uint64 block. 
+ count, _ := sl.Dimensions(ctx, vecName) + bytes := (count + 63) / 64 * 8 // Round up to next uint64 block, then multiply by 8 bytes + return DimensionMetrics{Uncompressed: 0, Compressed: bytes} + case DimensionCategoryRQ: + // RQ: bits per dimension, where bits can be 1 or 8 + // For bits=1: equivalent to BQ (1 bit per dimension, packed in uint64 blocks) + // For bits=8: 8 bits per dimension (1 byte per dimension) + count, _ := sl.Dimensions(ctx, vecName) + bits := enthnsw.GetRQBits(vecCfg) + var bytes int + if bits == 1 { + // bits=1: same as BQ - 1 bit per dimension, packed in uint64 blocks + bytes = (count + 63) / 64 * 8 + } else { + // bits=8: 8 bits per dimension (1 byte per dimension) + bytes = count + } + return DimensionMetrics{Uncompressed: bytes, Compressed: 0} + default: + count, _ := sl.Dimensions(ctx, vecName) + return DimensionMetrics{Uncompressed: count, Compressed: 0} + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/node_wide_metrics_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/node_wide_metrics_test.go new file mode 100644 index 0000000000000000000000000000000000000000..74c7c7ab98bd8d1462289e823a9c45c3742390e4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/node_wide_metrics_test.go @@ -0,0 +1,145 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "math" + "testing" + "time" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/entities/tenantactivity" +) + +func TestShardActivity(t *testing.T) { + logger, _ := test.NewNullLogger() + db := &DB{ + logger: logger, + indices: map[string]*Index{ + "Col1": { + Config: IndexConfig{ + ClassName: "Col1", + ReplicationFactor: 1, + }, + closingCtx: context.Background(), + partitioningEnabled: true, + shards: shardMap{}, + }, + "NonMT": { + Config: IndexConfig{ + ClassName: "NonMT", + ReplicationFactor: 1, + }, + closingCtx: context.Background(), + partitioningEnabled: false, + shards: shardMap{}, + }, + }, + } + + db.indices["Col1"].shards.Store("t1_overflow", &Shard{}) + db.indices["Col1"].shards.Store("t2_only_reads", &Shard{}) + db.indices["Col1"].shards.Store("t3_no_reads_and_writes", &Shard{}) + db.indices["Col1"].shards.Store("t4_only_writes", &Shard{}) + db.indices["Col1"].shards.Store("t5_reads_and_writes", &Shard{}) + o := newNodeWideMetricsObserver(db) + + o.observeActivity() + + // show activity on two tenants + time.Sleep(10 * time.Millisecond) + db.indices["Col1"].shards.Load("t1_overflow").(*Shard).activityTrackerRead.Store(math.MaxInt32) + db.indices["Col1"].shards.Load("t2_only_reads").(*Shard).activityTrackerRead.Add(1) + db.indices["Col1"].shards.Load("t4_only_writes").(*Shard).activityTrackerWrite.Add(1) + db.indices["Col1"].shards.Load("t5_reads_and_writes").(*Shard).activityTrackerRead.Add(1) + db.indices["Col1"].shards.Load("t5_reads_and_writes").(*Shard).activityTrackerWrite.Add(1) + + // observe to update timestamps + o.observeActivity() + + // show activity again on one tenant (should now have the latest timestamp + time.Sleep(10 * time.Millisecond) + // previous value was math.MaxInt32, so this counter will overflow now. 
+ // Assert that everything still works as expected + db.indices["Col1"].shards.Load("t1_overflow").(*Shard).activityTrackerRead.Add(1) + o.observeActivity() + + t.Run("total usage", func(t *testing.T) { + usage := o.Usage(tenantactivity.UsageFilterAll) + _, ok := usage["NonMT"] + assert.False(t, ok, "only MT cols should be contained") + + col, ok := usage["Col1"] + require.True(t, ok, "MT col should be contained") + require.Len(t, col, 5, "all 5 tenants should be contained") + assert.True(t, col["t1_overflow"].After(col["t2_only_reads"]), "t1 should have a newer timestamp than t2") + assert.True(t, col["t2_only_reads"].After(col["t3_no_reads_and_writes"]), "t2 should have a newer timestamp than t3") + assert.True(t, col["t4_only_writes"].After(col["t3_no_reads_and_writes"]), "t4 should have a newer timestamp than t3") + assert.True(t, col["t5_reads_and_writes"].After(col["t3_no_reads_and_writes"]), "t4 should have a newer timestamp than t3") + }) + + t.Run("display only reads", func(t *testing.T) { + usage := o.Usage(tenantactivity.UsageFilterOnlyReads) + _, ok := usage["NonMT"] + assert.False(t, ok, "only MT cols should be contained") + + col, ok := usage["Col1"] + require.True(t, ok, "MT col should be contained") + require.Len(t, col, 3, "all tenants which received reads should be contained") + + // tenants with reads + _, ok = col["t1_overflow"] + assert.True(t, ok, "t1 should be contained") + _, ok = col["t2_only_reads"] + assert.True(t, ok, "t2 should be contained") + _, ok = col["t5_reads_and_writes"] + assert.True(t, ok, "t5 should be contained") + + // tenants without reads + _, ok = col["t3_no_reads_and_writes"] + assert.False(t, ok, "t3 should not be contained") + _, ok = col["t4_only_writes"] + assert.False(t, ok, "t4 should not be contained") + }) + + t.Run("display only writes", func(t *testing.T) { + usage := o.Usage(tenantactivity.UsageFilterOnlyWrites) + _, ok := usage["NonMT"] + assert.False(t, ok, "only MT cols should be contained") + + col, ok 
:= usage["Col1"] + require.True(t, ok, "MT col should be contained") + require.Len(t, col, 2, "all tenants which received reads should be contained") + + // tenants with writes + _, ok = col["t4_only_writes"] + assert.True(t, ok, "t4 should be contained") + // tenants with writes + _, ok = col["t5_reads_and_writes"] + assert.True(t, ok, "t5 should be contained") + + // write into t5 again + db.indices["Col1"].shards.Load("t5_reads_and_writes").(*Shard).activityTrackerWrite.Add(1) + time.Sleep(10 * time.Millisecond) + o.observeActivity() + + usage = o.Usage(tenantactivity.UsageFilterOnlyWrites) + col, ok = usage["Col1"] + require.True(t, ok, "MT col should be contained") + + assert.True(t, col["t5_reads_and_writes"].After(col["t4_only_writes"]), "t5 should have a newer timestamp than t4") + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/nodes.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/nodes.go new file mode 100644 index 0000000000000000000000000000000000000000..a16c9613175aa73b63fa4d8c5bb28603c4c963b1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/nodes.go @@ -0,0 +1,383 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "sort" + + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/pkg/errors" + + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/verbosity" +) + +// GetNodeStatus returns the status of all Weaviate nodes. 
+func (db *DB) GetNodeStatus(ctx context.Context, className, shardName string, verbosity string) ([]*models.NodeStatus, error) { + nodeStatuses := make([]*models.NodeStatus, len(db.schemaGetter.Nodes())) + eg := enterrors.NewErrorGroupWrapper(db.logger) + eg.SetLimit(_NUMCPU) + for i, nodeName := range db.schemaGetter.Nodes() { + i, nodeName := i, nodeName + eg.Go(func() error { + status, err := db.GetOneNodeStatus(ctx, nodeName, className, shardName, verbosity) + if err != nil { + return fmt.Errorf("node: %v: %w", nodeName, err) + } + if status.Status == nil { + return enterrors.NewErrNotFound( + fmt.Errorf("class %q not found", className)) + } + nodeStatuses[i] = status + + return nil + }, nodeName) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + sort.Slice(nodeStatuses, func(i, j int) bool { + return nodeStatuses[i].Name < nodeStatuses[j].Name + }) + return nodeStatuses, nil +} + +func (db *DB) GetOneNodeStatus(ctx context.Context, nodeName, className, shardName, output string) (*models.NodeStatus, error) { + if db.schemaGetter.NodeName() == nodeName { + return db.LocalNodeStatus(ctx, className, shardName, output), nil + } + status, err := db.remoteNode.GetNodeStatus(ctx, nodeName, className, shardName, output) + if err != nil { + var errSendHttpRequest *enterrors.ErrSendHttpRequest + switch { + case errors.As(err, &errSendHttpRequest): + if errors.Is(errSendHttpRequest.Unwrap(), context.DeadlineExceeded) { + nodeTimeout := models.NodeStatusStatusTIMEOUT + return &models.NodeStatus{Name: nodeName, Status: &nodeTimeout}, nil + } + + nodeUnavailable := models.NodeStatusStatusUNAVAILABLE + return &models.NodeStatus{Name: nodeName, Status: &nodeUnavailable}, nil + case errors.As(err, &enterrors.ErrOpenHttpRequest{}): + nodeUnavailable := models.NodeStatusStatusUNAVAILABLE + return &models.NodeStatus{Name: nodeName, Status: &nodeUnavailable}, nil + default: + return nil, err + } + } + return status, nil +} + +// IncomingGetNodeStatus returns the 
index if it exists or nil if it doesn't +func (db *DB) IncomingGetNodeStatus(ctx context.Context, className, shardName, verbosity string) (*models.NodeStatus, error) { + return db.LocalNodeStatus(ctx, className, shardName, verbosity), nil +} + +func (db *DB) LocalNodeStatus(ctx context.Context, className, shardName, output string) *models.NodeStatus { + if className != "" && db.GetIndex(schema.ClassName(className)) == nil { + // class not found + return &models.NodeStatus{} + } + + var ( + shards []*models.NodeShardStatus + nodeStats *models.NodeStats + ) + if output == verbosity.OutputVerbose { + nodeStats = db.localNodeShardStats(ctx, &shards, className, shardName) + } + + clusterHealthStatus := models.NodeStatusStatusHEALTHY + if db.schemaGetter.ClusterHealthScore() > 0 { + clusterHealthStatus = models.NodeStatusStatusUNHEALTHY + } + + status := models.NodeStatus{ + Name: db.schemaGetter.NodeName(), + Version: db.config.ServerVersion, + GitHash: db.config.GitHash, + Status: &clusterHealthStatus, + Shards: shards, + Stats: nodeStats, + BatchStats: db.localNodeBatchStats(), + } + + return &status +} + +func (db *DB) localNodeShardStats(ctx context.Context, + status *[]*models.NodeShardStatus, className, shardName string, +) *models.NodeStats { + var objectCount, shardCount int64 + if className == "" { + db.indexLock.RLock() + defer db.indexLock.RUnlock() + for name, idx := range db.indices { + if idx == nil { + db.logger.WithField("action", "local_node_status_for_all"). + Warningf("no resource found for index %q", name) + continue + } + objects, shards := idx.getShardsNodeStatus(ctx, status, shardName) + objectCount, shardCount = objectCount+objects, shardCount+shards + } + return &models.NodeStats{ + ObjectCount: objectCount, + ShardCount: shardCount, + } + } + idx := db.GetIndex(schema.ClassName(className)) + if idx == nil { + db.logger.WithField("action", "local_node_status_for_class"). 
+ Warningf("no index found for class %q", className) + return nil + } + objectCount, shardCount = idx.getShardsNodeStatus(ctx, status, shardName) + return &models.NodeStats{ + ObjectCount: objectCount, + ShardCount: shardCount, + } +} + +func (db *DB) localNodeBatchStats() *models.BatchStats { + rate := db.ratePerSecond.Load() + stats := &models.BatchStats{RatePerSecond: rate} + if !asyncEnabled() { + ql := int64(len(db.jobQueueCh)) + stats.QueueLength = &ql + } + return stats +} + +// getShardsNodeStatus modifies the status slice to include the shard statuses. +// If shardName is provided, it will only get the status of the specific shard. +// Otherwise, it will get the status of all shards. +// Returns the total object count and the number of shards. +// If an error occurs, the status slice may have been modified and this method +// may return a partial result. +func (i *Index) getShardsNodeStatus(ctx context.Context, + status *[]*models.NodeShardStatus, shardName string, +) (totalCount, shardCount int64) { + i.ForEachShard(func(name string, shard ShardLike) error { + if err := ctx.Err(); err != nil { + return err + } + // if shardName is provided, only return the status for the specified shard + if shardName != "" && shardName != name { + return nil + } + + // Don't force load a lazy shard to get nodes status + className := i.Config.ClassName.String() + if lazy, ok := shard.(*LazyLoadShard); ok { + if !lazy.isLoaded() { + numberOfReplicas, replicationFactor := getShardReplicationDetails(i, shard.Name()) + shardStatus := &models.NodeShardStatus{ + Name: name, + Class: className, + VectorIndexingStatus: shard.GetStatus().String(), + Loaded: false, + ReplicationFactor: replicationFactor, + NumberOfReplicas: numberOfReplicas, + Compressed: isAnyVectorIndexCompressed(shard), + } + *status = append(*status, shardStatus) + shardCount++ + return nil + } + } + + objectCount, err := shard.ObjectCountAsync(ctx) + if err != nil { + i.logger.Warnf("error while getting object 
count for shard %s: %w", shard.Name(), err) + } + + totalCount += int64(objectCount) + + // FIXME stats of target vectors + var queueLen int64 + _ = shard.ForEachVectorQueue(func(_ string, queue *VectorIndexQueue) error { + queueLen += queue.Size() + return nil + }) + + var compressed bool + _ = shard.ForEachVectorIndex(func(_ string, index VectorIndex) error { + compressed = compressed || index.Compressed() + return nil + }) + + numberOfReplicas, replicationFactor := getShardReplicationDetails(i, shard.Name()) + if err != nil { + i.logger.Errorf("error while getting number of replicas for shard %s: %w", shard.Name(), err) + } + + shardStatus := &models.NodeShardStatus{ + Name: name, + Class: className, + ObjectCount: objectCount, + VectorIndexingStatus: shard.GetStatus().String(), + VectorQueueLength: queueLen, + Compressed: isAnyVectorIndexCompressed(shard), + Loaded: true, + AsyncReplicationStatus: shard.getAsyncReplicationStats(ctx), + ReplicationFactor: replicationFactor, + NumberOfReplicas: numberOfReplicas, + } + *status = append(*status, shardStatus) + shardCount++ + return nil + }) + return +} + +func getShardReplicationDetails(i *Index, shardName string) (int64, int64) { + var numberOfReplicas int64 + var replicationFactor int64 + class := i.Config.ClassName.String() + err := i.schemaReader.Read(class, func(class *models.Class, state *sharding.State) error { + var err error + if state == nil { + return fmt.Errorf("unable to retrieve sharding state for class %s", class.Class) + } + replicationFactor = state.ReplicationFactor + numberOfReplicas, err = state.NumberOfReplicas(shardName) + if err != nil { + return fmt.Errorf("unable to retrieve number of replicas for class %s: %w", class.Class, err) + } + return nil + }) + if err != nil { + i.logger.Errorf("error while getting number of replicas for shard %s: %v", shardName, err) + } + return numberOfReplicas, replicationFactor +} + +func isAnyVectorIndexCompressed(shard ShardLike) bool { + var compressed bool 
+ shard.ForEachVectorIndex(func(_ string, index VectorIndex) error { + compressed = compressed || index.Compressed() + return nil + }) + return compressed +} + +func (db *DB) GetNodeStatistics(ctx context.Context) ([]*models.Statistics, error) { + nodeStatistics := make([]*models.Statistics, len(db.schemaGetter.Nodes())) + eg := enterrors.NewErrorGroupWrapper(db.logger) + eg.SetLimit(_NUMCPU) + for i, nodeName := range db.schemaGetter.Nodes() { + i, nodeName := i, nodeName + eg.Go(func() error { + statistics, err := db.getNodeStatistics(ctx, nodeName) + if err != nil { + return fmt.Errorf("node: %v: %w", nodeName, err) + } + nodeStatistics[i] = statistics + + return nil + }, nodeName) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + sort.Slice(nodeStatistics, func(i, j int) bool { + return nodeStatistics[i].Name < nodeStatistics[j].Name + }) + return nodeStatistics, nil +} + +func (db *DB) IncomingGetNodeStatistics() (*models.Statistics, error) { + return db.localNodeStatistics() +} + +func (db *DB) localNodeStatistics() (*models.Statistics, error) { + stats := db.schemaGetter.Statistics() + var raft *models.RaftStatistics + raftStats, ok := stats["raft"].(map[string]string) + if ok { + raft = &models.RaftStatistics{ + AppliedIndex: raftStats["applied_index"], + CommitIndex: raftStats["commit_index"], + FsmPending: raftStats["fsm_pending"], + LastContact: raftStats["last_contact"], + LastLogIndex: raftStats["last_log_index"], + LastLogTerm: raftStats["last_log_term"], + LastSnapshotIndex: raftStats["last_snapshot_index"], + LastSnapshotTerm: raftStats["last_snapshot_term"], + LatestConfiguration: stats["raft_latest_configuration_servers"], + LatestConfigurationIndex: raftStats["latest_configuration_index"], + NumPeers: raftStats["num_peers"], + ProtocolVersion: raftStats["protocol_version"], + ProtocolVersionMax: raftStats["protocol_version_max"], + ProtocolVersionMin: raftStats["protocol_version_min"], + SnapshotVersionMax: 
raftStats["snapshot_version_max"], + SnapshotVersionMin: raftStats["snapshot_version_min"], + State: raftStats["state"], + Term: raftStats["term"], + } + } + status := models.StatisticsStatusHEALTHY + if db.schemaGetter.ClusterHealthScore() > 0 { + status = models.StatisticsStatusUNHEALTHY + } + statistics := &models.Statistics{ + Status: &status, + Name: stats["id"].(string), + LeaderAddress: stats["leader_address"], + LeaderID: stats["leader_id"], + Ready: stats["ready"].(bool), + IsVoter: stats["is_voter"].(bool), + Open: stats["open"].(bool), + Bootstrapped: stats["bootstrapped"].(bool), + InitialLastAppliedIndex: stats["last_store_log_applied_index"].(uint64), + DbLoaded: stats["db_loaded"].(bool), + Candidates: stats["candidates"], + Raft: raft, + } + return statistics, nil +} + +func (db *DB) getNodeStatistics(ctx context.Context, nodeName string) (*models.Statistics, error) { + if db.schemaGetter.NodeName() == nodeName { + return db.localNodeStatistics() + } + statistics, err := db.remoteNode.GetStatistics(ctx, nodeName) + if err != nil { + var errSendHttpRequest *enterrors.ErrSendHttpRequest + switch { + case errors.As(err, &errSendHttpRequest): + if errors.Is(errSendHttpRequest.Unwrap(), context.DeadlineExceeded) { + nodeTimeout := models.StatisticsStatusTIMEOUT + return &models.Statistics{Name: nodeName, Status: &nodeTimeout}, nil + } + + nodeUnavailable := models.StatisticsStatusUNAVAILABLE + return &models.Statistics{Name: nodeName, Status: &nodeUnavailable}, nil + case errors.As(err, &enterrors.ErrOpenHttpRequest{}): + nodeUnavailable := models.StatisticsStatusUNAVAILABLE + return &models.Statistics{Name: nodeName, Status: &nodeUnavailable}, nil + default: + return nil, err + } + } + return statistics, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/nodes_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/nodes_integration_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..ea45abed28054e8acb51b24486f3af90c4d8950d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/nodes_integration_test.go @@ -0,0 +1,167 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package db + +import ( + "context" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/entities/verbosity" + "github.com/weaviate/weaviate/usecases/objects" +) + +func TestNodesAPI_Journey(t *testing.T) { + dirName := t.TempDir() + + logger := logrus.New() + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: 
nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + ServerVersion: "server-version", + GitHash: "git-hash", + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + + defer repo.Shutdown(context.Background()) + migrator := NewMigrator(repo, logger, "node1") + + // check nodes api response on empty DB + nodeStatues, err := repo.GetNodeStatus(context.Background(), "", "", verbosity.OutputVerbose) + require.Nil(t, err) + require.NotNil(t, nodeStatues) + + require.Len(t, nodeStatues, 1) + nodeStatus := nodeStatues[0] + assert.NotNil(t, nodeStatus) + assert.Equal(t, "node1", nodeStatus.Name) + assert.Equal(t, "server-version", nodeStatus.Version) + assert.Equal(t, "git-hash", nodeStatus.GitHash) + assert.Len(t, nodeStatus.Shards, 0) + assert.Equal(t, int64(0), nodeStatus.Stats.ObjectCount) + assert.Equal(t, int64(0), nodeStatus.Stats.ShardCount) + + // import 2 objects + class := &models.Class{ + Class: "ClassNodesAPI", + 
VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Properties: []*models.Property{ + { + Name: "stringProp", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + } + + require.Nil(t, + migrator.AddClass(context.Background(), class)) + + schemaGetter.schema.Objects = &models.Schema{ + Classes: []*models.Class{class}, + } + + batch := objects.BatchObjects{ + objects.BatchObject{ + OriginalIndex: 0, + Err: nil, + Object: &models.Object{ + Class: "ClassNodesAPI", + Properties: map[string]interface{}{ + "stringProp": "first element", + }, + ID: "8d5a3aa2-3c8d-4589-9ae1-3f638f506970", + }, + UUID: "8d5a3aa2-3c8d-4589-9ae1-3f638f506970", + }, + objects.BatchObject{ + OriginalIndex: 1, + Err: nil, + Object: &models.Object{ + Class: "ClassNodesAPI", + Properties: map[string]interface{}{ + "stringProp": "second element", + }, + ID: "86a380e9-cb60-4b2a-bc48-51f52acd72d6", + }, + UUID: "86a380e9-cb60-4b2a-bc48-51f52acd72d6", + }, + } + batchRes, err := repo.BatchPutObjects(context.Background(), batch, nil, 0) + require.Nil(t, err) + + assert.Nil(t, batchRes[0].Err) + assert.Nil(t, batchRes[1].Err) + + // check nodes api after importing 2 objects to DB + nodeStatues, err = repo.GetNodeStatus(context.Background(), "", "", verbosity.OutputVerbose) + require.Nil(t, err) + require.NotNil(t, nodeStatues) + + require.Len(t, nodeStatues, 1) + nodeStatus = nodeStatues[0] + assert.NotNil(t, nodeStatus) + assert.Equal(t, "node1", nodeStatus.Name) + assert.Equal(t, "server-version", nodeStatus.Version) + assert.Equal(t, "git-hash", nodeStatus.GitHash) + assert.Len(t, nodeStatus.Shards, 1) + assert.Equal(t, "ClassNodesAPI", nodeStatus.Shards[0].Class) + assert.True(t, len(nodeStatus.Shards[0].Name) > 0) + // a previous version of this test made assertions on object counts, + // however with object count becoming async, we can no longer make exact + // assertions here. 
See https://github.com/weaviate/weaviate/issues/4193 + // for details. + assert.Equal(t, "READY", nodeStatus.Shards[0].VectorIndexingStatus) + assert.Equal(t, int64(0), nodeStatus.Shards[0].VectorQueueLength) + assert.Equal(t, int64(1), nodeStatus.Stats.ShardCount) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/replication.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/replication.go new file mode 100644 index 0000000000000000000000000000000000000000..af749972be6632b60e0592234bfbc35a3b272ea1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/replication.go @@ -0,0 +1,895 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/backup" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/lsmkv" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/multi" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storagestate" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/file" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/replica" + "github.com/weaviate/weaviate/usecases/replica/hashtree" +) + +type Replicator interface { + ReplicateObject(ctx context.Context, shardName, requestID string, + object *storobj.Object) replica.SimpleResponse + 
ReplicateObjects(ctx context.Context, shardName, requestID string, + objects []*storobj.Object) replica.SimpleResponse + ReplicateUpdate(ctx context.Context, shard, requestID string, + doc *objects.MergeDocument) replica.SimpleResponse + ReplicateDeletion(ctx context.Context, shardName, requestID string, + uuid strfmt.UUID, deletionTime time.Time) replica.SimpleResponse + ReplicateDeletions(ctx context.Context, shardName, requestID string, + uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64) replica.SimpleResponse + ReplicateReferences(ctx context.Context, shard, requestID string, + refs []objects.BatchReference) replica.SimpleResponse + CommitReplication(shard, + requestID string) interface{} + AbortReplication(shardName, + requestID string) interface{} +} + +const tmpCopyExtension = ".copy.tmp" // indexcount and proplen temporary copy + +func (db *DB) ReplicateObject(ctx context.Context, class, + shard, requestID string, object *storobj.Object, +) replica.SimpleResponse { + index, pr := db.replicatedIndex(class) + if pr != nil { + return *pr + } + + return index.ReplicateObject(ctx, shard, requestID, object) +} + +func (db *DB) ReplicateObjects(ctx context.Context, class, + shard, requestID string, objects []*storobj.Object, schemaVersion uint64, +) replica.SimpleResponse { + index, pr := db.replicatedIndex(class) + if pr != nil { + return *pr + } + + return index.ReplicateObjects(ctx, shard, requestID, objects, schemaVersion) +} + +func (db *DB) ReplicateUpdate(ctx context.Context, class, + shard, requestID string, mergeDoc *objects.MergeDocument, +) replica.SimpleResponse { + index, pr := db.replicatedIndex(class) + if pr != nil { + return *pr + } + + return index.ReplicateUpdate(ctx, shard, requestID, mergeDoc) +} + +func (db *DB) ReplicateDeletion(ctx context.Context, class, + shard, requestID string, uuid strfmt.UUID, deletionTime time.Time, +) replica.SimpleResponse { + index, pr := db.replicatedIndex(class) + if pr != nil { + 
return *pr + } + + return index.ReplicateDeletion(ctx, shard, requestID, uuid, deletionTime) +} + +func (db *DB) ReplicateDeletions(ctx context.Context, class, + shard, requestID string, uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64, +) replica.SimpleResponse { + index, pr := db.replicatedIndex(class) + if pr != nil { + return *pr + } + + return index.ReplicateDeletions(ctx, shard, requestID, uuids, deletionTime, dryRun, schemaVersion) +} + +func (db *DB) ReplicateReferences(ctx context.Context, class, + shard, requestID string, refs []objects.BatchReference, +) replica.SimpleResponse { + index, pr := db.replicatedIndex(class) + if pr != nil { + return *pr + } + + return index.ReplicateReferences(ctx, shard, requestID, refs) +} + +func (db *DB) CommitReplication(class, + shard, requestID string, +) interface{} { + index, pr := db.replicatedIndex(class) + if pr != nil { + return *pr + } + + return index.CommitReplication(shard, requestID) +} + +func (db *DB) AbortReplication(class, + shard, requestID string, +) interface{} { + index, pr := db.replicatedIndex(class) + if pr != nil { + return *pr + } + + return index.AbortReplication(shard, requestID) +} + +func (db *DB) replicatedIndex(name string) (idx *Index, resp *replica.SimpleResponse) { + if !db.StartupComplete() { + return nil, &replica.SimpleResponse{Errors: []replica.Error{ + *replica.NewError(replica.StatusNotReady, name), + }} + } + + if idx = db.GetIndex(schema.ClassName(name)); idx == nil { + return nil, &replica.SimpleResponse{Errors: []replica.Error{ + *replica.NewError(replica.StatusClassNotFound, name), + }} + } + return +} + +func (i *Index) writableShard(name string) (ShardLike, func(), *replica.SimpleResponse) { + localShard, release, err := i.getOrInitShard(context.Background(), name) + if err != nil { + return nil, func() {}, &replica.SimpleResponse{Errors: []replica.Error{ + {Code: replica.StatusShardNotFound, Msg: name}, + }} + } + if localShard.isReadOnly() != 
nil { + release() + + return nil, func() {}, &replica.SimpleResponse{Errors: []replica.Error{{ + Code: replica.StatusReadOnly, Msg: name, + }}} + } + return localShard, release, nil +} + +func (i *Index) ReplicateObject(ctx context.Context, shard, requestID string, object *storobj.Object) replica.SimpleResponse { + localShard, release, pr := i.writableShard(shard) + if pr != nil { + return *pr + } + + defer release() + + return localShard.preparePutObject(ctx, requestID, object) +} + +func (i *Index) ReplicateUpdate(ctx context.Context, shard, requestID string, doc *objects.MergeDocument) replica.SimpleResponse { + localShard, release, pr := i.writableShard(shard) + if pr != nil { + return *pr + } + + defer release() + + return localShard.prepareMergeObject(ctx, requestID, doc) +} + +func (i *Index) ReplicateDeletion(ctx context.Context, shard, requestID string, uuid strfmt.UUID, deletionTime time.Time) replica.SimpleResponse { + localShard, release, pr := i.writableShard(shard) + if pr != nil { + return *pr + } + + defer release() + + return localShard.prepareDeleteObject(ctx, requestID, uuid, deletionTime) +} + +func (i *Index) ReplicateObjects(ctx context.Context, shard, requestID string, objects []*storobj.Object, schemaVersion uint64) replica.SimpleResponse { + localShard, release, pr := i.writableShard(shard) + if pr != nil { + return *pr + } + + defer release() + + return localShard.preparePutObjects(ctx, requestID, objects) +} + +func (i *Index) ReplicateDeletions(ctx context.Context, shard, requestID string, + uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64, +) replica.SimpleResponse { + localShard, release, pr := i.writableShard(shard) + if pr != nil { + return *pr + } + + defer release() + + return localShard.prepareDeleteObjects(ctx, requestID, uuids, deletionTime, dryRun) +} + +func (i *Index) ReplicateReferences(ctx context.Context, shard, requestID string, refs []objects.BatchReference) replica.SimpleResponse { + 
localShard, release, pr := i.writableShard(shard) + if pr != nil { + return *pr + } + + defer release() + + return localShard.prepareAddReferences(ctx, requestID, refs) +} + +func (i *Index) CommitReplication(shard, requestID string) interface{} { + localShard, release, err := i.getOrInitShard(context.Background(), shard) + if err != nil { + return replica.SimpleResponse{Errors: []replica.Error{ + {Code: replica.StatusShardNotFound, Msg: shard, Err: err}, + }} + } + + defer release() + + return localShard.commitReplication(context.Background(), requestID, &i.shardTransferMutex) +} + +func (i *Index) AbortReplication(shard, requestID string) interface{} { + localShard, release, err := i.getOrInitShard(context.Background(), shard) + if err != nil { + return replica.SimpleResponse{Errors: []replica.Error{ + {Code: replica.StatusShardNotFound, Msg: shard, Err: err}, + }} + } + + defer release() + + return localShard.abortReplication(context.Background(), requestID) +} + +func (i *Index) IncomingFilePutter(ctx context.Context, shardName, + filePath string, +) (io.WriteCloser, error) { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return nil, fmt.Errorf("incoming file putter get shard %s err: %w", shardName, err) + } + if shard == nil { + return nil, fmt.Errorf("incoming file putter get shard %s: shard not found", shardName) + } + defer release() + + return shard.filePutter(ctx, filePath) +} + +func (i *Index) IncomingCreateShard(ctx context.Context, className string, shardName string) error { + if err := i.initLocalShard(ctx, shardName); err != nil { + return fmt.Errorf("incoming create shard: %w", err) + } + return nil +} + +func (i *Index) IncomingReinitShard(ctx context.Context, shardName string) error { + err := func() error { + i.closeLock.Lock() + defer i.closeLock.Unlock() + + if i.closed { + return errAlreadyShutdown + } + + i.shardCreateLocks.Lock(shardName) + defer i.shardCreateLocks.Unlock(shardName) + + shard, ok := 
i.shards.LoadAndDelete(shardName) + if ok { + if err := shard.Shutdown(ctx); err != nil { + if !errors.Is(err, errAlreadyShutdown) { + return err + } + } + } + + return nil + }() + if err != nil { + return err + } + + return i.initLocalShard(ctx, shardName) +} + +// IncomingPauseFileActivity pauses the background processes of the specified shard. +// You should explicitly call resumeMaintenanceCycles to resume the background processes after you don't +// need the returned files to stay immutable anymore. +func (i *Index) IncomingPauseFileActivity(ctx context.Context, + shardName string, +) error { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return fmt.Errorf("incoming pause file activity get shard %s err: %w", shardName, err) + } + if shard == nil { + return fmt.Errorf("incoming pause file activity get shard %s: shard not found", shardName) + } + defer release() + + err = shard.HaltForTransfer(ctx, false, i.Config.TransferInactivityTimeout) + if err != nil { + return fmt.Errorf("shard %q could not be halted for transfer: %w", shardName, err) + } + + return nil +} + +// IncomingResumeFileActivity resumes the background processes of the specified shard. +func (i *Index) IncomingResumeFileActivity(ctx context.Context, + shardName string, +) error { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return fmt.Errorf("incoming resume file activity get shard %s err: %w", shardName, err) + } + if shard == nil { + return fmt.Errorf("incoming resume file activity get shard %s: shard not found", shardName) + } + defer release() + + err = shard.resumeMaintenanceCycles(ctx) + if err != nil { + return fmt.Errorf("shard %q could not be resumed after transfer: %w", shardName, err) + } + + return nil +} + +// IncomingListFiles returns a list of files that can be used to get the +// shard data at the time the pause was requested. 
+// You should explicitly call resumeMaintenanceCycles to resume the background processes after you don't +// need the returned files to stay immutable anymore. +func (i *Index) IncomingListFiles(ctx context.Context, + shardName string, +) ([]string, error) { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return nil, fmt.Errorf("incoming list files get shard %s: %w", shardName, err) + } + if shard == nil { + return nil, fmt.Errorf("incoming list files get shard is nil: %s", shardName) + } + defer release() + + sd := backup.ShardDescriptor{Name: shardName} + + // prevent writing into the index during collection of metadata + i.shardTransferMutex.Lock() + defer i.shardTransferMutex.Unlock() + + // flushing memtable before gathering the files to prevent the inclusion of a partially written file + if err = shard.Store().FlushMemtables(ctx); err != nil { + return nil, fmt.Errorf("flush memtables: %w", err) + } + + if err := shard.ListBackupFiles(ctx, &sd); err != nil { + return nil, fmt.Errorf("shard %q could not list backup files: %w", shardName, err) + } + + err = i.tmpCopy(shard.Counter().FileName(), sd.DocIDCounter) + if err != nil { + return nil, err + } + + err = i.tmpCopy(shard.GetPropertyLengthTracker().FileName(), sd.PropLengthTracker) + if err != nil { + return nil, err + } + + files := []string{ + sd.DocIDCounterPath, + sd.PropLengthTrackerPath, + sd.ShardVersionPath, + } + files = append(files, sd.Files...) + + return files, nil +} + +func (i *Index) tmpCopy(path string, b []byte) error { + tmpFile, err := os.OpenFile(path+tmpCopyExtension, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o666) + if err != nil { + return err + } + defer tmpFile.Close() + + _, err = io.Copy(tmpFile, bytes.NewBuffer(b)) + return err +} + +// IncomingGetFileMetadata returns file metadata at the given path in the specified shards's root +// directory. 
+func (i *Index) IncomingGetFileMetadata(ctx context.Context, shardName, relativeFilePath string) (file.FileMetadata, error) { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return file.FileMetadata{}, fmt.Errorf("incoming get file metadata get shard %s err: %w", shardName, err) + } + if shard == nil { + return file.FileMetadata{}, fmt.Errorf("incoming get file metadata get shard %s: shard not found", shardName) + } + defer release() + + if strings.HasSuffix(shard.Counter().FileName(), relativeFilePath) || + strings.HasSuffix(shard.GetPropertyLengthTracker().FileName(), relativeFilePath) { + relativeFilePath = relativeFilePath + tmpCopyExtension + } + + return shard.GetFileMetadata(ctx, relativeFilePath) +} + +// IncomingGetFile returns a reader for the file at the given path in the specified shard's root +// directory. The caller must close the returned io.ReadCloser if no error is returned. +func (i *Index) IncomingGetFile(ctx context.Context, shardName, + relativeFilePath string, +) (io.ReadCloser, error) { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return nil, fmt.Errorf("incoming get file get shard %s err: %w", shardName, err) + } + if shard == nil { + return nil, fmt.Errorf("incoming get file get shard %s: shard not found", shardName) + } + defer release() + + if strings.HasSuffix(shard.Counter().FileName(), relativeFilePath) || + strings.HasSuffix(shard.GetPropertyLengthTracker().FileName(), relativeFilePath) { + relativeFilePath = relativeFilePath + tmpCopyExtension + } + + return shard.GetFile(ctx, relativeFilePath) +} + +// IncomingAddAsyncReplicationTargetNode adds the given target node override for async replication. +// If the target node override already exists with a different upper time bound, the existing +// override will use the maximum upper time bound between the two. Async replication will be +// started if it's not already running. 
+func (i *Index) IncomingAddAsyncReplicationTargetNode( + ctx context.Context, + shardName string, + targetNodeOverride additional.AsyncReplicationTargetNodeOverride, +) error { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return fmt.Errorf("incoming add async replication get shard %s err: %w", shardName, err) + } + if shard == nil { + return fmt.Errorf("incoming add async replication get shard %s: shard not found", shardName) + } + defer release() + + return shard.addTargetNodeOverride(ctx, targetNodeOverride) +} + +// IncomingRemoveAsyncReplicationTargetNode removes the given target node override for async +// replication. The removal is a no-op if the target node override does not exist +// or if the upper time bound of the given target node override is less than the existing +// override's upper time bound. If there are no target node overrides left, async replication +// will be reset to it's default configuration. +func (i *Index) IncomingRemoveAsyncReplicationTargetNode(ctx context.Context, + shardName string, + targetNodeOverride additional.AsyncReplicationTargetNodeOverride, +) error { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return fmt.Errorf("incoming remove async replication get shard %s err: %w", shardName, err) + } + if shard == nil { + return fmt.Errorf("incoming remove async replication get shard %s: shard not found", shardName) + } + defer release() + + return shard.removeTargetNodeOverride(ctx, targetNodeOverride) +} + +// IncomingAllRemoveAsyncReplicationTargetNodes removes all target node overrides for async +// replication. Async replication will be reset to it's default configuration. 
+func (i *Index) IncomingRemoveAllAsyncReplicationTargetNodes(ctx context.Context, + shardName string, +) error { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return fmt.Errorf("incoming remove all async replication get shard %s err: %w", shardName, err) + } + if shard == nil { + return fmt.Errorf("incoming remove all async replication get shard %s: shard not found", shardName) + } + defer release() + + return shard.removeAllTargetNodeOverrides(ctx) +} + +func (s *Shard) filePutter(ctx context.Context, + filePath string, +) (io.WriteCloser, error) { + // TODO: validate file prefix to rule out that we're accidentally writing + // into another shard + finalPath := filepath.Join(s.Index().Config.RootPath, filePath) + dir := path.Dir(finalPath) + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + return nil, fmt.Errorf("create parent folder for %s: %w", filePath, err) + } + + f, err := os.Create(finalPath) + if err != nil { + return nil, fmt.Errorf("open file %q for writing: %w", filePath, err) + } + + return f, nil +} + +// OverwriteObjects if their state didn't change in the meantime +// It returns nil if all object have been successfully overwritten +// and otherwise a list of failed operations. 
+func (idx *Index) OverwriteObjects(ctx context.Context,
+	shard string, updates []*objects.VObject,
+) ([]types.RepairResponse, error) {
+	s, release, err := idx.GetShard(ctx, shard)
+	if err != nil {
+		return nil, fmt.Errorf("shard %q not found locally", shard)
+	}
+	if s == nil {
+		return nil, fmt.Errorf("shard %q not found locally", shard)
+	}
+	defer release()
+
+	var result []types.RepairResponse
+
+	for i, u := range updates {
+		incomingObj := u.LatestObject
+
+		// a deletion needs an ID, a put needs a full object with an ID
+		if (u.Deleted && u.ID == "") || (!u.Deleted && (incomingObj == nil || incomingObj.ID == "")) {
+			msg := fmt.Sprintf("received nil object or empty uuid at position %d", i)
+			result = append(result, types.RepairResponse{Err: msg})
+			continue
+		}
+
+		var id strfmt.UUID
+		if u.Deleted {
+			id = u.ID
+		} else {
+			id = incomingObj.ID
+		}
+
+		var currUpdateTime int64 // 0 means object doesn't exist on this node
+		var locallyDeleted bool
+
+		// determine the object's current local state: present, deleted (with or
+		// without a known deletion time), or absent
+		localObj, err := s.ObjectByIDErrDeleted(ctx, id, nil, additional.Properties{})
+		if err == nil {
+			currUpdateTime = localObj.LastUpdateTimeUnix()
+		} else if errors.Is(err, lsmkv.Deleted) {
+			locallyDeleted = true
+			var errDeleted lsmkv.ErrDeleted
+			if errors.As(err, &errDeleted) {
+				currUpdateTime = errDeleted.DeletionTime().UnixMilli()
+			} // otherwise an unknown deletion time
+		} else if !errors.Is(err, lsmkv.NotFound) {
+			result = append(result, types.RepairResponse{
+				ID:  id.String(),
+				Err: err.Error(),
+			})
+			continue
+		}
+
+		if currUpdateTime != u.StaleUpdateTime {
+
+			if currUpdateTime == u.LastUpdateTimeUnixMilli {
+				// local object was updated in the meantime, no need to do anything
+				continue
+			}
+
+			// a conflict is returned except for a particular situation
+			// that can be locally solved at this point:
+			// the node propagating the object change may have no information about
+			// the object from this node because it was deleted, it means that
+			// if a time-based resolution is used and the update was more recent
+			// than the deletion, the object update can be processed despite
+			// the fact `currUpdateTime == u.StaleUpdateTime` does not hold.
+			if !locallyDeleted ||
+				idx.DeletionStrategy() != models.ReplicationConfigDeletionStrategyTimeBasedResolution ||
+				currUpdateTime > u.LastUpdateTimeUnixMilli {
+				// object changed and its state differs from recent known state
+				r := types.RepairResponse{
+					ID:         id.String(),
+					Deleted:    locallyDeleted,
+					UpdateTime: currUpdateTime,
+					Err:        "conflict",
+				}
+
+				result = append(result, r)
+				continue
+			}
+			// the object is locally deleted, the resolution strategy is time-based and
+			// the deletion was not made after the received update
+		}
+
+		// another validation is needed for backward-compatibility reasons:
+		// objects may have been deleted without a deletionTime, it means
+		// if an object is locally deleted currUpdateTime == 0
+		// so to avoid creating/updating the locally deleted object
+		// time-based strategy and a more recent creation/update is required
+		if !u.Deleted && locallyDeleted &&
+			(idx.DeletionStrategy() != models.ReplicationConfigDeletionStrategyTimeBasedResolution ||
+				currUpdateTime > u.LastUpdateTimeUnixMilli) {
+			r := types.RepairResponse{
+				ID:         id.String(),
+				Deleted:    locallyDeleted,
+				UpdateTime: currUpdateTime,
+				Err:        "conflict",
+			}
+
+			result = append(result, r)
+			continue
+		}
+
+		if u.Deleted {
+			// apply the incoming deletion with the sender's timestamp
+			err := s.DeleteObject(ctx, u.ID, time.UnixMilli(u.LastUpdateTimeUnixMilli))
+			if err != nil {
+				r := types.RepairResponse{
+					ID:  u.ID.String(),
+					Err: fmt.Sprintf("overwrite deleted object: %v", err),
+				}
+				result = append(result, r)
+			}
+			continue
+		}
+
+		// apply the incoming put (overwrite of the stale local state)
+		err = s.PutObject(ctx, storobj.FromObject(incomingObj, u.Vector, u.Vectors, u.MultiVectors))
+		if err != nil {
+			r := types.RepairResponse{
+				ID:  id.String(),
+				Err: fmt.Sprintf("overwrite stale object: %v", err),
+			}
+			result = append(result, r)
+			continue
+		}
+	}
+
+	return result, nil
+}
+
+func (i *Index) IncomingOverwriteObjects(ctx context.Context,
+	shardName string, vobjects []*objects.VObject,
+) ([]types.RepairResponse, error) { + return i.OverwriteObjects(ctx, shardName, vobjects) +} + +func (i *Index) DigestObjects(ctx context.Context, + shardName string, ids []strfmt.UUID, +) (result []types.RepairResponse, err error) { + result = make([]types.RepairResponse, len(ids)) + + s, release, err := i.getOrInitShard(ctx, shardName) + if err != nil { + return nil, fmt.Errorf("shard %q not found locally", shardName) + } + + defer release() + + if s.GetStatus() == storagestate.StatusLoading { + return nil, enterrors.NewErrUnprocessable(fmt.Errorf("local %s shard is not ready", shardName)) + } + + multiIDs := make([]multi.Identifier, len(ids)) + for j := range multiIDs { + multiIDs[j] = multi.Identifier{ID: ids[j].String()} + } + + objs, err := s.MultiObjectByID(ctx, multiIDs) + if err != nil { + return nil, fmt.Errorf("shard objects digest: %w", err) + } + + for j := range objs { + if objs[j] == nil { + deleted, deletionTime, err := s.WasDeleted(ctx, ids[j]) + if err != nil { + return nil, err + } + + var updateTime int64 + if deleted && !deletionTime.IsZero() { + updateTime = deletionTime.UnixMilli() + } + + result[j] = types.RepairResponse{ + ID: ids[j].String(), + Deleted: deleted, + UpdateTime: updateTime, + // TODO: use version when supported + Version: 0, + } + } else { + result[j] = types.RepairResponse{ + ID: objs[j].ID().String(), + UpdateTime: objs[j].LastUpdateTimeUnix(), + // TODO: use version when supported + Version: 0, + } + } + } + + return +} + +func (i *Index) IncomingDigestObjects(ctx context.Context, + shardName string, ids []strfmt.UUID, +) (result []types.RepairResponse, err error) { + return i.DigestObjects(ctx, shardName, ids) +} + +func (i *Index) DigestObjectsInRange(ctx context.Context, + shardName string, initialUUID, finalUUID strfmt.UUID, limit int, +) (result []types.RepairResponse, err error) { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return nil, fmt.Errorf("shard %q does not exist locally", 
shardName) + } + if shard == nil { + return nil, nil + } + + defer release() + + return shard.ObjectDigestsInRange(ctx, initialUUID, finalUUID, limit) +} + +func (i *Index) IncomingDigestObjectsInRange(ctx context.Context, + shardName string, initialUUID, finalUUID strfmt.UUID, limit int, +) (result []types.RepairResponse, err error) { + return i.DigestObjectsInRange(ctx, shardName, initialUUID, finalUUID, limit) +} + +func (i *Index) HashTreeLevel(ctx context.Context, + shardName string, level int, discriminant *hashtree.Bitset, +) (digests []hashtree.Digest, err error) { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return nil, fmt.Errorf("%w: shard %q", err, shardName) + } + if shard == nil { + return nil, nil + } + + defer release() + + return shard.HashTreeLevel(ctx, level, discriminant) +} + +func (i *Index) IncomingHashTreeLevel(ctx context.Context, + shardName string, level int, discriminant *hashtree.Bitset, +) (digests []hashtree.Digest, err error) { + return i.HashTreeLevel(ctx, shardName, level, discriminant) +} + +func (i *Index) FetchObject(ctx context.Context, + shardName string, id strfmt.UUID, +) (replica.Replica, error) { + shard, release, err := i.getOrInitShard(ctx, shardName) + if err != nil { + return replica.Replica{}, fmt.Errorf("shard %q does not exist locally", shardName) + } + + defer release() + + if shard.GetStatus() == storagestate.StatusLoading { + return replica.Replica{}, enterrors.NewErrUnprocessable(fmt.Errorf("local %s shard is not ready", shardName)) + } + + obj, err := shard.ObjectByID(ctx, id, nil, additional.Properties{}) + if err != nil { + return replica.Replica{}, fmt.Errorf("shard %q read repair get object: %w", shard.ID(), err) + } + + if obj == nil { + deleted, deletionTime, err := shard.WasDeleted(ctx, id) + if err != nil { + return replica.Replica{}, err + } + + var updateTime int64 + if !deletionTime.IsZero() { + updateTime = deletionTime.UnixMilli() + } + + return replica.Replica{ + ID: id, 
+ Deleted: deleted, + LastUpdateTimeUnixMilli: updateTime, + }, nil + } + + return replica.Replica{ + Object: obj, + ID: obj.ID(), + LastUpdateTimeUnixMilli: obj.LastUpdateTimeUnix(), + }, nil +} + +func (i *Index) FetchObjects(ctx context.Context, + shardName string, ids []strfmt.UUID, +) ([]replica.Replica, error) { + shard, release, err := i.GetShard(ctx, shardName) + if err != nil { + return nil, fmt.Errorf("shard %q does not exist locally", shardName) + } + if shard == nil { + return nil, fmt.Errorf("shard %q does not exist locally", shardName) + } + defer release() + + if shard.GetStatus() == storagestate.StatusLoading { + return nil, enterrors.NewErrUnprocessable(fmt.Errorf("local %s shard is not ready", shardName)) + } + + objs, err := shard.MultiObjectByID(ctx, wrapIDsInMulti(ids)) + if err != nil { + return nil, fmt.Errorf("shard %q replication multi get objects: %w", shard.ID(), err) + } + + resp := make([]replica.Replica, len(ids)) + + for j, obj := range objs { + if obj == nil { + deleted, deletionTime, err := shard.WasDeleted(ctx, ids[j]) + if err != nil { + return nil, err + } + + var updateTime int64 + if !deletionTime.IsZero() { + updateTime = deletionTime.UnixMilli() + } + + resp[j] = replica.Replica{ + ID: ids[j], + Deleted: deleted, + LastUpdateTimeUnixMilli: updateTime, + } + } else { + resp[j] = replica.Replica{ + Object: obj, + ID: obj.ID(), + LastUpdateTimeUnixMilli: obj.LastUpdateTimeUnix(), + } + } + } + + return resp, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/repo.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/repo.go new file mode 100644 index 0000000000000000000000000000000000000000..0c5e4674dabc2b66635e9805ea1d38de60fdbdff --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/repo.go @@ -0,0 +1,466 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "math" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/adapters/repos/db/indexcheckpoint" + "github.com/weaviate/weaviate/adapters/repos/db/queue" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + clusterReplication "github.com/weaviate/weaviate/cluster/replication" + "github.com/weaviate/weaviate/cluster/replication/types" + clusterSchema "github.com/weaviate/weaviate/cluster/schema" + usagetypes "github.com/weaviate/weaviate/cluster/usage/types" + "github.com/weaviate/weaviate/cluster/utils" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/replication" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/cluster" + "github.com/weaviate/weaviate/usecases/config" + configRuntime "github.com/weaviate/weaviate/usecases/config/runtime" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/monitoring" + "github.com/weaviate/weaviate/usecases/replica" + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" +) + +type DB struct { + logger logrus.FieldLogger + localNodeName string + schemaGetter schemaUC.SchemaGetter + config Config + indices map[string]*Index + remoteIndex sharding.RemoteIndexClient + replicaClient replica.Client + nodeResolver nodeResolver + remoteNode *sharding.RemoteNode + promMetrics *monitoring.PrometheusMetrics + indexCheckpoints *indexcheckpoint.Checkpoints + shutdown chan struct{} + startupComplete atomic.Bool + resourceScanState *resourceScanState + memMonitor *memwatch.Monitor + + // indexLock is an RWMutex which 
allows concurrent access to various indexes, + // but only one modification at a time. R/W can be a bit confusing here, + // because it does not refer to write or read requests from a user's + // perspective, but rather: + // + // - Read -> The array containing all indexes is read-only. In other words + // there will never be a race condition from doing something like index := + // indexes[0]. What you do with the Index after retrieving it from the array + // does not matter. Assuming that it is thread-safe (it is) you can + // read/write from the index itself. Therefore from a user's perspective + // something like a parallel import batch and a read-query can happen without + // any problems. + // + // - Write -> The index array is being modified, for example, because a new + // index is added. This is mutually exclusive with the other case (but + // hopefully very short). + // + // + // See also: https://github.com/weaviate/weaviate/issues/2351 + // + // This lock should be used to avoid that the indices-map is changed while iterating over it. To + // mark a given index in use, lock that index directly. 
+ indexLock sync.RWMutex + + jobQueueCh chan job + scheduler *queue.Scheduler + shutDownWg sync.WaitGroup + maxNumberGoroutines int + ratePerSecond atomic.Int64 + + // in the case of metrics grouping we need to observe some metrics + // node-centric, rather than shard-centric + metricsObserver *nodeWideMetricsObserver + + shardLoadLimiter ShardLoadLimiter + + reindexer ShardReindexerV3 + nodeSelector cluster.NodeSelector + schemaReader schemaUC.SchemaReader + replicationFSM types.ReplicationFSMReader + + bitmapBufPool roaringset.BitmapBufPool + bitmapBufPoolClose func() +} + +func (db *DB) GetSchemaGetter() schemaUC.SchemaGetter { + return db.schemaGetter +} + +func (db *DB) GetSchema() schema.Schema { + return db.schemaGetter.GetSchemaSkipAuth() +} + +func (db *DB) GetConfig() Config { + return db.config +} + +func (db *DB) GetRemoteIndex() sharding.RemoteIndexClient { + return db.remoteIndex +} + +func (db *DB) SetSchemaGetter(sg schemaUC.SchemaGetter) { + db.schemaGetter = sg +} + +func (db *DB) GetScheduler() *queue.Scheduler { + return db.scheduler +} + +func (db *DB) WaitForStartup(ctx context.Context) error { + err := db.init(ctx) + if err != nil { + return err + } + + db.startupComplete.Store(true) + db.scanResourceUsage() + + return nil +} + +func (db *DB) StartupComplete() bool { return db.startupComplete.Load() } + +// IndexGetter interface defines the methods that the service uses from db.IndexGetter +// This allows for better testability by using interfaces instead of concrete types +type IndexGetter interface { + GetIndexLike(className schema.ClassName) IndexLike +} + +// IndexLike interface defines the methods that the service uses from db.Index +// This allows for better testability by using interfaces instead of concrete types +type IndexLike interface { + ForEachShard(f func(name string, shard ShardLike) error) error + CalculateUnloadedObjectsMetrics(ctx context.Context, tenantName string) (usagetypes.ObjectUsage, error) + 
CalculateUnloadedVectorsMetrics(ctx context.Context, tenantName string) (int64, error) +} + +func New(logger logrus.FieldLogger, localNodeName string, config Config, + remoteIndex sharding.RemoteIndexClient, nodeResolver nodeResolver, + remoteNodesClient sharding.RemoteNodeClient, replicaClient replica.Client, + promMetrics *monitoring.PrometheusMetrics, memMonitor *memwatch.Monitor, + nodeSelector cluster.NodeSelector, schemaReader schemaUC.SchemaReader, replicationFSM types.ReplicationFSMReader, +) (*DB, error) { + if memMonitor == nil { + memMonitor = memwatch.NewDummyMonitor() + } + metricsRegisterer := monitoring.NoopRegisterer + if promMetrics != nil && promMetrics.Registerer != nil { + metricsRegisterer = promMetrics.Registerer + } + + db := &DB{ + logger: logger, + localNodeName: localNodeName, + config: config, + indices: map[string]*Index{}, + remoteIndex: remoteIndex, + nodeResolver: nodeResolver, + remoteNode: sharding.NewRemoteNode(nodeResolver, remoteNodesClient), + replicaClient: replicaClient, + promMetrics: promMetrics, + shutdown: make(chan struct{}), + maxNumberGoroutines: int(math.Round(config.MaxImportGoroutinesFactor * float64(runtime.GOMAXPROCS(0)))), + resourceScanState: newResourceScanState(), + memMonitor: memMonitor, + shardLoadLimiter: NewShardLoadLimiter(metricsRegisterer, config.MaximumConcurrentShardLoads), + reindexer: NewShardReindexerV3Noop(), + nodeSelector: nodeSelector, + schemaReader: schemaReader, + replicationFSM: replicationFSM, + bitmapBufPool: roaringset.NewBitmapBufPoolNoop(), + bitmapBufPoolClose: func() {}, + } + + if db.maxNumberGoroutines == 0 { + return db, errors.New("no workers to add batch-jobs configured.") + } + if !asyncEnabled() { + db.jobQueueCh = make(chan job, 100000) + db.shutDownWg.Add(db.maxNumberGoroutines) + for i := 0; i < db.maxNumberGoroutines; i++ { + i := i + enterrors.GoWrapper(func() { db.batchWorker(i == 0) }, db.logger) + } + // since queues are created regardless of the async setting, we need 
to + // create a scheduler anyway, but there is no need to start it + db.scheduler = queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + }) + } else { + logger.Info("async indexing enabled") + + db.shutDownWg.Add(1) + db.scheduler = queue.NewScheduler(queue.SchedulerOptions{ + Logger: logger, + OnClose: db.shutDownWg.Done, + }) + + db.scheduler.Start() + } + + return db, nil +} + +type Config struct { + RootPath string + QueryLimit int64 + QueryMaximumResults int64 + QueryHybridMaximumResults int64 + QueryNestedRefLimit int64 + ResourceUsage config.ResourceUsage + MaxImportGoroutinesFactor float64 + LazySegmentsDisabled bool + SegmentInfoIntoFileNameEnabled bool + WriteMetadataFilesEnabled bool + MemtablesFlushDirtyAfter int + MemtablesInitialSizeMB int + MemtablesMaxSizeMB int + MemtablesMinActiveSeconds int + MemtablesMaxActiveSeconds int + MinMMapSize int64 + MaxReuseWalSize int64 + SegmentsCleanupIntervalSeconds int + SeparateObjectsCompactions bool + MaxSegmentSize int64 + TrackVectorDimensions bool + TrackVectorDimensionsInterval time.Duration + UsageEnabled bool + ServerVersion string + GitHash string + AvoidMMap bool + DisableLazyLoadShards bool + ForceFullReplicasSearch bool + TransferInactivityTimeout time.Duration + LSMEnableSegmentsChecksumValidation bool + Replication replication.GlobalConfig + MaximumConcurrentShardLoads int + CycleManagerRoutinesFactor int + IndexRangeableInMemory bool + + HNSWMaxLogSize int64 + HNSWDisableSnapshots bool + HNSWSnapshotIntervalSeconds int + HNSWSnapshotOnStartup bool + HNSWSnapshotMinDeltaCommitlogsNumber int + HNSWSnapshotMinDeltaCommitlogsSizePercentage int + HNSWWaitForCachePrefill bool + HNSWFlatSearchConcurrency int + HNSWAcornFilterRatio float64 + VisitedListPoolMaxSize int + + TenantActivityReadLogLevel *configRuntime.DynamicValue[string] + TenantActivityWriteLogLevel *configRuntime.DynamicValue[string] + QuerySlowLogEnabled *configRuntime.DynamicValue[bool] + QuerySlowLogThreshold 
*configRuntime.DynamicValue[time.Duration] + InvertedSorterDisabled *configRuntime.DynamicValue[bool] + MaintenanceModeEnabled func() bool +} + +func (db *DB) GetIndexLike(className schema.ClassName) IndexLike { + index := db.GetIndex(className) + if index == nil { + return nil + } + + return index +} + +// GetIndex returns the index if it exists or nil if it doesn't +// by default it will retry 3 times between 0-150 ms to get the index +// to handle the eventual consistency. +func (db *DB) GetIndex(className schema.ClassName) *Index { + var ( + index *Index + exists bool + ) + // TODO-RAFT remove backoff. Eventual consistency handled by versioning + backoff.Retry(func() error { + db.indexLock.RLock() + defer db.indexLock.RUnlock() + + index, exists = db.indices[indexID(className)] + if !exists { + return fmt.Errorf("index for class %v not found locally", index) + } + return nil + }, utils.NewBackoff()) + + return index +} + +// IndexExists returns if an index exists +func (db *DB) IndexExists(className schema.ClassName) bool { + return db.GetIndex(className) != nil +} + +// TODO-RAFT: Because of interfaces and import order we can't have this function just return the same index interface +// for both sharding and replica usage. With a refactor of the interfaces this can be done and we can remove the +// deduplication + +// GetIndexForIncomingSharding returns the index if it exists or nil if it doesn't +// by default it will retry 3 times between 0-150 ms to get the index +// to handle the eventual consistency. +func (db *DB) GetIndexForIncomingSharding(className schema.ClassName) sharding.RemoteIndexIncomingRepo { + index := db.GetIndex(className) + if index == nil { + return nil + } + + return index +} + +// GetIndexForIncomingReplica returns the index if it exists or nil if it doesn't +// by default it will retry 3 times between 0-150 ms to get the index +// to handle the eventual consistency. 
+func (db *DB) GetIndexForIncomingReplica(className schema.ClassName) replica.RemoteIndexIncomingRepo { + index := db.GetIndex(className) + if index == nil { + return nil + } + + return index +} + +// DeleteIndex deletes the index +func (db *DB) DeleteIndex(className schema.ClassName) error { + index := db.GetIndex(className) + if index == nil { + return nil + } + + // drop index + db.indexLock.Lock() + defer db.indexLock.Unlock() + + index.dropIndex.Lock() + defer index.dropIndex.Unlock() + if err := index.drop(); err != nil { + db.logger.WithField("action", "delete_index").WithField("class", className).Error(err) + } + + delete(db.indices, indexID(className)) + + if err := db.promMetrics.DeleteClass(className.String()); err != nil { + db.logger.Error("can't delete prometheus metrics", err) + } + return nil +} + +func (db *DB) Shutdown(ctx context.Context) error { + db.shutdown <- struct{}{} + db.bitmapBufPoolClose() + + if !asyncEnabled() { + // shut down the workers that add objects to + for i := 0; i < db.maxNumberGoroutines; i++ { + db.jobQueueCh <- job{ + index: -1, + } + } + } + + if asyncEnabled() { + // shut down the async workers + err := db.scheduler.Close() + if err != nil { + return errors.Wrap(err, "close scheduler") + } + } + + if db.metricsObserver != nil { + db.metricsObserver.Shutdown() + } + + db.indexLock.Lock() + defer db.indexLock.Unlock() + for id, index := range db.indices { + if err := index.Shutdown(ctx); err != nil { + return errors.Wrapf(err, "shutdown index %q", id) + } + } + + db.shutDownWg.Wait() // wait until job queue shutdown is completed + + if asyncEnabled() { + db.indexCheckpoints.Close() + } + + return nil +} + +type job struct { + object *storobj.Object + status objectInsertStatus + index int + ctx context.Context + batcher *objectsBatcher +} + +func (db *DB) batchWorker(first bool) { + objectCounter := 0 + checkTime := time.Now().Add(time.Second) + for jobToAdd := range db.jobQueueCh { + if jobToAdd.index < 0 { + 
db.shutDownWg.Done() + return + } + jobToAdd.batcher.storeSingleObjectInAdditionalStorage(jobToAdd.ctx, jobToAdd.object, jobToAdd.status, jobToAdd.index) + jobToAdd.batcher.wg.Done() + objectCounter += 1 + if first && time.Now().After(checkTime) { // only have one worker report the rate per second + db.ratePerSecond.Store(int64(objectCounter * db.maxNumberGoroutines)) + + objectCounter = 0 + checkTime = time.Now().Add(time.Second) + } + } +} + +func (db *DB) WithReindexer(reindexer ShardReindexerV3) *DB { + db.reindexer = reindexer + return db +} + +func (db *DB) SetNodeSelector(nodeSelector cluster.NodeSelector) { + db.nodeSelector = nodeSelector +} + +func (db *DB) SetSchemaReader(schemaReader clusterSchema.SchemaReader) { + db.schemaReader = schemaReader +} + +func (db *DB) SetReplicationFSM(replicationFsm *clusterReplication.ShardReplicationFSM) { + db.replicationFSM = replicationFsm +} + +func (db *DB) WithBitmapBufPool(bufPool roaringset.BitmapBufPool, close func()) *DB { + db.bitmapBufPool = bufPool + db.bitmapBufPoolClose = close + return db +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/repo_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/repo_test.go new file mode 100644 index 0000000000000000000000000000000000000000..17f6c4a1c259aadf3563f0361217aaaafb019290 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/repo_test.go @@ -0,0 +1,67 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/sharding" +) + +func TestGetIndex(t *testing.T) { + db := testDB(t, t.TempDir(), []*models.Class{}, make(map[string]*sharding.State)) + + // empty indices + db.indices = map[string]*Index{} + idx := db.GetIndex(schema.ClassName("test1")) + require.Nil(t, idx) + + // after 20 ms + go func() { + time.Sleep(20 * time.Millisecond) + db.indexLock.Lock() + defer db.indexLock.Unlock() + db.indices = map[string]*Index{ + "test1": {}, + } + }() + idx = db.GetIndex(schema.ClassName("test1")) + require.NotNil(t, idx) + + // after 50 ms + go func() { + time.Sleep(50 * time.Millisecond) + db.indexLock.Lock() + defer db.indexLock.Unlock() + db.indices = map[string]*Index{ + "test2": {}, + } + }() + idx = db.GetIndex(schema.ClassName("test2")) + require.NotNil(t, idx) + + // after 100 ms + go func() { + time.Sleep(100 * time.Millisecond) + db.indexLock.Lock() + defer db.indexLock.Unlock() + db.indices = map[string]*Index{ + "test3": {}, + } + }() + idx = db.GetIndex(schema.ClassName("test3")) + require.NotNil(t, idx) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/resource_use.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/resource_use.go new file mode 100644 index 0000000000000000000000000000000000000000..64a1985c051cb4da29ff2679551ec77f7ba904ae --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/resource_use.go @@ -0,0 +1,170 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "fmt" + "time" + + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/weaviate/weaviate/entities/interval" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +type diskUse struct { + total uint64 + free uint64 + avail uint64 +} + +func (d diskUse) percentUsed() float64 { + used := d.total - d.free + return (float64(used) / float64(d.total)) * 100 +} + +func (d diskUse) String() string { + GB := 1024 * 1024 * 1024 + + return fmt.Sprintf("total: %.2fGB, free: %.2fGB, used: %.2fGB (avail: %.2fGB)", + float64(d.total)/float64(GB), + float64(d.free)/float64(GB), + float64(d.total-d.free)/float64(GB), + float64(d.avail)/float64(GB)) +} + +func (d *DB) scanResourceUsage() { + f := func() { + t := time.NewTicker(time.Millisecond * 500) + i := 0 + defer t.Stop() + for { + select { + case <-d.shutdown: + return + case <-t.C: + updateMappings := i%(memwatch.MappingDelayInS*2) == 0 + if !d.resourceScanState.isReadOnly { + du := d.getDiskUse(d.config.RootPath) + d.resourceUseWarn(d.memMonitor, du, updateMappings) + d.resourceUseReadonly(d.memMonitor, du) + } + i += 1 + } + } + } + enterrors.GoWrapper(f, d.logger) +} + +type resourceScanState struct { + diskWarning *interval.BackoffTimer + memWarning *interval.BackoffTimer + isReadOnly bool +} + +func newResourceScanState() *resourceScanState { + return &resourceScanState{ + diskWarning: interval.NewBackoffTimer(), + memWarning: interval.NewBackoffTimer(), + } +} + +// logs a warning if user-set threshold is surpassed +func (db *DB) resourceUseWarn(mon *memwatch.Monitor, du diskUse, updateMappings bool) { + mon.Refresh(updateMappings) + db.diskUseWarn(du) + db.memUseWarn(mon) +} + +func (db *DB) diskUseWarn(du diskUse) { + diskWarnPercent := db.config.ResourceUsage.DiskUse.WarningPercentage + if diskWarnPercent > 0 { + if pu := du.percentUsed(); pu > float64(diskWarnPercent) { + if db.resourceScanState.diskWarning.IntervalElapsed() { + 
db.logger.WithField("action", "read_disk_use"). + WithField("path", db.config.RootPath). + Warnf("disk usage currently at %.2f%%, threshold set to %.2f%%", + pu, float64(diskWarnPercent)) + + db.logger.WithField("action", "disk_use_stats"). + WithField("path", db.config.RootPath). + Debugf("%s", du.String()) + db.resourceScanState.diskWarning.IncreaseInterval() + } + } + } +} + +func (db *DB) memUseWarn(mon *memwatch.Monitor) { + memWarnPercent := db.config.ResourceUsage.MemUse.WarningPercentage + if memWarnPercent > 0 { + if pu := mon.Ratio() * 100; pu > float64(memWarnPercent) { + if db.resourceScanState.memWarning.IntervalElapsed() { + db.logger.WithField("action", "read_memory_use"). + WithField("path", db.config.RootPath). + Warnf("memory usage currently at %.2f%%, threshold set to %.2f%%", + pu, float64(memWarnPercent)) + db.resourceScanState.memWarning.IncreaseInterval() + } + } + } +} + +// sets the shard to readonly if user-set threshold is surpassed +func (db *DB) resourceUseReadonly(mon *memwatch.Monitor, du diskUse) { + db.diskUseReadonly(du) + db.memUseReadonly(mon) +} + +func (db *DB) diskUseReadonly(du diskUse) { + diskROPercent := db.config.ResourceUsage.DiskUse.ReadOnlyPercentage + if diskROPercent > 0 { + if pu := du.percentUsed(); pu > float64(diskROPercent) { + db.setShardsReadOnly(fmt.Sprintf("disk usage too high. Set to read-only at %.2f%%, threshold set to %.2f%%", pu, float64(diskROPercent))) + db.logger.WithField("action", "set_shard_read_only"). + WithField("path", db.config.RootPath). + Warnf("Set READONLY, disk usage currently at %.2f%%, threshold set to %.2f%%", + pu, float64(diskROPercent)) + } + } +} + +func (db *DB) memUseReadonly(mon *memwatch.Monitor) { + memROPercent := db.config.ResourceUsage.MemUse.ReadOnlyPercentage + if memROPercent > 0 { + if pu := mon.Ratio() * 100; pu > float64(memROPercent) { + db.setShardsReadOnly(fmt.Sprintf("memory usage too high. 
Set to read-only at %.2f%%, threshold set to %.2f%%", pu, float64(memROPercent))) + db.logger.WithField("action", "set_shard_read_only"). + WithField("path", db.config.RootPath). + Warnf("Set READONLY, memory usage currently at %.2f%%, threshold set to %.2f%%", + pu, float64(memROPercent)) + } + } +} + +func (db *DB) setShardsReadOnly(reason string) { + db.indexLock.Lock() + for _, index := range db.indices { + index.ForEachShard(func(name string, shard ShardLike) error { + err := shard.SetStatusReadonly(reason) + if err != nil { + db.logger.WithField("action", "set_shard_read_only"). + WithField("path", db.config.RootPath). + WithError(err). + Fatal("failed to set to READONLY") + } + return nil + }) + } + db.indexLock.Unlock() + db.resourceScanState.isReadOnly = true +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/restart_journey_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/restart_journey_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..37152d986e734a5290e25f7bd25c43b7d402aeea --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/restart_journey_integration_test.go @@ -0,0 +1,273 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/search" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func TestRestartJourney(t *testing.T) { + dirName := t.TempDir() + + logger, _ := test.NewNullLogger() + thingclass := &models.Class{ + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + Class: "Class", + Properties: []*models.Property{ + { + Name: "description", + DataType: []string{string(schema.DataTypeText)}, + Tokenization: "word", + }, + }, + } + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + 
mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + migrator := NewMigrator(repo, logger, "node1") + + t.Run("creating the thing class", func(t *testing.T) { + require.Nil(t, + migrator.AddClass(context.Background(), thingclass)) + }) + + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{thingclass}, + }, + } + + t.Run("import some data", func(t *testing.T) { + err := repo.PutObject(context.Background(), &models.Object{ + Class: "Class", + ID: "9d64350e-5027-40ea-98db-e3b97e6f6f8f", + Properties: map[string]interface{}{ + "description": "the band is just fantastic that is really what I think", + }, + }, []float32{0.1, 0.2, 0.3}, nil, nil, nil, 0) + require.Nil(t, err) + + err = repo.PutObject(context.Background(), &models.Object{ + Class: "Class", + ID: "46ebcce8-fb77-413b-ade6-26c427af3f33", + Properties: 
map[string]interface{}{ + "description": "oh by the way, which one's pink?", + }, + }, []float32{-0.1, 0.2, -0.3}, nil, nil, nil, 0) + require.Nil(t, err) + }) + + t.Run("control", func(t *testing.T) { + t.Run("verify object by id", func(t *testing.T) { + res, err := repo.ObjectByID(context.Background(), "46ebcce8-fb77-413b-ade6-26c427af3f33", nil, additional.Properties{}, "") + require.Nil(t, err) + require.NotNil(t, res) + assert.Equal(t, "oh by the way, which one's pink?", + res.Schema.(map[string]interface{})["description"]) + }) + + t.Run("find object by id through filter", func(t *testing.T) { + res, err := repo.ObjectSearch(context.Background(), 0, 10, + &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + Value: &filters.Value{ + Value: "9d64350e-5027-40ea-98db-e3b97e6f6f8f", + Type: schema.DataTypeText, + }, + On: &filters.Path{ + Class: "Class", + Property: "id", + }, + }, + }, nil, additional.Properties{}, "") + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, "the band is just fantastic that is really what I think", + res[0].Schema.(map[string]interface{})["description"]) + }) + + t.Run("find object through regular inverted index", func(t *testing.T) { + res, err := repo.ObjectSearch(context.Background(), 0, 10, + &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + Value: &filters.Value{ + Value: "pink", + Type: schema.DataTypeText, + }, + On: &filters.Path{ + Class: "Class", + Property: "description", + }, + }, + }, nil, additional.Properties{}, "") + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, "oh by the way, which one's pink?", + res[0].Schema.(map[string]interface{})["description"]) + }) + + t.Run("find object through vector index", func(t *testing.T) { + res, err := repo.VectorSearch(context.Background(), + dto.GetParams{ + ClassName: "Class", + Pagination: &filters.Pagination{ + Limit: 1, + }, + Properties: search.SelectProperties{{Name: 
"description"}}, + }, []string{""}, []models.Vector{[]float32{0.05, 0.1, 0.15}}) + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, "the band is just fantastic that is really what I think", + res[0].Schema.(map[string]interface{})["description"]) + }) + }) + + var newRepo *DB + t.Run("shutdown and recreate", func(t *testing.T) { + require.Nil(t, repo.Shutdown(context.Background())) + repo = nil + + newRepo, err = New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + newRepo.SetSchemaGetter(schemaGetter) + require.Nil(t, newRepo.WaitForStartup(testCtx())) + }) + + t.Run("verify after restart", func(t *testing.T) { + t.Run("verify object by id", func(t *testing.T) { + res, err := newRepo.ObjectByID(context.Background(), "46ebcce8-fb77-413b-ade6-26c427af3f33", nil, additional.Properties{}, "") + require.Nil(t, err) + require.NotNil(t, res) + assert.Equal(t, "oh by the way, which one's pink?", + res.Schema.(map[string]interface{})["description"]) + }) + + t.Run("find object by id through filter", func(t *testing.T) { + res, err := newRepo.ObjectSearch(context.Background(), 0, 10, + &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + Value: &filters.Value{ + Value: "9d64350e-5027-40ea-98db-e3b97e6f6f8f", + Type: schema.DataTypeText, + }, + On: &filters.Path{ + Class: "Class", + Property: "id", + }, + }, + }, nil, additional.Properties{}, "") + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, "the band is just fantastic that is really what I think", + res[0].Schema.(map[string]interface{})["description"]) + }) + + t.Run("find object through regular inverted index", func(t *testing.T) { + res, err := 
newRepo.ObjectSearch(context.Background(), 0, 10, + &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + Value: &filters.Value{ + Value: "pink", + Type: schema.DataTypeText, + }, + On: &filters.Path{ + Class: "Class", + Property: "description", + }, + }, + }, nil, additional.Properties{}, "") + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, "oh by the way, which one's pink?", + res[0].Schema.(map[string]interface{})["description"]) + }) + + t.Run("find object through vector index", func(t *testing.T) { + res, err := newRepo.VectorSearch(context.Background(), + dto.GetParams{ + ClassName: "Class", + Pagination: &filters.Pagination{ + Limit: 1, + }, + Properties: search.SelectProperties{{Name: "description"}}, + }, []string{""}, []models.Vector{[]float32{0.05, 0.1, 0.15}}) + require.Nil(t, err) + require.Len(t, res, 1) + assert.Equal(t, "the band is just fantastic that is really what I think", + res[0].Schema.(map[string]interface{})["description"]) + }) + }) + + t.Run("shutdown", func(t *testing.T) { + require.Nil(t, newRepo.Shutdown(context.Background())) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/search.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/search.go new file mode 100644 index 0000000000000000000000000000000000000000..4cd60373125fc3053acf478ff9d18f87f800981d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/search.go @@ -0,0 +1,523 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/models" + + "github.com/pkg/errors" + + "github.com/weaviate/weaviate/adapters/repos/db/refcache" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/dto" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/modules" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/traverser" +) + +func (db *DB) Aggregate(ctx context.Context, + params aggregation.Params, + modules *modules.Provider, +) (*aggregation.Result, error) { + start := time.Now() + defer func() { + took := time.Since(start) + db.logger.WithFields(logrus.Fields{ + "action": "aggregate_query_completed", + "took": took, + "params": params, + }).Debugf("aggregate query completed in %s", took) + }() + + idx := db.GetIndex(params.ClassName) + if idx == nil { + return nil, fmt.Errorf("tried to browse non-existing index for %s", params.ClassName) + } + + return idx.aggregate(ctx, nil, params, modules, params.Tenant) +} + +func (db *DB) GetQueryMaximumResults() int { + return int(db.config.QueryMaximumResults) +} + +// SparseObjectSearch is used to perform an inverted index search on the db +// +// Earlier use cases required only []search.Result as a return value from the db, and the +// Class ClassSearch method fit this need. Later on, other use cases presented the need +// for the raw storage objects, such as hybrid search. 
+func (db *DB) SparseObjectSearch(ctx context.Context, params dto.GetParams) ([]*storobj.Object, []float32, error) { + start := time.Now() + defer func() { + took := time.Since(start) + db.logger.WithFields(logrus.Fields{ + "action": "sparse_object_search_completed", + "took": took, + "params": params, + }).Debugf("sparse object search query completed in %s", took) + }() + + idx := db.GetIndex(schema.ClassName(params.ClassName)) + if idx == nil { + return nil, nil, fmt.Errorf("tried to browse non-existing index for %s", params.ClassName) + } + + if params.Pagination == nil { + return nil, nil, fmt.Errorf("invalid params, pagination object is nil") + } + + totalLimit, err := db.getTotalLimit(params.Pagination, params.AdditionalProperties) + if err != nil { + return nil, nil, errors.Wrapf(err, "invalid pagination params") + } + + // if this is reference search and tenant is given (as origin class is MT) + // but searched class is non-MT, then skip tenant to pass validation + tenant := params.Tenant + if !idx.partitioningEnabled && params.IsRefOrigin { + tenant = "" + } + + res, scores, err := idx.objectSearch(ctx, totalLimit, + params.Filters, params.KeywordRanking, params.Sort, params.Cursor, + params.AdditionalProperties, params.ReplicationProperties, tenant, params.Pagination.Autocut, params.Properties.GetPropertyNames()) + if err != nil { + return nil, nil, errors.Wrapf(err, "object search at index %s", idx.ID()) + } + + return res, scores, nil +} + +func (db *DB) Search(ctx context.Context, params dto.GetParams) ([]search.Result, error) { + start := time.Now() + defer func() { + took := time.Since(start) + db.logger.WithFields(logrus.Fields{ + "action": "search_completed", + "took": took, + "params": params, + }).Debugf("search query completed in %s", took) + }() + + if params.Pagination == nil { + return nil, fmt.Errorf("invalid params, pagination object is nil") + } + + res, scores, err := db.SparseObjectSearch(ctx, params) + if err != nil { + return nil, err 
+ } + + res, scores = db.getStoreObjectsWithScores(res, scores, params.Pagination) + return db.ResolveReferences(ctx, + storobj.SearchResultsWithScore(res, scores, params.AdditionalProperties, params.Tenant), + params.Properties, params.GroupBy, params.AdditionalProperties, params.Tenant) +} + +func (db *DB) VectorSearch(ctx context.Context, + params dto.GetParams, targetVectors []string, searchVectors []models.Vector, +) ([]search.Result, error) { + start := time.Now() + defer func() { + took := time.Since(start) + db.logger.WithFields(logrus.Fields{ + "action": "vector_search_completed", + "took": took, + "params": params, + "targetVectors": targetVectors, + }).Debugf("vector search query completed in %s", took) + }() + if len(searchVectors) == 0 || len(searchVectors) == 1 && isEmptyVector(searchVectors[0]) { + results, err := db.Search(ctx, params) + return results, err + } + + totalLimit, err := db.getTotalLimit(params.Pagination, params.AdditionalProperties) + if err != nil { + return nil, fmt.Errorf("invalid pagination params: %w", err) + } + + idx := db.GetIndex(schema.ClassName(params.ClassName)) + if idx == nil { + return nil, fmt.Errorf("tried to browse non-existing index for %s", params.ClassName) + } + + targetDist := extractDistanceFromParams(params) + res, dists, err := idx.objectVectorSearch(ctx, searchVectors, targetVectors, + targetDist, totalLimit, params.Filters, params.Sort, params.GroupBy, + params.AdditionalProperties, params.ReplicationProperties, params.Tenant, params.TargetVectorCombination, params.Properties.GetPropertyNames()) + if err != nil { + return nil, errors.Wrapf(err, "object vector search at index %s", idx.ID()) + } + + if totalLimit < 0 { + params.Pagination.Limit = len(res) + } + + return db.ResolveReferences(ctx, + storobj.SearchResultsWithDists(db.getStoreObjects(res, params.Pagination), + params.AdditionalProperties, db.getDists(dists, params.Pagination)), + params.Properties, params.GroupBy, params.AdditionalProperties, 
params.Tenant) +} + +func isEmptyVector(searchVector models.Vector) bool { + if isVectorEmpty, err := dto.IsVectorEmpty(searchVector); err == nil { + return isVectorEmpty + } + return false +} + +func extractDistanceFromParams(params dto.GetParams) float32 { + certainty := traverser.ExtractCertaintyFromParams(params) + if certainty != 0 { + return float32(additional.CertaintyToDist(certainty)) + } + + dist, _ := traverser.ExtractDistanceFromParams(params) + return float32(dist) +} + +func (db *DB) CrossClassVectorSearch(ctx context.Context, vector models.Vector, targetVector string, offset, limit int, + filters *filters.LocalFilter, +) ([]search.Result, error) { + start := time.Now() + defer func() { + took := time.Since(start) + db.logger.WithFields(logrus.Fields{ + "action": "cross_class_vector_search_completed", + "took": took, + "targetVector": targetVector, + }).Debugf("cross class vector search query completed in %s", took) + }() + var found search.Results + + wg := &sync.WaitGroup{} + mutex := &sync.Mutex{} + var searchErrors []error + totalLimit := offset + limit + + db.indexLock.RLock() + for _, index := range db.indices { + wg.Add(1) + index := index + f := func() { + defer wg.Done() + + objs, dist, err := index.objectVectorSearch(ctx, []models.Vector{vector}, []string{targetVector}, + 0, totalLimit, filters, nil, nil, + additional.Properties{}, nil, "", nil, nil) + if err != nil { + mutex.Lock() + searchErrors = append(searchErrors, errors.Wrapf(err, "search index %s", index.ID())) + mutex.Unlock() + } + + mutex.Lock() + found = append(found, storobj.SearchResultsWithDists(objs, additional.Properties{}, dist)...) 
+ mutex.Unlock() + } + enterrors.GoWrapper(f, index.logger) + } + db.indexLock.RUnlock() + + wg.Wait() + + if len(searchErrors) > 0 { + var msg strings.Builder + for i, err := range searchErrors { + if i != 0 { + msg.WriteString(", ") + } + errorMessage := fmt.Sprintf("%v", err) + msg.WriteString(errorMessage) + } + return nil, errors.New(msg.String()) + } + + sort.Slice(found, func(i, j int) bool { + return found[i].Dist < found[j].Dist + }) + + // not enriching by refs, as a vector search result cannot provide + // SelectProperties + return db.getSearchResults(found, offset, limit), nil +} + +// Query a specific class +func (db *DB) Query(ctx context.Context, q *objects.QueryInput) (search.Results, *objects.Error) { + start := time.Now() + defer func() { + took := time.Since(start) + db.logger.WithFields(logrus.Fields{ + "action": "query_completed", + "took": took, + "params": q, + }).Debugf("query completed in %s", took) + }() + totalLimit := q.Offset + q.Limit + if totalLimit == 0 { + return nil, nil + } + if len(q.Sort) > 0 { + if err := filters.ValidateSort(db.schemaGetter.ReadOnlyClass, schema.ClassName(q.Class), q.Sort); err != nil { + return nil, &objects.Error{Msg: "sorting", Code: objects.StatusBadRequest, Err: err} + } + } + idx := db.GetIndex(schema.ClassName(q.Class)) + if idx == nil { + return nil, &objects.Error{Msg: "class not found " + q.Class, Code: objects.StatusNotFound} + } + if q.Cursor != nil { + if err := filters.ValidateCursor(schema.ClassName(q.Class), q.Cursor, q.Offset, q.Filters, q.Sort); err != nil { + return nil, &objects.Error{Msg: "cursor api: invalid 'after' parameter", Code: objects.StatusBadRequest, Err: err} + } + } + res, _, err := idx.objectSearch(ctx, totalLimit, q.Filters, + nil, q.Sort, q.Cursor, q.Additional, nil, q.Tenant, 0, nil) + if err != nil { + switch { + case errors.As(err, &objects.ErrMultiTenancy{}): + return nil, &objects.Error{Msg: "search index " + idx.ID(), Code: objects.StatusUnprocessableEntity, Err: err} 
+ default: + return nil, &objects.Error{Msg: "search index " + idx.ID(), Code: objects.StatusInternalServerError, Err: err} + } + } + return db.getSearchResults(storobj.SearchResults(res, q.Additional, ""), q.Offset, q.Limit), nil +} + +// ObjectSearch search each index. +// Deprecated by Query which searches a specific index +func (db *DB) ObjectSearch(ctx context.Context, offset, limit int, + filters *filters.LocalFilter, sort []filters.Sort, + additional additional.Properties, tenant string, +) (search.Results, error) { + start := time.Now() + defer func() { + took := time.Since(start) + db.logger.WithFields(logrus.Fields{ + "action": "object_search_completed", + "took": took, + "offset": offset, + "limit": limit, + "filters": filters, + "sort": sort, + }).Debugf("object search completed in %s", took) + }() + return db.objectSearch(ctx, offset, limit, filters, sort, additional, tenant) +} + +func (db *DB) objectSearch(ctx context.Context, offset, limit int, + filters *filters.LocalFilter, sort []filters.Sort, + additional additional.Properties, tenant string, +) (search.Results, error) { + var found []*storobj.Object + + if err := db.validateSort(sort); err != nil { + return nil, errors.Wrap(err, "search") + } + + totalLimit := offset + limit + // TODO: Search in parallel, rather than sequentially or this will be + // painfully slow on large schemas + // wrapped in func to unlock mutex within defer + if err := func() error { + db.indexLock.RLock() + defer db.indexLock.RUnlock() + + for _, index := range db.indices { + // TODO support all additional props + scheme := index.getSchema.GetSchemaSkipAuth() + props := scheme.GetClass(string(index.Config.ClassName)).Properties + propsNames := make([]string, len(props)) + for i, prop := range props { + propsNames[i] = prop.Name + } + + res, _, err := index.objectSearch(ctx, totalLimit, + filters, nil, sort, nil, additional, nil, tenant, 0, propsNames) + if err != nil { + // Multi tenancy specific errors + if 
errors.As(err, &objects.ErrMultiTenancy{}) { + // validation failed (either MT class without tenant or non-MT class with tenant) + if strings.Contains(err.Error(), "has multi-tenancy enabled, but request was without tenant") || + strings.Contains(err.Error(), "has multi-tenancy disabled, but request was with tenant") { + continue + } + // tenant not added to class + if strings.Contains(err.Error(), "no tenant found with key") { + continue + } + // tenant does belong to this class + if errors.Is(err, enterrors.ErrTenantNotFound) { + continue // tenant does belong to this class + } + } + return errors.Wrapf(err, "search index %s", index.ID()) + } + + found = append(found, res...) + if len(found) >= totalLimit { + // we are done + break + } + } + return nil + }(); err != nil { + return nil, err + } + + return db.getSearchResults(storobj.SearchResults(found, additional, tenant), offset, limit), nil +} + +// ResolveReferences takes a list of search results and enriches them +// with any referenced objects +func (db *DB) ResolveReferences(ctx context.Context, objs search.Results, + props search.SelectProperties, groupBy *searchparams.GroupBy, + addl additional.Properties, tenant string, +) (search.Results, error) { + start := time.Now() + defer func() { + took := time.Since(start) + db.logger.WithFields(logrus.Fields{ + "action": "resolve_references_completed", + "took": took, + "len": len(objs), + "props": props, + }).Debugf("resolve references completed in %s", took) + }() + if addl.NoProps { + // If we have no props, there also can't be refs among them, so we can skip + // the refcache resolver + return objs, nil + } + + if groupBy != nil { + res, err := refcache.NewResolverWithGroup(refcache.NewCacher(db, db.logger, tenant), groupBy.Properties). + Do(ctx, objs, props, addl) + if err != nil { + return nil, fmt.Errorf("resolve cross-refs: %w", err) + } + return res, nil + } + + res, err := refcache.NewResolver(refcache.NewCacher(db, db.logger, tenant)). 
+ Do(ctx, objs, props, addl) + if err != nil { + return nil, fmt.Errorf("resolve cross-refs: %w", err) + } + + return res, nil +} + +func (db *DB) validateSort(sort []filters.Sort) error { + if len(sort) > 0 { + var errorMsgs []string + db.indexLock.RLock() + for _, index := range db.indices { + err := filters.ValidateSort(db.schemaGetter.ReadOnlyClass, index.Config.ClassName, sort) + if err != nil { + errorMsg := errors.Wrapf(err, "search index %s", index.ID()).Error() + errorMsgs = append(errorMsgs, errorMsg) + } + } + db.indexLock.RUnlock() + if len(errorMsgs) > 0 { + return errors.Errorf("%s", strings.Join(errorMsgs, ", ")) + } + } + return nil +} + +func (db *DB) getTotalLimit(pagination *filters.Pagination, addl additional.Properties) (int, error) { + if pagination.Limit == filters.LimitFlagSearchByDist { + return filters.LimitFlagSearchByDist, nil + } + + totalLimit := pagination.Offset + db.getLimit(pagination.Limit) + if totalLimit == 0 { + return 0, fmt.Errorf("invalid default limit: %v", db.getLimit(pagination.Limit)) + } + if !addl.ReferenceQuery && totalLimit > int(db.config.QueryMaximumResults) { + return 0, errors.New("query maximum results exceeded") + } + return totalLimit, nil +} + +func (db *DB) getSearchResults(found search.Results, paramOffset, paramLimit int) search.Results { + offset, limit := db.getOffsetLimit(len(found), paramOffset, paramLimit) + if offset == 0 && limit == 0 { + return nil + } + return found[offset:limit] +} + +func (db *DB) getStoreObjects(res []*storobj.Object, pagination *filters.Pagination) []*storobj.Object { + offset, limit := db.getOffsetLimit(len(res), pagination.Offset, pagination.Limit) + if offset == 0 && limit == 0 { + return nil + } + return res[offset:limit] +} + +func (db *DB) getStoreObjectsWithScores(res []*storobj.Object, scores []float32, pagination *filters.Pagination) ([]*storobj.Object, []float32) { + offset, limit := db.getOffsetLimit(len(res), pagination.Offset, pagination.Limit) + if offset == 0 && 
limit == 0 { + return nil, nil + } + res = res[offset:limit] + // not all search results have scores + if len(scores) == 0 { + return res, scores + } + + return res, scores[offset:limit] +} + +func (db *DB) getDists(dists []float32, pagination *filters.Pagination) []float32 { + offset, limit := db.getOffsetLimit(len(dists), pagination.Offset, pagination.Limit) + if offset == 0 && limit == 0 { + return nil + } + return dists[offset:limit] +} + +func (db *DB) getOffsetLimit(arraySize int, offset, limit int) (int, int) { + totalLimit := offset + db.getLimit(limit) + if arraySize > totalLimit { + return offset, totalLimit + } else if arraySize > offset { + return offset, arraySize + } + return 0, 0 +} + +func (db *DB) getLimit(limit int) int { + if limit == filters.LimitFlagNotSet { + return int(db.config.QueryLimit) + } + return limit +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/search_deduplication.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/search_deduplication.go new file mode 100644 index 0000000000000000000000000000000000000000..e7db4b7ccad3e7928847a271c516024384f0ed92 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/search_deduplication.go @@ -0,0 +1,59 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "fmt" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/storobj" +) + +func searchResultDedup(out []*storobj.Object, dists []float32) ([]*storobj.Object, []float32, error) { + type indexAndScore struct { + i int + score float32 + } + allKeys := make(map[strfmt.UUID]indexAndScore) + filteredObjects := make([]*storobj.Object, 0, len(out)) + filteredScores := make([]float32, 0, len(dists)) + + i := 0 + // Iterate over all the objects, the corresponding score is always dists[j] for object at index j + for j, obj := range out { + // If we have encountered the object before lookup the score of the current object vs the previous one. If + // the score is better then we keep this one by replacing it in filtered arrays in place, if not we ignore + // it and move on + val, ok := allKeys[obj.ID()] + if ok { + // If the store distance is bigger than the current object distance we want to replace the object we + // have in the filtered array + if val.score > dists[j] { + // Update in place in the filtered arrays + filteredObjects[val.i] = obj + filteredScores[val.i] = dists[j] + // Update the score stored in the map tracking what we have seen so far + allKeys[obj.ID()] = indexAndScore{val.i, dists[j]} + } + } else { + // We have never seen that object before, append to the filtered arrays and add the tracking map + filteredObjects = append(filteredObjects, obj) + filteredScores = append(filteredScores, dists[j]) + allKeys[obj.ID()] = indexAndScore{i: i, score: dists[j]} + i++ + } + } + if len(filteredObjects) != len(filteredScores) { + return []*storobj.Object{}, []float32{}, fmt.Errorf("length of object and scores should be equal obj=%d vs dists=%d", len(filteredObjects), len(filteredScores)) + } + return filteredObjects, filteredScores, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/search_deduplication_test.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/search_deduplication_test.go new file mode 100644 index 0000000000000000000000000000000000000000..47f763ab2034ee3cab08d9c6873657778045b352 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/search_deduplication_test.go @@ -0,0 +1,245 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "testing" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/storobj" +) + +func TestSearchDeduplication(t *testing.T) { + type idAndDistPair struct { + id string + dist float32 + } + + type shardResult struct { + elems []idAndDistPair + } + + type test struct { + name string + input []shardResult + expectedOutput []idAndDistPair + } + + tests := []test{ + { + name: "single input", + input: []shardResult{ + { + elems: []idAndDistPair{ + {id: "1", dist: 0.1}, + }, + }, + }, + expectedOutput: []idAndDistPair{ + {id: "1", dist: 0.1}, + }, + }, + { + name: "2 shards with full overlap", + input: []shardResult{ + { + elems: []idAndDistPair{ + {id: "1", dist: 0.1}, + {id: "2", dist: 0.2}, + {id: "3", dist: 0.3}, + {id: "4", dist: 0.4}, + }, + }, + { + elems: []idAndDistPair{ + {id: "1", dist: 0.1}, + {id: "2", dist: 0.2}, + {id: "3", dist: 0.3}, + {id: "4", dist: 0.4}, + }, + }, + }, + expectedOutput: []idAndDistPair{ + {id: "1", dist: 0.1}, + {id: "2", dist: 0.2}, + {id: "3", dist: 0.3}, + {id: "4", dist: 0.4}, + }, + }, + { + name: "2 shards with no overlap, best result in first shard", + input: []shardResult{ + { + elems: []idAndDistPair{ + {id: "001", dist: 0.1}, + {id: "003", dist: 0.3}, + }, + }, + { + elems: []idAndDistPair{ + {id: "002", 
dist: 0.2}, + {id: "004", dist: 0.4}, + }, + }, + }, + expectedOutput: []idAndDistPair{ + {id: "001", dist: 0.1}, + {id: "002", dist: 0.2}, + {id: "003", dist: 0.3}, + {id: "004", dist: 0.4}, + }, + }, + { + name: "2 shards with no overlap, best result in second shard", + input: []shardResult{ + { + elems: []idAndDistPair{ + {id: "002", dist: 0.2}, + {id: "004", dist: 0.4}, + }, + }, + { + elems: []idAndDistPair{ + {id: "001", dist: 0.1}, + {id: "003", dist: 0.3}, + }, + }, + }, + expectedOutput: []idAndDistPair{ + {id: "001", dist: 0.1}, + {id: "002", dist: 0.2}, + {id: "003", dist: 0.3}, + {id: "004", dist: 0.4}, + }, + }, + { + name: "2 shards with full overlap, but shard 1 has lower scores for some elements", + input: []shardResult{ + { + elems: []idAndDistPair{ + {id: "1", dist: 0.1}, + {id: "2", dist: 0.2}, + {id: "3", dist: 0.3}, + {id: "4", dist: 0.4}, + }, + }, + { + elems: []idAndDistPair{ + {id: "1", dist: 0.15}, + {id: "2", dist: 0.2}, + {id: "3", dist: 0.35}, + {id: "4", dist: 0.4}, + }, + }, + }, + expectedOutput: []idAndDistPair{ + {id: "1", dist: 0.1}, + {id: "2", dist: 0.2}, + {id: "3", dist: 0.3}, + {id: "4", dist: 0.4}, + }, + }, + { + name: "2 shards with full overlap, but shard 1 has higher scores for some elements", + input: []shardResult{ + { + elems: []idAndDistPair{ + {id: "1", dist: 0.15}, + {id: "2", dist: 0.2}, + {id: "3", dist: 0.35}, + {id: "4", dist: 0.4}, + }, + }, + { + elems: []idAndDistPair{ + {id: "1", dist: 0.1}, + {id: "2", dist: 0.2}, + {id: "3", dist: 0.3}, + {id: "4", dist: 0.4}, + }, + }, + }, + expectedOutput: []idAndDistPair{ + {id: "1", dist: 0.1}, + {id: "2", dist: 0.2}, + {id: "3", dist: 0.3}, + {id: "4", dist: 0.4}, + }, + }, + { + name: "choas with some overlap, some new results, some differing scores", + input: []shardResult{ + { + elems: []idAndDistPair{ + {id: "1", dist: 0.1}, + {id: "3", dist: 0.102}, + {id: "4", dist: 0.1}, + }, + }, + { + elems: []idAndDistPair{ + {id: "2", dist: 0.099}, + {id: "3", dist: 
0.101}, + }, + }, + { + elems: []idAndDistPair{ + {id: "1", dist: 0.1}, + {id: "2", dist: 0.1}, + {id: "3", dist: 0.1}, + {id: "4", dist: 0.1}, + {id: "5", dist: 0.098}, + }, + }, + }, + expectedOutput: []idAndDistPair{ + {id: "1", dist: 0.1}, + {id: "2", dist: 0.099}, + {id: "3", dist: 0.1}, + {id: "4", dist: 0.1}, + {id: "5", dist: 0.098}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Build input + input := []*storobj.Object{} + inputDists := []float32{} + docID := uint64(0) + for _, shard := range test.input { + for _, elem := range shard.elems { + obj := storobj.New(docID) + obj.Object.ID = strfmt.UUID(elem.id) + input = append(input, obj) + docID++ // the docIDs don't matter for this test, but we need them to create the storobjs + inputDists = append(inputDists, elem.dist) + } + } + + // Run function + out, outDists, err := searchResultDedup(input, inputDists) + require.NoError(t, err) + + // turn results into idAndDistPair for easier comparison + output := make([]idAndDistPair, len(out)) + for i, obj := range out { + output[i] = idAndDistPair{obj.ID().String(), outDists[i]} + } + + assert.ElementsMatch(t, test.expectedOutput, output) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard.go new file mode 100644 index 0000000000000000000000000000000000000000..79c9e93c52550d769a6e9803b3e3cbbd260a4030 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard.go @@ -0,0 +1,529 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "io" + "path" + "sync" + "sync/atomic" + "time" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "go.etcd.io/bbolt" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/indexcheckpoint" + "github.com/weaviate/weaviate/adapters/repos/db/indexcounter" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/propertyspecific" + "github.com/weaviate/weaviate/adapters/repos/db/queue" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/cluster/router/types" + usagetypes "github.com/weaviate/weaviate/cluster/usage/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/backup" + "github.com/weaviate/weaviate/entities/dto" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/filters" + entinverted "github.com/weaviate/weaviate/entities/inverted" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/multi" + "github.com/weaviate/weaviate/entities/schema" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/storagestate" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/file" + "github.com/weaviate/weaviate/usecases/modules" + "github.com/weaviate/weaviate/usecases/monitoring" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/replica" + "github.com/weaviate/weaviate/usecases/replica/hashtree" +) + +const IdLockPoolSize = 128 + +var ( + 
errAlreadyShutdown = errors.New("already shut or dropped") + errShutdownInProgress = errors.New("shard shutdown in progress") +) + +type ShardLike interface { + Index() *Index // Get the parent index + Name() string // Get the shard name + Store() *lsmkv.Store // Get the underlying store + NotifyReady() // Set shard status to ready + GetStatus() storagestate.Status // Return the shard status + UpdateStatus(status, reason string) error // Set shard status + SetStatusReadonly(reason string) error // Set shard status to readonly with reason + FindUUIDs(ctx context.Context, filters *filters.LocalFilter) ([]strfmt.UUID, error) // Search and return document ids + + Counter() *indexcounter.Counter + ObjectCount() int + ObjectCountAsync(ctx context.Context) (int64, error) + ObjectStorageSize(ctx context.Context) (int64, error) + VectorStorageSize(ctx context.Context) (int64, error) + GetPropertyLengthTracker() *inverted.JsonShardMetaData + + PutObject(context.Context, *storobj.Object) error + PutObjectBatch(context.Context, []*storobj.Object) []error + ObjectByID(ctx context.Context, id strfmt.UUID, props search.SelectProperties, additional additional.Properties) (*storobj.Object, error) + ObjectByIDErrDeleted(ctx context.Context, id strfmt.UUID, props search.SelectProperties, additional additional.Properties) (*storobj.Object, error) + Exists(ctx context.Context, id strfmt.UUID) (bool, error) + ObjectSearch(ctx context.Context, limit int, filters *filters.LocalFilter, keywordRanking *searchparams.KeywordRanking, sort []filters.Sort, cursor *filters.Cursor, additional additional.Properties, properties []string) ([]*storobj.Object, []float32, error) + ObjectVectorSearch(ctx context.Context, searchVectors []models.Vector, targetVectors []string, targetDist float32, limit int, filters *filters.LocalFilter, sort []filters.Sort, groupBy *searchparams.GroupBy, additional additional.Properties, targetCombination *dto.TargetCombination, properties []string) ([]*storobj.Object, 
[]float32, error) + UpdateVectorIndexConfig(ctx context.Context, updated schemaConfig.VectorIndexConfig) error + UpdateVectorIndexConfigs(ctx context.Context, updated map[string]schemaConfig.VectorIndexConfig) error + AddReferencesBatch(ctx context.Context, refs objects.BatchReferences) []error + DeleteObjectBatch(ctx context.Context, ids []strfmt.UUID, deletionTime time.Time, dryRun bool) objects.BatchSimpleObjects // Delete many objects by id + DeleteObject(ctx context.Context, id strfmt.UUID, deletionTime time.Time) error // Delete object by id + MultiObjectByID(ctx context.Context, query []multi.Identifier) ([]*storobj.Object, error) + ObjectDigestsInRange(ctx context.Context, initialUUID, finalUUID strfmt.UUID, limit int) (objs []types.RepairResponse, err error) + ID() string // Get the shard id + drop() error + HaltForTransfer(ctx context.Context, offloading bool, inactivityTimeout time.Duration) error + initPropertyBuckets(ctx context.Context, eg *enterrors.ErrorGroupWrapper, lazyLoadSegments bool, props ...*models.Property) + ListBackupFiles(ctx context.Context, ret *backup.ShardDescriptor) error + resumeMaintenanceCycles(ctx context.Context) error + GetFileMetadata(ctx context.Context, relativeFilePath string) (file.FileMetadata, error) + GetFile(ctx context.Context, relativeFilePath string) (io.ReadCloser, error) + SetPropertyLengths(props []inverted.Property) error + AnalyzeObject(*storobj.Object) ([]inverted.Property, []inverted.NilProperty, error) + Aggregate(ctx context.Context, params aggregation.Params, modules *modules.Provider) (*aggregation.Result, error) + HashTreeLevel(ctx context.Context, level int, discriminant *hashtree.Bitset) (digests []hashtree.Digest, err error) + MergeObject(ctx context.Context, object objects.MergeDocument) error + VectorDistanceForQuery(ctx context.Context, id uint64, searchVectors []models.Vector, targets []string) ([]float32, error) + ConvertQueue(targetVector string) error + FillQueue(targetVector string, from 
uint64) error + Shutdown(context.Context) error // Shutdown the shard + preventShutdown() (release func(), err error) + + // TODO tests only + ObjectList(ctx context.Context, limit int, sort []filters.Sort, cursor *filters.Cursor, + additional additional.Properties, className schema.ClassName) ([]*storobj.Object, error) // Search and return objects + WasDeleted(ctx context.Context, id strfmt.UUID) (bool, time.Time, error) // Check if an object was deleted + GetVectorIndexQueue(targetVector string) (*VectorIndexQueue, bool) + GetVectorIndex(targetVector string) (VectorIndex, bool) + ForEachVectorIndex(f func(targetVector string, index VectorIndex) error) error + ForEachVectorQueue(f func(targetVector string, queue *VectorIndexQueue) error) error + // TODO tests only + Versioner() *shardVersioner // Get the shard versioner + + SetAsyncReplicationEnabled(ctx context.Context, enabled bool) error + + isReadOnly() error + pathLSM() string + + preparePutObject(context.Context, string, *storobj.Object) replica.SimpleResponse + preparePutObjects(context.Context, string, []*storobj.Object) replica.SimpleResponse + prepareMergeObject(context.Context, string, *objects.MergeDocument) replica.SimpleResponse + prepareDeleteObject(context.Context, string, strfmt.UUID, time.Time) replica.SimpleResponse + prepareDeleteObjects(context.Context, string, []strfmt.UUID, time.Time, bool) replica.SimpleResponse + prepareAddReferences(context.Context, string, []objects.BatchReference) replica.SimpleResponse + + commitReplication(context.Context, string, *shardTransfer) interface{} + abortReplication(context.Context, string) replica.SimpleResponse + filePutter(context.Context, string) (io.WriteCloser, error) + + // Dimensions returns the total number of dimensions for a given vector + Dimensions(ctx context.Context, targetVector string) (int, error) + // DimensionsUsage returns the total number of dimensions and the number of objects for a given vector + DimensionsUsage(ctx context.Context, 
targetVector string) (usagetypes.Dimensionality, error) + QuantizedDimensions(ctx context.Context, targetVector string, segments int) int + + extendDimensionTrackerLSM(dimLength int, docID uint64, targetVector string) error + resetDimensionsLSM(ctx context.Context) error + + addToPropertySetBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error + deleteFromPropertySetBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error + addToPropertyMapBucket(bucket *lsmkv.Bucket, pair lsmkv.MapPair, key []byte) error + addToPropertyRangeBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error + deleteFromPropertyRangeBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error + pairPropertyWithFrequency(docID uint64, freq, propLen float32) lsmkv.MapPair + + setFallbackToSearchable(fallback bool) + addJobToQueue(job job) + uuidFromDocID(docID uint64) (strfmt.UUID, error) + batchDeleteObject(ctx context.Context, id strfmt.UUID, deletionTime time.Time) error + putObjectLSM(object *storobj.Object, idBytes []byte) (objectInsertStatus, error) + mayUpsertObjectHashTree(object *storobj.Object, idBytes []byte, status objectInsertStatus) error + mutableMergeObjectLSM(merge objects.MergeDocument, idBytes []byte) (mutableMergeResult, error) + batchExtendInvertedIndexItemsLSMNoFrequency(b *lsmkv.Bucket, item inverted.MergeItem) error + updatePropertySpecificIndices(ctx context.Context, object *storobj.Object, status objectInsertStatus) error + updateVectorIndexIgnoreDelete(ctx context.Context, vector []float32, status objectInsertStatus) error + updateVectorIndexesIgnoreDelete(ctx context.Context, vectors map[string][]float32, status objectInsertStatus) error + updateMultiVectorIndexesIgnoreDelete(ctx context.Context, multiVectors map[string][][]float32, status objectInsertStatus) error + hasGeoIndex() bool + // addTargetNodeOverride adds a target node override to the shard. 
+ addTargetNodeOverride(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride) error + // removeTargetNodeOverride removes a target node override from the shard. + removeTargetNodeOverride(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride) error + // removeAllTargetNodeOverrides removes all target node overrides from the shard + // and resets the async replication config. + removeAllTargetNodeOverrides(ctx context.Context) error + + // getAsyncReplicationStats returns all current sync replication stats for this node/shard + getAsyncReplicationStats(ctx context.Context) []*models.AsyncReplicationStatus + + Metrics() *Metrics + + // A thread-safe counter that goes up any time there is activity on this + // shard. The absolute value has no meaning, it's only purpose is to compare + // the previous value to the current value. First value is for reads, second + // for writes. + Activity() (int32, int32) + // Debug methods + DebugResetVectorIndex(ctx context.Context, targetVector string) error + RepairIndex(ctx context.Context, targetVector string) error +} + +type onAddToPropertyValueIndex func(shard *Shard, docID uint64, property *inverted.Property) error + +type onDeleteFromPropertyValueIndex func(shard *Shard, docID uint64, property *inverted.Property) error + +// Shard is the smallest completely-contained index unit. A shard manages +// database files for all the objects it owns. How a shard is determined for a +// target object (e.g. Murmur hash, etc.) 
is still open at this point +type Shard struct { + index *Index // a reference to the underlying index, which in turn contains schema information + class *models.Class + scheduler *queue.Scheduler + name string + store *lsmkv.Store + counter *indexcounter.Counter + indexCheckpoints *indexcheckpoint.Checkpoints + metrics *Metrics + promMetrics *monitoring.PrometheusMetrics + slowQueryReporter helpers.SlowQueryReporter + propertyIndices propertyspecific.Indices + propLenTracker *inverted.JsonShardMetaData + versioner *shardVersioner + + vectorIndexMu sync.RWMutex + vectorIndex VectorIndex + queue *VectorIndexQueue + vectorIndexes map[string]VectorIndex + queues map[string]*VectorIndexQueue + + // async replication + asyncReplicationRWMux sync.RWMutex + asyncReplicationConfig asyncReplicationConfig + hashtree hashtree.AggregatedHashTree + hashtreeFullyInitialized bool + minimalHashtreeInitializationCh chan struct{} + asyncReplicationCancelFunc context.CancelFunc + + lastComparedHosts []string + lastComparedHostsMux sync.RWMutex + asyncReplicationStatsByTargetNode map[string]*hashBeatHostStats + + haltForTransferMux sync.Mutex + haltForTransferInactivityTimeout time.Duration + haltForTransferInactivityTimer *time.Timer + haltForTransferCount int + haltForTransferCancel func() + + status ShardStatus + statusLock sync.RWMutex + propertyIndicesLock sync.RWMutex + + centralJobQueue chan job // reference to queue used by all shards + + docIdLock []sync.Mutex + // replication + replicationMap pendingReplicaTasks + + // Indicates whether searchable buckets should be used + // when filterable buckets are missing for text/text[] properties + // This can happen for db created before v1.19, where + // only map (now called searchable) buckets were created as inverted + // indexes for text/text[] props. + // Now roaring set (filterable) and map (searchable) buckets can + // coexists for text/text[] props, and by default both are enabled. 
+ // So despite property's IndexFilterable and IndexSearchable settings + // being enabled, only searchable bucket exists + fallbackToSearchable bool + + cycleCallbacks *shardCycleCallbacks + bitmapFactory *roaringset.BitmapFactory + bitmapBufPool roaringset.BitmapBufPool + + activityTrackerRead atomic.Int32 + activityTrackerWrite atomic.Int32 + + // shared bolt database for dynamic vector indexes. + // nil if there is no configured dynamic vector index + dynamicVectorIndexDB *bbolt.DB + + // indicates whether shard is shut down or dropped (or ongoing) + shut atomic.Bool + // indicates whether shard in being used at the moment (e.g. write request) + inUseCounter atomic.Int64 + // allows concurrent shut read/write + shutdownLock *sync.RWMutex + + reindexer ShardReindexerV3 + callbacksAddToPropertyValueIndex []onAddToPropertyValueIndex + callbacksRemoveFromPropertyValueIndex []onDeleteFromPropertyValueIndex + // stores names of properties that are searchable and use buckets of + // inverted strategy. for such properties delta analyzer should avoid + // computing delta between previous and current values of properties + searchableBlockmaxPropNames []string + searchableBlockmaxPropNamesLock *sync.Mutex + + usingBlockMaxWAND bool + + // shutdownRequested marks shard as requested for shutdown + shutdownRequested atomic.Bool +} + +func (s *Shard) ID() string { + return shardId(s.index.ID(), s.name) +} + +func (s *Shard) path() string { + return shardPath(s.index.path(), s.name) +} + +func (s *Shard) pathLSM() string { + return shardPathLSM(s.index.path(), s.name) +} + +func (s *Shard) pathHashTree() string { + return path.Join(s.path(), "hashtree_uuid") +} + +func (s *Shard) vectorIndexID(targetVector string) string { + if targetVector != "" { + return fmt.Sprintf("vectors_%s", targetVector) + } + return "main" +} + +func (s *Shard) uuidToIdLockPoolId(idBytes []byte) uint8 { + // use the last byte of the uuid to determine which locking-pool a given object should use. 
The last byte is used + // as uuids probably often have some kind of order and the last byte will in general be the one that changes the most + return idBytes[15] % IdLockPoolSize +} + +func (s *Shard) memtableDirtyConfig() lsmkv.BucketOption { + return lsmkv.WithDirtyThreshold( + time.Duration(s.index.Config.MemtablesFlushDirtyAfter) * time.Second) +} + +func (s *Shard) dynamicMemtableSizing() lsmkv.BucketOption { + return lsmkv.WithDynamicMemtableSizing( + s.index.Config.MemtablesInitialSizeMB, + s.index.Config.MemtablesMaxSizeMB, + s.index.Config.MemtablesMinActiveSeconds, + s.index.Config.MemtablesMaxActiveSeconds, + ) +} + +func (s *Shard) segmentCleanupConfig() lsmkv.BucketOption { + return lsmkv.WithSegmentsCleanupInterval( + time.Duration(s.index.Config.SegmentsCleanupIntervalSeconds) * time.Second) +} + +func (s *Shard) UpdateVectorIndexConfig(ctx context.Context, updated schemaConfig.VectorIndexConfig) error { + if err := s.isReadOnly(); err != nil { + return err + } + + reason := "UpdateVectorIndexConfig" + err := s.SetStatusReadonly(reason) + if err != nil { + return fmt.Errorf("attempt to mark read-only: %w", err) + } + + index, ok := s.GetVectorIndex("") + if !ok { + return fmt.Errorf("vector index does not exist") + } + + return index.UpdateUserConfig(updated, func() { + s.UpdateStatus(storagestate.StatusReady.String(), reason) + }) +} + +func (s *Shard) UpdateVectorIndexConfigs(ctx context.Context, updated map[string]schemaConfig.VectorIndexConfig) error { + if err := s.isReadOnly(); err != nil { + return err + } + + i := 0 + targetVecs := make([]string, len(updated)) + for targetVec := range updated { + targetVecs[i] = targetVec + i++ + } + reason := fmt.Sprintf("UpdateVectorIndexConfigs: %v", targetVecs) + if err := s.SetStatusReadonly(reason); err != nil { + return fmt.Errorf("attempt to mark read-only: %w", err) + } + + wg := new(sync.WaitGroup) + var err error + for targetVector, targetCfg := range updated { + if index, ok := 
s.GetVectorIndex(targetVector); ok { + wg.Add(1) + if err = index.UpdateUserConfig(targetCfg, wg.Done); err != nil { + break + } + } else { + // dont lazy load segments on config update + if err = s.initTargetVector(ctx, targetVector, targetCfg, false); err != nil { + return fmt.Errorf("creating new vector index: %w", err) + } + } + } + + f := func() { + wg.Wait() + s.UpdateStatus(storagestate.StatusReady.String(), reason) + } + enterrors.GoWrapper(f, s.index.logger) + + return err +} + +// ObjectCount returns the exact count at any moment +func (s *Shard) ObjectCount() int { + b := s.store.Bucket(helpers.ObjectsBucketLSM) + if b == nil { + return 0 + } + + return b.Count() +} + +// ObjectCountAsync returns the eventually consistent "async" count which is +// much cheaper to obtain +func (s *Shard) ObjectCountAsync(_ context.Context) (int64, error) { + b := s.store.Bucket(helpers.ObjectsBucketLSM) + if b == nil { + // we return no error, because we could have shards without the objects bucket + // the error is needed to satisfy the interface for lazy loaded shards possible errors + return 0, nil + } + + return int64(b.CountAsync()), nil +} + +func (s *Shard) ObjectStorageSize(ctx context.Context) (int64, error) { + bucket := s.store.Bucket(helpers.ObjectsBucketLSM) + if bucket == nil { + // we return no error, because we could have shards without the objects bucket + // the error is needed to satisfy the interface for lazy loaded shards possible errors + return 0, nil + } + + return bucket.DiskSize() + bucket.MetadataSize(), nil +} + +// VectorStorageSize calculates the total storage size of all vector indexes in the shard +// Always use the dimensions bucket for tracking total vectors and dimensions +// This ensures we get accurate counts regardless of cache size or shard state +// This method is only called for active tenants, so we can always use direct vector index compression. 
+func (s *Shard) VectorStorageSize(ctx context.Context) (int64, error) { + totalSize := int64(0) + + // Iterate over all vector indexes to calculate storage size for both default and targeted vectors + if err := s.ForEachVectorIndex(func(targetVector string, index VectorIndex) error { + // Get dimensions and object count from the dimensions bucket for this specific target vector + dimensionality := calcTargetVectorDimensionsFromStore(ctx, s.store, targetVector, func(dimLen int, v []lsmkv.MapPair) (int, int) { + return len(v), dimLen + }) + + if dimensionality.Count == 0 || dimensionality.Dimensions == 0 { + return nil + } + + // Calculate uncompressed size (float32 = 4 bytes per dimension) + uncompressedSize := int64(dimensionality.Count) * int64(dimensionality.Dimensions) * 4 + + // For active tenants, always use the direct vector index compression rate + compressionRate := index.CompressionStats().CompressionRatio(dimensionality.Dimensions) + + // Calculate total size using actual compression rate + totalSize += int64(float64(uncompressedSize) * compressionRate) + + return nil + }); err != nil { + return 0, err + } + + return totalSize, nil +} + +func (s *Shard) isFallbackToSearchable() bool { + return s.fallbackToSearchable +} + +func (s *Shard) tenant() string { + // TODO provide better impl + if s.index.partitioningEnabled { + return s.name + } + return "" +} + +func shardId(indexId, shardName string) string { + return fmt.Sprintf("%s_%s", indexId, shardName) +} + +func shardPath(indexPath, shardName string) string { + return path.Join(indexPath, shardName) +} + +func shardPathLSM(indexPath, shardName string) string { + return path.Join(indexPath, shardName, "lsm") +} + +func shardPathObjectsLSM(indexPath, shardName string) string { + return path.Join(shardPathLSM(indexPath, shardName), helpers.ObjectsBucketLSM) +} + +func shardPathDimensionsLSM(indexPath, shardName string) string { + return path.Join(shardPathLSM(indexPath, shardName), 
helpers.DimensionsBucketLSM) +} + +func bucketKeyPropertyLength(length int) ([]byte, error) { + return entinverted.LexicographicallySortableInt64(int64(length)) +} + +func bucketKeyPropertyNull(isNull bool) ([]byte, error) { + if isNull { + return []byte{uint8(filters.InternalNullState)}, nil + } + return []byte{uint8(filters.InternalNotNullState)}, nil +} + +// Activity score for read and write +func (s *Shard) Activity() (int32, int32) { + return s.activityTrackerRead.Load(), s.activityTrackerWrite.Load() +} + +func (s *Shard) registerAddToPropertyValueIndex(callback onAddToPropertyValueIndex) { + s.callbacksAddToPropertyValueIndex = append(s.callbacksAddToPropertyValueIndex, callback) +} + +func (s *Shard) registerDeleteFromPropertyValueIndex(callback onDeleteFromPropertyValueIndex) { + s.callbacksRemoveFromPropertyValueIndex = append(s.callbacksRemoveFromPropertyValueIndex, callback) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_accessors.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_accessors.go new file mode 100644 index 0000000000000000000000000000000000000000..9eb64454418b805e61137179bb0c9b72bb54c4ae --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_accessors.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Some standard accessors for the shard struct. +// It is important to NEVER access the shard struct directly, because we lazy load shards, so the information might not be there. 
+package db + +import ( + "github.com/weaviate/weaviate/adapters/repos/db/indexcounter" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/modelsext" + "github.com/weaviate/weaviate/entities/schema" +) + +// ForEachVectorIndex iterates through each vector index initialized in the shard (named and legacy). +// Iteration stops at the first return of non-nil error. +func (s *Shard) ForEachVectorIndex(f func(targetVector string, index VectorIndex) error) error { + // As we expect the mutex to be write-locked very rarely, we allow the callback + // to be invoked under the lock. If we find contention here, we should make a copy of the indexes + // before iterating over them. + s.vectorIndexMu.RLock() + defer s.vectorIndexMu.RUnlock() + + for targetVector, idx := range s.vectorIndexes { + if idx == nil { + continue + } + + if err := f(targetVector, idx); err != nil { + return err + } + } + if s.vectorIndex != nil { + if err := f("", s.vectorIndex); err != nil { + return err + } + } + return nil +} + +// ForEachVectorQueue iterates through each vector index queue initialized in the shard (named and legacy). +// Iteration stops at the first return of non-nil error. +func (s *Shard) ForEachVectorQueue(f func(targetVector string, queue *VectorIndexQueue) error) error { + // As we expect the mutex to be write-locked very rarely, we allow the callback + // to be invoked under the lock. If we find contention here, we should make a copy of the queues + // before iterating over them. + s.vectorIndexMu.RLock() + defer s.vectorIndexMu.RUnlock() + + for targetVector, q := range s.queues { + if q == nil { + continue + } + + if err := f(targetVector, q); err != nil { + return err + } + } + if s.queue != nil { + if err := f("", s.queue); err != nil { + return err + } + } + return nil +} + +// GetVectorIndexQueue retrieves a vector index queue associated with the targetVector. 
+// Empty targetVector is treated as a request to access a queue for the legacy vector index. +func (s *Shard) GetVectorIndexQueue(targetVector string) (*VectorIndexQueue, bool) { + s.vectorIndexMu.RLock() + defer s.vectorIndexMu.RUnlock() + + if s.isTargetVectorLegacyWithLock(targetVector) { + return s.queue, s.queue != nil + } + + queue, ok := s.queues[targetVector] + return queue, ok +} + +// GetVectorIndex retrieves a vector index queue associated with the targetVector. +// Empty targetVector is treated as a request to access a queue for the legacy vector index. +func (s *Shard) GetVectorIndex(targetVector string) (VectorIndex, bool) { + s.vectorIndexMu.RLock() + defer s.vectorIndexMu.RUnlock() + + if s.isTargetVectorLegacyWithLock(targetVector) { + return s.vectorIndex, s.vectorIndex != nil + } + + index, ok := s.vectorIndexes[targetVector] + return index, ok +} + +func (s *Shard) isTargetVectorLegacyWithLock(targetVector string) bool { + if targetVector == "" { + return true + } + + return s.vectorIndex != nil && targetVector == modelsext.DefaultNamedVectorName +} + +func (s *Shard) hasLegacyVectorIndex() bool { + _, ok := s.GetVectorIndex("") + return ok +} + +func (s *Shard) hasAnyVectorIndex() bool { + s.vectorIndexMu.RLock() + defer s.vectorIndexMu.RUnlock() + + return len(s.vectorIndexes) > 0 || s.vectorIndex != nil +} + +func (s *Shard) Versioner() *shardVersioner { + return s.versioner +} + +func (s *Shard) Index() *Index { + return s.index +} + +// Shard name(identifier?) +func (s *Shard) Name() string { + return s.name +} + +// The physical data store +func (s *Shard) Store() *lsmkv.Store { + return s.store +} + +func (s *Shard) Counter() *indexcounter.Counter { + return s.counter +} + +// Tracks the lengths of all properties. Must be updated on inserts/deletes. +func (s *Shard) GetPropertyLengthTracker() *inverted.JsonShardMetaData { + return s.propLenTracker +} + +// Tracks the lengths of all properties. Must be updated on inserts/deletes. 
+func (s *Shard) SetPropertyLengthTracker(tracker *inverted.JsonShardMetaData) { + s.propLenTracker = tracker +} + +// Grafana metrics +func (s *Shard) Metrics() *Metrics { + return s.metrics +} + +func (s *Shard) setFallbackToSearchable(fallback bool) { + s.fallbackToSearchable = fallback +} + +func (s *Shard) addJobToQueue(job job) { + s.centralJobQueue <- job +} + +func (s *Shard) hasGeoIndex() bool { + s.propertyIndicesLock.RLock() + defer s.propertyIndicesLock.RUnlock() + + for _, idx := range s.propertyIndices { + if idx.Type == schema.DataTypeGeoCoordinates { + return true + } + } + return false +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_accessors_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_accessors_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e27939a10b56d56034ba89a0f6165084cf138a02 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_accessors_test.go @@ -0,0 +1,170 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modelsext" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + "github.com/weaviate/weaviate/entities/vectorindex/flat" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func TestShared_GetVectorIndexAndQueue(t *testing.T) { + for _, tt := range []struct { + name string + setup func(idx *Index) + + wantLegacyExists bool + wantNamedExists bool + }{ + { + name: "only legacy initialized", + setup: func(idx *Index) { + idx.vectorIndexUserConfig = hnsw.NewDefaultUserConfig() + }, + wantLegacyExists: true, + wantNamedExists: false, + }, + { + name: "only named initialized", + setup: func(idx *Index) { + idx.vectorIndexUserConfig = nil + idx.vectorIndexUserConfigs = map[string]schemaConfig.VectorIndexConfig{ + "named": hnsw.NewDefaultUserConfig(), + "foo": flat.NewDefaultUserConfig(), + } + }, + wantLegacyExists: false, + wantNamedExists: true, + }, + { + name: "mixed initialized", + setup: func(idx *Index) { + idx.vectorIndexUserConfig = hnsw.NewDefaultUserConfig() + idx.vectorIndexUserConfigs = map[string]schemaConfig.VectorIndexConfig{ + "named": hnsw.NewDefaultUserConfig(), + "foo": flat.NewDefaultUserConfig(), + } + }, + wantLegacyExists: true, + wantNamedExists: true, + }, + } { + t.Run(tt.name, func(t *testing.T) { + s, _ := testShardWithSettings(t, testCtx(), &models.Class{Class: "test"}, hnsw.UserConfig{}, false, true, tt.setup) + + namedQueue, ok := s.GetVectorIndexQueue("named") + require.Equal(t, tt.wantNamedExists, ok) + + namedIndex, ok := s.GetVectorIndex("named") + require.Equal(t, tt.wantNamedExists, ok) + + if tt.wantNamedExists { + require.NotNil(t, namedQueue) + require.NotNil(t, namedIndex) + } + + legacyQueue, ok := s.GetVectorIndexQueue("") + require.Equal(t, tt.wantLegacyExists, ok) + + legacyIndex, ok := 
s.GetVectorIndex("") + require.Equal(t, tt.wantLegacyExists, ok) + + defaultQueue, ok := s.GetVectorIndex(modelsext.DefaultNamedVectorName) + require.Equal(t, tt.wantLegacyExists, ok) + + defaultIndex, ok := s.GetVectorIndex(modelsext.DefaultNamedVectorName) + require.Equal(t, tt.wantLegacyExists, ok) + + if tt.wantLegacyExists { + require.NotNil(t, legacyQueue) + require.NotNil(t, legacyIndex) + require.NotNil(t, defaultQueue) + require.NotNil(t, defaultIndex) + } + }) + } +} + +func TestShard_ForEachVectorIndexAndQueue(t *testing.T) { + for _, tt := range []struct { + name string + setConfigs func(idx *Index) + expectIndexes []string + }{ + { + name: "only legacy vector", + setConfigs: func(idx *Index) { + idx.vectorIndexUserConfig = hnsw.NewDefaultUserConfig() + }, + expectIndexes: []string{""}, + }, + { + name: "only named vector", + setConfigs: func(idx *Index) { + idx.vectorIndexUserConfig = nil + idx.vectorIndexUserConfigs = map[string]schemaConfig.VectorIndexConfig{ + "vector1": hnsw.NewDefaultUserConfig(), + "vector2": flat.NewDefaultUserConfig(), + } + }, + expectIndexes: []string{"vector1", "vector2"}, + }, + { + name: "mixed vectors", + setConfigs: func(idx *Index) { + idx.vectorIndexUserConfig = hnsw.NewDefaultUserConfig() + idx.vectorIndexUserConfigs = map[string]schemaConfig.VectorIndexConfig{ + "vector1": hnsw.NewDefaultUserConfig(), + "vector2": flat.NewDefaultUserConfig(), + } + }, + expectIndexes: []string{"", "vector1", "vector2"}, + }, + } { + t.Run(tt.name, func(t *testing.T) { + shard, _ := testShardWithSettings(t, testCtx(), &models.Class{Class: "TestClass"}, hnsw.NewDefaultUserConfig(), false, true, tt.setConfigs) + + capturedIndexes := make(map[string]any) + err := shard.ForEachVectorIndex(func(targetVector string, index VectorIndex) error { + require.NotNil(t, index) + capturedIndexes[targetVector] = index + return nil + }) + require.NoError(t, err) + + capturedQueues := make(map[string]any) + err = 
shard.ForEachVectorQueue(func(targetVector string, queue *VectorIndexQueue) error { + require.NotNil(t, queue) + capturedQueues[targetVector] = queue + return nil + }) + require.NoError(t, err) + + require.Len(t, capturedIndexes, len(tt.expectIndexes)) + for _, name := range tt.expectIndexes { + _, ok := capturedIndexes[name] + require.True(t, ok) + } + + require.Len(t, capturedQueues, len(tt.expectIndexes)) + for _, name := range tt.expectIndexes { + _, ok := capturedQueues[name] + require.True(t, ok) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_aggregate.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_aggregate.go new file mode 100644 index 0000000000000000000000000000000000000000..a1134db6840cd60d95bdf795aa23e02502d3256a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_aggregate.go @@ -0,0 +1,39 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// 
+// CONTACT: hello@weaviate.io
+// 
+
+package db
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/weaviate/weaviate/adapters/repos/db/aggregator"
+	"github.com/weaviate/weaviate/entities/aggregation"
+	"github.com/weaviate/weaviate/usecases/modules"
+)
+
+// Aggregate runs an aggregation query against this shard. A vector index for
+// params.TargetVector is resolved only when the params request a vector-based
+// search (NearObject/NearVector/Hybrid/SearchVector); otherwise a nil index
+// is handed to the aggregator. Returns an error if a vector search is
+// requested but no index exists for the target vector.
+func (s *Shard) Aggregate(ctx context.Context, params aggregation.Params, modules *modules.Provider) (*aggregation.Result, error) {
+	var vectorIndex VectorIndex
+
+	// we only need the index queue for vector search
+	if params.NearObject != nil || params.NearVector != nil || params.Hybrid != nil || params.SearchVector != nil {
+		idx, ok := s.GetVectorIndex(params.TargetVector)
+		if !ok {
+			return nil, fmt.Errorf("no vector index for target vector %q", params.TargetVector)
+		}
+		vectorIndex = idx
+	}
+
+	// Delegate the actual aggregation to the aggregator package; all shard
+	// state it needs (store, schema, stopwords, trackers, config) is passed
+	// explicitly here.
+	return aggregator.New(s.store, params, s.index.getSchema, s.index.classSearcher,
+		s.index.stopwords, s.versioner.Version(), vectorIndex, s.index.logger, s.GetPropertyLengthTracker(),
+		s.isFallbackToSearchable, s.tenant(), s.index.Config.QueryNestedRefLimit, s.bitmapFactory, modules, s.index.Config.QueryHybridMaximumResults).
+		Do(ctx)
+}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_async.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_async.go
new file mode 100644
index 0000000000000000000000000000000000000000..0ae4bdff76cc8846c646cedfa73c7f688e0a9e8a
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_async.go
@@ -0,0 +1,532 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "encoding/binary" + "time" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/visited" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/storobj" +) + +// ConvertQueue converts a legacy in-memory queue to an on-disk queue. +// It detects if the queue has a checkpoint then it enqueues all the +// remaining vectors to the on-disk queue, then deletes the checkpoint. +func (s *Shard) ConvertQueue(targetVector string) error { + if !asyncEnabled() { + return nil + } + + // load non-indexed vectors and add them to the queue + checkpoint, exists, err := s.indexCheckpoints.Get(s.ID(), targetVector) + if err != nil { + return errors.Wrap(err, "get last indexed id") + } + if !exists { + return nil + } + + err = s.FillQueue(targetVector, checkpoint) + if err != nil { + return errors.WithStack(err) + } + + // we can now safely remove the checkpoint + err = s.indexCheckpoints.Delete(s.ID(), targetVector) + if err != nil { + return errors.Wrap(err, "delete checkpoint") + } + + return nil +} + +// FillQueue is a helper function that enqueues all vectors from the +// LSM store to the on-disk queue. +func (s *Shard) FillQueue(targetVector string, from uint64) error { + if !asyncEnabled() { + return nil + } + + start := time.Now() + + var counter int + + vectorIndex, ok := s.GetVectorIndex(targetVector) + if !ok { + s.index.logger.WithField("targetVector", targetVector).Warn("preload queue: vector index not found") + // shard was never initialized, possibly because of a failed shard + // initialization. No op. 
+ return nil + } + + q, ok := s.GetVectorIndexQueue(targetVector) + if !ok { + s.index.logger.WithField("targetVector", targetVector).Warn("preload queue: queue not found") + // queue was never initialized, possibly because of a failed shard + // initialization. No op. + return nil + } + + ctx := context.Background() + + maxDocID := s.Counter().Get() + + var batch []common.VectorRecord + + if vectorIndex.Multivector() { + err := s.iterateOnLSMMultiVectors(ctx, from, targetVector, func(id uint64, vector [][]float32) error { + if vectorIndex.ContainsDoc(id) { + return nil + } + if len(vector) == 0 { + return nil + } + + rec := &common.Vector[[][]float32]{ + ID: id, + Vector: vector, + } + counter++ + + batch = append(batch, rec) + + if len(batch) < 1000 { + return nil + } + + err := q.Insert(ctx, batch...) + if err != nil { + return err + } + + batch = batch[:0] + return nil + }) + if err != nil { + return errors.Wrap(err, "iterate on LSM multi vectors") + } + } else { + err := s.iterateOnLSMVectors(ctx, from, targetVector, func(id uint64, vector []float32) error { + if vectorIndex.ContainsDoc(id) { + return nil + } + if len(vector) == 0 { + return nil + } + + rec := &common.Vector[[]float32]{ + ID: id, + Vector: vector, + } + counter++ + + batch = append(batch, rec) + + if len(batch) < 1000 { + return nil + } + + err := q.Insert(ctx, batch...) + if err != nil { + return err + } + + batch = batch[:0] + return nil + }) + if err != nil { + return errors.Wrap(err, "iterate on LSM vectors") + } + } + + if len(batch) > 0 { + err := q.Insert(ctx, batch...) + if err != nil { + return errors.Wrap(err, "insert batch") + } + } + + s.index.logger. + WithField("last_stored_id", maxDocID). + WithField("count", counter). + WithField("took", time.Since(start)). + WithField("shard_id", s.ID()). + WithField("target_vector", targetVector). 
+		Info("enqueued vectors from LSM store")
+
+	return nil
+}
+
+// iterateOnLSMVectors walks the LSM objects bucket starting at docID fromID
+// and invokes fn with each object's single vector for targetVector. An empty
+// targetVector selects the legacy (unnamed) vector; fn may receive a nil/empty
+// vector when the object has none.
+func (s *Shard) iterateOnLSMVectors(ctx context.Context, fromID uint64, targetVector string, fn func(id uint64, vector []float32) error) error {
+	properties := additional.Properties{
+		NoProps: true,
+		Vector:  true,
+	}
+	if targetVector != "" {
+		properties.Vectors = []string{targetVector}
+	}
+
+	return s.iterateOnLSMObjects(ctx, fromID, func(obj *storobj.Object) error {
+		var vector []float32
+		if targetVector == "" {
+			vector = obj.Vector
+		} else {
+			// guard against objects with no named vectors at all; a missing
+			// key in obj.Vectors leaves vector nil either way
+			if len(obj.Vectors) > 0 {
+				vector = obj.Vectors[targetVector]
+			}
+		}
+		return fn(obj.DocID, vector)
+	}, properties, nil)
+}
+
+// iterateOnLSMMultiVectors is the multi-vector counterpart of
+// iterateOnLSMVectors: it invokes fn with each object's multi-vector for
+// targetVector. Unlike the single-vector variant, targetVector is always
+// treated as a named vector here.
+func (s *Shard) iterateOnLSMMultiVectors(ctx context.Context, fromID uint64, targetVector string, fn func(id uint64, vector [][]float32) error) error {
+	properties := additional.Properties{
+		NoProps: true,
+		Vectors: []string{targetVector},
+	}
+
+	return s.iterateOnLSMObjects(ctx, fromID, func(obj *storobj.Object) error {
+		var vector [][]float32
+		if len(obj.MultiVectors) > 0 {
+			vector = obj.MultiVectors[targetVector]
+		}
+		return fn(obj.DocID, vector)
+	}, properties, nil)
+}
+
+// iterateOnLSMObjects scans docIDs in [fromID, s.Counter().Get()) and calls fn
+// for each object found via the bucket's secondary index 0 (little-endian
+// encoded docID). Gaps (nil lookups, e.g. deleted docIDs) are skipped
+// silently. The scan is cancelable via ctx; the first error from fn or the
+// store aborts the iteration.
+func (s *Shard) iterateOnLSMObjects(
+	ctx context.Context,
+	fromID uint64,
+	fn func(obj *storobj.Object) error,
+	addProps additional.Properties,
+	properties *storobj.PropertyExtraction,
+) error {
+	maxDocID := s.Counter().Get()
+	bucket := s.Store().Bucket(helpers.ObjectsBucketLSM)
+
+	// single 8-byte key buffer reused across iterations to avoid per-doc
+	// allocations
+	buf := make([]byte, 8)
+	for i := fromID; i < maxDocID; i++ {
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+
+		binary.LittleEndian.PutUint64(buf, i)
+
+		v, err := bucket.GetBySecondary(0, buf)
+		if err != nil {
+			// NOTE(review): message says "last indexed object" but this is a
+			// generic per-docID lookup — likely a copy-paste leftover; verify
+			// before relying on it in logs
+			return errors.Wrap(err, "get last indexed object")
+		}
+		if v == nil {
+			continue
+		}
+		obj, err := storobj.FromBinaryOptional(v, addProps, properties)
+		if err != nil {
+			return errors.Wrap(err, "unmarshal last indexed object")
+		}
+
+		err = fn(obj)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// RepairIndex ensures the vector 
index is consistent with the LSM store. +// It goes through the LSM store and enqueues any unindexed vector, and +// it also removes any indexed vector that is not in the LSM store. +// It it safe to call or interrupt this method at any time. +// If ASYNC_INDEXING is disabled, it's a no-op. +func (s *Shard) RepairIndex(ctx context.Context, targetVector string) error { + if !asyncEnabled() { + return nil + } + + start := time.Now() + className := s.index.Config.ClassName.String() + shardName := s.Name() + + vectorIndex, ok := s.GetVectorIndex(targetVector) + if !ok { + s.index.logger. + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + Warn("repair index: vector index not found") + // shard was never initialized, possibly because of a failed shard + // initialization. No op. + return nil + } + + // if it's HNSW, trigger a tombstone cleanup + if hnsw.IsHNSWIndex(vectorIndex) { + err := hnsw.AsHNSWIndex(vectorIndex).CleanUpTombstonedNodes(func() bool { + return ctx.Err() != nil + }) + if err != nil { + return errors.Wrap(err, "clean up tombstoned nodes") + } + } + + q, ok := s.GetVectorIndexQueue(targetVector) + if !ok { + s.index.logger. + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + Warn("repair index: queue not found") + // queue was never initialized, possibly because of a failed shard + // initialization. No op. + return nil + } + + maxDocID := s.Counter().Get() + visited := visited.NewList(int(maxDocID)) + + var added, deleted int + var batch []common.VectorRecord + + if vectorIndex.Multivector() { + // add non-indexed multi vectors to the queue + err := s.iterateOnLSMMultiVectors(ctx, 0, targetVector, func(docID uint64, vector [][]float32) error { + visited.Visit(docID) + + if vectorIndex.ContainsDoc(docID) { + s.index.logger. + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). 
+ WithField("docID", docID). + Info("repair index: skipping doc, already in index") + return nil + } + if len(vector) == 0 { + s.index.logger. + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + WithField("docID", docID). + WithField("vectorLen", 0). + Info("repair index: skipping doc, empty vector") + return nil + } + + rec := &common.Vector[[][]float32]{ + ID: docID, + Vector: vector, + } + added++ + + batch = append(batch, rec) + + dim := 0 + if len(vector[0]) > 0 { + dim = len(vector[0]) + } + s.index.logger. + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + WithField("docID", docID). + WithField("vectorLen", len(vector)). + WithField("vectorDim", dim). + WithField("currentBatchSize", len(batch)). + Info("repair index: added doc to batch") + + if len(batch) < 1000 { + return nil + } + + s.index.logger. + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + WithField("batchSize", len(batch)). + Info("repair index: inserting batch") + + err := q.Insert(ctx, batch...) + if err != nil { + s.index.logger. + WithError(err). + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + Warn("repair index: batch insert failed") + return err + } + + batch = batch[:0] + return nil + }) + if err != nil { + return errors.Wrap(err, "iterate on LSM multi vectors") + } + } else { + // add non-indexed vectors to the queue + err := s.iterateOnLSMVectors(ctx, 0, targetVector, func(docID uint64, vector []float32) error { + visited.Visit(docID) + + if vectorIndex.ContainsDoc(docID) { + s.index.logger. + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + WithField("docID", docID). 
+ Info("repair index: iterateOnLSMVectors: skipping doc, already in index") + return nil + } + if len(vector) == 0 { + s.index.logger. + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + WithField("docID", docID). + WithField("vectorLen", 0). + Info("repair index: iterateOnLSMVectors: skipping doc, empty vector") + return nil + } + + rec := &common.Vector[[]float32]{ + ID: docID, + Vector: vector, + } + added++ + + batch = append(batch, rec) + + s.index.logger. + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + WithField("docID", docID). + WithField("vectorLen", len(vector)). + WithField("vectorDim", len(vector)). + WithField("currentBatchSize", len(batch)). + Info("repair index: iterateOnLSMVectors: added doc to batch") + + if len(batch) < 1000 { + return nil + } + + s.index.logger. + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + WithField("batchSize", len(batch)). + Info("repair index: iterateOnLSMVectors: inserting batch") + + err := q.Insert(ctx, batch...) + if err != nil { + return err + } + + batch = batch[:0] + return nil + }) + if err != nil { + return errors.Wrap(err, "iterate on LSM vectors") + } + } + + if len(batch) > 0 { + s.index.logger. + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + WithField("batchSize", len(batch)). + Info("repair index: flushing final batch") + + err := q.Insert(ctx, batch...) + if err != nil { + return errors.Wrap(err, "insert batch") + } + } + + s.index.logger. + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + WithField("visited", visited.Len()). + WithField("added", added). + Info("repair index: completed LSM iteration") + + if visited.Len() > 0 && added == 0 { + s.index.logger. + WithField("class", className). 
+ WithField("shard", shardName). + WithField("targetVector", targetVector). + WithField("visited", visited.Len()). + Warn("repair index: visited documents but none added — all already indexed?") + } + // if no nodes were visited, it either means the LSM store is empty or + // there was an uncaught error during the iteration. + // in any case, we should not touch the index. + if visited.Len() == 0 { + s.index.logger. + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + Warn("repair index: empty LSM store") + return nil + } + + // remove any indexed vector that is not in the LSM store + vectorIndex.Iterate(func(docID uint64) bool { + if visited.Visited(docID) { + return true + } + + deleted++ + if vectorIndex.Multivector() { + if err := vectorIndex.(VectorIndexMulti).DeleteMulti(docID); err != nil { + s.index.logger. + WithError(err). + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + WithField("id", docID). + Warn("delete multi-vector from queue") + } + } else { + if err := vectorIndex.Delete(docID); err != nil { + s.index.logger. + WithError(err). + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + WithField("id", docID). + Warn("delete vector from queue") + } + } + + return true + }) + + s.index.logger. + WithField("class", className). + WithField("shard", shardName). + WithField("targetVector", targetVector). + WithField("added", added). + WithField("deleted", deleted). + WithField("took", time.Since(start)). 
+ Info("repaired vector index") + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_async_replication.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_async_replication.go new file mode 100644 index 0000000000000000000000000000000000000000..fd1ec6341c5e9ce2dc16d21aa75b93a8d04abb20 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_async_replication.go @@ -0,0 +1,1356 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + "fmt" + "math" + "os" + "path/filepath" + "runtime" + "slices" + "strconv" + "sync" + "time" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/diskio" + "github.com/weaviate/weaviate/entities/errorcompounder" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/interval" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/replica" + "github.com/weaviate/weaviate/usecases/replica/hashtree" +) + +const ( + defaultHashtreeHeight = 16 + defaultFrequency = 30 * time.Second + defaultFrequencyWhilePropagating = 3 * time.Second + defaultAliveNodesCheckingFrequency = 5 * time.Second + defaultLoggingFrequency = 60 * time.Second + defaultInitShieldCPUEveryN 
= 1_000 + defaultDiffBatchSize = 1_000 + defaultDiffPerNodeTimeout = 10 * time.Second + defaultPrePropagationTimeout = 300 * time.Second + defaultPropagationTimeout = 60 * time.Second + defaultPropagationLimit = 10_000 + defaultPropagationDelay = 30 * time.Second + defaultPropagationConcurrency = 5 + defaultPropagationBatchSize = 100 + + minHashtreeHeight = 0 + maxHashtreeHeight = 20 + + minInitShieldCPUEveryN = 0 + maxInitShieldCPUEveryN = math.MaxInt + + minDiffBatchSize = 1 + maxDiffBatchSize = 10_000 + + minPropagationLimit = 1 + maxPropagationLimit = 1_000_000 + + minPropgationConcurrency = 1 + maxPropagationConcurrency = 20 + + minPropagationBatchSize = 1 + maxPropagationBatchSize = 1_000 +) + +type asyncReplicationConfig struct { + hashtreeHeight int + frequency time.Duration + frequencyWhilePropagating time.Duration + aliveNodesCheckingFrequency time.Duration + loggingFrequency time.Duration + initShieldCPUEveryN int + diffBatchSize int + diffPerNodeTimeout time.Duration + prePropagationTimeout time.Duration + propagationTimeout time.Duration + propagationLimit int + propagationDelay time.Duration + propagationConcurrency int + propagationBatchSize int + targetNodeOverrides additional.AsyncReplicationTargetNodeOverrides + maintenanceModeEnabled func() bool +} + +func (s *Shard) getAsyncReplicationConfig() (config asyncReplicationConfig, err error) { + // preserve the target node overrides from the previous config + config.targetNodeOverrides = s.asyncReplicationConfig.targetNodeOverrides + + config.hashtreeHeight, err = optParseInt( + os.Getenv("ASYNC_REPLICATION_HASHTREE_HEIGHT"), defaultHashtreeHeight, minHashtreeHeight, maxHashtreeHeight) + if err != nil { + return asyncReplicationConfig{}, fmt.Errorf("%s: %w", "ASYNC_REPLICATION_HASHTREE_HEIGHT", err) + } + + config.frequency, err = optParseDuration(os.Getenv("ASYNC_REPLICATION_FREQUENCY"), defaultFrequency) + if err != nil { + return asyncReplicationConfig{}, fmt.Errorf("%s: %w", 
"ASYNC_REPLICATION_FREQUENCY", err) + } + + config.frequencyWhilePropagating, err = optParseDuration(os.Getenv("ASYNC_REPLICATION_FREQUENCY_WHILE_PROPAGATING"), defaultFrequencyWhilePropagating) + if err != nil { + return asyncReplicationConfig{}, fmt.Errorf("%s: %w", "ASYNC_REPLICATION_FREQUENCY_WHILE_PROPAGATING", err) + } + + config.aliveNodesCheckingFrequency, err = optParseDuration( + os.Getenv("ASYNC_REPLICATION_ALIVE_NODES_CHECKING_FREQUENCY"), defaultAliveNodesCheckingFrequency) + if err != nil { + return asyncReplicationConfig{}, fmt.Errorf("%s: %w", "ASYNC_REPLICATION_ALIVE_NODES_CHECKING_FREQUENCY", err) + } + + config.loggingFrequency, err = optParseDuration( + os.Getenv("ASYNC_REPLICATION_LOGGING_FREQUENCY"), defaultLoggingFrequency) + if err != nil { + return asyncReplicationConfig{}, fmt.Errorf("%s: %w", "ASYNC_REPLICATION_LOGGING_FREQUENCY", err) + } + + config.initShieldCPUEveryN, err = optParseInt( + os.Getenv("ASYNC_REPLICATION_INIT_SHIELD_CPU_EVERY_N"), defaultInitShieldCPUEveryN, minInitShieldCPUEveryN, maxInitShieldCPUEveryN) + if err != nil { + return asyncReplicationConfig{}, fmt.Errorf("%s: %w", "ASYNC_REPLICATION_INIT_SHIELD_CPU_EVERY_N", err) + } + + config.diffBatchSize, err = optParseInt( + os.Getenv("ASYNC_REPLICATION_DIFF_BATCH_SIZE"), defaultDiffBatchSize, minDiffBatchSize, maxDiffBatchSize) + if err != nil { + return asyncReplicationConfig{}, fmt.Errorf("%s: %w", "ASYNC_REPLICATION_DIFF_BATCH_SIZE", err) + } + + config.diffPerNodeTimeout, err = optParseDuration( + os.Getenv("ASYNC_REPLICATION_DIFF_PER_NODE_TIMEOUT"), defaultDiffPerNodeTimeout) + if err != nil { + return asyncReplicationConfig{}, fmt.Errorf("%s: %w", "ASYNC_REPLICATION_DIFF_PER_NODE_TIMEOUT", err) + } + + config.prePropagationTimeout, err = optParseDuration( + os.Getenv("ASYNC_REPLICATION_PRE_PROPAGATION_TIMEOUT"), defaultPrePropagationTimeout) + if err != nil { + return asyncReplicationConfig{}, fmt.Errorf("%s: %w", "ASYNC_REPLICATION_PRE_PROPAGATION_TIMEOUT", err) 
+	}
+
+	config.propagationTimeout, err = optParseDuration(
+		os.Getenv("ASYNC_REPLICATION_PROPAGATION_TIMEOUT"), defaultPropagationTimeout)
+	if err != nil {
+		return asyncReplicationConfig{}, fmt.Errorf("%s: %w", "ASYNC_REPLICATION_PROPAGATION_TIMEOUT", err)
+	}
+
+	config.propagationLimit, err = optParseInt(
+		os.Getenv("ASYNC_REPLICATION_PROPAGATION_LIMIT"), defaultPropagationLimit, minPropagationLimit, maxPropagationLimit)
+	if err != nil {
+		return asyncReplicationConfig{}, fmt.Errorf("%s: %w", "ASYNC_REPLICATION_PROPAGATION_LIMIT", err)
+	}
+
+	config.propagationDelay, err = optParseDuration(
+		os.Getenv("ASYNC_REPLICATION_PROPAGATION_DELAY"), defaultPropagationDelay)
+	if err != nil {
+		return asyncReplicationConfig{}, fmt.Errorf("%s: %w", "ASYNC_REPLICATION_PROPAGATION_DELAY", err)
+	}
+
+	config.propagationConcurrency, err = optParseInt(
+		os.Getenv("ASYNC_REPLICATION_PROPAGATION_CONCURRENCY"), defaultPropagationConcurrency, minPropgationConcurrency, maxPropagationConcurrency)
+	if err != nil {
+		return asyncReplicationConfig{}, fmt.Errorf("%s: %w", "ASYNC_REPLICATION_PROPAGATION_CONCURRENCY", err)
+	}
+
+	config.propagationBatchSize, err = optParseInt(
+		os.Getenv("ASYNC_REPLICATION_PROPAGATION_BATCH_SIZE"), defaultPropagationBatchSize, minPropagationBatchSize, maxPropagationBatchSize)
+	if err != nil {
+		return asyncReplicationConfig{}, fmt.Errorf("%s: %w", "ASYNC_REPLICATION_PROPAGATION_BATCH_SIZE", err)
+	}
+
+	config.maintenanceModeEnabled = s.index.Config.MaintenanceModeEnabled
+
+	return
+}
+
+// optParseInt parses s as a base-10 int, falling back to defaultVal when s is
+// empty, and enforces the inclusive range [minVal, maxVal]. Note the range
+// check applies to the default as well, so an out-of-range default is
+// rejected rather than silently accepted.
+func optParseInt(s string, defaultVal, minVal, maxVal int) (val int, err error) {
+	if s == "" {
+		val = defaultVal
+	} else {
+		val, err = strconv.Atoi(s)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	if val < minVal || val > maxVal {
+		return 0, fmt.Errorf("value %d out of range: min %d, max %d", val, minVal, maxVal)
+	}
+
+	return val, nil
+}
+
+// optParseDuration parses s with time.ParseDuration, falling back to
+// defaultDuration when s is empty. Unlike optParseInt there is no range
+// validation here.
+func optParseDuration(s string, defaultDuration time.Duration) (time.Duration, error) {
+	if s == "" {
+		return 
defaultDuration, nil + } + return time.ParseDuration(s) +} + +func (s *Shard) initAsyncReplication() error { + bucket := s.store.Bucket(helpers.ObjectsBucketLSM) + + if bucket.GetSecondaryIndices() < 2 { + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + Warn("secondary index for token ranges is not available") + return nil + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + s.asyncReplicationCancelFunc = cancelFunc + + config, err := s.getAsyncReplicationConfig() + if err != nil { + return err + } + s.asyncReplicationConfig = config + + start := time.Now() + + if err := os.MkdirAll(s.pathHashTree(), os.ModePerm); err != nil { + return err + } + + // load the most recent hashtree file + dirEntries, err := os.ReadDir(s.pathHashTree()) + if err != nil { + return err + } + + for i := len(dirEntries) - 1; i >= 0; i-- { + dirEntry := dirEntries[i] + + if dirEntry.IsDir() || filepath.Ext(dirEntry.Name()) != ".ht" { + continue + } + + hashtreeFilename := filepath.Join(s.pathHashTree(), dirEntry.Name()) + + if s.hashtree != nil { + err := os.Remove(hashtreeFilename) + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + Warnf("deleting older hashtree file %q: %v", hashtreeFilename, err) + continue + } + + f, err := os.OpenFile(hashtreeFilename, os.O_RDONLY, os.ModePerm) + if err != nil { + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + Warnf("reading hashtree file %q: %v", hashtreeFilename, err) + continue + } + + // attempt to load hashtree from file + s.hashtree, err = hashtree.DeserializeHashTree(bufio.NewReader(f)) + if err != nil { + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). 
+ Warnf("reading hashtree file %q: %v", hashtreeFilename, err) + } + + err = f.Close() + if err != nil { + return err + } + + err = os.Remove(hashtreeFilename) + if err != nil { + return err + } + + if err := diskio.Fsync(s.pathHashTree()); err != nil { + return fmt.Errorf("fsync hashtree directory %q: %w", s.pathHashTree(), err) + } + + if s.hashtree != nil && s.hashtree.Height() != config.hashtreeHeight { + // existing hashtree is erased if a different height was specified + s.hashtree = nil + } + } + + if s.hashtree != nil { + s.hashtreeFullyInitialized = true + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + WithField("took", fmt.Sprintf("%v", time.Since(start))). + Info("hashtree successfully initialized") + + s.initHashBeater(ctx, config) + return nil + } + + s.hashtree, err = hashtree.NewHashTree(config.hashtreeHeight) + if err != nil { + return err + } + + s.hashtreeFullyInitialized = false + s.minimalHashtreeInitializationCh = make(chan struct{}) + + enterrors.GoWrapper(func() { + for i := 0; ; i++ { + err := s.initHashtree(ctx, config, bucket) + if err == nil { + break + } + + if ctx.Err() != nil { + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + Info("hashtree initialization stopped") + return + } + + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + Errorf("hashtree initialization attempt %d failure: %v", i, err) + + // exponential backoff: min(2^i * 100ms, 5s) + backoff := min(time.Duration(1<= config.loggingFrequency { + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + WithField("object_count", objCount). + WithField("took", fmt.Sprintf("%v", time.Since(start))). 
+ Infof("hashtree initialization in progress...") + prevProgressLogging = time.Now() + } + + uuidBytes, err := parseBytesUUID(object.ID()) + if err != nil { + return err + } + + s.asyncReplicationRWMux.RLock() + defer s.asyncReplicationRWMux.RUnlock() + + err = s.mayUpsertObjectHashTree(object, uuidBytes, objectInsertStatus{}) + if err != nil { + return err + } + + objCount++ + + if config.initShieldCPUEveryN > 0 { + if objCount%config.initShieldCPUEveryN == 0 { + // yield the processor so other goroutines can run + runtime.Gosched() + time.Sleep(time.Millisecond) + } + } + + return nil + }) + if err != nil { + return fmt.Errorf("iterating objects: %w", err) + } + + s.asyncReplicationRWMux.Lock() + + if s.hashtree == nil { + s.asyncReplicationRWMux.Unlock() + + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + Info("hashtree initialization stopped") + return nil + } + + s.hashtreeFullyInitialized = true + + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + WithField("object_count", objCount). + WithField("took", fmt.Sprintf("%v", time.Since(start))). 
+ Info("hashtree successfully initialized") + + s.asyncReplicationRWMux.Unlock() + + s.initHashBeater(ctx, config) + + return nil +} + +func (s *Shard) waitForMinimalHashTreeInitialization(ctx context.Context) error { + if s.hashtree == nil || s.hashtreeFullyInitialized { + return nil + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-s.minimalHashtreeInitializationCh: + return nil + } +} + +func (s *Shard) mayStopAsyncReplication() { + s.asyncReplicationRWMux.Lock() + defer s.asyncReplicationRWMux.Unlock() + + if s.hashtree == nil { + return + } + + s.asyncReplicationCancelFunc() + + if s.hashtreeFullyInitialized { + // the hashtree needs to be fully in sync with stored data before it can be persisted + err := s.dumpHashTree() + if err != nil { + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + Errorf("store hashtree failed: %v", err) + } + } + + s.hashtree = nil + s.hashtreeFullyInitialized = false +} + +func (s *Shard) SetAsyncReplicationEnabled(_ context.Context, enabled bool) error { + s.asyncReplicationRWMux.Lock() + defer s.asyncReplicationRWMux.Unlock() + + if enabled { + if s.hashtree != nil { + return nil + } + + return s.initAsyncReplication() + } + + if s.hashtree == nil { + return nil + } + + s.asyncReplicationCancelFunc() + + s.hashtree = nil + s.asyncReplicationStatsByTargetNode = nil + s.hashtreeFullyInitialized = false + + return nil +} + +func (s *Shard) addTargetNodeOverride(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride) error { + func() { + s.asyncReplicationRWMux.Lock() + // unlock before calling SetAsyncReplicationEnabled because it will lock again + defer s.asyncReplicationRWMux.Unlock() + + for i, existing := range s.asyncReplicationConfig.targetNodeOverrides { + if existing.Equal(&targetNodeOverride) { + // if the collection/shard/source/target already exists, use the max + // upper time bound 
between the existing/new override + maxUpperTimeBound := existing.UpperTimeBound + if targetNodeOverride.UpperTimeBound > maxUpperTimeBound { + maxUpperTimeBound = targetNodeOverride.UpperTimeBound + s.asyncReplicationConfig.targetNodeOverrides[i].UpperTimeBound = maxUpperTimeBound + } + return + } + } + + if s.asyncReplicationConfig.targetNodeOverrides == nil { + s.asyncReplicationConfig.targetNodeOverrides = make(additional.AsyncReplicationTargetNodeOverrides, 0, 1) + } + s.asyncReplicationConfig.targetNodeOverrides = append(s.asyncReplicationConfig.targetNodeOverrides, targetNodeOverride) + }() + // we call update async replication config here to ensure that async replication starts + // if it's not already running + return s.SetAsyncReplicationEnabled(ctx, true) +} + +func (s *Shard) removeTargetNodeOverride(ctx context.Context, targetNodeOverrideToRemove additional.AsyncReplicationTargetNodeOverride) error { + targetNodeOverrideLen := 0 + func() { + s.asyncReplicationRWMux.Lock() + // unlock before calling SetAsyncReplicationEnabled because it will lock again + defer s.asyncReplicationRWMux.Unlock() + + newTargetNodeOverrides := make(additional.AsyncReplicationTargetNodeOverrides, 0, len(s.asyncReplicationConfig.targetNodeOverrides)) + for _, existing := range s.asyncReplicationConfig.targetNodeOverrides { + // only remove the existing override if the collection/shard/source/target match and the + // existing upper time bound is <= to the override being removed (eg if the override to remove + // is "before" the existing override, don't remove it) + if existing.Equal(&targetNodeOverrideToRemove) && existing.UpperTimeBound <= targetNodeOverrideToRemove.UpperTimeBound { + delete(s.asyncReplicationStatsByTargetNode, existing.TargetNode) + continue + } + newTargetNodeOverrides = append(newTargetNodeOverrides, existing) + } + s.asyncReplicationConfig.targetNodeOverrides = newTargetNodeOverrides + + targetNodeOverrideLen = 
len(s.asyncReplicationConfig.targetNodeOverrides) + }() + // if there are no overrides left, return the async replication config to what it + // was before overrides were added + if targetNodeOverrideLen == 0 { + return s.SetAsyncReplicationEnabled(ctx, s.index.Config.AsyncReplicationEnabled) + } + return nil +} + +func (s *Shard) removeAllTargetNodeOverrides(ctx context.Context) error { + func() { + s.asyncReplicationRWMux.Lock() + // unlock before calling SetAsyncReplicationEnabled because it will lock again + defer s.asyncReplicationRWMux.Unlock() + s.asyncReplicationConfig.targetNodeOverrides = make(additional.AsyncReplicationTargetNodeOverrides, 0) + }() + return s.SetAsyncReplicationEnabled(ctx, s.index.Config.AsyncReplicationEnabled) +} + +func (s *Shard) getAsyncReplicationStats(ctx context.Context) []*models.AsyncReplicationStatus { + s.asyncReplicationRWMux.RLock() + defer s.asyncReplicationRWMux.RUnlock() + + asyncReplicationStatsToReturn := make([]*models.AsyncReplicationStatus, 0, len(s.asyncReplicationStatsByTargetNode)) + for targetNodeName, asyncReplicationStats := range s.asyncReplicationStatsByTargetNode { + asyncReplicationStatsToReturn = append(asyncReplicationStatsToReturn, &models.AsyncReplicationStatus{ + ObjectsPropagated: uint64(asyncReplicationStats.objectsPropagated) - uint64(asyncReplicationStats.objectsNotResolved), + StartDiffTimeUnixMillis: asyncReplicationStats.diffStartTime.UnixMilli(), + TargetNode: targetNodeName, + }) + } + + return asyncReplicationStatsToReturn +} + +func (s *Shard) dumpHashTree() error { + var b [8]byte + binary.BigEndian.PutUint64(b[:], uint64(time.Now().UnixNano())) + + hashtreeFilename := filepath.Join(s.pathHashTree(), fmt.Sprintf("hashtree-%x.ht", string(b[:]))) + + f, err := os.OpenFile(hashtreeFilename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, os.ModePerm) + if err != nil { + return fmt.Errorf("storing hashtree %q: %w", hashtreeFilename, err) + } + + w := bufio.NewWriter(f) + + _, err = 
s.hashtree.Serialize(w) + if err != nil { + return fmt.Errorf("storing hashtree %q: %w", hashtreeFilename, err) + } + + err = w.Flush() + if err != nil { + return fmt.Errorf("storing hashtree %q: %w", hashtreeFilename, err) + } + + err = f.Sync() + if err != nil { + return fmt.Errorf("storing hashtree %q: %w", hashtreeFilename, err) + } + + err = f.Close() + if err != nil { + return fmt.Errorf("closing hashtree %q: %w", hashtreeFilename, err) + } + + if err := diskio.Fsync(s.pathHashTree()); err != nil { + return fmt.Errorf("fsync hashtree directory %q: %w", s.pathHashTree(), err) + } + + return nil +} + +func (s *Shard) HashTreeLevel(ctx context.Context, level int, discriminant *hashtree.Bitset) (digests []hashtree.Digest, err error) { + s.asyncReplicationRWMux.RLock() + defer s.asyncReplicationRWMux.RUnlock() + + if !s.hashtreeFullyInitialized { + return nil, fmt.Errorf("hashtree not initialized on shard %q", s.ID()) + } + + // TODO (jeroiraz): reusable pool of digests slices + digests = make([]hashtree.Digest, hashtree.LeavesCount(level+1)) + + n, err := s.hashtree.Level(level, discriminant, digests) + if err != nil { + return nil, err + } + + return digests[:n], nil +} + +func (s *Shard) initHashBeater(ctx context.Context, config asyncReplicationConfig) { + propagationRequired := make(chan struct{}) + + var lastHashbeat time.Time + var lastHashbeatPropagatedObjects bool + var lastHashbeatMux sync.Mutex + + enterrors.GoWrapper(func() { + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + Info("hashbeater started...") + + defer func() { + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). 
+ Info("hashbeater stopped") + }() + + var lastLog time.Time + + backoffTimer := interval.NewBackoffTimer(1*time.Second, 3*time.Second, 5*time.Second) + + for { + select { + case <-ctx.Done(): + return + case <-propagationRequired: + // Reload target node overrides + func() { + s.asyncReplicationRWMux.Lock() + defer s.asyncReplicationRWMux.Unlock() + config.targetNodeOverrides = s.asyncReplicationConfig.targetNodeOverrides + }() + + if (!s.index.asyncReplicationEnabled() && len(config.targetNodeOverrides) == 0) || + (config.maintenanceModeEnabled != nil && config.maintenanceModeEnabled()) { + // skip hashbeat iteration when async replication is disabled and no target node overrides are set + // or maintenance mode is enabled for localhost + if config.maintenanceModeEnabled != nil && config.maintenanceModeEnabled() { + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + Info("skipping async replication in maintenance mode") + } + backoffTimer.Reset() + lastHashbeatMux.Lock() + lastHashbeat = time.Now() + lastHashbeatPropagatedObjects = false + lastHashbeatMux.Unlock() + continue + } + + stats, err := s.hashBeat(ctx, config) + // update the shard stats for the target node + // anonymous func only here so we can use defer unlock + func() { + s.asyncReplicationRWMux.Lock() + defer s.asyncReplicationRWMux.Unlock() + + if s.asyncReplicationStatsByTargetNode == nil { + s.asyncReplicationStatsByTargetNode = make(map[string]*hashBeatHostStats) + } + if (err == nil || errors.Is(err, replica.ErrNoDiffFound)) && stats != nil { + for _, stat := range stats { + if stat != nil { + s.index.logger.WithFields(logrus.Fields{ + "source_shard": s.name, + "target_shard": s.name, + "target_node": stat.targetNodeName, + "objects_propagated": stat.objectsPropagated, + "start_diff_time_unix_millis": stat.diffStartTime.UnixMilli(), + "diff_calculation_took": stat.diffCalculationTook.String(), + 
"local_objects": stat.localObjects, + "remote_objects": stat.remoteObjects, + "object_progation_took": stat.objectProgationTook.String(), + }).Info("updating async replication stats") + s.asyncReplicationStatsByTargetNode[stat.targetNodeName] = stat + } + } + } + }() + if err != nil { + if ctx.Err() != nil { + return + } + + if errors.Is(err, replica.ErrNoDiffFound) { + if time.Since(lastLog) >= config.loggingFrequency { + lastLog = time.Now() + + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + WithField("hosts", s.getLastComparedHosts()). + Debug("hashbeat iteration successfully completed: no differences were found") + } + + backoffTimer.Reset() + lastHashbeatMux.Lock() + lastHashbeat = time.Now() + lastHashbeatPropagatedObjects = false + lastHashbeatMux.Unlock() + continue + } + + if time.Since(lastLog) >= config.loggingFrequency { + lastLog = time.Now() + + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + Warnf("hashbeat iteration failed: %v", err) + } + + time.Sleep(backoffTimer.CurrentInterval()) + backoffTimer.IncreaseInterval() + lastHashbeatMux.Lock() + lastHashbeat = time.Now() + lastHashbeatPropagatedObjects = false + lastHashbeatMux.Unlock() + continue + } + + statsHaveObjectsPropagated := false + if time.Since(lastLog) >= config.loggingFrequency { + lastLog = time.Now() + + for _, stat := range stats { + s.index.logger. + WithField("action", "async_replication"). + WithField("class_name", s.class.Class). + WithField("shard_name", s.name). + WithField("target_node_name", stat.targetNodeName). + WithField("diff_calculation_took", stat.diffCalculationTook.String()). + WithField("local_objects", stat.localObjects). + WithField("remote_objects", stat.remoteObjects). + WithField("objects_propagated", stat.objectsPropagated). 
+ WithField("object_progation_took", stat.objectProgationTook.String()). + Info("hashbeat iteration successfully completed") + if stat.objectsPropagated > 0 { + statsHaveObjectsPropagated = true + } + } + } + + backoffTimer.Reset() + lastHashbeatMux.Lock() + lastHashbeat = time.Now() + lastHashbeatPropagatedObjects = statsHaveObjectsPropagated + lastHashbeatMux.Unlock() + } + } + }, s.index.logger) + + enterrors.GoWrapper(func() { + nt := time.NewTicker(config.aliveNodesCheckingFrequency) + defer nt.Stop() + + ft := time.NewTicker(min(config.frequencyWhilePropagating, config.frequency)) + defer ft.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-nt.C: + comparedHosts := s.getLastComparedHosts() + aliveHosts := s.allAliveHostnames() + + slices.Sort(comparedHosts) + slices.Sort(aliveHosts) + + if !slices.Equal(comparedHosts, aliveHosts) { + propagationRequired <- struct{}{} + s.setLastComparedNodes(aliveHosts) + } + case <-ft.C: + var shouldHashbeat bool + lastHashbeatMux.Lock() + shouldHashbeat = (lastHashbeatPropagatedObjects && time.Since(lastHashbeat) >= config.frequencyWhilePropagating) || + time.Since(lastHashbeat) >= config.frequency + lastHashbeatMux.Unlock() + + if shouldHashbeat { + propagationRequired <- struct{}{} + } + } + } + }, s.index.logger) +} + +func (s *Shard) setLastComparedNodes(hosts []string) { + s.lastComparedHostsMux.Lock() + defer s.lastComparedHostsMux.Unlock() + + s.lastComparedHosts = hosts +} + +func (s *Shard) getLastComparedHosts() []string { + s.lastComparedHostsMux.RLock() + defer s.lastComparedHostsMux.RUnlock() + + return s.lastComparedHosts +} + +func (s *Shard) allAliveHostnames() []string { + return s.index.replicator.AllHostnames() +} + +type hashBeatHostStats struct { + targetNodeName string + diffStartTime time.Time + diffCalculationTook time.Duration + localObjects int + remoteObjects int + objectsPropagated int + objectProgationTook time.Duration + objectsNotResolved int +} + +func (s *Shard) hashBeat(ctx 
context.Context, config asyncReplicationConfig) ([]*hashBeatHostStats, error) { + var ht hashtree.AggregatedHashTree + + s.asyncReplicationRWMux.RLock() + if s.hashtree == nil { + s.asyncReplicationRWMux.RUnlock() + // handling the case of a hashtree being explicitly set to nil + return nil, fmt.Errorf("hashtree not initialized on shard %q", s.ID()) + } + ht = s.hashtree + s.asyncReplicationRWMux.RUnlock() + + diffCalculationStart := time.Now() + + shardDiffReader, err := s.index.replicator.CollectShardDifferences(ctx, s.name, ht, config.diffPerNodeTimeout, config.targetNodeOverrides) + if err != nil { + if errors.Is(err, replica.ErrNoDiffFound) && len(config.targetNodeOverrides) > 0 { + stats := make([]*hashBeatHostStats, 0, len(config.targetNodeOverrides)) + for _, o := range config.targetNodeOverrides { + stats = append(stats, &hashBeatHostStats{ + targetNodeName: o.TargetNode, + diffStartTime: diffCalculationStart, + objectsPropagated: 0, + }) + } + return stats, err + } + return nil, fmt.Errorf("collecting differences: %w", err) + } + + diffCalculationTook := time.Since(diffCalculationStart) + + rangeReader := shardDiffReader.RangeReader + + objectProgationStart := time.Now() + + localObjectsCount := 0 + remoteObjectsCount := 0 + + objectsToPropagate := make([]strfmt.UUID, 0, config.propagationLimit) + localUpdateTimeByUUID := make(map[strfmt.UUID]int64, config.propagationLimit) + remoteStaleUpdateTimeByUUID := make(map[strfmt.UUID]int64, config.propagationLimit) + + prepropagationCtx, cancel := context.WithTimeout(ctx, config.prePropagationTimeout) + defer cancel() + + for len(objectsToPropagate) < config.propagationLimit { + initialLeaf, finalLeaf, err := rangeReader.Next() + if err != nil { + if errors.Is(err, hashtree.ErrNoMoreRanges) { + break + } + return nil, fmt.Errorf("reading collected differences: %w", err) + } + + localObjsCountWithinRange, remoteObjsCountWithinRange, objsToPropagateWithinRange, err := s.objectsToPropagateWithinRange( + 
prepropagationCtx, + config, + shardDiffReader.TargetNodeAddress, + shardDiffReader.TargetNodeName, + initialLeaf, + finalLeaf, + config.propagationLimit-len(objectsToPropagate), + ) + if err != nil { + if prepropagationCtx.Err() != nil { + // it may be the case that just pre propagation timeout was reached + // and some objects could be propagated + break + } + + return nil, fmt.Errorf("collecting local objects to be propagated: %w", err) + } + + localObjectsCount += localObjsCountWithinRange + remoteObjectsCount += remoteObjsCountWithinRange + + for _, obj := range objsToPropagateWithinRange { + objectsToPropagate = append(objectsToPropagate, obj.uuid) + localUpdateTimeByUUID[obj.uuid] = obj.lastUpdateTime + remoteStaleUpdateTimeByUUID[obj.uuid] = obj.remoteStaleUpdateTime + } + } + + objectsNotResolved := 0 + if len(objectsToPropagate) > 0 { + propagationCtx, cancel := context.WithTimeout(ctx, config.propagationTimeout) + defer cancel() + + resp, err := s.propagateObjects(propagationCtx, config, shardDiffReader.TargetNodeAddress, objectsToPropagate, remoteStaleUpdateTimeByUUID) + if err != nil { + return nil, fmt.Errorf("propagating local objects: %w", err) + } + + for _, r := range resp { + // NOTE: deleted objects are not propagated but locally deleted when conflict is detected + + deletionStrategy := s.index.DeletionStrategy() + + if !r.Deleted || + deletionStrategy == models.ReplicationConfigDeletionStrategyNoAutomatedResolution || + config.targetNodeOverrides.NoDeletionResolution(shardDiffReader.TargetNodeName) { + objectsNotResolved++ + continue + } + + if deletionStrategy == models.ReplicationConfigDeletionStrategyDeleteOnConflict || + (deletionStrategy == models.ReplicationConfigDeletionStrategyTimeBasedResolution && + r.UpdateTime > localUpdateTimeByUUID[strfmt.UUID(r.ID)]) { + + err := s.DeleteObject(propagationCtx, strfmt.UUID(r.ID), time.UnixMilli(r.UpdateTime)) + if err != nil { + return nil, fmt.Errorf("deleting local objects: %w", err) + } + } + } 
+ } + + return []*hashBeatHostStats{ + { + targetNodeName: shardDiffReader.TargetNodeName, + diffStartTime: diffCalculationStart, + diffCalculationTook: diffCalculationTook, + localObjects: localObjectsCount, + remoteObjects: remoteObjectsCount, + objectsPropagated: len(objectsToPropagate), + objectProgationTook: time.Since(objectProgationStart), + objectsNotResolved: objectsNotResolved, + }, + }, nil +} + +func uuidFromBytes(uuidBytes []byte) (id strfmt.UUID, err error) { + uuidParsed, err := uuid.FromBytes(uuidBytes) + if err != nil { + return id, err + } + return strfmt.UUID(uuidParsed.String()), nil +} + +func bytesFromUUID(id strfmt.UUID) (uuidBytes []byte, err error) { + uuidParsed, err := uuid.Parse(id.String()) + if err != nil { + return nil, err + } + return uuidParsed.MarshalBinary() +} + +func incToNextLexValue(b []byte) bool { + for i := len(b) - 1; i >= 0; i-- { + if b[i] < 0xFF { + b[i]++ + return false + } + b[i] = 0x00 + } + return true +} + +type objectToPropagate struct { + uuid strfmt.UUID + lastUpdateTime int64 + remoteStaleUpdateTime int64 +} + +func (s *Shard) objectsToPropagateWithinRange(ctx context.Context, config asyncReplicationConfig, + targetNodeAddress, targetNodeName string, initialLeaf, finalLeaf uint64, limit int, +) (localObjectsCount int, remoteObjectsCount int, objectsToPropagate []objectToPropagate, err error) { + objectsToPropagate = make([]objectToPropagate, 0, limit) + + hashtreeHeight := config.hashtreeHeight + + finalUUIDBytes := make([]byte, 16) + binary.BigEndian.PutUint64(finalUUIDBytes, finalLeaf<<(64-hashtreeHeight)|((1<<(64-hashtreeHeight))-1)) + copy(finalUUIDBytes[8:], []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}) + + finalUUID, err := uuidFromBytes(finalUUIDBytes) + if err != nil { + return localObjectsCount, remoteObjectsCount, objectsToPropagate, err + } + + currLocalUUIDBytes := make([]byte, 16) + binary.BigEndian.PutUint64(currLocalUUIDBytes, initialLeaf<<(64-hashtreeHeight)) + + for limit > 0 && 
+	// iteration should stop when all local digests within the range have been read
+					// older object is subject to be overwritten
+	// Note: propagations == 0 means the local shard is lagging behind the remote shard;
+	// the local shard may receive recent objects when the remote shard propagates them
uuidBatch := range batchCh { + localObjs, err := s.MultiObjectByID(ctx, wrapIDsInMulti(uuidBatch)) + if err != nil { + resultCh <- workerResponse{ + err: fmt.Errorf("fetching local objects: %w", err), + } + wg.Done() + continue + } + + batch := make([]*objects.VObject, 0, len(localObjs)) + + for _, obj := range localObjs { + if obj == nil { + // local object was deleted meanwhile + continue + } + + var vectors map[string][]float32 + var multiVectors map[string][][]float32 + + if obj.Vectors != nil { + vectors = make(map[string][]float32, len(obj.Vectors)) + for targetVector, v := range obj.Vectors { + vectors[targetVector] = v + } + } + if obj.MultiVectors != nil { + multiVectors = make(map[string][][]float32, len(obj.MultiVectors)) + for targetVector, v := range obj.MultiVectors { + multiVectors[targetVector] = v + } + } + + obj := &objects.VObject{ + ID: obj.ID(), + LastUpdateTimeUnixMilli: obj.LastUpdateTimeUnix(), + LatestObject: &obj.Object, + Vector: obj.Vector, + Vectors: vectors, + MultiVectors: multiVectors, + StaleUpdateTime: remoteStaleUpdateTime[obj.ID()], + } + + batch = append(batch, obj) + } + + if len(batch) > 0 { + resp, err := s.index.replicator.Overwrite(ctx, host, s.class.Class, s.name, batch) + + resultCh <- workerResponse{ + resp: resp, + err: err, + } + } + + wg.Done() + } + }, s.index.logger) + } + + for i := 0; i < len(objectsToPropagate); { + actualBatchSize := config.propagationBatchSize + if i+actualBatchSize > len(objectsToPropagate) { + actualBatchSize = len(objectsToPropagate) - i + } + + wg.Add(1) + batchCh <- objectsToPropagate[i : i+actualBatchSize] + + i += actualBatchSize + } + + enterrors.GoWrapper(func() { + wg.Wait() + close(batchCh) + close(resultCh) + }, s.index.logger) + + ec := errorcompounder.New() + + for r := range resultCh { + if r.err != nil { + ec.Add(err) + continue + } + + res = append(res, r.resp...) 
+ } + + if len(res) > 0 { + return res, nil + } + + return nil, ec.ToError() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_autoresume_maintenance_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_autoresume_maintenance_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ce3db56343f370fdec043473000c9f3996eebc38 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_autoresume_maintenance_test.go @@ -0,0 +1,264 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "fmt" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/backup" +) + +func TestShard_IllegalStateForTransfer(t *testing.T) { + ctx := testCtx() + className := "TestClass" + shd, idx := testShard(t, ctx, className) + + amount := 10 + + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(shd.Index().Config.RootPath) + + t.Run("insert data into shard", func(t *testing.T) { + for range amount { + obj := testObject(className) + + err := shd.PutObject(ctx, obj) + require.Nil(t, err) + } + + objs, err := shd.ObjectList(ctx, amount, nil, nil, additional.Properties{}, shd.Index().Config.ClassName) + require.Nil(t, err) + require.Equal(t, amount, len(objs)) + }) + + t.Run("attempt to list backup files without halting for transfer should fail", func(t *testing.T) { + err := shd.ListBackupFiles(ctx, &backup.ShardDescriptor{}) + require.ErrorContains(t, err, "not paused for transfer") + }) + + t.Run("attempt to get file metadata without halting for transfer should fail", 
func(t *testing.T) { + _, err := shd.GetFileMetadata(ctx, "any.db") + require.ErrorContains(t, err, "not paused for transfer") + }) + + t.Run("attempt to get file content without halting for transfer should fail", func(t *testing.T) { + _, err := shd.GetFile(ctx, "any.db") + require.ErrorContains(t, err, "not paused for transfer") + }) + + t.Run("halt for transfer", func(t *testing.T) { + inactivityTimeout := 100 * time.Millisecond + + err := shd.HaltForTransfer(ctx, false, inactivityTimeout) + require.NoError(t, err) + }) + + t.Run("resume maintenance tasks", func(t *testing.T) { + err := shd.resumeMaintenanceCycles(ctx) + require.NoError(t, err) + }) + + t.Run("attempt to list backup files after explicitly resuming maintenance tasks should fail", func(t *testing.T) { + err := shd.ListBackupFiles(ctx, &backup.ShardDescriptor{}) + require.ErrorContains(t, err, "not paused for transfer") + }) + + t.Run("attempt to get file metadata after explicitly resuming maintenance tasks should fail", func(t *testing.T) { + _, err := shd.GetFileMetadata(ctx, "any.db") + require.ErrorContains(t, err, "not paused for transfer") + }) + + t.Run("attempt to get file content after explicitly resuming maintenance tasks should fail", func(t *testing.T) { + _, err := shd.GetFile(ctx, "any.db") + require.ErrorContains(t, err, "not paused for transfer") + }) + + t.Run("halt for transfer and wait for inactivity timeout", func(t *testing.T) { + inactivityTimeout := 100 * time.Millisecond + + err := shd.HaltForTransfer(ctx, false, inactivityTimeout) + require.NoError(t, err) + + time.Sleep(inactivityTimeout * 2) + }) + + t.Run("attempt to list backup files after inactivity time should fail", func(t *testing.T) { + err := shd.ListBackupFiles(ctx, &backup.ShardDescriptor{}) + require.ErrorContains(t, err, "not paused for transfer") + }) + + t.Run("attempt to get file metadata after inactivity time should fail", func(t *testing.T) { + _, err := shd.GetFileMetadata(ctx, "any.db") + 
require.ErrorContains(t, err, "not paused for transfer") + }) + + t.Run("attempt to get file content after inactivity time should fail", func(t *testing.T) { + _, err := shd.GetFile(ctx, "any.db") + require.ErrorContains(t, err, "not paused for transfer") + }) + + require.Nil(t, idx.drop()) + require.Nil(t, os.RemoveAll(idx.Config.RootPath)) +} + +func TestShard_HaltingBeforeTransfer(t *testing.T) { + ctx := testCtx() + className := "TestClass" + shd, idx := testShard(t, ctx, className) + + amount := 10 + + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(shd.Index().Config.RootPath) + + t.Run("insert data into shard", func(t *testing.T) { + for range amount { + obj := testObject(className) + + err := shd.PutObject(ctx, obj) + require.Nil(t, err) + } + + objs, err := shd.ObjectList(ctx, amount, nil, nil, additional.Properties{}, shd.Index().Config.ClassName) + require.Nil(t, err) + require.Equal(t, amount, len(objs)) + }) + + t.Run("halt for transfer", func(t *testing.T) { + inactivityTimeout := 100 * time.Millisecond + + err := shd.HaltForTransfer(ctx, false, inactivityTimeout) + require.NoError(t, err) + }) + + backupDescriptor := &backup.ShardDescriptor{} + + t.Run("attempt to list backup files should succeed", func(t *testing.T) { + err := shd.ListBackupFiles(ctx, backupDescriptor) + require.NoError(t, err) + }) + + t.Run("attempt to get file metadata should succeed", func(t *testing.T) { + _, err := shd.GetFileMetadata(ctx, backupDescriptor.Files[0]) + require.NoError(t, err) + }) + + t.Run("attempt to get file content should succeed", func(t *testing.T) { + _, err := shd.GetFile(ctx, backupDescriptor.Files[0]) + require.NoError(t, err) + }) + + t.Run("resume maintenance tasks", func(t *testing.T) { + err := shd.resumeMaintenanceCycles(ctx) + require.NoError(t, err) + }) + + require.Nil(t, idx.drop()) + require.Nil(t, os.RemoveAll(idx.Config.RootPath)) +} + +func TestShard_ConcurrentTransfers(t *testing.T) { + 
ctx := testCtx() + className := "TestClass" + shd, idx := testShard(t, ctx, className) + + amount := 10 + + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(shd.Index().Config.RootPath) + + t.Run("insert data into shard", func(t *testing.T) { + for range amount { + obj := testObject(className) + + err := shd.PutObject(ctx, obj) + require.Nil(t, err) + } + + objs, err := shd.ObjectList(ctx, amount, nil, nil, additional.Properties{}, shd.Index().Config.ClassName) + require.Nil(t, err) + require.Equal(t, amount, len(objs)) + }) + + t.Run("halt for transfer", func(t *testing.T) { + inactivityTimeout := 100 * time.Millisecond + + err := shd.HaltForTransfer(ctx, false, inactivityTimeout) + require.NoError(t, err) + }) + + t.Run("attempt to list backup files should succeed", func(t *testing.T) { + err := shd.ListBackupFiles(ctx, &backup.ShardDescriptor{}) + require.NoError(t, err) + }) + + t.Run("attempt to insert objects with paused maintenance tasks should succeed", func(t *testing.T) { + obj := testObject(className) + + err := shd.PutObject(ctx, obj) + require.NoError(t, err) + }) + + t.Run("halt for transfer with already paused maintenance tasks should succed", func(t *testing.T) { + inactivityTimeout := 150 * time.Millisecond + + err := shd.HaltForTransfer(ctx, false, inactivityTimeout) + require.NoError(t, err) + }) + + t.Run("attempt to list backup files for a second time should succeed", func(t *testing.T) { + err := shd.ListBackupFiles(ctx, &backup.ShardDescriptor{}) + require.NoError(t, err) + }) + + t.Run("resume maintenance tasks", func(t *testing.T) { + err := shd.resumeMaintenanceCycles(ctx) + require.NoError(t, err) + }) + + t.Run("attempt to list backup files with one halt request still active should succeed", func(t *testing.T) { + err := shd.ListBackupFiles(ctx, &backup.ShardDescriptor{}) + require.NoError(t, err) + }) + + t.Run("resume maintenance tasks", func(t *testing.T) { + err := 
+// If the inactivity timeout is reached, the maintenance cycle is resumed regardless of how many halt requests have been made.
+func (s *Shard) HaltForTransfer(ctx context.Context, offloading bool, inactivityTimeout time.Duration) (err error) { + s.haltForTransferMux.Lock() + defer s.haltForTransferMux.Unlock() + + s.haltForTransferCount++ + + defer func() { + if err == nil && inactivityTimeout > 0 { + s.mayUpdateInactivityTimeout(inactivityTimeout) + s.mayInitInactivityMonitoring() + } + }() + + if offloading { + // TODO: tenant offloading is calling HaltForTransfer but + // if Shutdown is called this step is not needed + s.mayStopAsyncReplication() + } + + if s.haltForTransferCount > 1 { + // shard was already halted + return nil + } + + defer func() { + if err != nil { + err = fmt.Errorf("pause compaction: %w", err) + if err2 := s.mayForceResumeMaintenanceCycles(ctx, false); err2 != nil { + err = fmt.Errorf("%w: resume maintenance: %w", err, err2) + } + } + }() + + if err = s.store.PauseCompaction(ctx); err != nil { + return fmt.Errorf("pause compaction: %w", err) + } + if err = s.store.FlushMemtables(ctx); err != nil { + return fmt.Errorf("flush memtables: %w", err) + } + if err = s.cycleCallbacks.vectorCombinedCallbacksCtrl.Deactivate(ctx); err != nil { + return fmt.Errorf("pause vector maintenance: %w", err) + } + if err = s.cycleCallbacks.geoPropsCombinedCallbacksCtrl.Deactivate(ctx); err != nil { + return fmt.Errorf("pause geo props maintenance: %w", err) + } + + // pause indexing + _ = s.ForEachVectorQueue(func(_ string, q *VectorIndexQueue) error { + q.Pause() + return nil + }) + + err = s.ForEachVectorIndex(func(targetVector string, index VectorIndex) error { + if err = index.SwitchCommitLogs(ctx); err != nil { + return fmt.Errorf("switch commit logs of vector %q: %w", targetVector, err) + } + return nil + }) + if err != nil { + return err + } + return nil +} + +func (s *Shard) mayUpdateInactivityTimeout(inactivityTimeout time.Duration) { + if s.haltForTransferInactivityTimeout != 0 && s.haltForTransferInactivityTimeout <= inactivityTimeout { + // no need to update current 
inactivity timeout + return + } + + s.haltForTransferInactivityTimeout = inactivityTimeout + + s.mayResetInactivityTimer() +} + +func (s *Shard) mayResetInactivityTimer() { + if s.haltForTransferInactivityTimer == nil { + return + } + + if !s.haltForTransferInactivityTimer.Stop() { + <-s.haltForTransferInactivityTimer.C // drain the channel if necessary + } + s.haltForTransferInactivityTimer.Reset(s.haltForTransferInactivityTimeout) +} + +func (s *Shard) mayInitInactivityMonitoring() { + if s.haltForTransferCancel != nil { + return + } + + ctx, cancel := context.WithCancel(context.Background()) + s.haltForTransferCancel = cancel + + s.haltForTransferInactivityTimer = time.NewTimer(s.haltForTransferInactivityTimeout) + + enterrors.GoWrapper(func() { + // this goroutine will release maintenance cycles if no file activity + // is detected in the specified inactivity timeout + defer func() { + s.haltForTransferMux.Lock() + s.haltForTransferInactivityTimer.Stop() + s.haltForTransferCancel = nil + s.haltForTransferMux.Unlock() + }() + + select { + case <-ctx.Done(): + return + case <-s.haltForTransferInactivityTimer.C: + s.haltForTransferMux.Lock() + s.mayForceResumeMaintenanceCycles(context.Background(), true) + s.haltForTransferMux.Unlock() + return + } + }, s.index.logger) +} + +// ListBackupFiles lists all files used to backup a shard +func (s *Shard) ListBackupFiles(ctx context.Context, ret *backup.ShardDescriptor) error { + s.haltForTransferMux.Lock() + defer s.haltForTransferMux.Unlock() + + if s.haltForTransferCount == 0 { + return fmt.Errorf("can not list files: illegal state: shard %q is not paused for transfer", s.name) + } + + s.mayResetInactivityTimer() + + var err error + if err := s.readBackupMetadata(ret); err != nil { + return err + } + + if ret.Files, err = s.store.ListFiles(ctx, s.index.Config.RootPath); err != nil { + return err + } + + return s.ForEachVectorIndex(func(targetVector string, idx VectorIndex) error { + files, err := idx.ListFiles(ctx, 
s.index.Config.RootPath) + if err != nil { + return fmt.Errorf("list files of vector %q: %w", targetVector, err) + } + ret.Files = append(ret.Files, files...) + return nil + }) +} + +func (s *Shard) resumeMaintenanceCycles(ctx context.Context) error { + s.haltForTransferMux.Lock() + defer s.haltForTransferMux.Unlock() + + return s.mayForceResumeMaintenanceCycles(ctx, false) +} + +func (s *Shard) mayForceResumeMaintenanceCycles(ctx context.Context, forced bool) error { + if s.haltForTransferCount == 0 { + // noop, maintenance cycles not halted + return nil + } + + if forced { + s.haltForTransferCount = 0 + } else { + s.haltForTransferCount-- + + if s.haltForTransferCount > 0 { + // maintenance cycles are not resumed as there is at least one active halt request + return nil + } + } + + if s.haltForTransferCancel != nil { + // terminate background goroutine checking for inactivity timeout + s.haltForTransferCancel() + } + + g := enterrors.NewErrorGroupWrapper(s.index.logger) + + g.Go(func() error { + return s.store.ResumeCompaction(ctx) + }) + g.Go(func() error { + return s.cycleCallbacks.vectorCombinedCallbacksCtrl.Activate() + }) + g.Go(func() error { + return s.cycleCallbacks.geoPropsCombinedCallbacksCtrl.Activate() + }) + + g.Go(func() error { + return s.ForEachVectorQueue(func(_ string, q *VectorIndexQueue) error { + q.Resume() + return nil + }) + }) + + if err := g.Wait(); err != nil { + return fmt.Errorf("failed to resume maintenance cycles for shard '%s': %w", s.name, err) + } + + return nil +} + +func (s *Shard) readBackupMetadata(d *backup.ShardDescriptor) (err error) { + d.Name = s.name + + d.Node, err = s.nodeName() + if err != nil { + return fmt.Errorf("node name: %w", err) + } + + fpath := s.counter.FileName() + if d.DocIDCounter, err = os.ReadFile(fpath); err != nil { + return fmt.Errorf("read shard doc-id-counter %s: %w", fpath, err) + } + d.DocIDCounterPath, err = filepath.Rel(s.index.Config.RootPath, fpath) + if err != nil { + return 
fmt.Errorf("docid counter path: %w", err) + } + fpath = s.GetPropertyLengthTracker().FileName() + if d.PropLengthTracker, err = os.ReadFile(fpath); err != nil { + return fmt.Errorf("read shard prop-lengths %s: %w", fpath, err) + } + d.PropLengthTrackerPath, err = filepath.Rel(s.index.Config.RootPath, fpath) + if err != nil { + return fmt.Errorf("proplength tracker path: %w", err) + } + fpath = s.versioner.path + if d.Version, err = os.ReadFile(fpath); err != nil { + return fmt.Errorf("read shard version %s: %w", fpath, err) + } + d.ShardVersionPath, err = filepath.Rel(s.index.Config.RootPath, fpath) + if err != nil { + return fmt.Errorf("shard version path: %w", err) + } + return nil +} + +func (s *Shard) nodeName() (string, error) { + node, err := s.index.getSchema.ShardOwner( + s.index.Config.ClassName.String(), s.name) + return node, err +} + +func (s *Shard) GetFileMetadata(ctx context.Context, relativeFilePath string) (file.FileMetadata, error) { + s.haltForTransferMux.Lock() + defer s.haltForTransferMux.Unlock() + + if s.haltForTransferCount == 0 { + return file.FileMetadata{}, fmt.Errorf("can not open file %q for reading: illegal state: shard %q is not paused for transfer", + relativeFilePath, s.name) + } + + s.mayResetInactivityTimer() + + finalPath := filepath.Join(s.Index().Config.RootPath, relativeFilePath) + return file.GetFileMetadata(finalPath) +} + +func (s *Shard) GetFile(ctx context.Context, relativeFilePath string) (io.ReadCloser, error) { + s.haltForTransferMux.Lock() + defer s.haltForTransferMux.Unlock() + + if s.haltForTransferCount == 0 { + return nil, fmt.Errorf("can not open file %q for reading: illegal state: shard %q is not paused for transfer", + relativeFilePath, s.name) + } + + s.mayResetInactivityTimer() + + finalPath := filepath.Join(s.Index().Config.RootPath, relativeFilePath) + + reader, err := os.Open(finalPath) + if err != nil { + return nil, fmt.Errorf("open file %q for reading: %w", relativeFilePath, err) + } + + return reader, 
nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_combine_multi_target.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_combine_multi_target.go new file mode 100644 index 0000000000000000000000000000000000000000..3dba28766cb4633dc867cadc93785b2ad27db2f0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_combine_multi_target.go @@ -0,0 +1,307 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "sync" + + "github.com/google/uuid" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/priorityqueue" + "github.com/weaviate/weaviate/entities/dto" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/traverser/hybrid" +) + +type DistanceForVector interface { + VectorDistanceForQuery(ctx context.Context, id uint64, searchVectors []models.Vector, targets []string) ([]float32, error) +} + +type idAndDistance struct { + docId uint64 + distance float32 +} + +type ResultContainer interface { + AddScores(id uint64, targets []string, distances []float32, weights []float32) + RemoveIdFromResult(id uint64) +} + +type ResultContainerHybrid struct { + ResultsIn []*search.Result + + IDsToRemove map[uint64]struct{} + allIDs map[uint64]struct{} +} + +func (r *ResultContainerHybrid) AddScores(id uint64, targets []string, distances []float32, weights []float32) { + // we need to add a copy of the properties etc to make sure that the correct object is returned + newResult := &search.Result{SecondarySortValue: distances[0], DocID: 
&id, ID: uuidFromUint64(id)} + r.ResultsIn = append(r.ResultsIn, newResult) +} + +func (r *ResultContainerHybrid) RemoveIdFromResult(id uint64) { + r.IDsToRemove[id] = struct{}{} +} + +type ResultContainerStandard struct { + ResultsIn map[uint64]idAndDistance +} + +func (r *ResultContainerStandard) AddScores(id uint64, targets []string, distances []float32, weights []float32) { + // we need to add a copy of the properties etc to make sure that the correct object is returned + tmp := r.ResultsIn[id] + for i := 0; i < len(targets); i++ { + tmp.distance += distances[i] * weights[i] + } + r.ResultsIn[id] = tmp +} + +func (r *ResultContainerStandard) RemoveIdFromResult(id uint64) { + delete(r.ResultsIn, id) +} + +type targetVectorData struct { + target []string + searchVector []models.Vector + weights []float32 +} + +func uuidFromUint64(id uint64) strfmt.UUID { + return strfmt.UUID(uuid.NewSHA1(uuid.Nil, []byte(fmt.Sprintf("%d", id))).String()) +} + +func CombineMultiTargetResults(ctx context.Context, shard DistanceForVector, logger logrus.FieldLogger, results [][]uint64, dists [][]float32, targetVectors []string, searchVectors []models.Vector, targetCombination *dto.TargetCombination, limit int, targetDist float32) ([]uint64, []float32, error) { + if len(results) == 0 { + return []uint64{}, []float32{}, nil + } + + if len(results) == 1 { + return results[0], dists[0], nil + } + + allNil := true + for i := range results { + if len(results[i]) > 0 { + allNil = false + break + } + } + if allNil { + return []uint64{}, []float32{}, nil + } + + if targetCombination == nil { + return nil, nil, fmt.Errorf("multi target combination is nil") + } + + allIDs := make(map[uint64]struct{}) + for i := range results { + for j := range results[i] { + allIDs[results[i][j]] = struct{}{} + } + } + missingIDs := make(map[uint64]targetVectorData) + + if targetCombination.Type == dto.RelativeScore { + weights := make([]float64, len(results)) + for i := range targetVectors { + weights[i] = 
float64(targetCombination.Weights[i]) + } + + scoresToRemove := make(map[uint64]struct{}) + + fusionInput := make([][]*search.Result, len(results)) + for i := range results { + localIDs := make(map[uint64]struct{}, len(allIDs)) + for key, result := range allIDs { + localIDs[key] = result + } + fusionInput[i] = make([]*search.Result, len(results[i])) + for j := range results[i] { + delete(localIDs, results[i][j]) + docId := &results[i][j] + // ID needs to be set because the fusion algorithm uses it to identify the objects - value doesn't matter as long as they are unique + fusionInput[i][j] = &search.Result{SecondarySortValue: dists[i][j], DocID: docId, ID: uuidFromUint64(*docId)} + } + collectMissingIds(localIDs, missingIDs, targetVectors, searchVectors, i, targetCombination.Weights) + resultContainer := ResultContainerHybrid{ResultsIn: fusionInput[i], allIDs: allIDs, IDsToRemove: make(map[uint64]struct{})} + if err := getScoresOfMissingResults(ctx, shard, logger, missingIDs, &resultContainer, targetCombination.Weights); err != nil { + return nil, nil, err + } + for key := range resultContainer.IDsToRemove { + scoresToRemove[key] = struct{}{} + } + fusionInput[i] = resultContainer.ResultsIn + clear(missingIDs) // each target vector is handled separately for hybrid + } + + // remove objects that have missing target vectors + if len(scoresToRemove) > 0 { + for i := range fusionInput { + for j := len(fusionInput[i]) - 1; j >= 0; j-- { + if _, ok := scoresToRemove[*fusionInput[i][j].DocID]; ok { + if j < len(fusionInput[i])-1 { + fusionInput[i] = append(fusionInput[i][:j], fusionInput[i][j+1:]...) 
+ } else { + fusionInput[i] = fusionInput[i][:j] + } + } + } + } + } + + joined := hybrid.FusionRelativeScore(weights, fusionInput, targetVectors, false) + joinedResults := make([]uint64, len(joined)) + joinedDists := make([]float32, len(joined)) + for i := range joined { + joinedResults[i] = *(joined[i].DocID) + joinedDists[i] = joined[i].Score + } + + if limit > len(joinedResults) { + limit = len(joinedResults) + } + + return joinedResults[:limit], joinedDists[:limit], nil + } + + combinedResults := make(map[uint64]idAndDistance, len(results[0])) + for i := range results { + if len(results[i]) > len(dists[i]) { + return nil, nil, fmt.Errorf("number of results does not match number of distances") + } + var localIDs map[uint64]struct{} + + if targetCombination.Type != dto.Minimum { + localIDs = make(map[uint64]struct{}, len(allIDs)) + for val := range allIDs { + localIDs[val] = struct{}{} + } + } + + for j := range results[i] { + uid := results[i][j] + + tmp, ok := combinedResults[uid] + + if targetCombination.Type == dto.Minimum { + if !ok { + tmp = idAndDistance{docId: results[i][j], distance: dists[i][j]} + } + tmp.distance = min(tmp.distance, dists[i][j]) + } else { + delete(localIDs, uid) + if len(targetCombination.Weights) != len(results) { + return nil, nil, fmt.Errorf("number of weights in join does not match number of results") + } + if !ok { + tmp = idAndDistance{docId: results[i][j], distance: 0} + } + + weight := targetCombination.Weights[i] + tmp.distance += weight * dists[i][j] + } + combinedResults[uid] = tmp + + } + collectMissingIds(localIDs, missingIDs, targetVectors, searchVectors, i, targetCombination.Weights) + } + if targetCombination.Type != dto.Minimum { + if err := getScoresOfMissingResults(ctx, shard, logger, missingIDs, &ResultContainerStandard{combinedResults}, targetCombination.Weights); err != nil { + return nil, nil, err + } + } + + // unlimited results + if limit < 0 { + limit = len(combinedResults) + } + + queue := 
priorityqueue.NewMin[float32](limit) + uuidCounter := make([]uint64, len(combinedResults)) + count := uint64(0) + for id, res := range combinedResults { + if targetDist > 0 && res.distance > targetDist { // targetDist == 0 means no target distance is set + continue + } + uuidCounter[count] = id + queue.Insert(count, res.distance) + count++ + + } + returnResults := make([]uint64, 0, queue.Len()) + returnDists := make([]float32, 0, queue.Len()) + lim := queue.Len() + for i := 0; i < lim; i++ { + item := queue.Pop() + returnResults = append(returnResults, combinedResults[uuidCounter[item.ID]].docId) + returnDists = append(returnDists, combinedResults[uuidCounter[item.ID]].distance) + } + + if limit > len(returnResults) { + limit = len(returnResults) + } + + return returnResults[:limit], returnDists[:limit], nil +} + +func collectMissingIds(localIDs map[uint64]struct{}, missingIDs map[uint64]targetVectorData, targetVectors []string, searchVectors []models.Vector, i int, weights []float32) { + for id := range localIDs { + val, ok := missingIDs[id] + if !ok { + val = targetVectorData{target: []string{targetVectors[i]}, searchVector: []models.Vector{searchVectors[i]}, weights: []float32{weights[i]}} + } else { + val.target = append(val.target, targetVectors[i]) + val.searchVector = append(val.searchVector, searchVectors[i]) + val.weights = append(val.weights, weights[i]) + } + missingIDs[id] = val + } +} + +func getScoresOfMissingResults(ctx context.Context, shard DistanceForVector, logger logrus.FieldLogger, missingIDs map[uint64]targetVectorData, combinedResults ResultContainer, weights []float32) error { + if len(missingIDs) == 0 { + return nil + } + + eg, ctx := enterrors.NewErrorGroupWithContextWrapper(logger, ctx) + eg.SetLimit(_NUMCPU * 2) + mutex := sync.Mutex{} + for id, targets := range missingIDs { + id := id + targets := targets + f := func() error { + distances, err := shard.VectorDistanceForQuery(ctx, id, targets.searchVector, targets.target) + mutex.Lock() 
+ defer mutex.Unlock() + if err != nil { + // when we cannot look up missing distances for an object, it will be removed from the result list + combinedResults.RemoveIdFromResult(id) + } else { + combinedResults.AddScores(id, targets.target, distances, targets.weights) + } + return nil + } + eg.Go(f) + } + if err := eg.Wait(); err != nil { + return err + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_combine_multi_target_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_combine_multi_target_test.go new file mode 100644 index 0000000000000000000000000000000000000000..70fd8756f41b719e43b14245dda2d8a3d8d04e86 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_combine_multi_target_test.go @@ -0,0 +1,296 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/search" +) + +func uid(id uint64) strfmt.UUID { + return strfmt.UUID(uuid.NewSHA1(uuid.Nil, []byte(fmt.Sprintf("%d", id))).String()) +} + +func res(id uint64, distance float32) search.Result { + return search.Result{DocID: &id, Dist: distance, ID: uid(id)} +} + +func TestCombiner(t *testing.T) { + logger, _ := test.NewNullLogger() + searchesVectors := []models.Vector{[]float32{1, 0, 0}, []float32{0, 1, 0}, []float32{0, 1, 0}, []float32{0, 1, 0}} // not relevant for this test + + cases := []struct { + name string + targets []string + in [][]search.Result + out []search.Result + joinMethod *dto.TargetCombination + missingElements map[uint64][]string + missingDistancesResult map[uint64]map[string]float32 + targetDistance float32 + }{ + { + name: "no results (nil)", + targets: []string{"target1", "target2"}, + joinMethod: &dto.TargetCombination{Weights: []float32{1, 1}}, + in: nil, + out: []search.Result{}, + }, + { + name: "no results (empty)", + targets: []string{"target1", "target2"}, + joinMethod: &dto.TargetCombination{Weights: []float32{1, 1}}, + in: [][]search.Result{}, + out: []search.Result{}, + }, + { + name: "single result", + targets: []string{"target1", "target2"}, + joinMethod: &dto.TargetCombination{Weights: []float32{1, 1}}, + in: [][]search.Result{{res(0, 0.5), res(1, 0.6)}}, + out: []search.Result{res(0, 0.5), res(1, 0.6)}, + }, + { + name: "simple join", + targets: []string{"target1", "target2"}, + joinMethod: &dto.TargetCombination{Weights: []float32{1, 1}}, + in: [][]search.Result{{res(0, 0.5), res(1, 0.6)}, {res(0, 0.5), res(1, 0.6)}}, + out: 
[]search.Result{res(0, 1), res(1, 1.2)}, + }, + { + name: "minimum", + targets: []string{"target1", "target2"}, + joinMethod: &dto.TargetCombination{Type: dto.Minimum}, + in: [][]search.Result{{res(0, 0.5), res(1, 0.6)}, {res(0, 0.5), res(1, 0.6)}}, + out: []search.Result{res(0, 0.5), res(1, 0.6)}, + }, + { + name: "score fusion", + targets: []string{"target1", "target2"}, + joinMethod: &dto.TargetCombination{Type: dto.RelativeScore, Weights: []float32{0.5, 0.5}}, + in: [][]search.Result{{res(0, 0.5), res(1, 0.6)}, {res(0, 0.5), res(1, 0.6)}}, + out: []search.Result{res(0, 0), res(1, 1)}, + }, + { + name: "score fusion with custom weights", + targets: []string{"target1", "target2"}, + joinMethod: &dto.TargetCombination{Type: dto.RelativeScore, Weights: []float32{1, 2}}, + in: [][]search.Result{{res(0, 0.5), res(1, 0.6)}, {res(0, 0.5), res(1, 0.6)}}, + out: []search.Result{res(0, 0), res(1, 3)}, + }, + { + name: "missing document without target vector (min)", + targets: []string{"target1", "target2"}, + joinMethod: &dto.TargetCombination{Type: dto.Minimum}, + in: [][]search.Result{{res(0, 0.5), res(1, 0.6)}, {res(0, 0.5)}}, + out: []search.Result{res(0, 0.5), res(1, 0.6)}, + }, + { + name: "missing document without target vector (weights)", + targets: []string{"target1", "target2"}, + joinMethod: &dto.TargetCombination{Weights: []float32{1, 1}}, + in: [][]search.Result{{res(0, 0.5), res(1, 0.6)}, {res(0, 0.5)}}, + out: []search.Result{res(0, 1)}, + missingElements: map[uint64][]string{1: {"target2"}}, + }, + { + name: "missing document without target vector that is not searched (weights)", + targets: []string{"target1", "target2"}, + joinMethod: &dto.TargetCombination{Weights: []float32{1, 1}}, + in: [][]search.Result{{res(0, 0.5), res(1, 0.6)}, {res(0, 0.5)}}, + out: []search.Result{res(0, 1), res(1, 2.6)}, + missingElements: map[uint64][]string{1: {"target3"}}, + }, + { + name: "missing document without target vector (score fusion)", + targets: []string{"target1", 
"target2"}, + joinMethod: &dto.TargetCombination{Type: dto.RelativeScore, Weights: []float32{0.5, 0.5}}, + in: [][]search.Result{{res(0, 0.5), res(1, 0.6)}, {res(0, 0.5)}}, + out: []search.Result{res(0, 1)}, + missingElements: map[uint64][]string{1: {"target2"}}, + }, + { + name: "many documents (weights)", + targets: []string{"target1", "target2", "target3", "target4"}, + joinMethod: &dto.TargetCombination{Weights: []float32{1, 0.5, 0.25, 0.1}}, + in: [][]search.Result{ + {res(0, 0.5), res(1, 0.6), res(2, 0.8), res(3, 0.9)}, + {res(1, 0.2), res(0, 0.4), res(2, 0.6), res(5, 0.8)}, + {res(1, 0.2), res(2, 0.4), res(3, 0.6), res(4, 0.8)}, + {res(6, 0.1), res(0, 0.3), res(2, 0.7), res(3, 0.9)}, + }, + out: []search.Result{res(1, 0.95), res(0, 1.23), res(2, 1.27)}, + }, + { + name: "many documents (weights) and target Distance", + targets: []string{"target1", "target2", "target3", "target4"}, + joinMethod: &dto.TargetCombination{Weights: []float32{1, 0.5, 0.25, 0.1}}, + in: [][]search.Result{ + {res(0, 0.5), res(1, 0.6), res(2, 0.8), res(3, 0.9)}, + {res(1, 0.2), res(0, 0.4), res(2, 0.6), res(5, 0.8)}, + {res(1, 0.2), res(2, 0.4), res(3, 0.6), res(4, 0.8)}, + {res(6, 0.1), res(0, 0.3), res(2, 0.7), res(3, 0.9)}, + }, + targetDistance: 1.25, + out: []search.Result{res(1, 0.95), res(0, 1.23)}, + }, + { + name: "many documents missing entry (weights)", + targets: []string{"target1", "target2", "target3", "target4"}, + joinMethod: &dto.TargetCombination{Weights: []float32{1, 0.5, 0.25, 0.1}}, + in: [][]search.Result{ + {res(0, 0.5), res(1, 0.6), res(2, 0.8), res(3, 0.9)}, + {res(1, 0.2), res(0, 0.4), res(2, 0.6), res(5, 0.8)}, + {res(1, 0.2), res(2, 0.4), res(3, 0.6), res(4, 0.8)}, + {res(6, 0.1), res(0, 0.3), res(2, 0.7), res(3, 0.9)}, + }, + out: []search.Result{res(1, 0.95), res(2, 1.27)}, + missingElements: map[uint64][]string{0: {"target3"}}, + }, + { + name: "many documents (score fusion)", + targets: []string{"target1", "target2", "target3", "target4"}, + joinMethod: 
&dto.TargetCombination{Type: dto.RelativeScore, Weights: []float32{0.25, 0.25, 0.25, 0.25}}, + in: [][]search.Result{ + // 0:0, 1:0.2 2:0.6, 3:1.0 + {res(0, 0.5), res(1, 0.6), res(2, 0.8), res(3, 1.0)}, + // 1:0, 0:0.25, 2:0.75, 3:1. + {res(1, 0.2), res(0, 0.3), res(2, 0.5), res(3, 0.6)}, + // 1:0, 2:0.1/3, 3:0.2/3, 0:1. + {res(1, 0.2), res(2, 0.4), res(3, 0.6), res(0, 0.8)}, + // 1:0, 0:0.25, 2:0.75, 3:1. + {res(1, 0.1), res(0, 0.3), res(2, 0.7), res(3, 0.9)}, + }, + out: []search.Result{res(1, 0.05), res(0, 0.375), res(2, 0.60833)}, + }, + { + name: "many documents missing entry (score fusion)", + targets: []string{"target1", "target2", "target3", "target4"}, + joinMethod: &dto.TargetCombination{Type: dto.RelativeScore, Weights: []float32{1, 0.5, 0.25, 0.1}}, + in: [][]search.Result{ + {res(0, 0.5), res(1, 0.6), res(2, 0.8), res(3, 0.9)}, + {res(1, 0.2), res(0, 0.4), res(2, 0.6), res(5, 0.8)}, + {res(1, 0.2), res(2, 0.4), res(3, 0.6), res(4, 0.8)}, + {res(6, 0.1), res(0, 0.3), res(2, 0.7), res(3, 0.9)}, + }, + missingDistancesResult: map[uint64]map[string]float32{ + 0: {"target3": 1}, + 1: {"target4": 1.1}, + 3: {"target2": 1.2}, + 4: {"target1": 1, "target2": 1.1, "target4": 1.2}, + 5: {"target1": 1, "target3": 1.2, "target4": 1.3}, + }, + out: []search.Result{res(1, 0.28), res(0, 0.3), res(2, 0.89), res(3, 1.46), res(5, 1.65), res(4, 1.69)}, + missingElements: map[uint64][]string{6: {"target3"}}, + }, + { + name: "all missing (score fusion)", + targets: []string{"target1", "target2"}, + joinMethod: &dto.TargetCombination{Type: dto.RelativeScore, Weights: []float32{1, 0.5}}, + in: [][]search.Result{ + {res(0, 0.5), res(1, 0.6)}, + {res(2, 0.6), res(5, 0.8)}, + }, + out: []search.Result{}, + missingElements: map[uint64][]string{0: {"target1"}, 1: {"target1"}, 2: {"target2"}, 5: {"target2"}}, + }, + { + name: "all missing except one (score fusion)", + targets: []string{"target1", "target2", "target3", "target4"}, + joinMethod: &dto.TargetCombination{Type: 
dto.RelativeScore, Weights: []float32{1, 0.5, 0.25, 0.1}}, + in: [][]search.Result{ + {res(0, 0.5), res(1, 0.6), res(2, 0.8), res(3, 0.9)}, + {res(2, 0.6), res(5, 0.8)}, + {res(1, 0.2), res(3, 0.6), res(4, 0.8)}, + {res(6, 0.1), res(0, 0.3), res(2, 0.7), res(3, 0.9)}, + }, + out: []search.Result{res(3, 1.85)}, // score is 1 for each if there is only one result, multiplied by the weight + missingElements: map[uint64][]string{0: {"target2"}, 1: {"target2"}, 2: {"target3"}, 4: {"target1", "target2", "target4"}, 5: {"target1", "target2", "target4"}, 6: {"target1", "target2", "target3"}}, + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + searcher := fakeS{missingElements: tt.missingElements, missingDistancesResult: tt.missingDistancesResult} + + idsIn := make([][]uint64, len(tt.in)) + distsIn := make([][]float32, len(tt.in)) + for i := range tt.in { + distsIn[i] = make([]float32, len(tt.in[i])) + idsIn[i] = make([]uint64, len(tt.in[i])) + for j := range tt.in[i] { + distsIn[i][j] = tt.in[i][j].Dist + idsIn[i][j] = *(tt.in[i][j].DocID) + } + } + + limit := len(tt.out) + if tt.targetDistance > 0 { + limit = 100 + } + + ids, dists, err := CombineMultiTargetResults(context.Background(), searcher, logger, idsIn, distsIn, tt.targets, searchesVectors[:len(tt.targets)], tt.joinMethod, limit, tt.targetDistance) + require.Nil(t, err) + require.Len(t, ids, len(tt.out)) + for i, id := range ids { + // we do not want to compare ExplainScore etc + require.Equal(t, *(tt.out[i].DocID), id) + require.InDelta(t, tt.out[i].Dist, dists[i], 0.0001) + } + }) + } +} + +type fakeS struct { + missingElements map[uint64][]string + missingDistancesResult map[uint64]map[string]float32 +} + +func (f fakeS) VectorDistanceForQuery(ctx context.Context, id uint64, searchVectors []models.Vector, targetVectors []string) ([]float32, error) { + returns := make([]float32, 0, len(targetVectors)) + for range targetVectors { + returns = append(returns, 2) + } + + missingTargets, ok 
:= f.missingElements[id] + if !ok { + missingDistances, ok := f.missingDistancesResult[id] + if ok { + for i := range targetVectors { + score, ok := missingDistances[targetVectors[i]] + if ok { + returns[i] = score + } + } + } + + return returns, nil + } + + for _, missingTarget := range missingTargets { + for _, target := range targetVectors { + if target == missingTarget { + return nil, errors.Errorf("missing target %s", missingTarget) + } + } + } + return returns, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_cyclecallbacks.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_cyclecallbacks.go new file mode 100644 index 0000000000000000000000000000000000000000..1c422d8ab9ebd2f08e065c344e3116673496e077 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_cyclecallbacks.go @@ -0,0 +1,123 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "strings" + + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +type shardCycleCallbacks struct { + compactionCallbacks cyclemanager.CycleCallbackGroup + compactionCallbacksCtrl cyclemanager.CycleCallbackCtrl + compactionAuxCallbacks cyclemanager.CycleCallbackGroup + compactionAuxCallbacksCtrl cyclemanager.CycleCallbackCtrl + + flushCallbacks cyclemanager.CycleCallbackGroup + flushCallbacksCtrl cyclemanager.CycleCallbackCtrl + + vectorCommitLoggerCallbacks cyclemanager.CycleCallbackGroup + vectorTombstoneCleanupCallbacks cyclemanager.CycleCallbackGroup + vectorCombinedCallbacksCtrl cyclemanager.CycleCallbackCtrl + + geoPropsCommitLoggerCallbacks cyclemanager.CycleCallbackGroup + geoPropsTombstoneCleanupCallbacks cyclemanager.CycleCallbackGroup + geoPropsCombinedCallbacksCtrl cyclemanager.CycleCallbackCtrl +} + +func (s *Shard) initCycleCallbacks() { + id := func(elems ...string) string { + elems = append([]string{"shard", s.index.ID(), s.name}, elems...) 
+ return strings.Join(elems, "/") + } + + var compactionCallbacks cyclemanager.CycleCallbackGroup + var compactionCallbacksCtrl cyclemanager.CycleCallbackCtrl + var compactionAuxCallbacks cyclemanager.CycleCallbackGroup + var compactionAuxCallbacksCtrl cyclemanager.CycleCallbackCtrl + + if s.index.cycleCallbacks.compactionAuxCallbacks == nil { + compactionId := id("compaction") + compactionCallbacks = cyclemanager.NewCallbackGroup(compactionId, s.index.logger, 1) + compactionCallbacksCtrl = s.index.cycleCallbacks.compactionCallbacks.Register( + compactionId, compactionCallbacks.CycleCallback, + cyclemanager.WithIntervals(cyclemanager.CompactionCycleIntervals())) + compactionAuxCallbacksCtrl = cyclemanager.NewCallbackCtrlNoop() + } else { + compactionId := id("compaction-non-objects") + compactionCallbacks = cyclemanager.NewCallbackGroup(compactionId, s.index.logger, 1) + compactionCallbacksCtrl = s.index.cycleCallbacks.compactionCallbacks.Register( + compactionId, compactionCallbacks.CycleCallback, + cyclemanager.WithIntervals(cyclemanager.CompactionCycleIntervals())) + + compactionAuxId := id("compaction-objects") + compactionAuxCallbacks = cyclemanager.NewCallbackGroup(compactionAuxId, s.index.logger, 1) + compactionAuxCallbacksCtrl = s.index.cycleCallbacks.compactionAuxCallbacks.Register( + compactionAuxId, compactionAuxCallbacks.CycleCallback, + cyclemanager.WithIntervals(cyclemanager.CompactionCycleIntervals())) + } + + flushId := id("flush") + flushCallbacks := cyclemanager.NewCallbackGroup(flushId, s.index.logger, 1) + flushCallbacksCtrl := s.index.cycleCallbacks.flushCallbacks.Register( + flushId, flushCallbacks.CycleCallback, + cyclemanager.WithIntervals(cyclemanager.MemtableFlushCycleIntervals())) + + vectorCommitLoggerId := id("vector", "commit_logger") + vectorCommitLoggerCallbacks := cyclemanager.NewCallbackGroup(vectorCommitLoggerId, s.index.logger, 1) + vectorCommitLoggerCallbacksCtrl := s.index.cycleCallbacks.vectorCommitLoggerCallbacks.Register( + 
vectorCommitLoggerId, vectorCommitLoggerCallbacks.CycleCallback, + cyclemanager.WithIntervals(cyclemanager.HnswCommitLoggerCycleIntervals())) + + vectorTombstoneCleanupId := id("vector", "tombstone_cleanup") + vectorTombstoneCleanupCallbacks := cyclemanager.NewCallbackGroup(vectorTombstoneCleanupId, s.index.logger, 1) + // fixed interval on class level, no need to specify separate on shard level + vectorTombstoneCleanupCallbacksCtrl := s.index.cycleCallbacks.vectorTombstoneCleanupCallbacks.Register( + vectorTombstoneCleanupId, vectorTombstoneCleanupCallbacks.CycleCallback) + + vectorCombinedCallbacksCtrl := cyclemanager.NewCombinedCallbackCtrl(2, s.index.logger, + vectorCommitLoggerCallbacksCtrl, vectorTombstoneCleanupCallbacksCtrl) + + geoPropsCommitLoggerId := id("geo_props", "commit_logger") + geoPropsCommitLoggerCallbacks := cyclemanager.NewCallbackGroup(geoPropsCommitLoggerId, s.index.logger, 1) + geoPropsCommitLoggerCallbacksCtrl := s.index.cycleCallbacks.geoPropsCommitLoggerCallbacks.Register( + geoPropsCommitLoggerId, geoPropsCommitLoggerCallbacks.CycleCallback, + cyclemanager.WithIntervals(cyclemanager.GeoCommitLoggerCycleIntervals())) + + geoPropsTombstoneCleanupId := id("geoProps", "tombstone_cleanup") + geoPropsTombstoneCleanupCallbacks := cyclemanager.NewCallbackGroup(geoPropsTombstoneCleanupId, s.index.logger, 1) + // fixed interval on class level, no need to specify separate on shard level + geoPropsTombstoneCleanupCallbacksCtrl := s.index.cycleCallbacks.geoPropsTombstoneCleanupCallbacks.Register( + geoPropsTombstoneCleanupId, geoPropsTombstoneCleanupCallbacks.CycleCallback) + + geoPropsCombinedCallbacksCtrl := cyclemanager.NewCombinedCallbackCtrl(2, s.index.logger, + geoPropsCommitLoggerCallbacksCtrl, geoPropsTombstoneCleanupCallbacksCtrl) + + s.cycleCallbacks = &shardCycleCallbacks{ + compactionCallbacks: compactionCallbacks, + compactionCallbacksCtrl: compactionCallbacksCtrl, + compactionAuxCallbacks: compactionAuxCallbacks, + 
compactionAuxCallbacksCtrl: compactionAuxCallbacksCtrl, + + flushCallbacks: flushCallbacks, + flushCallbacksCtrl: flushCallbacksCtrl, + + vectorCommitLoggerCallbacks: vectorCommitLoggerCallbacks, + vectorTombstoneCleanupCallbacks: vectorTombstoneCleanupCallbacks, + vectorCombinedCallbacksCtrl: vectorCombinedCallbacksCtrl, + + geoPropsCommitLoggerCallbacks: geoPropsCommitLoggerCallbacks, + geoPropsTombstoneCleanupCallbacks: geoPropsTombstoneCleanupCallbacks, + geoPropsCombinedCallbacksCtrl: geoPropsCombinedCallbacksCtrl, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_debug.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_debug.go new file mode 100644 index 0000000000000000000000000000000000000000..8fe389ded616dc27baa325b7f801943f75986f7a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_debug.go @@ -0,0 +1,63 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" +) + +// IMPORTANT: +// DebugResetVectorIndex is intended to be used for debugging purposes only. +// It creates a new vector index and replaces the existing one if any. +// This function assumes the node is not receiving any traffic besides the +// debug endpoints and that async indexing is enabled. 
+func (s *Shard) DebugResetVectorIndex(ctx context.Context, targetVector string) error { + if !asyncEnabled() { + return fmt.Errorf("async indexing is not enabled") + } + + vidx, vok := s.GetVectorIndex(targetVector) + q, qok := s.GetVectorIndexQueue(targetVector) + + if !(vok && qok) { + return fmt.Errorf("vector index %q not found", targetVector) + } + + q.Pause() + q.Wait() + + err := vidx.Drop(ctx) + if err != nil { + return errors.Wrap(err, "drop vector index") + } + + var newConfig schemaConfig.VectorIndexConfig + if targetVector == "" { + newConfig = s.index.vectorIndexUserConfig + } else { + newConfig = s.index.vectorIndexUserConfigs[targetVector] + } + + vidx, err = s.initVectorIndex(ctx, targetVector, newConfig, false) + if err != nil { + return errors.Wrap(err, "init vector index") + } + s.setVectorIndex(targetVector, vidx) + + q.ResetWith(vidx) + q.Resume() + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_dimension_tracking.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_dimension_tracking.go new file mode 100644 index 0000000000000000000000000000000000000000..d89a884fd9b8ed76841896b21e91d09e07da7506 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_dimension_tracking.go @@ -0,0 +1,169 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/cluster/usage/types" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + hnswent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type DimensionCategory int + +const ( + DimensionCategoryStandard DimensionCategory = iota + DimensionCategoryPQ + DimensionCategoryBQ + DimensionCategorySQ + DimensionCategoryRQ +) + +func (c DimensionCategory) String() string { + switch c { + case DimensionCategoryPQ: + return "pq" + case DimensionCategoryBQ: + return "bq" + case DimensionCategorySQ: + return "sq" + case DimensionCategoryRQ: + return "rq" + default: + return "standard" + } +} + +// DimensionsUsage returns the total number of dimensions and the number of objects for a given vector +func (s *Shard) DimensionsUsage(ctx context.Context, targetVector string) (types.Dimensionality, error) { + dimensionality, err := s.calcTargetVectorDimensions(ctx, targetVector, func(dimLength int, v []lsmkv.MapPair) (int, int) { + return len(v), dimLength + }) + if err != nil { + return types.Dimensionality{}, err + } + return dimensionality, nil +} + +// Dimensions returns the total number of dimensions for a given vector +func (s *Shard) Dimensions(ctx context.Context, targetVector string) (int, error) { + dimensionality, err := s.calcTargetVectorDimensions(ctx, targetVector, func(dimLength int, v []lsmkv.MapPair) (int, int) { + return dimLength * len(v), dimLength + }) + if err != nil { + return 0, err + } + return dimensionality.Count, nil +} + +func (s *Shard) QuantizedDimensions(ctx context.Context, targetVector string, segments int) int { + dimensionality, err := s.calcTargetVectorDimensions(ctx, targetVector, func(dimLength int, v []lsmkv.MapPair) (int, int) { + return 
len(v), dimLength + }) + if err != nil { + return 0 + } + + return dimensionality.Count * correctEmptySegments(segments, dimensionality.Dimensions) +} + +func (s *Shard) calcTargetVectorDimensions(ctx context.Context, targetVector string, calcEntry func(dimLen int, v []lsmkv.MapPair) (int, int)) (types.Dimensionality, error) { + return calcTargetVectorDimensionsFromStore(ctx, s.store, targetVector, calcEntry), nil +} + +// DimensionMetrics represents the dimension tracking metrics for a vector. +// The metrics are used to track memory usage and performance characteristics +// of different vector compression methods. +// +// Usage patterns: +// - Standard vectors: Only Uncompressed is set (4 bytes per dimension) +// - PQ (Product Quantization): Only Compressed is set (1 byte per segment) +// - BQ (Binary Quantization): Only Compressed is set (1 bit per dimension, packed in uint64 blocks) +// +// The metrics are aggregated across all vectors in a shard and published +// to Prometheus for monitoring and capacity planning. +type DimensionMetrics struct { + Uncompressed int // Uncompressed dimensions count (for standard vectors) + Compressed int // Compressed dimensions count (for PQ/BQ vectors) +} + +// Add creates a new DimensionMetrics instance with total values summed pairwise. +func (dm DimensionMetrics) Add(add DimensionMetrics) DimensionMetrics { + return DimensionMetrics{ + Uncompressed: dm.Uncompressed + add.Uncompressed, + Compressed: dm.Compressed + add.Compressed, + } +} + +// Set shard's vector_dimensions_sum and vector_segments_sum metrics to 0. +func (s *Shard) clearDimensionMetrics() { + if s.promMetrics == nil { + return + } + clearDimensionMetrics(s.index.Config, s.promMetrics, s.index.Config.ClassName.String(), s.name) +} + +// Set shard's vector_dimensions_sum and vector_segments_sum metrics to 0. +// +// Vector dimension metrics are collected on the node level and +// are normally _polled_ from each shard. 
Shard dimension metrics +// should only be updated on the shard level iff it is being shut +// down or dropped and metrics grouping is disabled. +// If metrics grouping is enabled, the difference is eventually +// accounted for the next time nodeWideMetricsObserver recalculates +// total vector dimensions, because only _active_ shards are considered. +func clearDimensionMetrics(cfg IndexConfig, promMetrics *monitoring.PrometheusMetrics, className, shardName string) { + if !cfg.TrackVectorDimensions || promMetrics.Group { + return + } + if g, err := promMetrics.VectorDimensionsSum. + GetMetricWithLabelValues(className, shardName); err == nil { + g.Set(0) + } + if g, err := promMetrics.VectorSegmentsSum. + GetMetricWithLabelValues(className, shardName); err == nil { + g.Set(0) + } +} + +func GetDimensionCategory(cfg schemaConfig.VectorIndexConfig) (DimensionCategory, int) { + // We have special dimension tracking for BQ and PQ to represent reduced costs + // these are published under the separate vector_segments_dimensions metric + if hnswUserConfig, ok := cfg.(hnswent.UserConfig); ok { + if hnswUserConfig.PQ.Enabled { + return DimensionCategoryPQ, hnswUserConfig.PQ.Segments + } + if hnswUserConfig.BQ.Enabled { + return DimensionCategoryBQ, 0 + } + if hnswUserConfig.SQ.Enabled { + return DimensionCategorySQ, 0 + } + if hnswUserConfig.RQ.Enabled { + return DimensionCategoryRQ, 0 + } + } + return DimensionCategoryStandard, 0 +} + +func correctEmptySegments(segments int, dimensions int) int { + // If segments is 0 (unset), in this case PQ will calculate the number of segments + // based on the number of dimensions + if segments > 0 { + return segments + } + return common.CalculateOptimalSegments(dimensions) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_dimension_tracking_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_dimension_tracking_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..a9e1c21aa3c78365fa03256365319a9ecb107487 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_dimension_tracking_test.go @@ -0,0 +1,723 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "math/rand" + "testing" + "time" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +func Benchmark_Migration(b *testing.B) { + fmt.Printf("Running benchmark %v times\n", b.N) + for i := 0; i < b.N; i++ { + func() { + r := getRandomSeed() + dirName := b.TempDir() + + shardState := singleShardState() + logger := logrus.New() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(b) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, 
mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(b) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(b) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + RootPath: dirName, + QueryMaximumResults: 1000, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(b, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(b, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + + migrator := NewMigrator(repo, logger, "node1") + + class := &models.Class{ + Class: "Test", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + } + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + migrator.AddClass(context.Background(), class) + + schemaGetter.schema = schema + + repo.config.TrackVectorDimensions = false + + dim := 128 + for i := 0; i < 100; i++ { + vec := make([]float32, dim) + for j := range vec { 
+ vec[j] = r.Float32() + } + + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + obj := &models.Object{Class: "Test", ID: id} + err := repo.PutObject(context.Background(), obj, vec, nil, nil, nil, 0) + if err != nil { + b.Fatal(err) + } + } + + fmt.Printf("Added vectors, now migrating\n") + + repo.config.TrackVectorDimensions = true + migrator.RecalculateVectorDimensions(context.TODO()) + fmt.Printf("Benchmark complete") + }() + } +} + +// Rebuild dimensions at startup +func Test_Migration(t *testing.T) { + r := getRandomSeed() + dirName := t.TempDir() + + shardState := singleShardState() + logger := logrus.New() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + 
RootPath: dirName, + QueryMaximumResults: 1000, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, nil, + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + + migrator := NewMigrator(repo, logger, "node1") + + t.Run("set schema", func(t *testing.T) { + class := &models.Class{ + Class: "Test", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + } + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + require.Nil(t, + migrator.AddClass(context.Background(), class)) + + schemaGetter.schema = schema + }) + + repo.config.TrackVectorDimensions = false + + t.Run("import objects with d=128", func(t *testing.T) { + dim := 128 + for i := 0; i < 100; i++ { + vec := make([]float32, dim) + for j := range vec { + vec[j] = r.Float32() + } + + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + obj := &models.Object{Class: "Test", ID: id} + err := repo.PutObject(context.Background(), obj, vec, nil, nil, nil, 0) + require.Nil(t, err) + } + dimAfter := getDimensionsFromRepo(context.Background(), repo, "Test") + require.Equal(t, 0, dimAfter, "dimensions should not have been calculated") + }) + + dimBefore := getDimensionsFromRepo(context.Background(), repo, "Test") + require.Equal(t, 0, dimBefore, "dimensions should not have been calculated") + repo.config.TrackVectorDimensions = true + migrator.RecalculateVectorDimensions(context.TODO()) + dimAfter := getDimensionsFromRepo(context.Background(), repo, "Test") + require.Equal(t, 12800, dimAfter, "dimensions should be counted now") +} + +func Test_DimensionTracking(t *testing.T) { + r := getRandomSeed() + dirName := t.TempDir() + + shardState := 
singleShardState() + logger := logrus.New() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, monitoring.GetMetrics(), memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + repo.SetSchemaGetter(schemaGetter) + require.Nil(t, repo.WaitForStartup(testCtx())) + defer repo.Shutdown(context.Background()) + + migrator := NewMigrator(repo, logger, "node1") + + t.Run("set schema", func(t *testing.T) { + class := 
&models.Class{ + Class: "Test", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + } + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + require.Nil(t, migrator.AddClass(context.Background(), class)) + + schemaGetter.schema = schema + }) + + t.Run("import objects with d=128", func(t *testing.T) { + dim := 128 + for i := 0; i < 100; i++ { + vec := make([]float32, dim) + for j := range vec { + vec[j] = r.Float32() + } + + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + obj := &models.Object{Class: "Test", ID: id} + err := repo.PutObject(context.Background(), obj, vec, nil, nil, nil, 0) + require.Nil(t, err) + } + dimAfter := getDimensionsFromRepo(context.Background(), repo, "Test") + require.Equal(t, 12800, dimAfter, "dimensions should not have changed") + quantDimAfter := GetQuantizedDimensionsFromRepo(context.Background(), repo, "Test", 64) + require.Equal(t, 6400, quantDimAfter, "quantized dimensions should not have changed") + }) + + t.Run("import objects with d=0", func(t *testing.T) { + dimBefore := getDimensionsFromRepo(context.Background(), repo, "Test") + quantDimBefore := GetQuantizedDimensionsFromRepo(context.Background(), repo, "Test", 64) + for i := 100; i < 200; i++ { + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + obj := &models.Object{Class: "Test", ID: id} + err := repo.PutObject(context.Background(), obj, nil, nil, nil, nil, 0) + require.Nil(t, err) + } + dimAfter := getDimensionsFromRepo(context.Background(), repo, "Test") + require.Equal(t, dimBefore, dimAfter, "dimensions should not have changed") + quantDimAfter := GetQuantizedDimensionsFromRepo(context.Background(), repo, "Test", 64) + require.Equal(t, quantDimBefore, quantDimAfter, "quantized dimensions should not have changed") + }) + + t.Run("verify dimensions after initial import", func(t *testing.T) { + idx := repo.GetIndex("Test") + 
idx.ForEachShard(func(name string, shard ShardLike) error { + dim, err := shard.Dimensions(context.Background(), "") + assert.NoError(t, err) + assert.Equal(t, 12800, dim) + assert.Equal(t, 6400, shard.QuantizedDimensions(context.Background(), "", 64)) + return nil + }) + }) + + t.Run("delete 10 objects with d=128", func(t *testing.T) { + dimBefore := getDimensionsFromRepo(context.Background(), repo, "Test") + quantDimBefore := GetQuantizedDimensionsFromRepo(context.Background(), repo, "Test", 64) + for i := 0; i < 10; i++ { + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + err := repo.DeleteObject(context.Background(), "Test", id, time.Now(), nil, "", 0) + require.Nil(t, err) + } + dimAfter := getDimensionsFromRepo(context.Background(), repo, "Test") + require.Equal(t, dimBefore, dimAfter+10*128, "dimensions should have decreased") + quantDimAfter := GetQuantizedDimensionsFromRepo(context.Background(), repo, "Test", 64) + require.Equal(t, quantDimBefore, quantDimAfter+10*64, "dimensions should have decreased") + }) + + t.Run("verify dimensions after delete", func(t *testing.T) { + idx := repo.GetIndex("Test") + idx.ForEachShard(func(name string, shard ShardLike) error { + dim, err := shard.Dimensions(context.Background(), "") + assert.NoError(t, err) + assert.Equal(t, 11520, dim) + assert.Equal(t, 5760, shard.QuantizedDimensions(context.Background(), "", 64)) + return nil + }) + }) + + t.Run("update some of the d=128 objects with a new vector", func(t *testing.T) { + dimBefore := getDimensionsFromRepo(context.Background(), repo, "Test") + quantDimBefore := GetQuantizedDimensionsFromRepo(context.Background(), repo, "Test", 64) + dim := 128 + for i := 0; i < 50; i++ { + vec := make([]float32, dim) + for j := range vec { + vec[j] = rand.Float32() + } + + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + obj := &models.Object{Class: "Test", ID: id} + // Put is idempotent, but since the IDs exist now, this is an update + // 
under the hood and a "reinstert" for the already deleted ones + err := repo.PutObject(context.Background(), obj, vec, nil, nil, nil, 0) + require.Nil(t, err) + } + dimAfter := getDimensionsFromRepo(context.Background(), repo, "Test") + quantDimAfter := GetQuantizedDimensionsFromRepo(context.Background(), repo, "Test", 64) + require.Equal(t, dimBefore+10*128, dimAfter, "dimensions should have been restored") + require.Equal(t, quantDimBefore+10*64, quantDimAfter, "dimensions should have been restored") + }) + + t.Run("update some of the d=128 objects with a nil vector", func(t *testing.T) { + dimBefore := getDimensionsFromRepo(context.Background(), repo, "Test") + quantDimBefore := GetQuantizedDimensionsFromRepo(context.Background(), repo, "Test", 32) + for i := 50; i < 100; i++ { + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + obj := &models.Object{Class: "Test", ID: id} + // Put is idempotent, but since the IDs exist now, this is an update + // under the hood and a "reinsert" for the already deleted ones + err := repo.PutObject(context.Background(), obj, nil, nil, nil, nil, 0) + require.Nil(t, err) + } + dimAfter := getDimensionsFromRepo(context.Background(), repo, "Test") + quantDimAfter := GetQuantizedDimensionsFromRepo(context.Background(), repo, "Test", 32) + require.Equal(t, dimBefore, dimAfter+50*128, "dimensions should decrease") + require.Equal(t, quantDimBefore, quantDimAfter+50*32, "dimensions should decrease") + }) + + t.Run("verify dimensions after first set of updates", func(t *testing.T) { + idx := repo.GetIndex("Test") + idx.ForEachShard(func(name string, shard ShardLike) error { + dim, err := shard.Dimensions(context.Background(), "") + assert.NoError(t, err) + assert.Equal(t, 6400, dim) + assert.Equal(t, 3200, shard.QuantizedDimensions(context.Background(), "", 64)) + assert.Equal(t, 1600, shard.QuantizedDimensions(context.Background(), "", 32)) + assert.Equal(t, 3200, shard.QuantizedDimensions(context.Background(), "", 0)) 
+ return nil + }) + }) + + t.Run("update some of the origin nil vector objects with a d=128 vector", func(t *testing.T) { + dimBefore := getDimensionsFromRepo(context.Background(), repo, "Test") + quantDimBefore := GetQuantizedDimensionsFromRepo(context.Background(), repo, "Test", 64) + dim := 128 + for i := 100; i < 150; i++ { + vec := make([]float32, dim) + for j := range vec { + vec[j] = rand.Float32() + } + + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + obj := &models.Object{Class: "Test", ID: id} + // Put is idempotent, but since the IDs exist now, this is an update + // under the hood and a "reinsert" for the already deleted ones + err := repo.PutObject(context.Background(), obj, vec, nil, nil, nil, 0) + require.Nil(t, err) + } + dimAfter := getDimensionsFromRepo(context.Background(), repo, "Test") + quantDimAfter := GetQuantizedDimensionsFromRepo(context.Background(), repo, "Test", 64) + require.Equal(t, dimBefore+50*128, dimAfter, "dimensions should increase") + require.Equal(t, quantDimBefore+50*64, quantDimAfter, "dimensions should increase") + }) + + t.Run("update some of the nil objects with another nil vector", func(t *testing.T) { + dimBefore := getDimensionsFromRepo(context.Background(), repo, "Test") + quantDimBefore := GetQuantizedDimensionsFromRepo(context.Background(), repo, "Test", 64) + for i := 150; i < 200; i++ { + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + obj := &models.Object{Class: "Test", ID: id} + // Put is idempotent, but since the IDs exist now, this is an update + // under the hood and a "reinstert" for the already deleted ones + err := repo.PutObject(context.Background(), obj, nil, nil, nil, nil, 0) + require.Nil(t, err) + } + dimAfter := getDimensionsFromRepo(context.Background(), repo, "Test") + quantDimAfter := GetQuantizedDimensionsFromRepo(context.Background(), repo, "Test", 64) + require.Equal(t, dimBefore, dimAfter, "dimensions should not have changed") + require.Equal(t, 
quantDimBefore, quantDimAfter, "dimensions should not have changed") + }) + + t.Run("verify dimensions after more updates", func(t *testing.T) { + idx := repo.GetIndex("Test") + idx.ForEachShard(func(name string, shard ShardLike) error { + dim, err := shard.Dimensions(context.Background(), "") + assert.NoError(t, err) + assert.Equal(t, 12800, dim) + assert.Equal(t, 6400, shard.QuantizedDimensions(context.Background(), "", 64)) + assert.Equal(t, 3200, shard.QuantizedDimensions(context.Background(), "", 32)) + // segments = 0, will use 128/2 = 64 segments and so value should be 6400 + assert.Equal(t, 6400, shard.QuantizedDimensions(context.Background(), "", 0)) + return nil + }) + }) +} + +func TestTotalDimensionTrackingMetrics(t *testing.T) { + const ( + objectCount = 100 + multiVecCard = 3 + dimensionsPerVector = 64 + ) + + for _, tt := range []struct { + name string + vectorConfig func() enthnsw.UserConfig + namedVectorConfig func() enthnsw.UserConfig + multiVectorConfig func() enthnsw.UserConfig + + expectDimensions float64 + expectSegments float64 + }{ + { + name: "legacy", + vectorConfig: func() enthnsw.UserConfig { return enthnsw.NewDefaultUserConfig() }, + + expectDimensions: dimensionsPerVector * objectCount, + }, + { + name: "named", + namedVectorConfig: func() enthnsw.UserConfig { return enthnsw.NewDefaultUserConfig() }, + + expectDimensions: dimensionsPerVector * objectCount, + }, + { + name: "multi", + multiVectorConfig: func() enthnsw.UserConfig { return enthnsw.NewDefaultUserConfig() }, + + expectDimensions: multiVecCard * dimensionsPerVector * objectCount, + }, + { + name: "mixed", + vectorConfig: func() enthnsw.UserConfig { return enthnsw.NewDefaultUserConfig() }, + namedVectorConfig: func() enthnsw.UserConfig { return enthnsw.NewDefaultUserConfig() }, + + expectDimensions: 2 * dimensionsPerVector * objectCount, + }, + { + name: "named_with_bq", + namedVectorConfig: func() enthnsw.UserConfig { + cfg := enthnsw.NewDefaultUserConfig() + cfg.BQ.Enabled 
= true + return cfg + }, + + expectSegments: (dimensionsPerVector / 8) * objectCount, + }, + { + name: "named_with_pq", + namedVectorConfig: func() enthnsw.UserConfig { + cfg := enthnsw.NewDefaultUserConfig() + cfg.PQ.Enabled = true + cfg.PQ.Segments = 10 + return cfg + }, + + expectSegments: 10 * objectCount, + }, + { + name: "named_with_pq_zero_segments", + namedVectorConfig: func() enthnsw.UserConfig { + cfg := enthnsw.NewDefaultUserConfig() + cfg.PQ.Enabled = true + return cfg + }, + expectSegments: (dimensionsPerVector / 2) * objectCount, + }, + { + name: "multi_and_bq_named", + namedVectorConfig: func() enthnsw.UserConfig { + cfg := enthnsw.NewDefaultUserConfig() + cfg.BQ.Enabled = true + return cfg + }, + multiVectorConfig: func() enthnsw.UserConfig { return enthnsw.NewDefaultUserConfig() }, + expectDimensions: multiVecCard * dimensionsPerVector * objectCount, + expectSegments: (dimensionsPerVector / 8) * objectCount, + }, + } { + t.Run(tt.name, func(t *testing.T) { + var ( + class = &models.Class{ + Class: tt.name, + InvertedIndexConfig: invertedConfig(), + VectorConfig: map[string]models.VectorConfig{}, + } + + namedVectorName = "namedVector" + multiVectorName = "multiVector" + + legacyVec []float32 + namedVecs map[string][]float32 + multiVecs map[string][][]float32 + ) + + if tt.vectorConfig != nil { + class.VectorIndexConfig = tt.vectorConfig() + legacyVec = randVector(dimensionsPerVector) + } + + if tt.namedVectorConfig != nil { + class.VectorConfig[namedVectorName] = models.VectorConfig{ + VectorIndexConfig: tt.namedVectorConfig(), + } + namedVecs = map[string][]float32{ + namedVectorName: randVector(dimensionsPerVector), + } + } + + if tt.multiVectorConfig != nil { + config := tt.multiVectorConfig() + config.Multivector = enthnsw.MultivectorConfig{Enabled: true} + class.VectorConfig[multiVectorName] = models.VectorConfig{ + VectorIndexConfig: config, + } + + multiVecs = map[string][][]float32{} + for range multiVecCard { + multiVecs[multiVectorName] = 
append(multiVecs[multiVectorName], randVector(dimensionsPerVector)) + } + } + + var ( + db = createTestDatabaseWithClass(t, monitoring.GetMetrics(), class) + shardName = getSingleShardNameFromRepo(db, class.Class) + + insertData = func() { + for i := range objectCount { + obj := &models.Object{ + Class: tt.name, + ID: intToUUID(i), + } + err := db.PutObject(context.Background(), obj, legacyVec, namedVecs, multiVecs, nil, 0) + require.Nil(t, err) + } + publishVectorMetricsFromDB(t, db) + } + + removeData = func() { + for i := range objectCount { + err := db.DeleteObject(context.Background(), class.Class, intToUUID(i), time.Now(), nil, "", 0) + require.NoError(t, err) + } + publishVectorMetricsFromDB(t, db) + } + + assertTotalMetrics = func(expectDims, expectSegs float64) { + metrics := monitoring.GetMetrics() + metric, err := metrics.VectorDimensionsSum.GetMetricWithLabelValues(class.Class, shardName) + require.NoError(t, err) + require.Equal(t, expectDims, testutil.ToFloat64(metric)) + + metric, err = metrics.VectorSegmentsSum.GetMetricWithLabelValues(class.Class, shardName) + require.NoError(t, err) + require.Equal(t, expectSegs, testutil.ToFloat64(metric)) + } + ) + + insertData() + assertTotalMetrics(tt.expectDimensions, tt.expectSegments) + removeData() + assertTotalMetrics(0, 0) + insertData() + assertTotalMetrics(tt.expectDimensions, tt.expectSegments) + require.NoError(t, db.DeleteIndex(schema.ClassName(class.Class))) + assertTotalMetrics(0, 0) + }) + } +} + +func intToUUID(i int) strfmt.UUID { + return strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) +} + +func TestDimensionTrackingWithGrouping(t *testing.T) { + const ( + nClasses = 2 + shardsPerClass = 1 // createTestDatabaseWithClass does not support multi-tenancy + objectCount = 5 + dimPerVector = 64 + expectDimPerShard = objectCount * dimPerVector + expectTotalDim = nClasses * shardsPerClass * expectDimPerShard + ) + + testCases := []struct { + name string + groupingEnabled bool + 
expectedLabels []string // class-shard label pairs + expectedDimensions []int // expectedDimensions for a label pair + }{ + { + name: "with_grouping_enabled", + groupingEnabled: true, + expectedLabels: []string{"n/a", "n/a"}, + expectedDimensions: []int{expectTotalDim}, + }, + { + name: "with_grouping_disabled", + groupingEnabled: false, + // Will be set dynamically + expectedLabels: nil, + expectedDimensions: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Setup metrics grouping (copy to avoid race condition) + metrics := *monitoring.GetMetrics() + metrics.Group = tc.groupingEnabled + + // Create test class and database + classes := make([]*models.Class, nClasses) + for i := range classes { + classes[i] = &models.Class{ + Class: fmt.Sprintf("%s_%d", tc.name, i), + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: shardsPerClass > 1, + AutoTenantCreation: true, + AutoTenantActivation: true, + }, + } + } + + db := createTestDatabaseWithClass(t, &metrics, classes...) 
+ + // Insert test data + for _, class := range classes { + for range shardsPerClass { + shardName := getSingleShardNameFromRepo(db, class.Class) + + for i := range objectCount { + obj := &models.Object{ + Class: class.Class, + ID: intToUUID(i), + } + + if shardsPerClass > 1 { + obj.Tenant = shardName + } + + vec := randVector(dimPerVector) + err := db.PutObject(context.Background(), obj, vec, nil, nil, nil, 0) + require.NoError(t, err, "put object") + } + + // Set expected labels for non-grouping case + if !tc.groupingEnabled { + tc.expectedLabels = append(tc.expectedLabels, class.Class, shardName) + tc.expectedDimensions = append(tc.expectedDimensions, expectDimPerShard) + } + } + } + + // Publish metrics + publishVectorMetricsFromDB(t, db) + + // Check expected dimensions for each pair of labels + for i := 0; i < len(tc.expectedLabels); i += 2 { + className, shardName := tc.expectedLabels[i], tc.expectedLabels[i+1] + + // Verify dimension metrics + dim, err := metrics.VectorDimensionsSum.GetMetricWithLabelValues(className, shardName) + require.NoError(t, err, "get vector_dimensions_sum metric") + require.Equal(t, float64(tc.expectedDimensions[0]), testutil.ToFloat64(dim), + "vector_dimensions_sum{class=%s,shard=%s}", className, shardName) + + // Verify segment metrics (should be 0 for standard vectors) + segments, err := metrics.VectorSegmentsSum.GetMetricWithLabelValues(className, shardName) + require.NoError(t, err, "get vector_segments_sum metric") + require.Equal(t, float64(0), testutil.ToFloat64(segments), + "vector_segments_sum{class=%s,shard=%s}", className, shardName) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_drop.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_drop.go new file mode 100644 index 0000000000000000000000000000000000000000..80f2a3afb1664b7215d6eb3acfb00f3784ba7364 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_drop.go @@ -0,0 +1,138 @@ +// _ _ 
+// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/cyclemanager" +) + +// IMPORTANT: +// Be advised there exists LazyLoadShard::drop() implementation intended +// to drop shard that was not loaded (instantiated) yet. +// It deletes shard by performing required actions and removing entire shard directory. +// If there is any action that needs to be performed beside files/dirs being removed +// from shard directory, it needs to be reflected as well in LazyLoadShard::drop() +// method to keep drop behaviour consistent. +func (s *Shard) drop() (err error) { + s.reindexer.Stop(s, fmt.Errorf("shard drop")) + + s.metrics.DeleteShardLabels(s.index.Config.ClassName.String(), s.name) + s.metrics.baseMetrics.StartUnloadingShard() + s.replicationMap.clear() + + s.index.logger.WithFields(logrus.Fields{ + "action": "drop_shard", + "class": s.class.Class, + "shard": s.name, + }).Debug("dropping shard") + + s.clearDimensionMetrics() // not deleted in s.metrics.DeleteShardLabels + + s.mayStopAsyncReplication() + + s.haltForTransferMux.Lock() + if s.haltForTransferCancel != nil { + s.haltForTransferCancel() + } + s.haltForTransferMux.Unlock() + + ctx, cancel := context.WithTimeout(context.TODO(), 20*time.Second) + defer cancel() + + // unregister all callbacks at once, in parallel + if err = cyclemanager.NewCombinedCallbackCtrl(0, s.index.logger, + s.cycleCallbacks.compactionCallbacksCtrl, + s.cycleCallbacks.compactionAuxCallbacksCtrl, + s.cycleCallbacks.flushCallbacksCtrl, + s.cycleCallbacks.vectorCombinedCallbacksCtrl, + s.cycleCallbacks.geoPropsCombinedCallbacksCtrl, + ).Unregister(ctx); err != nil 
{ + return err + } + + if err = s.store.Shutdown(ctx); err != nil { + return errors.Wrap(err, "stop lsmkv store") + } + + if _, err = os.Stat(s.pathLSM()); err == nil { + err := os.RemoveAll(s.pathLSM()) + if err != nil { + return errors.Wrapf(err, "remove lsm store at %s", s.pathLSM()) + } + } + // delete indexcount + err = s.counter.Drop() + if err != nil { + return errors.Wrapf(err, "remove indexcount at %s", s.path()) + } + + // delete version + err = s.versioner.Drop() + if err != nil { + return errors.Wrapf(err, "remove version at %s", s.path()) + } + + err = s.ForEachVectorQueue(func(targetVector string, queue *VectorIndexQueue) error { + if err = queue.Drop(); err != nil { + return fmt.Errorf("close queue of vector %q at %s: %w", targetVector, s.path(), err) + } + return nil + }) + if err != nil { + return err + } + + err = s.ForEachVectorIndex(func(targetVector string, index VectorIndex) error { + if err = index.Drop(ctx); err != nil { + return fmt.Errorf("remove vector index of vector %q at %s: %w", targetVector, s.path(), err) + } + return nil + }) + if err != nil { + return err + } + + // delete property length tracker + err = s.GetPropertyLengthTracker().Drop() + if err != nil { + return errors.Wrapf(err, "remove prop length tracker at %s", s.path()) + } + + s.propertyIndicesLock.Lock() + err = s.propertyIndices.DropAll(ctx) + s.propertyIndicesLock.Unlock() + if err != nil { + return errors.Wrapf(err, "remove property specific indices at %s", s.path()) + } + + // remove shard dir + if err := os.RemoveAll(s.path()); err != nil { + return fmt.Errorf("delete shard dir: %w", err) + } + + s.metrics.baseMetrics.FinishUnloadingShard() + + s.index.logger.WithFields(logrus.Fields{ + "action": "drop_shard", + "class": s.class.Class, + "shard": s.name, + }).Debug("shard successfully dropped") + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_geo_props.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_geo_props.go new file mode 100644 index 0000000000000000000000000000000000000000..e6f802e17d6f0902617a5ace5de49f8da5392921 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_geo_props.go @@ -0,0 +1,195 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "time" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/propertyspecific" + "github.com/weaviate/weaviate/adapters/repos/db/vector/geo" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storobj" +) + +func (s *Shard) initGeoProp(prop *models.Property) error { + // starts geo props cycles if actual geo property is present + // (safe to start multiple times) + s.index.cycleCallbacks.geoPropsCommitLoggerCycle.Start() + s.index.cycleCallbacks.geoPropsTombstoneCleanupCycle.Start() + + idx, err := geo.NewIndex(geo.Config{ + ID: geoPropID(prop.Name), + RootPath: s.path(), + CoordinatesForID: s.makeCoordinatesForID(prop.Name), + DisablePersistence: false, + Logger: s.index.logger, + + SnapshotDisabled: s.index.Config.HNSWDisableSnapshots, + SnapshotOnStartup: s.index.Config.HNSWSnapshotOnStartup, + SnapshotCreateInterval: time.Duration(s.index.Config.HNSWSnapshotIntervalSeconds) * time.Second, + SnapshotMinDeltaCommitlogsNumer: s.index.Config.HNSWSnapshotMinDeltaCommitlogsNumber, + SnapshotMinDeltaCommitlogsSizePercentage: s.index.Config.HNSWSnapshotMinDeltaCommitlogsSizePercentage, + }, + s.cycleCallbacks.geoPropsCommitLoggerCallbacks, + s.cycleCallbacks.geoPropsTombstoneCleanupCallbacks, + ) + if err != nil { + return errors.Wrapf(err, 
"create geo index for prop %q", prop.Name) + } + + s.propertyIndicesLock.Lock() + s.propertyIndices[prop.Name] = propertyspecific.Index{ + Type: schema.DataTypeGeoCoordinates, + GeoIndex: idx, + Name: prop.Name, + } + s.propertyIndicesLock.Unlock() + + idx.PostStartup() + + return nil +} + +func (s *Shard) makeCoordinatesForID(propName string) geo.CoordinatesForID { + return func(ctx context.Context, id uint64) (*models.GeoCoordinates, error) { + obj, err := s.objectByIndexID(ctx, id, true) + if err != nil { + return nil, storobj.NewErrNotFoundf(id, "retrieve object") + } + + if obj.Properties() == nil { + return nil, storobj.NewErrNotFoundf(id, + "object has no properties") + } + + prop, ok := obj.Properties().(map[string]interface{})[propName] + if !ok { + return nil, storobj.NewErrNotFoundf(id, + "object has no property %q", propName) + } + + geoProp, ok := prop.(*models.GeoCoordinates) + if !ok { + return nil, fmt.Errorf("expected property to be of type %T, got: %T", + &models.GeoCoordinates{}, prop) + } + + return geoProp, nil + } +} + +func geoPropID(propName string) string { + return fmt.Sprintf("geo.%s", propName) +} + +func (s *Shard) updatePropertySpecificIndices(ctx context.Context, object *storobj.Object, + status objectInsertStatus, +) error { + if err := s.isReadOnly(); err != nil { + return err + } + + s.propertyIndicesLock.RLock() + defer s.propertyIndicesLock.RUnlock() + + for propName, propIndex := range s.propertyIndices { + if err := s.updatePropertySpecificIndex(ctx, propName, propIndex, + object, status); err != nil { + return errors.Wrapf(err, "property %q", propName) + } + } + + return nil +} + +func (s *Shard) updatePropertySpecificIndex(ctx context.Context, propName string, + index propertyspecific.Index, obj *storobj.Object, + status objectInsertStatus, +) error { + if index.Type != schema.DataTypeGeoCoordinates { + return fmt.Errorf("unsupported per-property index type %q", index.Type) + } + + // currently the only property-specific 
index we support + return s.updateGeoIndex(ctx, propName, index, obj, status) +} + +func (s *Shard) updateGeoIndex(ctx context.Context, propName string, + index propertyspecific.Index, obj *storobj.Object, status objectInsertStatus, +) error { + if err := s.isReadOnly(); err != nil { + return err + } + + // geo props were not changed + if status.docIDPreserved || status.skipUpsert { + return nil + } + + if status.docIDChanged { + if err := s.deleteFromGeoIndex(index, status.oldDocID); err != nil { + return errors.Wrap(err, "delete old doc id from geo index") + } + } + + return s.addToGeoIndex(ctx, propName, index, obj, status) +} + +func (s *Shard) addToGeoIndex(ctx context.Context, propName string, + index propertyspecific.Index, + obj *storobj.Object, status objectInsertStatus, +) error { + if err := s.isReadOnly(); err != nil { + return err + } + + if obj.Properties() == nil { + return nil + } + + asMap := obj.Properties().(map[string]interface{}) + propValue, ok := asMap[propName] + if !ok { + return nil + } + + // geo coordinates is the only supported one at the moment + asGeo, ok := propValue.(*models.GeoCoordinates) + if !ok { + return fmt.Errorf("expected prop to be of type %T, but got: %T", + &models.GeoCoordinates{}, propValue) + } + + if err := index.GeoIndex.Add(ctx, status.docID, asGeo); err != nil { + return errors.Wrapf(err, "insert into geo index") + } + + return nil +} + +func (s *Shard) deleteFromGeoIndex(index propertyspecific.Index, + docID uint64, +) error { + if err := s.isReadOnly(); err != nil { + return err + } + + if err := index.GeoIndex.Delete(docID); err != nil { + return errors.Wrapf(err, "delete from geo index") + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_group_by.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_group_by.go new file mode 100644 index 0000000000000000000000000000000000000000..cda850a5312be56170c1cc6b9c7f94eb7ff1b081 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_group_by.go @@ -0,0 +1,249 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "encoding/binary" + "encoding/json" + "fmt" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/storobj" +) + +func (s *Shard) groupResults(ctx context.Context, ids []uint64, + dists []float32, groupBy *searchparams.GroupBy, + additional additional.Properties, properties []string, +) ([]*storobj.Object, []float32, error) { + objsBucket := s.store.Bucket(helpers.ObjectsBucketLSM) + className := s.index.Config.ClassName + class := s.index.getSchema.ReadOnlyClass(className.String()) + if class == nil { + return nil, nil, fmt.Errorf("could not find class %s in schema", className) + } + prop, err := schema.GetPropertyByName(class, groupBy.Property) + if err != nil { + return nil, nil, fmt.Errorf("%w: unrecognized property: %s", err, groupBy.Property) + } + dt, err := schema.FindPropertyDataTypeWithRefs(s.index.getSchema.ReadOnlyClass, prop.DataType, false, "") + if err != nil { + return nil, nil, fmt.Errorf("%w: unrecognized data type for property: %s", err, groupBy.Property) + } + + var props []string + props = append(props, properties...) 
+ for _, propTmp := range groupBy.Properties { + props = append(props, propTmp.Name) + } + + return newGrouper(ids, dists, groupBy, objsBucket, dt, additional, props).Do(ctx) +} + +type grouper struct { + ids []uint64 + dists []float32 + groupBy *searchparams.GroupBy + additional additional.Properties + propertyDataType schema.PropertyDataType + objBucket *lsmkv.Bucket + properties []string +} + +func newGrouper(ids []uint64, dists []float32, + groupBy *searchparams.GroupBy, objBucket *lsmkv.Bucket, + propertyDataType schema.PropertyDataType, + additional additional.Properties, properties []string, +) *grouper { + return &grouper{ + ids: ids, + dists: dists, + groupBy: groupBy, + objBucket: objBucket, + propertyDataType: propertyDataType, + additional: additional, + properties: properties, + } +} + +func (g *grouper) Do(ctx context.Context) ([]*storobj.Object, []float32, error) { + docIDBytes := make([]byte, 8) + + groupsOrdered := []string{} + groups := map[string][]uint64{} + docIDObject := map[uint64]*storobj.Object{} + docIDDistance := map[uint64]float32{} + + propertyPaths := make([][]string, len(g.properties)) + for j := range g.properties { + propertyPaths[j] = []string{g.properties[j]} + } + + props := &storobj.PropertyExtraction{ + PropertyPaths: propertyPaths, + } + +DOCS_LOOP: + for i, docID := range g.ids { + binary.LittleEndian.PutUint64(docIDBytes, docID) + objData, err := g.objBucket.GetBySecondary(0, docIDBytes) + if err != nil { + return nil, nil, fmt.Errorf("%w: could not get obj by doc id %d", err, docID) + } + if objData == nil { + continue + } + value, ok, _ := storobj.ParseAndExtractProperty(objData, g.groupBy.Property) + if !ok { + continue + } + + values, err := g.getValues(value) + if err != nil { + return nil, nil, err + } + + for _, val := range values { + current, groupExists := groups[val] + if len(current) >= g.groupBy.ObjectsPerGroup { + continue + } + + if !groupExists && len(groups) >= g.groupBy.Groups { + continue DOCS_LOOP + } + + 
groups[val] = append(current, docID) + + if !groupExists { + // this group doesn't exist add it to the ordered list + groupsOrdered = append(groupsOrdered, val) + } + + if _, ok := docIDObject[docID]; !ok { + // whole object, might be that we only need value and ID to be extracted + unmarshalled, err := storobj.FromBinaryOptional(objData, g.additional, props) + if err != nil { + return nil, nil, fmt.Errorf("%w: unmarshal data object at position %d", err, i) + } + docIDObject[docID] = unmarshalled + docIDDistance[docID] = g.dists[i] + } + } + } + + objs := make([]*storobj.Object, len(groupsOrdered)) + dists := make([]float32, len(groupsOrdered)) + objIDs := []uint64{} + for i, val := range groupsOrdered { + docIDs := groups[val] + unmarshalled, err := g.getUnmarshalled(docIDs[0], docIDObject, objIDs) + if err != nil { + return nil, nil, err + } + dist := docIDDistance[docIDs[0]] + objIDs = append(objIDs, docIDs[0]) + hits := make([]map[string]interface{}, len(docIDs)) + for j, docID := range docIDs { + props := map[string]interface{}{} + for k, v := range docIDObject[docID].Properties().(map[string]interface{}) { + props[k] = v + } + props["_additional"] = &additional.GroupHitAdditional{ + ID: docIDObject[docID].ID(), + Distance: docIDDistance[docID], + Vector: docIDObject[docID].Vector, + Vectors: docIDObject[docID].GetVectors(), + } + + hits[j] = props + } + group := &additional.Group{ + ID: i, + GroupedBy: &additional.GroupedBy{ + Value: val, + Path: []string{g.groupBy.Property}, + }, + Count: len(hits), + Hits: hits, + MinDistance: docIDDistance[docIDs[0]], + MaxDistance: docIDDistance[docIDs[len(docIDs)-1]], + } + + // add group + if unmarshalled.AdditionalProperties() == nil { + unmarshalled.Object.Additional = models.AdditionalProperties{} + } + unmarshalled.AdditionalProperties()["group"] = group + + objs[i] = unmarshalled + dists[i] = dist + } + + return objs, dists, nil +} + +func (g *grouper) getUnmarshalled(docID uint64, + docIDObject 
map[uint64]*storobj.Object, + objIDs []uint64, +) (*storobj.Object, error) { + containsDocID := false + for i := range objIDs { + if objIDs[i] == docID { + containsDocID = true + break + } + } + if containsDocID { + // we have already added this object containing a group to the result array + // and we need to unmarshall it again so that a group won't get overridden + docIDBytes := make([]byte, 8) + binary.LittleEndian.PutUint64(docIDBytes, docID) + objData, err := g.objBucket.GetBySecondary(0, docIDBytes) + if err != nil { + return nil, fmt.Errorf("%w: could not get obj by doc id %d", err, docID) + } + unmarshalled, err := storobj.FromBinaryOptional(objData, g.additional, nil) + if err != nil { + return nil, fmt.Errorf("%w: unmarshal data object doc id %d", err, docID) + } + return unmarshalled, nil + } + return docIDObject[docID], nil +} + +func (g *grouper) getValues(values []string) ([]string, error) { + if len(values) == 0 { + return []string{""}, nil + } + if g.propertyDataType.IsReference() { + beacons := make([]string, len(values)) + for i := range values { + if values[i] != "" { + var ref models.SingleRef + err := json.Unmarshal([]byte(values[i]), &ref) + if err != nil { + return nil, fmt.Errorf("%w: unmarshal grouped by value %s at position %d", + err, values[i], i) + } + beacons[i] = ref.Beacon.String() + } + } + return beacons, nil + } + return values, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init.go new file mode 100644 index 0000000000000000000000000000000000000000..b6662d07c9b632a06d85270989cefc622ace1158 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init.go @@ -0,0 +1,181 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "os" + "runtime/debug" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/indexcheckpoint" + "github.com/weaviate/weaviate/adapters/repos/db/queue" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" + entsentry "github.com/weaviate/weaviate/entities/sentry" + "github.com/weaviate/weaviate/entities/storagestate" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +func NewShard(ctx context.Context, promMetrics *monitoring.PrometheusMetrics, + shardName string, index *Index, class *models.Class, jobQueueCh chan job, + scheduler *queue.Scheduler, indexCheckpoints *indexcheckpoint.Checkpoints, + reindexer ShardReindexerV3, lazyLoadSegments bool, bitmapBufPool roaringset.BitmapBufPool, +) (_ *Shard, err error) { + start := time.Now() + + index.logger.WithFields(logrus.Fields{ + "action": "init_shard", + "shard": shardName, + "index": index.ID(), + }).Debugf("initializing shard %q", shardName) + + s := &Shard{ + index: index, + class: class, + name: shardName, + promMetrics: promMetrics, + metrics: NewMetrics(index.logger, promMetrics, + string(index.Config.ClassName), shardName), + slowQueryReporter: helpers.NewSlowQueryReporter(index.Config.QuerySlowLogEnabled, + index.Config.QuerySlowLogThreshold, index.logger), + replicationMap: pendingReplicaTasks{Tasks: make(map[string]replicaTask, 32)}, + centralJobQueue: jobQueueCh, + scheduler: scheduler, + indexCheckpoints: indexCheckpoints, + + shutdownLock: new(sync.RWMutex), + + status: ShardStatus{Status: storagestate.StatusLoading}, + searchableBlockmaxPropNamesLock: new(sync.Mutex), + reindexer: reindexer, + usingBlockMaxWAND: 
index.invertedIndexConfig.UsingBlockMaxWAND, + bitmapBufPool: bitmapBufPool, + } + + index.metrics.UpdateShardStatus("", storagestate.StatusLoading.String()) + + defer func() { + p := recover() + if p != nil { + err = fmt.Errorf("unexpected error initializing shard %q of index %q: %v", shardName, index.ID(), p) + index.logger.WithError(err).WithFields(logrus.Fields{ + "index": index.ID(), + "shard": shardName, + }).Error("panic during shard initialization") + debug.PrintStack() + } + + if err != nil { + // Initializing a shard should normally not fail. If it does, this could + // mean that this setup requires further attention, e.g. to manually fix + // a data corruption. This makes it a prime use case for sentry: + entsentry.CaptureException(err) + // spawn a new context as we cannot guarantee that the init context is + // still valid, but we want to make sure that we have enough time to clean + // up the partial init + ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel() + s.index.logger.WithFields(logrus.Fields{ + "action": "new_shard", + "duration": 120 * time.Second, + }).Debug("context.WithTimeout") + + s.cleanupPartialInit(ctx) + } + }() + + defer func() { + index.metrics.ObserveUpdateShardStatus(s.status.Status.String(), time.Since(start)) + }() + + s.activityTrackerRead.Store(1) // initial state + s.activityTrackerWrite.Store(1) // initial state + s.initCycleCallbacks() + + s.docIdLock = make([]sync.Mutex, IdLockPoolSize) + + defer index.metrics.ShardStartup(start) + + _, err = os.Stat(s.path()) + exists := err == nil + + if err := os.MkdirAll(s.path(), os.ModePerm); err != nil { + return nil, err + } + + // init the store itself synchronously + if err := s.initLSMStore(); err != nil { + return nil, fmt.Errorf("init shard's %q store: %w", s.ID(), err) + } + + _ = s.reindexer.RunBeforeLsmInit(ctx, s) + + if s.index.Config.LazySegmentsDisabled { + lazyLoadSegments = false // disable globally + } + if err := 
s.initNonVector(ctx, class, lazyLoadSegments); err != nil { + return nil, errors.Wrapf(err, "init shard %q", s.ID()) + } + + if err = s.initShardVectors(ctx, lazyLoadSegments); err != nil { + return nil, fmt.Errorf("init shard vectors: %w", err) + } + + if asyncEnabled() { + f := func() { + _ = s.ForEachVectorQueue(func(targetVector string, _ *VectorIndexQueue) error { + if err := s.ConvertQueue(targetVector); err != nil { + index.logger.WithError(err).Errorf("preload shard for target vector: %s", targetVector) + } + return nil + }) + } + enterrors.GoWrapper(f, s.index.logger) + } + s.NotifyReady() + + if exists { + s.index.logger.Printf("Completed loading shard %s in %s", s.ID(), time.Since(start)) + } else { + s.index.logger.Printf("Created shard %s in %s", s.ID(), time.Since(start)) + } + + _ = s.reindexer.RunAfterLsmInit(ctx, s) + _ = s.reindexer.RunAfterLsmInitAsync(ctx, s) + return s, nil +} + +// cleanupPartialInit is called when the shard was only partially initialized. +// Internally it just uses [Shutdown], but also adds some logging. +func (s *Shard) cleanupPartialInit(ctx context.Context) { + log := s.index.logger.WithField("action", "cleanup_partial_initialization") + if err := s.Shutdown(ctx); err != nil { + log.WithError(err).Error("failed to shutdown store") + } + + log.Debug("successfully cleaned up partially initialized shard") +} + +func (s *Shard) NotifyReady() { + s.UpdateStatus(storagestate.StatusReady.String(), "notify ready") + s.index.logger. + WithField("action", "startup"). 
+ Debugf("shard=%s is ready", s.name) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init_blockmax.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init_blockmax.go new file mode 100644 index 0000000000000000000000000000000000000000..d1f692c1095f32ecc7411470ca46a866546f16fe --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init_blockmax.go @@ -0,0 +1,61 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/usecases/schema" +) + +// There is at least a searchable bucket in the shard +// that isn't using the block max inverted index +func (s *Shard) areAllSearchableBucketsBlockMax() bool { + for name, bucket := range s.Store().GetBucketsByName() { + _, indexType := GetPropNameAndIndexTypeFromBucketName(name) + if bucket.Strategy() == lsmkv.StrategyMapCollection && indexType == IndexTypePropSearchableValue { + return false + } + } + return true +} + +func structToMap(obj interface{}) (newMap map[string]interface{}) { + data, _ := json.Marshal(obj) // Convert to a json string + json.Unmarshal(data, &newMap) // Convert to a map + return +} + +func updateToBlockMaxInvertedIndexConfig(ctx context.Context, sc *schema.Manager, className string) error { + class := sc.ReadOnlyClass(className) + if class == nil { + return fmt.Errorf("class %q not found", className) + } + // nothing to update + if class.InvertedIndexConfig.UsingBlockMaxWAND { + return nil + } + class.ModuleConfig = structToMap(class.ModuleConfig) + class.VectorIndexConfig = structToMap(class.VectorIndexConfig) + class.ShardingConfig = 
structToMap(class.ShardingConfig) + for i := range class.VectorConfig { + tempConfig := class.VectorConfig[i] + tempConfig.VectorIndexConfig = structToMap(tempConfig.VectorIndexConfig) + tempConfig.Vectorizer = structToMap(tempConfig.Vectorizer) + class.VectorConfig[i] = tempConfig + } + class.InvertedIndexConfig.UsingBlockMaxWAND = true + return schema.UpdateClassInternal(&sc.Handler, ctx, className, class) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init_lsm.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init_lsm.go new file mode 100644 index 0000000000000000000000000000000000000000..0a15ecf64e83ada683a20d29ccad93979c7b53ec --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init_lsm.go @@ -0,0 +1,207 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "path" + "time" + + "github.com/weaviate/weaviate/entities/schema" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/indexcounter" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/config" +) + +func (s *Shard) initNonVector(ctx context.Context, class *models.Class, lazyLoadSegments bool) error { + before := time.Now() + defer func() { + took := time.Since(before) + s.index.logger.WithFields(logrus.Fields{ + "action": "init_shard_non_vector", + "duration": took, + }).Debugf("loaded non-vector (lsm, object, inverted) in %s for shard %q", took, s.ID()) + }() + + // the shard versioner is also dependency of some of the bucket + // initializations, so it also needs to happen synchronously + if err := s.initIndexCounterVersionerAndBitmapFactory(); err != nil { + return fmt.Errorf("init shard %q: %w", s.ID(), err) + } + + // Run all other inits in parallel and use a single error group to wait for + // all init tasks, the wait statement is at the end of this method. No other + // methods should attempt to wait on this error group. 
+ eg := enterrors.NewErrorGroupWrapper(s.index.logger) + + eg.Go(func() error { + return s.initObjectBucket(ctx) + }) + + eg.Go(func() error { + return s.initProplenTracker() + }) + + // geo props depend on the object bucket and we need to wait for its creation in this case + hasGeoProp := false + for _, prop := range class.Properties { + if len(prop.DataType) != 1 { + continue + } + if prop.DataType[0] == schema.DataTypeGeoCoordinates.String() { + hasGeoProp = true + break + } + } + + if hasGeoProp { + err := eg.Wait() + if err != nil { + // annotate error with shard id only once, all inner functions should only + // annotate what they do, but not repeat the shard id. + return fmt.Errorf("init shard %q: %w", s.ID(), err) + } + } + + // error group is passed, so properties can be initialized in parallel with + // the other initializations going on here. + s.initProperties(eg, class, lazyLoadSegments) + + err := eg.Wait() + if err != nil { + // annotate error with shard id only once, all inner functions should only + // annotate what they do, but not repeat the shard id. 
+ return fmt.Errorf("init shard %q: %w", s.ID(), err) + } + + // Object bucket must be available, initAsyncReplication depends on it + if s.index.asyncReplicationEnabled() { + s.asyncReplicationRWMux.Lock() + defer s.asyncReplicationRWMux.Unlock() + + err = s.initAsyncReplication() + if err != nil { + return fmt.Errorf("init async replication on shard %q: %w", s.ID(), err) + } + } else if s.index.replicationEnabled() { + s.index.logger.Infof("async replication disabled on shard %q", s.ID()) + } + + // check if we need to set Inverted Index config to use BlockMax inverted format for new properties + // TODO(amourao): this is a temporary solution, we need to update the inverted index config in the schema as well + // right now, this is done as part of the migration process, but we need to find a way of dealing with MT indices + // where some shards are using the old format and some shards are using the new format + if !s.usingBlockMaxWAND && config.DefaultUsingBlockMaxWAND { + s.usingBlockMaxWAND = s.areAllSearchableBucketsBlockMax() + } + + return nil +} + +func (s *Shard) initLSMStore() error { + annotatedLogger := s.index.logger.WithFields(logrus.Fields{ + "shard": s.name, + "index": s.index.ID(), + "class": s.index.Config.ClassName, + }) + var metrics *lsmkv.Metrics + if s.promMetrics != nil { + metrics = lsmkv.NewMetrics(s.promMetrics, string(s.index.Config.ClassName), s.name) + } + + store, err := lsmkv.New(s.pathLSM(), s.path(), annotatedLogger, metrics, + s.cycleCallbacks.compactionCallbacks, + s.cycleCallbacks.compactionAuxCallbacks, + s.cycleCallbacks.flushCallbacks) + if err != nil { + return fmt.Errorf("init lsmkv store at %s: %w", s.pathLSM(), err) + } + + s.store = store + + return nil +} + +func (s *Shard) initObjectBucket(ctx context.Context) error { + opts := []lsmkv.BucketOption{ + lsmkv.WithStrategy(lsmkv.StrategyReplace), + lsmkv.WithSecondaryIndices(2), + lsmkv.WithPread(s.index.Config.AvoidMMap), + lsmkv.WithKeepTombstones(true), + 
s.dynamicMemtableSizing(), + s.memtableDirtyConfig(), + lsmkv.WithAllocChecker(s.index.allocChecker), + lsmkv.WithMaxSegmentSize(s.index.Config.MaxSegmentSize), + lsmkv.WithSegmentsChecksumValidationEnabled(s.index.Config.LSMEnableSegmentsChecksumValidation), + s.segmentCleanupConfig(), + lsmkv.WithMinMMapSize(s.index.Config.MinMMapSize), + lsmkv.WithMinWalThreshold(s.index.Config.MaxReuseWalSize), + lsmkv.WithCalcCountNetAdditions(true), + // dont lazy segment load object bucket - we need it in most (all?) operations + lsmkv.WithWriteSegmentInfoIntoFileName(s.index.Config.SegmentInfoIntoFileNameEnabled), + lsmkv.WithWriteMetadata(s.index.Config.WriteMetadataFilesEnabled), + } + + if s.metrics != nil && !s.metrics.grouped { + // If metrics are grouped we cannot observe the count of an individual + // shard's object store because there is just a single metric. We would + // override it. See https://github.com/weaviate/weaviate/issues/4396 for + // details. + opts = append(opts, lsmkv.WithMonitorCount()) + } + + err := s.store.CreateOrLoadBucket(ctx, helpers.ObjectsBucketLSM, opts...) 
+ if err != nil { + return fmt.Errorf("create objects bucket: %w", err) + } + + return nil +} + +func (s *Shard) initProplenTracker() error { + plPath := path.Join(s.path(), "proplengths") + tracker, err := inverted.NewJsonShardMetaData(plPath, s.index.logger) + if err != nil { + return fmt.Errorf("init prop length tracker: %w", err) + } + + s.propLenTracker = tracker + return nil +} + +func (s *Shard) initIndexCounterVersionerAndBitmapFactory() error { + counter, err := indexcounter.New(s.path()) + if err != nil { + return fmt.Errorf("init index counter: %w", err) + } + s.counter = counter + // counter is incremented whenever new docID is fetched, therefore last docID is lower by 1 + s.bitmapFactory = roaringset.NewBitmapFactory(s.bitmapBufPool, func() uint64 { return s.counter.Get() - 1 }) + + dataPresent := s.counter.PreviewNext() != 0 + versionPath := path.Join(s.path(), "version") + versioner, err := newShardVersioner(versionPath, dataPresent) + if err != nil { + return fmt.Errorf("init shard versioner: %w", err) + } + s.versioner = versioner + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init_properties.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init_properties.go new file mode 100644 index 0000000000000000000000000000000000000000..aad9133c47824ab25ab1807511fc8b1059ab829d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init_properties.go @@ -0,0 +1,364 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/propertyspecific" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" +) + +func (s *Shard) initProperties(eg *enterrors.ErrorGroupWrapper, class *models.Class, lazyLoadSegments bool) { + s.propertyIndices = propertyspecific.Indices{} + if class == nil { + return + } + + s.initPropertyBuckets(context.Background(), eg, lazyLoadSegments, class.Properties...) + + eg.Go(func() error { + return s.addIDProperty(context.TODO(), lazyLoadSegments) + }) + + if s.index.invertedIndexConfig.IndexTimestamps { + eg.Go(func() error { + return s.addTimestampProperties(context.TODO(), lazyLoadSegments) + }) + } + + if s.index.Config.TrackVectorDimensions { + eg.Go(func() error { + return s.addDimensionsProperty(context.TODO()) + }) + } +} + +func (s *Shard) initPropertyBuckets(ctx context.Context, eg *enterrors.ErrorGroupWrapper, lazyLoadSegments bool, props ...*models.Property) { + for _, prop := range props { + if !inverted.HasAnyInvertedIndex(prop) { + continue + } + + propCopy := *prop // prevent loop variable capture + + eg.Go(func() error { + if err := s.createPropertyValueIndex(ctx, &propCopy, lazyLoadSegments); err != nil { + return fmt.Errorf("init prop %q: value index: %w", propCopy.Name, err) + } + return nil + }) + + if s.index.invertedIndexConfig.IndexNullState { + eg.Go(func() error { + if err := s.createPropertyNullIndex(ctx, &propCopy, lazyLoadSegments); err != nil { + return fmt.Errorf("init prop %q: null index: %w", prop.Name, err) + } + return nil + }) + } + + if 
s.index.invertedIndexConfig.IndexPropertyLength { + eg.Go(func() error { + if err := s.createPropertyLengthIndex(ctx, &propCopy, lazyLoadSegments); err != nil { + return fmt.Errorf("init prop %q: length index: %w", prop.Name, err) + } + return nil + }) + } + } +} + +func (s *Shard) createPropertyValueIndex(ctx context.Context, prop *models.Property, lazyLoadSegments bool) error { + if err := s.isReadOnly(); err != nil { + return err + } + + bucketOpts := []lsmkv.BucketOption{ + s.memtableDirtyConfig(), + s.dynamicMemtableSizing(), + lsmkv.WithPread(s.index.Config.AvoidMMap), + lsmkv.WithAllocChecker(s.index.allocChecker), + lsmkv.WithMaxSegmentSize(s.index.Config.MaxSegmentSize), + lsmkv.WithSegmentsChecksumValidationEnabled(s.index.Config.LSMEnableSegmentsChecksumValidation), + lsmkv.WithMinMMapSize(s.index.Config.MinMMapSize), + lsmkv.WithMinWalThreshold(s.index.Config.MaxReuseWalSize), + lsmkv.WithLazySegmentLoading(lazyLoadSegments), + s.segmentCleanupConfig(), + lsmkv.WithWriteSegmentInfoIntoFileName(s.index.Config.SegmentInfoIntoFileNameEnabled), + lsmkv.WithWriteMetadata(s.index.Config.WriteMetadataFilesEnabled), + } + + if inverted.HasFilterableIndex(prop) { + if dt, _ := schema.AsPrimitive(prop.DataType); dt == schema.DataTypeGeoCoordinates { + return s.initGeoProp(prop) + } + + if schema.IsRefDataType(prop.DataType) { + if err := s.store.CreateOrLoadBucket(ctx, + helpers.BucketFromPropNameMetaCountLSM(prop.Name), + append(bucketOpts, + lsmkv.WithStrategy(lsmkv.StrategyRoaringSet), + lsmkv.WithBitmapBufPool(s.bitmapBufPool), + )..., + ); err != nil { + return err + } + } + + if err := s.store.CreateOrLoadBucket(ctx, + helpers.BucketFromPropNameLSM(prop.Name), + append(bucketOpts, + lsmkv.WithStrategy(lsmkv.StrategyRoaringSet), + lsmkv.WithBitmapBufPool(s.bitmapBufPool), + )..., + ); err != nil { + return err + } + } + + if inverted.HasSearchableIndex(prop) { + strategy := lsmkv.DefaultSearchableStrategy(s.usingBlockMaxWAND) + searchableBucketOpts := 
append( + bucketOpts, + lsmkv.WithStrategy(strategy), + lsmkv.WithMinMMapSize(s.index.Config.MinMMapSize), + lsmkv.WithMinWalThreshold(s.index.Config.MaxReuseWalSize), + ) + if strategy == lsmkv.StrategyMapCollection && s.versioner.Version() < 2 { + searchableBucketOpts = append(searchableBucketOpts, lsmkv.WithLegacyMapSorting()) + } + + if s.class.InvertedIndexConfig != nil { + searchableBucketOpts = append(searchableBucketOpts, lsmkv.WithBM25Config(s.class.InvertedIndexConfig.Bm25)) + } + + bucketName := helpers.BucketSearchableFromPropNameLSM(prop.Name) + if err := s.store.CreateOrLoadBucket(ctx, bucketName, searchableBucketOpts...); err != nil { + return err + } + + if actualStrategy := s.store.Bucket(bucketName).Strategy(); actualStrategy == lsmkv.StrategyInverted { + s.markSearchableBlockmaxProperties(prop.Name) + } + } + + if inverted.HasRangeableIndex(prop) { + if err := s.store.CreateOrLoadBucket(ctx, + helpers.BucketRangeableFromPropNameLSM(prop.Name), + append(bucketOpts, + lsmkv.WithStrategy(lsmkv.StrategyRoaringSetRange), + lsmkv.WithUseBloomFilter(false), + lsmkv.WithKeepSegmentsInMemory(s.index.Config.IndexRangeableInMemory), + lsmkv.WithBitmapBufPool(s.bitmapBufPool), + lsmkv.WithMinMMapSize(s.index.Config.MinMMapSize), + lsmkv.WithMinWalThreshold(s.index.Config.MaxReuseWalSize), + )..., + ); err != nil { + return err + } + } + + return nil +} + +func (s *Shard) createPropertyLengthIndex(ctx context.Context, prop *models.Property, lazyLoadSegments bool) error { + if err := s.isReadOnly(); err != nil { + return err + } + + // some datatypes are not added to the inverted index, so we can skip them here + switch schema.DataType(prop.DataType[0]) { + case schema.DataTypeGeoCoordinates, schema.DataTypePhoneNumber, schema.DataTypeBlob, schema.DataTypeInt, + schema.DataTypeNumber, schema.DataTypeBoolean, schema.DataTypeDate: + return nil + default: + } + + return s.store.CreateOrLoadBucket(ctx, + helpers.BucketFromPropNameLengthLSM(prop.Name), + 
lsmkv.WithStrategy(lsmkv.StrategyRoaringSet), + lsmkv.WithBitmapBufPool(s.bitmapBufPool), + lsmkv.WithPread(s.index.Config.AvoidMMap), + lsmkv.WithAllocChecker(s.index.allocChecker), + lsmkv.WithMaxSegmentSize(s.index.Config.MaxSegmentSize), + lsmkv.WithSegmentsChecksumValidationEnabled(s.index.Config.LSMEnableSegmentsChecksumValidation), + lsmkv.WithMinMMapSize(s.index.Config.MinMMapSize), + lsmkv.WithMinWalThreshold(s.index.Config.MaxReuseWalSize), + lsmkv.WithLazySegmentLoading(lazyLoadSegments), + lsmkv.WithWriteSegmentInfoIntoFileName(s.index.Config.SegmentInfoIntoFileNameEnabled), + lsmkv.WithWriteMetadata(s.index.Config.WriteMetadataFilesEnabled), + s.segmentCleanupConfig(), + ) +} + +func (s *Shard) createPropertyNullIndex(ctx context.Context, prop *models.Property, lazyLoadSegments bool) error { + if err := s.isReadOnly(); err != nil { + return err + } + + return s.store.CreateOrLoadBucket(ctx, + helpers.BucketFromPropNameNullLSM(prop.Name), + lsmkv.WithStrategy(lsmkv.StrategyRoaringSet), + lsmkv.WithBitmapBufPool(s.bitmapBufPool), + lsmkv.WithPread(s.index.Config.AvoidMMap), + lsmkv.WithAllocChecker(s.index.allocChecker), + lsmkv.WithMaxSegmentSize(s.index.Config.MaxSegmentSize), + lsmkv.WithSegmentsChecksumValidationEnabled(s.index.Config.LSMEnableSegmentsChecksumValidation), + lsmkv.WithMinMMapSize(s.index.Config.MinMMapSize), + lsmkv.WithMinWalThreshold(s.index.Config.MaxReuseWalSize), + lsmkv.WithLazySegmentLoading(lazyLoadSegments), + lsmkv.WithWriteSegmentInfoIntoFileName(s.index.Config.SegmentInfoIntoFileNameEnabled), + lsmkv.WithWriteMetadata(s.index.Config.WriteMetadataFilesEnabled), + s.segmentCleanupConfig(), + ) +} + +func (s *Shard) addIDProperty(ctx context.Context, lazyLoadSegments bool) error { + if err := s.isReadOnly(); err != nil { + return err + } + + err := s.store.CreateOrLoadBucket(ctx, + helpers.BucketFromPropNameLSM(filters.InternalPropID), + s.memtableDirtyConfig(), + lsmkv.WithStrategy(lsmkv.StrategySetCollection), + 
lsmkv.WithPread(s.index.Config.AvoidMMap), + lsmkv.WithAllocChecker(s.index.allocChecker), + lsmkv.WithMaxSegmentSize(s.index.Config.MaxSegmentSize), + lsmkv.WithSegmentsChecksumValidationEnabled(s.index.Config.LSMEnableSegmentsChecksumValidation), + lsmkv.WithMinMMapSize(s.index.Config.MinMMapSize), + lsmkv.WithMinWalThreshold(s.index.Config.MaxReuseWalSize), + lsmkv.WithLazySegmentLoading(lazyLoadSegments), + lsmkv.WithWriteSegmentInfoIntoFileName(s.index.Config.SegmentInfoIntoFileNameEnabled), + lsmkv.WithWriteMetadata(s.index.Config.WriteMetadataFilesEnabled), + s.segmentCleanupConfig(), + ) + if err != nil { + return fmt.Errorf("create id property: %w", err) + } + return nil +} + +func (s *Shard) createDimensionsBucket(ctx context.Context, name string) error { + if err := s.isReadOnly(); err != nil { + return err + } + + err := s.store.CreateOrLoadBucket(ctx, + name, + s.memtableDirtyConfig(), + lsmkv.WithStrategy(lsmkv.StrategyMapCollection), + lsmkv.WithPread(s.index.Config.AvoidMMap), + lsmkv.WithAllocChecker(s.index.allocChecker), + lsmkv.WithMaxSegmentSize(s.index.Config.MaxSegmentSize), + lsmkv.WithSegmentsChecksumValidationEnabled(s.index.Config.LSMEnableSegmentsChecksumValidation), + lsmkv.WithMinMMapSize(s.index.Config.MinMMapSize), + lsmkv.WithMinWalThreshold(s.index.Config.MaxReuseWalSize), + lsmkv.WithWriteSegmentInfoIntoFileName(s.index.Config.SegmentInfoIntoFileNameEnabled), + lsmkv.WithWriteMetadata(s.index.Config.WriteMetadataFilesEnabled), + s.segmentCleanupConfig(), + ) + if err != nil { + return fmt.Errorf("create dimensions bucket: %w", err) + } + return nil +} + +func (s *Shard) addDimensionsProperty(ctx context.Context) error { + if err := s.isReadOnly(); err != nil { + return err + } + + // Note: this data would fit the "Set" type better, but since the "Map" type + // is currently optimized better, it is more efficient to use a Map here. 
+ err := s.createDimensionsBucket(ctx, helpers.DimensionsBucketLSM) + if err != nil { + return fmt.Errorf("create dimensions tracking property: %w", err) + } + + return nil +} + +func (s *Shard) addTimestampProperties(ctx context.Context, lazyLoadSegments bool) error { + if err := s.isReadOnly(); err != nil { + return err + } + + if err := s.addCreationTimeUnixProperty(ctx, lazyLoadSegments); err != nil { + return fmt.Errorf("create creation time property: %w", err) + } + + if err := s.addLastUpdateTimeUnixProperty(ctx, lazyLoadSegments); err != nil { + return fmt.Errorf("create last update time property: %w", err) + } + + return nil +} + +func (s *Shard) addCreationTimeUnixProperty(ctx context.Context, lazyLoadSegments bool) error { + return s.store.CreateOrLoadBucket(ctx, + helpers.BucketFromPropNameLSM(filters.InternalPropCreationTimeUnix), + s.memtableDirtyConfig(), + lsmkv.WithStrategy(lsmkv.StrategyRoaringSet), + lsmkv.WithBitmapBufPool(s.bitmapBufPool), + lsmkv.WithPread(s.index.Config.AvoidMMap), + lsmkv.WithAllocChecker(s.index.allocChecker), + lsmkv.WithMaxSegmentSize(s.index.Config.MaxSegmentSize), + lsmkv.WithSegmentsChecksumValidationEnabled(s.index.Config.LSMEnableSegmentsChecksumValidation), + lsmkv.WithMinMMapSize(s.index.Config.MinMMapSize), + lsmkv.WithMinWalThreshold(s.index.Config.MaxReuseWalSize), + lsmkv.WithLazySegmentLoading(lazyLoadSegments), + lsmkv.WithWriteSegmentInfoIntoFileName(s.index.Config.SegmentInfoIntoFileNameEnabled), + lsmkv.WithWriteMetadata(s.index.Config.WriteMetadataFilesEnabled), + s.segmentCleanupConfig(), + ) +} + +func (s *Shard) addLastUpdateTimeUnixProperty(ctx context.Context, lazyLoadSegments bool) error { + return s.store.CreateOrLoadBucket(ctx, + helpers.BucketFromPropNameLSM(filters.InternalPropLastUpdateTimeUnix), + s.memtableDirtyConfig(), + lsmkv.WithStrategy(lsmkv.StrategyRoaringSet), + lsmkv.WithBitmapBufPool(s.bitmapBufPool), + lsmkv.WithPread(s.index.Config.AvoidMMap), + 
lsmkv.WithAllocChecker(s.index.allocChecker), + lsmkv.WithMaxSegmentSize(s.index.Config.MaxSegmentSize), + lsmkv.WithSegmentsChecksumValidationEnabled(s.index.Config.LSMEnableSegmentsChecksumValidation), + lsmkv.WithMinMMapSize(s.index.Config.MinMMapSize), + lsmkv.WithMinWalThreshold(s.index.Config.MaxReuseWalSize), + lsmkv.WithLazySegmentLoading(lazyLoadSegments), + lsmkv.WithWriteSegmentInfoIntoFileName(s.index.Config.SegmentInfoIntoFileNameEnabled), + lsmkv.WithWriteMetadata(s.index.Config.WriteMetadataFilesEnabled), + s.segmentCleanupConfig(), + ) +} + +func (s *Shard) markSearchableBlockmaxProperties(propNames ...string) { + s.searchableBlockmaxPropNamesLock.Lock() + s.searchableBlockmaxPropNames = append(s.searchableBlockmaxPropNames, propNames...) + s.searchableBlockmaxPropNamesLock.Unlock() +} + +func (s *Shard) getSearchableBlockmaxProperties() []string { + // since slice is only appended, it should be safe to return it that way + s.searchableBlockmaxPropNamesLock.Lock() + defer s.searchableBlockmaxPropNamesLock.Unlock() + return s.searchableBlockmaxPropNames +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init_vector.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init_vector.go new file mode 100644 index 0000000000000000000000000000000000000000..574ebfb0d95c3da7bb881b3b0dc6a897a7ae1f67 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_init_vector.go @@ -0,0 +1,312 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "path/filepath" + "time" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/vector/dynamic" + "github.com/weaviate/weaviate/adapters/repos/db/vector/flat" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw" + "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw/distancer" + "github.com/weaviate/weaviate/adapters/repos/db/vector/noop" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + "github.com/weaviate/weaviate/entities/vectorindex" + "github.com/weaviate/weaviate/entities/vectorindex/common" + dynamicent "github.com/weaviate/weaviate/entities/vectorindex/dynamic" + flatent "github.com/weaviate/weaviate/entities/vectorindex/flat" + hnswent "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "go.etcd.io/bbolt" +) + +func (s *Shard) initShardVectors(ctx context.Context, lazyLoadSegments bool) error { + if s.index.vectorIndexUserConfig != nil { + if err := s.initLegacyVector(ctx, lazyLoadSegments); err != nil { + return err + } + } + + if err := s.initTargetVectors(ctx, lazyLoadSegments); err != nil { + return err + } + + return nil +} + +func (s *Shard) initVectorIndex(ctx context.Context, + targetVector string, vectorIndexUserConfig schemaConfig.VectorIndexConfig, lazyLoadSegments bool, +) (VectorIndex, error) { + var distProv distancer.Provider + + switch vectorIndexUserConfig.DistanceName() { + case "", common.DistanceCosine: + distProv = distancer.NewCosineDistanceProvider() + case common.DistanceDot: + distProv = distancer.NewDotProductProvider() + case common.DistanceL2Squared: + distProv = distancer.NewL2SquaredProvider() + case common.DistanceManhattan: + distProv = distancer.NewManhattanProvider() + case common.DistanceHamming: + distProv = distancer.NewHammingProvider() + default: + return nil, fmt.Errorf("init vector index: %w", + errors.Errorf("unrecognized distance metric %q,"+ + "choose one of 
[\"cosine\", \"dot\", \"l2-squared\", \"manhattan\",\"hamming\"]", vectorIndexUserConfig.DistanceName())) + } + + var vectorIndex VectorIndex + + switch vectorIndexUserConfig.IndexType() { + case vectorindex.VectorIndexTypeHNSW: + hnswUserConfig, ok := vectorIndexUserConfig.(hnswent.UserConfig) + if !ok { + return nil, errors.Errorf("hnsw vector index: config is not hnsw.UserConfig: %T", + vectorIndexUserConfig) + } + + if hnswUserConfig.Skip { + vectorIndex = noop.NewIndex() + } else { + // starts vector cycles if vector is configured + s.index.cycleCallbacks.vectorCommitLoggerCycle.Start() + s.index.cycleCallbacks.vectorTombstoneCleanupCycle.Start() + + // a shard can actually have multiple vector indexes: + // - the main index, which is used for all normal object vectors + // - a geo property index for each geo prop in the schema + // + // here we label the main vector index as such. + vecIdxID := s.vectorIndexID(targetVector) + + vi, err := hnsw.New(hnsw.Config{ + Logger: s.index.logger, + RootPath: s.path(), + ID: vecIdxID, + ShardName: s.name, + ClassName: s.index.Config.ClassName.String(), + PrometheusMetrics: s.promMetrics, + VectorForIDThunk: hnsw.NewVectorForIDThunk(targetVector, s.vectorByIndexID), + MultiVectorForIDThunk: hnsw.NewVectorForIDThunk(targetVector, s.multiVectorByIndexID), + TempVectorForIDThunk: hnsw.NewTempVectorForIDThunk(targetVector, s.readVectorByIndexIDIntoSlice), + TempMultiVectorForIDThunk: hnsw.NewTempMultiVectorForIDThunk(targetVector, s.readMultiVectorByIndexIDIntoSlice), + DistanceProvider: distProv, + MakeCommitLoggerThunk: func() (hnsw.CommitLogger, error) { + return hnsw.NewCommitLogger(s.path(), vecIdxID, + s.index.logger, s.cycleCallbacks.vectorCommitLoggerCallbacks, + hnsw.WithAllocChecker(s.index.allocChecker), + hnsw.WithCommitlogThresholdForCombining(s.index.Config.HNSWMaxLogSize), + // consistent with previous logic where the individual limit is 1/5 of the combined limit + 
hnsw.WithCommitlogThreshold(s.index.Config.HNSWMaxLogSize/5), + hnsw.WithSnapshotDisabled(s.index.Config.HNSWDisableSnapshots), + hnsw.WithSnapshotCreateInterval(time.Duration(s.index.Config.HNSWSnapshotIntervalSeconds)*time.Second), + hnsw.WithSnapshotMinDeltaCommitlogsNumer(s.index.Config.HNSWSnapshotMinDeltaCommitlogsNumber), + hnsw.WithSnapshotMinDeltaCommitlogsSizePercentage(s.index.Config.HNSWSnapshotMinDeltaCommitlogsSizePercentage), + ) + }, + AllocChecker: s.index.allocChecker, + WaitForCachePrefill: s.index.Config.HNSWWaitForCachePrefill, + FlatSearchConcurrency: s.index.Config.HNSWFlatSearchConcurrency, + AcornFilterRatio: s.index.Config.HNSWAcornFilterRatio, + VisitedListPoolMaxSize: s.index.Config.VisitedListPoolMaxSize, + DisableSnapshots: s.index.Config.HNSWDisableSnapshots, + SnapshotOnStartup: s.index.Config.HNSWSnapshotOnStartup, + LazyLoadSegments: lazyLoadSegments, + WriteSegmentInfoIntoFileName: s.index.Config.SegmentInfoIntoFileNameEnabled, + WriteMetadataFilesEnabled: s.index.Config.WriteMetadataFilesEnabled, + }, hnswUserConfig, s.cycleCallbacks.vectorTombstoneCleanupCallbacks, s.store) + if err != nil { + return nil, errors.Wrapf(err, "init shard %q: hnsw index", s.ID()) + } + vectorIndex = vi + } + case vectorindex.VectorIndexTypeFLAT: + flatUserConfig, ok := vectorIndexUserConfig.(flatent.UserConfig) + if !ok { + return nil, errors.Errorf("flat vector index: config is not flat.UserConfig: %T", + vectorIndexUserConfig) + } + s.index.cycleCallbacks.vectorCommitLoggerCycle.Start() + + // a shard can actually have multiple vector indexes: + // - the main index, which is used for all normal object vectors + // - a geo property index for each geo prop in the schema + // + // here we label the main vector index as such. 
+ vecIdxID := s.vectorIndexID(targetVector) + + vi, err := flat.New(flat.Config{ + ID: vecIdxID, + TargetVector: targetVector, + RootPath: s.path(), + Logger: s.index.logger, + DistanceProvider: distProv, + AllocChecker: s.index.allocChecker, + MinMMapSize: s.index.Config.MinMMapSize, + MaxWalReuseSize: s.index.Config.MaxReuseWalSize, + LazyLoadSegments: lazyLoadSegments, + WriteSegmentInfoIntoFileName: s.index.Config.SegmentInfoIntoFileNameEnabled, + WriteMetadataFilesEnabled: s.index.Config.WriteMetadataFilesEnabled, + }, flatUserConfig, s.store) + if err != nil { + return nil, errors.Wrapf(err, "init shard %q: flat index", s.ID()) + } + vectorIndex = vi + case vectorindex.VectorIndexTypeDYNAMIC: + dynamicUserConfig, ok := vectorIndexUserConfig.(dynamicent.UserConfig) + if !ok { + return nil, errors.Errorf("dynamic vector index: config is not dynamic.UserConfig: %T", + vectorIndexUserConfig) + } + s.index.cycleCallbacks.vectorCommitLoggerCycle.Start() + + // a shard can actually have multiple vector indexes: + // - the main index, which is used for all normal object vectors + // - a geo property index for each geo prop in the schema + // + // here we label the main vector index as such. 
+ vecIdxID := s.vectorIndexID(targetVector) + + sharedDB, err := s.getOrInitDynamicVectorIndexDB() + if err != nil { + return nil, errors.Wrapf(err, "init shard %q: dynamic index", s.ID()) + } + + vi, err := dynamic.New(dynamic.Config{ + ID: vecIdxID, + TargetVector: targetVector, + Logger: s.index.logger, + DistanceProvider: distProv, + RootPath: s.path(), + ShardName: s.name, + ClassName: s.index.Config.ClassName.String(), + PrometheusMetrics: s.promMetrics, + VectorForIDThunk: hnsw.NewVectorForIDThunk(targetVector, s.vectorByIndexID), + TempVectorForIDThunk: hnsw.NewTempVectorForIDThunk(targetVector, s.readVectorByIndexIDIntoSlice), + MakeCommitLoggerThunk: func() (hnsw.CommitLogger, error) { + return hnsw.NewCommitLogger(s.path(), vecIdxID, + s.index.logger, s.cycleCallbacks.vectorCommitLoggerCallbacks, + hnsw.WithAllocChecker(s.index.allocChecker), + hnsw.WithCommitlogThresholdForCombining(s.index.Config.HNSWMaxLogSize), + // consistent with previous logic where the individual limit is 1/5 of the combined limit + hnsw.WithCommitlogThreshold(s.index.Config.HNSWMaxLogSize/5), + hnsw.WithSnapshotDisabled(s.index.Config.HNSWDisableSnapshots), + hnsw.WithSnapshotCreateInterval(time.Duration(s.index.Config.HNSWSnapshotIntervalSeconds)*time.Second), + hnsw.WithSnapshotMinDeltaCommitlogsNumer(s.index.Config.HNSWSnapshotMinDeltaCommitlogsNumber), + hnsw.WithSnapshotMinDeltaCommitlogsSizePercentage(s.index.Config.HNSWSnapshotMinDeltaCommitlogsSizePercentage), + ) + }, + TombstoneCallbacks: s.cycleCallbacks.vectorTombstoneCleanupCallbacks, + SharedDB: sharedDB, + HNSWDisableSnapshots: s.index.Config.HNSWDisableSnapshots, + HNSWSnapshotOnStartup: s.index.Config.HNSWSnapshotOnStartup, + MinMMapSize: s.index.Config.MinMMapSize, + MaxWalReuseSize: s.index.Config.MaxReuseWalSize, + LazyLoadSegments: lazyLoadSegments, + AllocChecker: s.index.allocChecker, + WriteSegmentInfoIntoFileName: s.index.Config.SegmentInfoIntoFileNameEnabled, + }, dynamicUserConfig, s.store) + if err != 
nil { + return nil, errors.Wrapf(err, "init shard %q: dynamic index", s.ID()) + } + vectorIndex = vi + default: + return nil, fmt.Errorf("unknown vector index type: %q. Choose one from [\"%s\", \"%s\", \"%s\"]", + vectorIndexUserConfig.IndexType(), vectorindex.VectorIndexTypeHNSW, vectorindex.VectorIndexTypeFLAT, vectorindex.VectorIndexTypeDYNAMIC) + } + defer vectorIndex.PostStartup() + return vectorIndex, nil +} + +func (s *Shard) getOrInitDynamicVectorIndexDB() (*bbolt.DB, error) { + if s.dynamicVectorIndexDB == nil { + path := filepath.Join(s.path(), "index.db") + + db, err := bbolt.Open(path, 0o600, nil) + if err != nil { + return nil, errors.Wrapf(err, "open %q", path) + } + + s.dynamicVectorIndexDB = db + } + + return s.dynamicVectorIndexDB, nil +} + +func (s *Shard) initTargetVectors(ctx context.Context, lazyLoadSegments bool) error { + s.vectorIndexMu.Lock() + defer s.vectorIndexMu.Unlock() + + s.vectorIndexes = make(map[string]VectorIndex, len(s.index.vectorIndexUserConfigs)) + s.queues = make(map[string]*VectorIndexQueue, len(s.index.vectorIndexUserConfigs)) + + for targetVector, vectorIndexConfig := range s.index.vectorIndexUserConfigs { + if err := s.initTargetVectorWithLock(ctx, targetVector, vectorIndexConfig, lazyLoadSegments); err != nil { + return err + } + } + return nil +} + +func (s *Shard) initTargetVector(ctx context.Context, targetVector string, cfg schemaConfig.VectorIndexConfig, lazyLoadSegments bool) error { + s.vectorIndexMu.Lock() + defer s.vectorIndexMu.Unlock() + return s.initTargetVectorWithLock(ctx, targetVector, cfg, lazyLoadSegments) +} + +func (s *Shard) initTargetVectorWithLock(ctx context.Context, targetVector string, cfg schemaConfig.VectorIndexConfig, lazyLoadSegments bool) error { + vectorIndex, err := s.initVectorIndex(ctx, targetVector, cfg, lazyLoadSegments) + if err != nil { + return fmt.Errorf("cannot create vector index for %q: %w", targetVector, err) + } + queue, err := NewVectorIndexQueue(s, targetVector, 
vectorIndex) + if err != nil { + return fmt.Errorf("cannot create index queue for %q: %w", targetVector, err) + } + + s.vectorIndexes[targetVector] = vectorIndex + s.queues[targetVector] = queue + return nil +} + +func (s *Shard) initLegacyVector(ctx context.Context, lazyLoadSegments bool) error { + s.vectorIndexMu.Lock() + defer s.vectorIndexMu.Unlock() + + vectorIndex, err := s.initVectorIndex(ctx, "", s.index.vectorIndexUserConfig, lazyLoadSegments) + if err != nil { + return err + } + + queue, err := NewVectorIndexQueue(s, "", vectorIndex) + if err != nil { + return err + } + s.vectorIndex = vectorIndex + s.queue = queue + return nil +} + +func (s *Shard) setVectorIndex(targetVector string, index VectorIndex) { + s.vectorIndexMu.Lock() + defer s.vectorIndexMu.Unlock() + + if targetVector == "" { + s.vectorIndex = index + } else { + s.vectorIndexes[targetVector] = index + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_lazyloader.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_lazyloader.go new file mode 100644 index 0000000000000000000000000000000000000000..c94ec1e674f40118a62ae7d9516548d1e426f412 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_lazyloader.go @@ -0,0 +1,801 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "io" + "os" + "sync" + "time" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + + "github.com/weaviate/weaviate/adapters/repos/db/indexcheckpoint" + "github.com/weaviate/weaviate/adapters/repos/db/indexcounter" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/queue" + "github.com/weaviate/weaviate/adapters/repos/db/roaringset" + "github.com/weaviate/weaviate/cluster/router/types" + usagetypes "github.com/weaviate/weaviate/cluster/usage/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/aggregation" + "github.com/weaviate/weaviate/entities/backup" + "github.com/weaviate/weaviate/entities/dto" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/multi" + "github.com/weaviate/weaviate/entities/schema" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + "github.com/weaviate/weaviate/entities/storagestate" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/file" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/modules" + "github.com/weaviate/weaviate/usecases/monitoring" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/replica" + "github.com/weaviate/weaviate/usecases/replica/hashtree" +) + +type LazyLoadShard struct { + shardOpts *deferredShardOpts + shard *Shard + loaded bool + mutex sync.Mutex + memMonitor memwatch.AllocChecker + shardLoadLimiter ShardLoadLimiter + lazyLoadSegments bool +} + +func NewLazyLoadShard(ctx 
context.Context, promMetrics *monitoring.PrometheusMetrics, + shardName string, index *Index, class *models.Class, jobQueueCh chan job, + indexCheckpoints *indexcheckpoint.Checkpoints, memMonitor memwatch.AllocChecker, + shardLoadLimiter ShardLoadLimiter, shardReindexer ShardReindexerV3, + lazyLoadSegments bool, bitmapBufPool roaringset.BitmapBufPool, +) *LazyLoadShard { + if memMonitor == nil { + memMonitor = memwatch.NewDummyMonitor() + } + promMetrics.NewUnloadedshard() + return &LazyLoadShard{ + shardOpts: &deferredShardOpts{ + promMetrics: promMetrics, + name: shardName, + index: index, + class: class, + jobQueueCh: jobQueueCh, + scheduler: index.scheduler, + indexCheckpoints: indexCheckpoints, + shardReindexer: shardReindexer, + bitmapBufPool: bitmapBufPool, + }, + memMonitor: memMonitor, + shardLoadLimiter: shardLoadLimiter, + lazyLoadSegments: lazyLoadSegments, + } +} + +type deferredShardOpts struct { + promMetrics *monitoring.PrometheusMetrics + name string + index *Index + class *models.Class + jobQueueCh chan job + scheduler *queue.Scheduler + indexCheckpoints *indexcheckpoint.Checkpoints + shardReindexer ShardReindexerV3 + bitmapBufPool roaringset.BitmapBufPool +} + +func (l *LazyLoadShard) mustLoad() { + l.mustLoadCtx(context.Background()) +} + +func (l *LazyLoadShard) mustLoadCtx(ctx context.Context) { + if err := l.Load(ctx); err != nil { + panic(err.Error()) + } +} + +func (l *LazyLoadShard) Load(ctx context.Context) error { + l.mutex.Lock() + defer l.mutex.Unlock() + + if l.loaded { + return nil + } + + if err := l.memMonitor.CheckMappingAndReserve(3, int(lsmkv.FlushAfterDirtyDefault.Seconds())); err != nil { + return errors.Wrap(err, "memory pressure: cannot load shard") + } + + if err := l.shardLoadLimiter.Acquire(ctx); err != nil { + return fmt.Errorf("acquiring permit to load shard: %w", err) + } + defer l.shardLoadLimiter.Release() + + shard, err := NewShard(ctx, l.shardOpts.promMetrics, l.shardOpts.name, l.shardOpts.index, + 
l.shardOpts.class, l.shardOpts.jobQueueCh, l.shardOpts.scheduler, + l.shardOpts.indexCheckpoints, l.shardOpts.shardReindexer, l.lazyLoadSegments, + l.shardOpts.bitmapBufPool) + if err != nil { + msg := fmt.Sprintf("Unable to load shard %s: %v", l.shardOpts.name, err) + l.shardOpts.index.logger.WithField("error", "shard_load").WithError(err).Error(msg) + return errors.New(msg) + } + + l.shard = shard + l.loaded = true + + return nil +} + +func (l *LazyLoadShard) Index() *Index { + return l.shardOpts.index +} + +func (l *LazyLoadShard) Name() string { + return l.shardOpts.name +} + +func (l *LazyLoadShard) Store() *lsmkv.Store { + l.mustLoad() + return l.shard.Store() +} + +func (l *LazyLoadShard) NotifyReady() { + l.mustLoad() + l.shard.NotifyReady() +} + +func (l *LazyLoadShard) GetStatus() storagestate.Status { + if l.loaded { + return l.shard.GetStatus() + } + return storagestate.StatusLazyLoading +} + +func (l *LazyLoadShard) UpdateStatus(status, reason string) error { + l.mustLoad() + return l.shard.UpdateStatus(status, reason) +} + +func (l *LazyLoadShard) SetStatusReadonly(reason string) error { + l.mustLoad() + return l.shard.SetStatusReadonly(reason) +} + +func (l *LazyLoadShard) FindUUIDs(ctx context.Context, filters *filters.LocalFilter) ([]strfmt.UUID, error) { + if err := l.Load(ctx); err != nil { + return []strfmt.UUID{}, err + } + return l.shard.FindUUIDs(ctx, filters) +} + +func (l *LazyLoadShard) Counter() *indexcounter.Counter { + l.mustLoad() + return l.shard.Counter() +} + +func (l *LazyLoadShard) ObjectCount() int { + l.mustLoad() + return l.shard.ObjectCount() +} + +func (l *LazyLoadShard) ObjectCountAsync(ctx context.Context) (int64, error) { + l.mutex.Lock() + if l.loaded { + l.mutex.Unlock() + return l.shard.ObjectCountAsync(ctx) + } + l.mutex.Unlock() + objectUsage, err := l.shardOpts.index.CalculateUnloadedObjectsMetrics(ctx, l.shardOpts.name) + if err != nil { + return 0, fmt.Errorf("error while getting object count for shard %s: %w", 
l.shardOpts.name, err) + } + return objectUsage.Count, nil +} + +func (l *LazyLoadShard) ObjectStorageSize(ctx context.Context) (int64, error) { + l.mutex.Lock() + if l.loaded { + l.mutex.Unlock() + return l.shard.ObjectStorageSize(ctx) + } + l.mutex.Unlock() + + // For unloaded shards, calculate storage size by walking the file system + // This avoids loading the shard into memory entirely + objectUsage, err := l.shardOpts.index.CalculateUnloadedObjectsMetrics(ctx, l.shardOpts.name) + return objectUsage.StorageBytes, err +} + +func (l *LazyLoadShard) GetPropertyLengthTracker() *inverted.JsonShardMetaData { + l.mustLoad() + return l.shard.GetPropertyLengthTracker() +} + +func (l *LazyLoadShard) PutObject(ctx context.Context, object *storobj.Object) error { + if err := l.Load(ctx); err != nil { + return err + } + return l.shard.PutObject(ctx, object) +} + +func (l *LazyLoadShard) PutObjectBatch(ctx context.Context, objects []*storobj.Object) []error { + if err := l.Load(ctx); err != nil { + return []error{err} + } // TODO check + return l.shard.PutObjectBatch(ctx, objects) +} + +func (l *LazyLoadShard) ObjectByID(ctx context.Context, id strfmt.UUID, props search.SelectProperties, additional additional.Properties) (*storobj.Object, error) { + if err := l.Load(ctx); err != nil { + return nil, err + } + return l.shard.ObjectByID(ctx, id, props, additional) +} + +func (l *LazyLoadShard) ObjectByIDErrDeleted(ctx context.Context, id strfmt.UUID, props search.SelectProperties, additional additional.Properties) (*storobj.Object, error) { + if err := l.Load(ctx); err != nil { + return nil, err + } + return l.shard.ObjectByIDErrDeleted(ctx, id, props, additional) +} + +func (l *LazyLoadShard) Exists(ctx context.Context, id strfmt.UUID) (bool, error) { + if err := l.Load(ctx); err != nil { + return false, err + } + return l.shard.Exists(ctx, id) +} + +func (l *LazyLoadShard) ObjectSearch(ctx context.Context, limit int, filters *filters.LocalFilter, keywordRanking 
*searchparams.KeywordRanking, sort []filters.Sort, cursor *filters.Cursor, additional additional.Properties, properties []string) ([]*storobj.Object, []float32, error) { + if err := l.Load(ctx); err != nil { + return nil, nil, err + } + return l.shard.ObjectSearch(ctx, limit, filters, keywordRanking, sort, cursor, additional, properties) +} + +func (l *LazyLoadShard) ObjectVectorSearch(ctx context.Context, searchVectors []models.Vector, targetVectors []string, targetDist float32, limit int, filters *filters.LocalFilter, sort []filters.Sort, groupBy *searchparams.GroupBy, additional additional.Properties, targetCombination *dto.TargetCombination, properties []string) ([]*storobj.Object, []float32, error) { + if err := l.Load(ctx); err != nil { + return nil, nil, err + } + return l.shard.ObjectVectorSearch(ctx, searchVectors, targetVectors, targetDist, limit, filters, sort, groupBy, additional, targetCombination, properties) +} + +func (l *LazyLoadShard) UpdateVectorIndexConfig(ctx context.Context, updated schemaConfig.VectorIndexConfig) error { + if err := l.Load(ctx); err != nil { + return err + } + return l.shard.UpdateVectorIndexConfig(ctx, updated) +} + +func (l *LazyLoadShard) UpdateVectorIndexConfigs(ctx context.Context, updated map[string]schemaConfig.VectorIndexConfig) error { + if err := l.Load(ctx); err != nil { + return err + } + return l.shard.UpdateVectorIndexConfigs(ctx, updated) +} + +func (l *LazyLoadShard) SetAsyncReplicationEnabled(ctx context.Context, enabled bool) error { + if err := l.Load(ctx); err != nil { + return err + } + return l.shard.SetAsyncReplicationEnabled(ctx, enabled) +} + +func (l *LazyLoadShard) addTargetNodeOverride(ctx context.Context, targetNodeOverride additional.AsyncReplicationTargetNodeOverride) error { + if err := l.Load(ctx); err != nil { + return err + } + l.shard.addTargetNodeOverride(ctx, targetNodeOverride) + return nil +} + +func (l *LazyLoadShard) removeTargetNodeOverride(ctx context.Context, targetNodeOverride 
additional.AsyncReplicationTargetNodeOverride) error { + if err := l.Load(ctx); err != nil { + return err + } + l.shard.removeTargetNodeOverride(ctx, targetNodeOverride) + return nil +} + +func (l *LazyLoadShard) removeAllTargetNodeOverrides(ctx context.Context) error { + if err := l.Load(ctx); err != nil { + return err + } + return l.shard.removeAllTargetNodeOverrides(ctx) +} + +func (l *LazyLoadShard) getAsyncReplicationStats(ctx context.Context) []*models.AsyncReplicationStatus { + if err := l.Load(ctx); err != nil { + return nil + } + return l.shard.getAsyncReplicationStats(ctx) +} + +func (l *LazyLoadShard) AddReferencesBatch(ctx context.Context, refs objects.BatchReferences) []error { + if err := l.Load(ctx); err != nil { + return []error{err} + } // TODO check + return l.shard.AddReferencesBatch(ctx, refs) +} + +func (l *LazyLoadShard) DeleteObjectBatch(ctx context.Context, ids []strfmt.UUID, deletionTime time.Time, dryRun bool) objects.BatchSimpleObjects { + l.mustLoadCtx(ctx) + return l.shard.DeleteObjectBatch(ctx, ids, deletionTime, dryRun) +} + +func (l *LazyLoadShard) DeleteObject(ctx context.Context, id strfmt.UUID, deletionTime time.Time) error { + if err := l.Load(ctx); err != nil { + return err + } + return l.shard.DeleteObject(ctx, id, deletionTime) +} + +func (l *LazyLoadShard) MultiObjectByID(ctx context.Context, query []multi.Identifier) ([]*storobj.Object, error) { + if err := l.Load(ctx); err != nil { + return nil, err + } + return l.shard.MultiObjectByID(ctx, query) +} + +func (l *LazyLoadShard) ObjectDigestsInRange(ctx context.Context, + initialUUID, finalUUID strfmt.UUID, limit int, +) (objs []types.RepairResponse, err error) { + if !l.isLoaded() { + return nil, err + } + + return l.shard.ObjectDigestsInRange(ctx, initialUUID, finalUUID, limit) +} + +func (l *LazyLoadShard) ID() string { + return shardId(l.shardOpts.index.ID(), l.shardOpts.name) +} + +func (l *LazyLoadShard) drop() error { + // if not loaded, execute simplified drop without 
loading shard:
+	// - perform required actions
+	// - remove entire shard directory
+	// use lock to prevent eventual concurrent dropping and loading
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+
+	if !l.loaded {
+		idx := l.shardOpts.index
+		className := idx.Config.ClassName.String()
+		shardName := l.shardOpts.name
+
+		// cleanup metrics
+		NewMetrics(idx.logger, l.shardOpts.promMetrics, className, shardName).
+			DeleteShardLabels(className, shardName)
+
+		// cleanup dimensions: not deleted in s.metrics.DeleteShardLabels
+		clearDimensionMetrics(idx.Config, l.shardOpts.promMetrics, className, shardName)
+
+		// cleanup index checkpoints; delete via the same field that was nil-checked
+		if l.shardOpts.indexCheckpoints != nil {
+			if err := l.shardOpts.indexCheckpoints.DeleteShard(l.ID()); err != nil {
+				return fmt.Errorf("delete shard index checkpoints: %w", err)
+			}
+		}
+
+		// remove shard dir
+		if err := os.RemoveAll(shardPath(idx.path(), shardName)); err != nil {
+			return fmt.Errorf("delete shard dir: %w", err)
+		}
+
+		return nil
+	}
+
+	return l.shard.drop()
+}
+
+func (l *LazyLoadShard) DebugResetVectorIndex(ctx context.Context, targetVector string) error {
+	if err := l.Load(ctx); err != nil {
+		return err
+	}
+	return l.shard.DebugResetVectorIndex(ctx, targetVector)
+}
+
+func (l *LazyLoadShard) initPropertyBuckets(ctx context.Context, eg *enterrors.ErrorGroupWrapper, lazyLoadSegments bool, props ...*models.Property) {
+	l.mustLoad()
+	l.shard.initPropertyBuckets(ctx, eg, lazyLoadSegments, props...)
+} + +func (l *LazyLoadShard) HaltForTransfer(ctx context.Context, offloading bool, inactivityTimeout time.Duration) error { + if err := l.Load(ctx); err != nil { + return err + } + return l.shard.HaltForTransfer(ctx, offloading, inactivityTimeout) +} + +func (l *LazyLoadShard) ListBackupFiles(ctx context.Context, ret *backup.ShardDescriptor) error { + if err := l.Load(ctx); err != nil { + return err + } + return l.shard.ListBackupFiles(ctx, ret) +} + +func (l *LazyLoadShard) resumeMaintenanceCycles(ctx context.Context) error { + if err := l.Load(ctx); err != nil { + return err + } + return l.shard.resumeMaintenanceCycles(ctx) +} + +func (l *LazyLoadShard) GetFileMetadata(ctx context.Context, relativeFilePath string) (file.FileMetadata, error) { + if err := l.Load(ctx); err != nil { + return file.FileMetadata{}, err + } + return l.shard.GetFileMetadata(ctx, relativeFilePath) +} + +func (l *LazyLoadShard) GetFile(ctx context.Context, relativeFilePath string) (io.ReadCloser, error) { + if err := l.Load(ctx); err != nil { + return nil, err + } + return l.shard.GetFile(ctx, relativeFilePath) +} + +func (l *LazyLoadShard) SetPropertyLengths(props []inverted.Property) error { + l.mustLoad() + return l.shard.SetPropertyLengths(props) +} + +func (l *LazyLoadShard) AnalyzeObject(object *storobj.Object) ([]inverted.Property, []inverted.NilProperty, error) { + l.mustLoad() + return l.shard.AnalyzeObject(object) +} + +func (l *LazyLoadShard) DimensionsUsage(ctx context.Context, targetVector string) (usagetypes.Dimensionality, error) { + l.mutex.Lock() + if l.loaded { + l.mutex.Unlock() + return l.shard.DimensionsUsage(ctx, targetVector) + } + l.mutex.Unlock() + + // For unloaded shards, use the unloaded shard/tenant calculation method + // This avoids loading the shard into memory + return l.shardOpts.index.CalculateUnloadedDimensionsUsage(ctx, l.shardOpts.name, targetVector) +} + +func (l *LazyLoadShard) Dimensions(ctx context.Context, targetVector string) (int, error) { + 
l.mutex.Lock() + if l.loaded { + l.mutex.Unlock() + return l.shard.Dimensions(ctx, targetVector) + } + l.mutex.Unlock() + + // For unloaded shards, get dimensions from unloaded shard/tenant calculation + dimensionality, err := l.shardOpts.index.CalculateUnloadedDimensionsUsage(ctx, l.shardOpts.name, targetVector) + return dimensionality.Count, err +} + +func (l *LazyLoadShard) QuantizedDimensions(ctx context.Context, targetVector string, segments int) int { + l.mustLoad() + return l.shard.QuantizedDimensions(ctx, targetVector, segments) +} + +func (l *LazyLoadShard) resetDimensionsLSM(ctx context.Context) error { + l.mustLoad() + return l.shard.resetDimensionsLSM(ctx) +} + +func (l *LazyLoadShard) Aggregate(ctx context.Context, params aggregation.Params, modules *modules.Provider) (*aggregation.Result, error) { + if err := l.Load(ctx); err != nil { + return nil, err + } + return l.shard.Aggregate(ctx, params, modules) +} + +func (l *LazyLoadShard) MergeObject(ctx context.Context, object objects.MergeDocument) error { + if err := l.Load(ctx); err != nil { + return err + } + return l.shard.MergeObject(ctx, object) +} + +func (l *LazyLoadShard) GetVectorIndexQueue(targetVector string) (*VectorIndexQueue, bool) { + l.mustLoad() + return l.shard.GetVectorIndexQueue(targetVector) +} + +func (l *LazyLoadShard) GetVectorIndex(targetVector string) (VectorIndex, bool) { + l.mustLoad() + return l.shard.GetVectorIndex(targetVector) +} + +func (l *LazyLoadShard) ForEachVectorIndex(f func(targetVector string, index VectorIndex) error) error { + l.mustLoad() + return l.shard.ForEachVectorIndex(f) +} + +func (l *LazyLoadShard) ForEachVectorQueue(f func(targetVector string, queue *VectorIndexQueue) error) error { + l.mustLoad() + return l.shard.ForEachVectorQueue(f) +} + +func (l *LazyLoadShard) VectorDistanceForQuery(ctx context.Context, id uint64, searchVectors []models.Vector, targets []string) ([]float32, error) { + if err := l.Load(ctx); err != nil { + return nil, err + } + 
return l.shard.VectorDistanceForQuery(ctx, id, searchVectors, targets) +} + +func (l *LazyLoadShard) ConvertQueue(targetVector string) error { + l.mustLoad() + return l.shard.ConvertQueue(targetVector) +} + +func (l *LazyLoadShard) FillQueue(targetVector string, from uint64) error { + l.mustLoad() + return l.shard.FillQueue(targetVector, from) +} + +func (l *LazyLoadShard) RepairIndex(ctx context.Context, targetVector string) error { + l.mustLoad() + return l.shard.RepairIndex(ctx, targetVector) +} + +func (l *LazyLoadShard) Shutdown(ctx context.Context) error { + if !l.isLoaded() { + return nil + } + return l.shard.Shutdown(ctx) +} + +func (l *LazyLoadShard) preventShutdown() (release func(), err error) { + if err := l.Load(context.Background()); err != nil { + return nil, fmt.Errorf("LazyLoadShard::preventShutdown: %w", err) + } + return l.shard.preventShutdown() +} + +func (l *LazyLoadShard) HashTreeLevel(ctx context.Context, level int, discriminant *hashtree.Bitset) (digests []hashtree.Digest, err error) { + if !l.isLoaded() { + return []hashtree.Digest{}, nil + } + return l.shard.HashTreeLevel(ctx, level, discriminant) +} + +func (l *LazyLoadShard) ObjectList(ctx context.Context, limit int, sort []filters.Sort, cursor *filters.Cursor, additional additional.Properties, className schema.ClassName) ([]*storobj.Object, error) { + if err := l.Load(ctx); err != nil { + return nil, err + } + return l.shard.ObjectList(ctx, limit, sort, cursor, additional, className) +} + +func (l *LazyLoadShard) WasDeleted(ctx context.Context, id strfmt.UUID) (bool, time.Time, error) { + if err := l.Load(ctx); err != nil { + return false, time.Time{}, err + } + return l.shard.WasDeleted(ctx, id) +} + +func (l *LazyLoadShard) Versioner() *shardVersioner { + l.mustLoad() + return l.shard.Versioner() +} + +func (l *LazyLoadShard) isReadOnly() error { + l.mustLoad() + return l.shard.isReadOnly() +} + +func (l *LazyLoadShard) preparePutObject(ctx context.Context, shardID string, object 
*storobj.Object) replica.SimpleResponse { + l.mustLoadCtx(ctx) + return l.shard.preparePutObject(ctx, shardID, object) +} + +func (l *LazyLoadShard) preparePutObjects(ctx context.Context, shardID string, objects []*storobj.Object) replica.SimpleResponse { + l.mustLoadCtx(ctx) + return l.shard.preparePutObjects(ctx, shardID, objects) +} + +func (l *LazyLoadShard) prepareMergeObject(ctx context.Context, shardID string, object *objects.MergeDocument) replica.SimpleResponse { + l.mustLoadCtx(ctx) + return l.shard.prepareMergeObject(ctx, shardID, object) +} + +func (l *LazyLoadShard) prepareDeleteObject(ctx context.Context, shardID string, id strfmt.UUID, deletionTime time.Time) replica.SimpleResponse { + l.mustLoadCtx(ctx) + return l.shard.prepareDeleteObject(ctx, shardID, id, deletionTime) +} + +func (l *LazyLoadShard) prepareDeleteObjects(ctx context.Context, shardID string, + ids []strfmt.UUID, deletionTime time.Time, dryRun bool, +) replica.SimpleResponse { + l.mustLoadCtx(ctx) + return l.shard.prepareDeleteObjects(ctx, shardID, ids, deletionTime, dryRun) +} + +func (l *LazyLoadShard) prepareAddReferences(ctx context.Context, shardID string, refs []objects.BatchReference) replica.SimpleResponse { + l.mustLoadCtx(ctx) + return l.shard.prepareAddReferences(ctx, shardID, refs) +} + +func (l *LazyLoadShard) commitReplication(ctx context.Context, shardID string, mutex *shardTransfer) interface{} { + l.mustLoad() + return l.shard.commitReplication(ctx, shardID, mutex) +} + +func (l *LazyLoadShard) abortReplication(ctx context.Context, shardID string) replica.SimpleResponse { + l.mustLoad() + return l.shard.abortReplication(ctx, shardID) +} + +func (l *LazyLoadShard) filePutter(ctx context.Context, shardID string) (io.WriteCloser, error) { + if err := l.Load(ctx); err != nil { + return nil, err + } + return l.shard.filePutter(ctx, shardID) +} + +func (l *LazyLoadShard) extendDimensionTrackerLSM(dimLength int, docID uint64, targetVector string) error { + if err := 
l.Load(context.Background()); err != nil { + return err + } + return l.shard.extendDimensionTrackerLSM(dimLength, docID, targetVector) +} + +func (l *LazyLoadShard) addToPropertySetBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error { + l.mustLoad() + return l.shard.addToPropertySetBucket(bucket, docID, key) +} + +func (l *LazyLoadShard) addToPropertyRangeBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error { + l.mustLoad() + return l.shard.addToPropertyRangeBucket(bucket, docID, key) +} + +func (l *LazyLoadShard) addToPropertyMapBucket(bucket *lsmkv.Bucket, pair lsmkv.MapPair, key []byte) error { + l.mustLoad() + return l.shard.addToPropertyMapBucket(bucket, pair, key) +} + +func (l *LazyLoadShard) pairPropertyWithFrequency(docID uint64, freq, propLen float32) lsmkv.MapPair { + l.mustLoad() + return l.shard.pairPropertyWithFrequency(docID, freq, propLen) +} + +func (l *LazyLoadShard) setFallbackToSearchable(fallback bool) { + l.mustLoad() + l.shard.setFallbackToSearchable(fallback) +} + +func (l *LazyLoadShard) addJobToQueue(job job) { + l.mustLoad() + l.shard.addJobToQueue(job) +} + +func (l *LazyLoadShard) uuidFromDocID(docID uint64) (strfmt.UUID, error) { + l.mustLoad() + return l.shard.uuidFromDocID(docID) +} + +func (l *LazyLoadShard) batchDeleteObject(ctx context.Context, id strfmt.UUID, deletionTime time.Time) error { + if err := l.Load(ctx); err != nil { + return err + } + return l.shard.batchDeleteObject(ctx, id, deletionTime) +} + +func (l *LazyLoadShard) putObjectLSM(object *storobj.Object, idBytes []byte) (objectInsertStatus, error) { + l.mustLoad() + return l.shard.putObjectLSM(object, idBytes) +} + +func (l *LazyLoadShard) mayUpsertObjectHashTree(object *storobj.Object, idBytes []byte, status objectInsertStatus) error { + l.mustLoad() + return l.shard.mayUpsertObjectHashTree(object, idBytes, status) +} + +func (l *LazyLoadShard) mutableMergeObjectLSM(merge objects.MergeDocument, idBytes []byte) (mutableMergeResult, error) { + 
l.mustLoad() + return l.shard.mutableMergeObjectLSM(merge, idBytes) +} + +func (l *LazyLoadShard) deleteFromPropertySetBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error { + l.mustLoad() + return l.shard.deleteFromPropertySetBucket(bucket, docID, key) +} + +func (l *LazyLoadShard) deleteFromPropertyRangeBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error { + l.mustLoad() + return l.shard.deleteFromPropertyRangeBucket(bucket, docID, key) +} + +func (l *LazyLoadShard) batchExtendInvertedIndexItemsLSMNoFrequency(b *lsmkv.Bucket, item inverted.MergeItem) error { + l.mustLoad() + return l.shard.batchExtendInvertedIndexItemsLSMNoFrequency(b, item) +} + +func (l *LazyLoadShard) updatePropertySpecificIndices(ctx context.Context, object *storobj.Object, status objectInsertStatus) error { + l.mustLoad() + return l.shard.updatePropertySpecificIndices(ctx, object, status) +} + +func (l *LazyLoadShard) updateVectorIndexIgnoreDelete(ctx context.Context, vector []float32, status objectInsertStatus) error { + l.mustLoad() + return l.shard.updateVectorIndexIgnoreDelete(ctx, vector, status) +} + +func (l *LazyLoadShard) updateVectorIndexesIgnoreDelete(ctx context.Context, vectors map[string][]float32, status objectInsertStatus) error { + l.mustLoad() + return l.shard.updateVectorIndexesIgnoreDelete(ctx, vectors, status) +} + +func (l *LazyLoadShard) updateMultiVectorIndexesIgnoreDelete(ctx context.Context, multiVectors map[string][][]float32, status objectInsertStatus) error { + l.mustLoad() + return l.shard.updateMultiVectorIndexesIgnoreDelete(ctx, multiVectors, status) +} + +func (l *LazyLoadShard) hasGeoIndex() bool { + l.mustLoad() + return l.shard.hasGeoIndex() +} + +func (l *LazyLoadShard) Metrics() *Metrics { + l.mustLoad() + return l.shard.Metrics() +} + +func (l *LazyLoadShard) isLoaded() bool { + l.mutex.Lock() + defer l.mutex.Unlock() + + return l.loaded +} + +func (l *LazyLoadShard) Activity() (int32, int32) { + var loaded bool + l.mutex.Lock() + 
loaded = l.loaded + l.mutex.Unlock() + + if !loaded { + // don't force-load the shard, just report the same number every time, so + // the caller can figure out there was no activity + return 0, 0 + } + + return l.shard.Activity() +} + +func (l *LazyLoadShard) pathLSM() string { + return shardPathLSM(l.shardOpts.index.path(), l.shardOpts.name) +} + +func (l *LazyLoadShard) VectorStorageSize(ctx context.Context) (int64, error) { + l.mutex.Lock() + if l.loaded { + l.mutex.Unlock() + return l.shard.VectorStorageSize(ctx) + } + l.mutex.Unlock() + + // For unloaded shards, use the existing cold tenant calculation method + // This avoids complex disk file calculations and uses the same logic as the index + return l.shardOpts.index.CalculateUnloadedVectorsMetrics(ctx, l.shardOpts.name) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_load_limiter.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_load_limiter.go new file mode 100644 index 0000000000000000000000000000000000000000..2ef8cf47c2f07ec3e10889a8228c7f0b60220d21 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_load_limiter.go @@ -0,0 +1,71 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "golang.org/x/sync/semaphore" +) + +const ( + defaultShardLoadingLimit = 500 +) + +// ShardLoadLimiter is a limiter to control how many shards are loaded concurrently. +// If too many shards are loaded in parallel, it throttles the loading instead of rejecting. 
+// The motivation for this limiter is the fact that loading a shard requires multiple syscalls,
+// each of which may create a new OS thread if there are no idle ones. In case we try to load thousands of shards in parallel,
+// we might hit the internal thread count limit of the Go runtime.
+type ShardLoadLimiter struct {
+	sema *semaphore.Weighted
+
+	shardsLoading          prometheus.Gauge
+	waitingForPermitToLoad prometheus.Gauge
+}
+
+func NewShardLoadLimiter(reg prometheus.Registerer, limit int) ShardLoadLimiter {
+	r := promauto.With(reg)
+	if limit == 0 {
+		limit = defaultShardLoadingLimit
+	}
+
+	return ShardLoadLimiter{
+		sema: semaphore.NewWeighted(int64(limit)),
+
+		shardsLoading: r.NewGauge(prometheus.GaugeOpts{
+			Name: "database_shards_loading",
+		}),
+		waitingForPermitToLoad: r.NewGauge(prometheus.GaugeOpts{
+			Name: "database_shards_waiting_for_permit_to_load",
+		}),
+	}
+}
+
+func (l *ShardLoadLimiter) Acquire(ctx context.Context) error {
+	l.waitingForPermitToLoad.Inc()
+	defer l.waitingForPermitToLoad.Dec()
+
+	err := l.sema.Acquire(ctx, 1)
+	if err != nil {
+		return err
+	}
+	l.shardsLoading.Inc()
+	return nil
+}
+
+func (l *ShardLoadLimiter) Release() {
+	l.sema.Release(1)
+	l.shardsLoading.Dec()
+}
diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_load_limiter_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_load_limiter_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f79e31a5a4cdeebd6338ac59fad681ee44084bcd
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_load_limiter_test.go
@@ -0,0 +1,77 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +func TestNewShardLoadLimiter_DefaultLimit(t *testing.T) { + tests := []struct { + name string + limit int + expectedLimit int64 + }{ + { + name: "with custom limit", + limit: 100, + expectedLimit: 100, + }, + { + name: "with default limit", + limit: 0, + expectedLimit: defaultShardLoadingLimit, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + limiter := NewShardLoadLimiter(monitoring.NoopRegisterer, tc.limit) + + var count int64 + for limiter.sema.TryAcquire(1) { + count++ + } + + require.Equal(t, tc.expectedLimit, count) + }) + } +} + +func TestNewShardLoadLimiter_ControlsConcurrency(t *testing.T) { + var ( + limiter = NewShardLoadLimiter(monitoring.NoopRegisterer, 5) + start = time.Now() + ) + + var wg sync.WaitGroup + for range 10 { + wg.Add(1) + go func() { + defer wg.Done() + + require.NoError(t, limiter.Acquire(context.Background())) + defer limiter.Release() + + time.Sleep(100 * time.Millisecond) + }() + } + wg.Wait() + + require.GreaterOrEqual(t, time.Since(start), 200*time.Millisecond) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_object_storage_size_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_object_storage_size_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2a101c56e1e9de0c4136d2a288f75c671fa07019 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_object_storage_size_test.go @@ -0,0 +1,112 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "os" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/storagestate" +) + +func TestShard_ObjectStorageSize_DifferentStatuses(t *testing.T) { + testCases := []struct { + name string + status storagestate.Status + description string + expectError bool + }{ + { + name: "status ready", + status: storagestate.StatusReady, + description: "should handle ready status with existing bucket", + expectError: false, + }, + { + name: "status readonly", + status: storagestate.StatusReadOnly, + description: "should handle readonly status with existing bucket", + expectError: false, + }, + { + name: "status loading", + status: storagestate.StatusLoading, + description: "should handle loading status with missing bucket", + expectError: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var store *lsmkv.Store + + if tc.expectError { + // For loading status, create empty store without bucket + store = &lsmkv.Store{} + } else { + // For ready/readonly status, create a proper store with bucket + dirName := t.TempDir() + defer os.RemoveAll(dirName) + logger, _ := test.NewNullLogger() + + var err error + store, err = lsmkv.New(dirName, dirName, logger, nil, + cyclemanager.NewCallbackGroup("classCompactionObjects", logger, 1), + cyclemanager.NewCallbackGroup("classCompactionNonObjects", logger, 1), + cyclemanager.NewCallbackGroupNoop()) + require.NoError(t, err) + + // Create the objects bucket + err = store.CreateOrLoadBucket(context.Background(), helpers.ObjectsBucketLSM) + require.NoError(t, err) + + // Add some data to the bucket to ensure it has a non-zero size + bucket := 
store.Bucket(helpers.ObjectsBucketLSM) + require.NotNil(t, bucket) + + // Add a test object to the bucket + err = bucket.Put([]byte("test-key"), []byte("test-value")) + require.NoError(t, err) + + // Flush the data to disk to ensure it's included in size calculation + err = store.FlushMemtables(context.Background()) + require.NoError(t, err) + } + + shard := &Shard{ + store: store, + status: ShardStatus{Status: tc.status}, + } + + ctx := context.Background() + result, err := shard.ObjectStorageSize(ctx) + + if tc.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), "bucket objects not found") + assert.Equal(t, int64(0), result, tc.description) + } else { + // For ready/readonly status, the bucket should exist and return a valid size + require.NoError(t, err) + assert.Greater(t, result, int64(0), tc.description) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_read.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_read.go new file mode 100644 index 0000000000000000000000000000000000000000..1a5878cf83a44f10f8f8b32686e1513a7987fec8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_read.go @@ -0,0 +1,763 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "time" + + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/entities/dto" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/sorter" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/multi" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/searchparams" + entsentry "github.com/weaviate/weaviate/entities/sentry" + "github.com/weaviate/weaviate/entities/storobj" +) + +func (s *Shard) ObjectByIDErrDeleted(ctx context.Context, id strfmt.UUID, props search.SelectProperties, additional additional.Properties) (*storobj.Object, error) { + idBytes, err := uuid.MustParse(id.String()).MarshalBinary() + if err != nil { + return nil, err + } + + bytes, err := s.store.Bucket(helpers.ObjectsBucketLSM).GetErrDeleted(idBytes) + if err != nil { + return nil, err + } + + if bytes == nil { + return nil, nil + } + + obj, err := storobj.FromBinary(bytes) + if err != nil { + return nil, errors.Wrap(err, "unmarshal object") + } + + return obj, nil +} + +func (s *Shard) ObjectByID(ctx context.Context, id strfmt.UUID, props search.SelectProperties, additional additional.Properties) (*storobj.Object, error) { + s.activityTrackerRead.Add(1) + idBytes, err := uuid.MustParse(id.String()).MarshalBinary() + if err != nil { + return nil, err + } + + bytes, err := 
s.store.Bucket(helpers.ObjectsBucketLSM).Get(idBytes) + if err != nil { + return nil, err + } + + if bytes == nil { + return nil, nil + } + + obj, err := storobj.FromBinary(bytes) + if err != nil { + return nil, errors.Wrap(err, "unmarshal object") + } + + return obj, nil +} + +func (s *Shard) MultiObjectByID(ctx context.Context, query []multi.Identifier) ([]*storobj.Object, error) { + s.activityTrackerRead.Add(1) + objects := make([]*storobj.Object, len(query)) + + ids := make([][]byte, len(query)) + for i, q := range query { + idBytes, err := uuid.MustParse(q.ID).MarshalBinary() + if err != nil { + return nil, err + } + + ids[i] = idBytes + } + + bucket := s.store.Bucket(helpers.ObjectsBucketLSM) + for i, id := range ids { + bytes, err := bucket.Get(id) + if err != nil { + return nil, err + } + + if bytes == nil { + continue + } + + obj, err := storobj.FromBinary(bytes) + if err != nil { + return nil, errors.Wrap(err, "unmarshal kind object") + } + objects[i] = obj + } + + return objects, nil +} + +func (s *Shard) ObjectDigestsInRange(ctx context.Context, + initialUUID, finalUUID strfmt.UUID, limit int) ( + objs []types.RepairResponse, err error, +) { + initialUUIDBytes, err := uuid.MustParse(initialUUID.String()).MarshalBinary() + if err != nil { + return nil, err + } + + finalUUIDBytes, err := uuid.MustParse(finalUUID.String()).MarshalBinary() + if err != nil { + return nil, err + } + + bucket := s.store.Bucket(helpers.ObjectsBucketLSM) + + cursor := bucket.Cursor() + defer cursor.Close() + + n := 0 + + for k, v := cursor.Seek(initialUUIDBytes); n < limit && k != nil && bytes.Compare(k, finalUUIDBytes) < 1; k, v = cursor.Next() { + if ctx.Err() != nil { + return objs, ctx.Err() + } + + obj, err := storobj.FromBinaryUUIDOnly(v) + if err != nil { + return objs, fmt.Errorf("cannot unmarshal object: %w", err) + } + + replicaObj := types.RepairResponse{ + ID: obj.ID().String(), + UpdateTime: obj.LastUpdateTimeUnix(), + // TODO: use version when supported + Version: 
0, + } + + objs = append(objs, replicaObj) + + n++ + } + + return objs, nil +} + +// TODO: This does an actual read which is not really needed, if we see this +// come up in profiling, we could optimize this by adding an explicit Exists() +// on the LSMKV which only checks the bloom filters, which at least in the case +// of a true negative would be considerably faster. For a (false) positive, +// we'd still need to check, though. +func (s *Shard) Exists(ctx context.Context, id strfmt.UUID) (bool, error) { + s.activityTrackerRead.Add(1) + idBytes, err := uuid.MustParse(id.String()).MarshalBinary() + if err != nil { + return false, err + } + + bytes, err := s.store.Bucket(helpers.ObjectsBucketLSM).Get(idBytes) + if err != nil { + return false, errors.Wrap(err, "read request") + } + + if bytes == nil { + return false, nil + } + + return true, nil +} + +func (s *Shard) objectByIndexID(ctx context.Context, indexID uint64, acceptDeleted bool) (*storobj.Object, error) { + keyBuf := make([]byte, 8) + binary.LittleEndian.PutUint64(keyBuf, indexID) + + bytes, err := s.store.Bucket(helpers.ObjectsBucketLSM). + GetBySecondary(0, keyBuf) + if err != nil { + return nil, err + } + + if bytes == nil { + return nil, storobj.NewErrNotFoundf(indexID, + "uuid found for docID, but object is nil") + } + + obj, err := storobj.FromBinary(bytes) + if err != nil { + return nil, errors.Wrap(err, "unmarshal kind object") + } + + return obj, nil +} + +func (s *Shard) vectorByIndexID(ctx context.Context, indexID uint64, targetVector string) ([]float32, error) { + keyBuf := make([]byte, 8) + return s.readVectorByIndexIDIntoSlice(ctx, indexID, &common.VectorSlice{Buff8: keyBuf}, targetVector) +} + +func (s *Shard) readVectorByIndexIDIntoSlice(ctx context.Context, indexID uint64, container *common.VectorSlice, targetVector string) ([]float32, error) { + binary.LittleEndian.PutUint64(container.Buff8, indexID) + + bytes, newBuff, err := s.store.Bucket(helpers.ObjectsBucketLSM). 
+ GetBySecondaryIntoMemory(0, container.Buff8, container.Buff) + if err != nil { + return nil, err + } + + if bytes == nil { + return nil, storobj.NewErrNotFoundf(indexID, + "no object for doc id, it could have been deleted") + } + + container.Buff = newBuff + return storobj.VectorFromBinary(bytes, container.Slice, targetVector) +} + +func (s *Shard) multiVectorByIndexID(ctx context.Context, indexID uint64, targetVector string) ([][]float32, error) { + keyBuf := make([]byte, 8) + return s.readMultiVectorByIndexIDIntoSlice(ctx, indexID, &common.VectorSlice{Buff8: keyBuf}, targetVector) +} + +func (s *Shard) readMultiVectorByIndexIDIntoSlice(ctx context.Context, indexID uint64, container *common.VectorSlice, targetVector string) ([][]float32, error) { + binary.LittleEndian.PutUint64(container.Buff8, indexID) + + bytes, newBuff, err := s.store.Bucket(helpers.ObjectsBucketLSM). + GetBySecondaryIntoMemory(0, container.Buff8, container.Buff) + if err != nil { + return nil, err + } + + if bytes == nil { + return nil, storobj.NewErrNotFoundf(indexID, + "no object for doc id, it could have been deleted") + } + + container.Buff = newBuff + return storobj.MultiVectorFromBinary(bytes, container.Slice, targetVector) +} + +func (s *Shard) ObjectSearch(ctx context.Context, limit int, filters *filters.LocalFilter, + keywordRanking *searchparams.KeywordRanking, sort []filters.Sort, cursor *filters.Cursor, + additional additional.Properties, properties []string, +) ([]*storobj.Object, []float32, error) { + var err error + + // Report slow queries if this method takes longer than expected + startTime := time.Now() + defer func() { + s.slowQueryReporter.LogIfSlow(ctx, startTime, map[string]any{ + "collection": s.index.Config.ClassName, + "shard": s.ID(), + "tenant": s.tenant(), + "query": "ObjectSearch", + "filters": filters, + "limit": limit, + "sort": sort, + "cursor": cursor, + "keyword_ranking": keywordRanking, + "version": s.versioner.Version(), + "additional": additional, + // 
in addition the slowQueryReporter will extract any slow query details + // that may or may not have been written into the ctx + }) + }() + + s.activityTrackerRead.Add(1) + if keywordRanking != nil { + if v := s.versioner.Version(); v < 2 { + return nil, nil, errors.Errorf( + "shard was built with an older version of " + + "Weaviate which does not yet support BM25 search") + } + + var bm25objs []*storobj.Object + var bm25count []float32 + var filterDocIds helpers.AllowList + + if filters != nil { + filterDocIds, err = inverted.NewSearcher(s.index.logger, s.store, + s.index.getSchema.ReadOnlyClass, s.propertyIndices, + s.index.classSearcher, s.index.stopwords, s.versioner.Version(), + s.isFallbackToSearchable, s.tenant(), s.index.Config.QueryNestedRefLimit, + s.bitmapFactory). + DocIDs(ctx, filters, additional, s.index.Config.ClassName) + if err != nil { + return nil, nil, err + } + + defer filterDocIds.Close() + } + + className := s.index.Config.ClassName + bm25Config := s.index.GetInvertedIndexConfig().BM25 + logger := s.index.logger.WithFields(logrus.Fields{"class": s.index.Config.ClassName, "shard": s.name}) + bm25searcher := inverted.NewBM25Searcher(bm25Config, s.store, + s.index.getSchema.ReadOnlyClass, s.propertyIndices, s.index.classSearcher, + s.GetPropertyLengthTracker(), logger, s.versioner.Version()) + bm25objs, bm25count, err = bm25searcher.BM25F(ctx, filterDocIds, className, limit, *keywordRanking, additional) + if err != nil { + return nil, nil, err + } + + return bm25objs, bm25count, nil + } + + if filters == nil { + objs, err := s.ObjectList(ctx, limit, sort, + cursor, additional, s.index.Config.ClassName) + return objs, nil, err + } + objs, err := inverted.NewSearcher(s.index.logger, s.store, s.index.getSchema.ReadOnlyClass, + s.propertyIndices, s.index.classSearcher, s.index.stopwords, s.versioner.Version(), + s.isFallbackToSearchable, s.tenant(), s.index.Config.QueryNestedRefLimit, s.bitmapFactory). 
+ Objects(ctx, limit, filters, sort, additional, s.index.Config.ClassName, properties, + s.index.Config.InvertedSorterDisabled) + return objs, nil, err +} + +func (s *Shard) VectorDistanceForQuery(ctx context.Context, docId uint64, searchVectors []models.Vector, targetVectors []string) ([]float32, error) { + if len(targetVectors) != len(searchVectors) || len(targetVectors) == 0 { + return nil, fmt.Errorf("target vectors and search vectors must have the same non-zero length") + } + + distances := make([]float32, len(targetVectors)) + for j, target := range targetVectors { + index, ok := s.GetVectorIndex(target) + if !ok { + return nil, fmt.Errorf("index %s not found", target) + } + var distancer common.QueryVectorDistancer + switch v := searchVectors[j].(type) { + case []float32: + distancer = index.QueryVectorDistancer(v) + case [][]float32: + distancer = index.(VectorIndexMulti).QueryMultiVectorDistancer(v) + default: + return nil, fmt.Errorf("unsupported vector type: %T", v) + } + dist, err := distancer.DistanceToNode(docId) + if err != nil { + return nil, err + } + distances[j] = dist + } + return distances, nil +} + +func (s *Shard) ObjectVectorSearch(ctx context.Context, searchVectors []models.Vector, targetVectors []string, targetDist float32, limit int, filters *filters.LocalFilter, sort []filters.Sort, groupBy *searchparams.GroupBy, additional additional.Properties, targetCombination *dto.TargetCombination, properties []string) ([]*storobj.Object, []float32, error) { + startTime := time.Now() + + defer func() { + s.slowQueryReporter.LogIfSlow(ctx, startTime, map[string]any{ + "collection": s.index.Config.ClassName, + "shard": s.ID(), + "tenant": s.tenant(), + "query": "ObjectVectorSearch", + "filters": filters, + "limit": limit, + "sort": sort, + "version": s.versioner.Version(), + "additional": additional, + "group_by": groupBy, + // in addition the slowQueryReporter will extract any slow query details + // that may or may not have been written into the 
ctx + }) + }() + + s.activityTrackerRead.Add(1) + + var allowList helpers.AllowList + if filters != nil { + beforeFilter := time.Now() + list, err := s.buildAllowList(ctx, filters, additional) + if err != nil { + return nil, nil, err + } + allowList = list + took := time.Since(beforeFilter) + s.metrics.FilteredVectorFilter(took) + helpers.AnnotateSlowQueryLog(ctx, "filters_build_allow_list_took", took) + helpers.AnnotateSlowQueryLog(ctx, "filters_ids_matched", allowList.Len()) + } + + eg := enterrors.NewErrorGroupWrapper(s.index.logger) + eg.SetLimit(_NUMCPU) + idss := make([][]uint64, len(targetVectors)) + distss := make([][]float32, len(targetVectors)) + beforeVector := time.Now() + + for i, targetVector := range targetVectors { + i := i + targetVector := targetVector + eg.Go(func() error { + var ( + ids []uint64 + dists []float32 + err error + ) + + vidx, ok := s.GetVectorIndex(targetVector) + if !ok { + return fmt.Errorf("index for target vector %q not found", targetVector) + } + + if limit < 0 { + switch searchVector := searchVectors[i].(type) { + case []float32: + ids, dists, err = vidx.SearchByVectorDistance( + ctx, searchVector, targetDist, s.index.Config.QueryMaximumResults, allowList) + if err != nil { + // This should normally not fail. A failure here could indicate that more + // attention is required, for example because data is corrupted. That's + // why this error is explicitly pushed to sentry. + err = fmt.Errorf("vector search by distance: %w", err) + entsentry.CaptureException(err) + return err + } + case [][]float32: + ids, dists, err = vidx.(VectorIndexMulti).SearchByMultiVectorDistance( + ctx, searchVector, targetDist, s.index.Config.QueryMaximumResults, allowList) + if err != nil { + // This should normally not fail. A failure here could indicate that more + // attention is required, for example because data is corrupted. That's + // why this error is explicitly pushed to sentry. 
+ err = fmt.Errorf("multi vector search by distance: %w", err) + entsentry.CaptureException(err) + return err + } + default: + return fmt.Errorf("vector search by distance: unsupported type: %T", searchVectors[i]) + } + } else { + switch searchVector := searchVectors[i].(type) { + case []float32: + ids, dists, err = vidx.SearchByVector(ctx, searchVector, limit, allowList) + if err != nil { + // This should normally not fail. A failure here could indicate that more + // attention is required, for example because data is corrupted. That's + // why this error is explicitly pushed to sentry. + err = fmt.Errorf("vector search: %w", err) + // annotate for sentry so we know which collection/shard this happened on + entsentry.CaptureException(fmt.Errorf("collection %q shard %q: %w", + s.index.Config.ClassName, s.name, err)) + return err + } + case [][]float32: + ids, dists, err = vidx.(VectorIndexMulti).SearchByMultiVector(ctx, searchVector, limit, allowList) + if err != nil { + // This should normally not fail. A failure here could indicate that more + // attention is required, for example because data is corrupted. That's + // why this error is explicitly pushed to sentry. 
+ err = fmt.Errorf("multi vector search: %w", err) + // annotate for sentry so we know which collection/shard this happened on + entsentry.CaptureException(fmt.Errorf("collection %q shard %q: %w", + s.index.Config.ClassName, s.name, err)) + return err + } + default: + return fmt.Errorf("vector search: unsupported type: %T", searchVectors[i]) + } + } + if len(ids) == 0 { + return nil + } + + idss[i] = ids + distss[i] = dists + return nil + }) + } + + err := eg.Wait() + if allowList != nil { + allowList.Close() + } + if err != nil { + return nil, nil, err + } + + idsCombined, distCombined, err := CombineMultiTargetResults(ctx, s, s.index.logger, idss, distss, targetVectors, searchVectors, targetCombination, limit, targetDist) + if err != nil { + return nil, nil, err + } + + if filters != nil { + s.metrics.FilteredVectorVector(time.Since(beforeVector)) + } + helpers.AnnotateSlowQueryLog(ctx, "vector_search_took", time.Since(beforeVector)) + + if groupBy != nil { + objs, dists, err := s.groupResults(ctx, idsCombined, distCombined, groupBy, additional, properties) + if err != nil { + return nil, nil, err + } + return objs, dists, nil + } + + if len(sort) > 0 { + beforeSort := time.Now() + idsCombined, distCombined, err = s.sortDocIDsAndDists(ctx, limit, sort, + s.index.Config.ClassName, idsCombined, distCombined) + if err != nil { + return nil, nil, errors.Wrap(err, "vector search sort") + } + took := time.Since(beforeSort) + if filters != nil { + s.metrics.FilteredVectorSort(took) + } + helpers.AnnotateSlowQueryLog(ctx, "sort_took", took) + } + + beforeObjects := time.Now() + + bucket := s.store.Bucket(helpers.ObjectsBucketLSM) + objs, err := storobj.ObjectsByDocID(bucket, idsCombined, additional, properties, s.index.logger) + if err != nil { + return nil, nil, err + } + + took := time.Since(beforeObjects) + if filters != nil { + s.metrics.FilteredVectorObjects(took) + } + + helpers.AnnotateSlowQueryLog(ctx, "objects_took", took) + return objs, distCombined, nil +} + 
+func (s *Shard) ObjectList(ctx context.Context, limit int, sort []filters.Sort, cursor *filters.Cursor, additional additional.Properties, className schema.ClassName) ([]*storobj.Object, error) { + s.activityTrackerRead.Add(1) + if len(sort) > 0 { + beforeSort := time.Now() + docIDs, err := s.sortedObjectList(ctx, limit, sort, className) + if err != nil { + return nil, err + } + helpers.AnnotateSlowQueryLog(ctx, "sort_took", time.Since(beforeSort)) + bucket := s.store.Bucket(helpers.ObjectsBucketLSM) + + beforeObjects := time.Now() + defer func() { + took := time.Since(beforeObjects) + helpers.AnnotateSlowQueryLog(ctx, "objects_took", took) + }() + return storobj.ObjectsByDocID(bucket, docIDs, additional, nil, s.index.logger) + } + + if cursor == nil { + cursor = &filters.Cursor{After: "", Limit: limit} + } + return s.cursorObjectList(ctx, cursor, additional, className) +} + +func (s *Shard) cursorObjectList(ctx context.Context, c *filters.Cursor, + additional additional.Properties, + className schema.ClassName, +) ([]*storobj.Object, error) { + cursor := s.store.Bucket(helpers.ObjectsBucketLSM).Cursor() + defer cursor.Close() + + var key, val []byte + if c.After == "" { + key, val = cursor.First() + } else { + uuidBytes, err := uuid.MustParse(c.After).MarshalBinary() + if err != nil { + return nil, errors.Wrap(err, "after argument is not a valid uuid") + } + key, val = cursor.Seek(uuidBytes) + if bytes.Equal(key, uuidBytes) { + // move cursor by one if it's the same ID + key, val = cursor.Next() + } + } + + i := 0 + out := make([]*storobj.Object, c.Limit) + + for ; key != nil && i < c.Limit; key, val = cursor.Next() { + obj, err := storobj.FromBinary(val) + if err != nil { + return nil, errors.Wrapf(err, "unmarhsal item %d", i) + } + + out[i] = obj + i++ + } + + return out[:i], nil +} + +func (s *Shard) sortedObjectList(ctx context.Context, limit int, sort []filters.Sort, + className schema.ClassName, +) ([]uint64, error) { + lsmSorter, err := 
sorter.NewLSMSorter(s.store, s.index.getSchema.ReadOnlyClass, + className, s.index.Config.InvertedSorterDisabled) + if err != nil { + return nil, errors.Wrap(err, "sort object list") + } + docIDs, err := lsmSorter.Sort(ctx, limit, sort) + if err != nil { + return nil, errors.Wrap(err, "sort object list") + } + return docIDs, nil +} + +func (s *Shard) sortDocIDsAndDists(ctx context.Context, limit int, sort []filters.Sort, + className schema.ClassName, docIDs []uint64, dists []float32, +) ([]uint64, []float32, error) { + lsmSorter, err := sorter.NewLSMSorter(s.store, s.index.getSchema.ReadOnlyClass, + className, s.index.Config.InvertedSorterDisabled) + if err != nil { + return nil, nil, errors.Wrap(err, "sort objects with distances") + } + sortedDocIDs, sortedDists, err := lsmSorter.SortDocIDsAndDists(ctx, limit, sort, docIDs, dists) + if err != nil { + return nil, nil, errors.Wrap(err, "sort objects with distances") + } + return sortedDocIDs, sortedDists, nil +} + +func (s *Shard) buildAllowList(ctx context.Context, filters *filters.LocalFilter, addl additional.Properties) (helpers.AllowList, error) { + list, err := inverted.NewSearcher(s.index.logger, s.store, s.index.getSchema.ReadOnlyClass, + s.propertyIndices, s.index.classSearcher, s.index.stopwords, s.versioner.Version(), + s.isFallbackToSearchable, s.tenant(), s.index.Config.QueryNestedRefLimit, s.bitmapFactory). 
+ DocIDs(ctx, filters, addl, s.index.Config.ClassName) + if err != nil { + return nil, errors.Wrap(err, "build inverted filter allow list") + } + + return list, nil +} + +func (s *Shard) uuidFromDocID(docID uint64) (strfmt.UUID, error) { + bucket := s.store.Bucket(helpers.ObjectsBucketLSM) + if bucket == nil { + return "", errors.Errorf("objects bucket not found") + } + + keyBuf := bytes.NewBuffer(nil) + err := binary.Write(keyBuf, binary.LittleEndian, &docID) + if err != nil { + return "", fmt.Errorf("write doc id to buffer: %w", err) + } + docIDBytes := keyBuf.Bytes() + res, err := bucket.GetBySecondary(0, docIDBytes) + if err != nil { + return "", fmt.Errorf("get object by doc id: %w", err) + } + + prop, _, err := storobj.ParseAndExtractProperty(res, "id") + if err != nil { + return "", fmt.Errorf("parse and extract property: %w", err) + } + + return strfmt.UUID(prop[0]), nil +} + +func (s *Shard) batchDeleteObject(ctx context.Context, id strfmt.UUID, deletionTime time.Time) error { + s.asyncReplicationRWMux.RLock() + defer s.asyncReplicationRWMux.RUnlock() + + err := s.waitForMinimalHashTreeInitialization(ctx) + if err != nil { + return err + } + + idBytes, err := uuid.MustParse(id.String()).MarshalBinary() + if err != nil { + return err + } + + bucket := s.store.Bucket(helpers.ObjectsBucketLSM) + + // see comment in shard_write_put.go::putObjectLSM + lock := &s.docIdLock[s.uuidToIdLockPoolId(idBytes)] + + lock.Lock() + defer lock.Unlock() + + existing, err := bucket.Get(idBytes) + if err != nil { + return errors.Wrap(err, "unexpected error on previous lookup") + } + + if existing == nil { + // nothing to do + return nil + } + + // we need the doc ID so we can clean up inverted indices currently + // pointing to this object + docID, updateTime, err := storobj.DocIDAndTimeFromBinary(existing) + if err != nil { + return errors.Wrap(err, "get existing doc id from object binary") + } + + if deletionTime.IsZero() { + err = bucket.Delete(idBytes) + } else { + err = 
bucket.DeleteWith(idBytes, deletionTime) + } + if err != nil { + return errors.Wrap(err, "delete object from bucket") + } + + if err = s.mayDeleteObjectHashTree(idBytes, updateTime); err != nil { + return errors.Wrap(err, "object deletion in hashtree") + } + + err = s.cleanupInvertedIndexOnDelete(existing, docID) + if err != nil { + return errors.Wrap(err, "delete object from bucket") + } + + err = s.ForEachVectorQueue(func(targetVector string, queue *VectorIndexQueue) error { + if err = queue.Delete(docID); err != nil { + return fmt.Errorf("delete from vector index queue of vector %q: %w", targetVector, err) + } + return nil + }) + if err != nil { + return err + } + + return nil +} + +func (s *Shard) WasDeleted(ctx context.Context, id strfmt.UUID) (bool, time.Time, error) { + s.activityTrackerRead.Add(1) + idBytes, err := uuid.MustParse(id.String()).MarshalBinary() + if err != nil { + return false, time.Time{}, err + } + + bucket := s.store.Bucket(helpers.ObjectsBucketLSM) + return bucket.WasDeleted(idBytes) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_replication.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_replication.go new file mode 100644 index 0000000000000000000000000000000000000000..7ce5b150cfd2618c3f872e235fcb347b7a663919 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_replication.go @@ -0,0 +1,196 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/replica" +) + +type replicaTask func(context.Context) interface{} + +type pendingReplicaTasks struct { + sync.Mutex + Tasks map[string]replicaTask +} + +func (p *pendingReplicaTasks) clear() { + p.Lock() + // TODO: can we postpone deletion until all pending replications are done + p.Tasks = nil + p.Unlock() +} + +func (p *pendingReplicaTasks) get(requestID string) (replicaTask, bool) { + p.Lock() + defer p.Unlock() + t, ok := p.Tasks[requestID] + return t, ok +} + +func (p *pendingReplicaTasks) set(requestID string, task replicaTask) { + p.Lock() + p.Tasks[requestID] = task + p.Unlock() +} + +func (p *pendingReplicaTasks) delete(requestID string) { + p.Lock() + delete(p.Tasks, requestID) + p.Unlock() +} + +func (s *Shard) commitReplication(ctx context.Context, requestID string, backupReadLock *shardTransfer) interface{} { + f, ok := s.replicationMap.get(requestID) + if !ok { + return nil + } + defer s.replicationMap.delete(requestID) + backupReadLock.RLock() + defer backupReadLock.RUnlock() + + return f(ctx) +} + +func (s *Shard) abortReplication(ctx context.Context, requestID string) replica.SimpleResponse { + s.replicationMap.delete(requestID) + return replica.SimpleResponse{} +} + +func (s *Shard) preparePutObject(ctx context.Context, requestID string, object *storobj.Object) replica.SimpleResponse { + uuid, err := parseBytesUUID(object.ID()) + if err != nil { + return replica.SimpleResponse{Errors: []replica.Error{{ + Code: replica.StatusPreconditionFailed, Msg: err.Error(), + }}} + } + task := func(ctx context.Context) interface{} { + resp := replica.SimpleResponse{} + if err := s.putOne(ctx, uuid, object); err != nil { + resp.Errors = 
[]replica.Error{ + {Code: replica.StatusConflict, Msg: err.Error()}, + } + } + return resp + } + s.replicationMap.set(requestID, task) + return replica.SimpleResponse{} +} + +func (s *Shard) prepareMergeObject(ctx context.Context, requestID string, doc *objects.MergeDocument) replica.SimpleResponse { + uuid, err := parseBytesUUID(doc.ID) + if err != nil { + return replica.SimpleResponse{Errors: []replica.Error{ + {Code: replica.StatusPreconditionFailed, Msg: err.Error()}, + }} + } + task := func(ctx context.Context) interface{} { + resp := replica.SimpleResponse{} + if err := s.merge(ctx, uuid, *doc); err != nil { + var code replica.StatusCode + if errors.Is(err, errObjectNotFound) { + code = replica.StatusObjectNotFound + } else { + code = replica.StatusConflict + } + resp.Errors = []replica.Error{ + {Code: code, Msg: err.Error()}, + } + } + return resp + } + s.replicationMap.set(requestID, task) + return replica.SimpleResponse{} +} + +func (s *Shard) prepareDeleteObject(ctx context.Context, requestID string, uuid strfmt.UUID, deletionTime time.Time) replica.SimpleResponse { + task := func(ctx context.Context) interface{} { + resp := replica.SimpleResponse{} + if err := s.DeleteObject(ctx, uuid, deletionTime); err != nil { + resp.Errors = []replica.Error{ + {Code: replica.StatusConflict, Msg: err.Error()}, + } + } + return resp + } + s.replicationMap.set(requestID, task) + return replica.SimpleResponse{} +} + +func (s *Shard) preparePutObjects(ctx context.Context, requestID string, objects []*storobj.Object) replica.SimpleResponse { + task := func(ctx context.Context) interface{} { + rawErrs := s.putBatch(ctx, objects) + resp := replica.SimpleResponse{Errors: make([]replica.Error, len(rawErrs))} + for i, err := range rawErrs { + if err != nil { + resp.Errors[i] = replica.Error{Code: replica.StatusConflict, Msg: err.Error()} + } + } + return resp + } + s.replicationMap.set(requestID, task) + return replica.SimpleResponse{} +} + +func (s *Shard) 
prepareDeleteObjects(ctx context.Context, requestID string, + uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, +) replica.SimpleResponse { + task := func(ctx context.Context) interface{} { + result := newDeleteObjectsBatcher(s).Delete(ctx, uuids, deletionTime, dryRun) + resp := replica.DeleteBatchResponse{ + Batch: make([]replica.UUID2Error, len(result)), + } + + for i, r := range result { + entry := replica.UUID2Error{UUID: string(r.UUID)} + if err := r.Err; err != nil { + entry.Error = replica.Error{Code: replica.StatusConflict, Msg: err.Error()} + } + resp.Batch[i] = entry + } + return resp + } + s.replicationMap.set(requestID, task) + return replica.SimpleResponse{} +} + +func (s *Shard) prepareAddReferences(ctx context.Context, requestID string, refs []objects.BatchReference) replica.SimpleResponse { + task := func(ctx context.Context) interface{} { + rawErrs := newReferencesBatcher(s).References(ctx, refs) + resp := replica.SimpleResponse{Errors: make([]replica.Error, len(rawErrs))} + for i, err := range rawErrs { + if err != nil { + resp.Errors[i] = replica.Error{Code: replica.StatusConflict, Msg: err.Error()} + } + } + return resp + } + s.replicationMap.set(requestID, task) + return replica.SimpleResponse{} +} + +func parseBytesUUID(id strfmt.UUID) ([]byte, error) { + uuid, err := uuid.Parse(string(id)) + if err != nil { + return nil, fmt.Errorf("parse uuid %q: %w", id, err) + } + return uuid.MarshalBinary() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_shutdown.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_shutdown.go new file mode 100644 index 0000000000000000000000000000000000000000..bedd690d579f641fcf17153e6450ff483e9f8e89 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_shutdown.go @@ -0,0 +1,199 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ 
|_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "time" + + "github.com/cenkalti/backoff/v4" + + "github.com/weaviate/weaviate/entities/cyclemanager" + "github.com/weaviate/weaviate/entities/errorcompounder" + "github.com/weaviate/weaviate/entities/storagestate" +) + +func (s *Shard) Shutdown(ctx context.Context) (err error) { + s.shutdownRequested.Store(true) + return backoff.Retry(func() error { + // this retry to make sure it's retried in case + // the performShutdown() returned shard still in use + return s.performShutdown(ctx) + }, backoff.WithContext(backoff.WithMaxRetries( + // this will try with max 2 seconds could be configurable later on + backoff.NewConstantBackOff(200*time.Millisecond), 10), ctx)) +} + +/* + + batch + shut + false + in_use ++ + defer in_use -- + true + fail request + + shutdown + loop + time: + if shut == true + fail request + in_use == 0 && shut == false + shut = true + +*/ +// Shutdown needs to be idempotent, so it can also deal with a partial +// initialization. In some parts, it relies on the underlying structs to have +// idempotent Shutdown methods. In other parts, it explicitly checks if a +// component was initialized. If not, it turns it into a noop to prevent +// blocking. +func (s *Shard) performShutdown(ctx context.Context) (err error) { + s.shutdownLock.Lock() + defer s.shutdownLock.Unlock() + + if s.shut.Load() { + s.shutdownRequested.Store(false) + s.index.logger. + WithField("action", "shutdown"). + Debugf("shard %q is already shut down", s.name) + // shutdown is idempotent + return nil + } + if s.inUseCounter.Load() > 0 { + s.index.logger. + WithField("action", "shutdown"). 
+ Debugf("shard %q is still in use", s.name) + return fmt.Errorf("shard %q is still in use", s.name) + } + s.shut.Store(true) + s.shutdownRequested.Store(false) + start := time.Now() + defer func() { + s.index.metrics.ObserveUpdateShardStatus(storagestate.StatusShutdown.String(), time.Since(start)) + }() + + s.reindexer.Stop(s, fmt.Errorf("shard shutdown")) + + s.haltForTransferMux.Lock() + if s.haltForTransferCancel != nil { + s.haltForTransferCancel() + } + s.haltForTransferMux.Unlock() + + ec := errorcompounder.New() + + err = s.GetPropertyLengthTracker().Close() + ec.AddWrap(err, "close prop length tracker") + + // unregister all callbacks at once, in parallel + err = cyclemanager.NewCombinedCallbackCtrl(0, s.index.logger, + s.cycleCallbacks.compactionCallbacksCtrl, + s.cycleCallbacks.compactionAuxCallbacksCtrl, + s.cycleCallbacks.flushCallbacksCtrl, + s.cycleCallbacks.vectorCombinedCallbacksCtrl, + s.cycleCallbacks.geoPropsCombinedCallbacksCtrl, + ).Unregister(ctx) + ec.Add(err) + + s.mayStopAsyncReplication() + + _ = s.ForEachVectorQueue(func(targetVector string, queue *VectorIndexQueue) error { + if err = queue.Flush(); err != nil { + ec.Add(fmt.Errorf("flush vector index queue commitlog of vector %q: %w", targetVector, err)) + } + + if err = queue.Close(); err != nil { + ec.Add(fmt.Errorf("shut down vector index queue of vector %q: %w", targetVector, err)) + } + + return nil + }) + + _ = s.ForEachVectorIndex(func(targetVector string, index VectorIndex) error { + // to ensure that all commitlog entries are written to disk. 
+ // otherwise in some cases the tombstone cleanup process' + // 'RemoveTombstone' entry is not picked up on restarts + // resulting in perpetually attempting to remove a tombstone + // which doesn't actually exist anymore + if err = index.Flush(); err != nil { + ec.Add(fmt.Errorf("flush vector index commitlog of vector %q: %w", targetVector, err)) + } + + if err = index.Shutdown(ctx); err != nil { + ec.Add(fmt.Errorf("shut down vector index of vector %q: %w", targetVector, err)) + } + + return nil + }) + + if s.store != nil { + s.UpdateStatus(storagestate.StatusShutdown.String(), "shutdown") + + // store would be nil if loading the objects bucket failed, as we would + // only return the store on success from s.initLSMStore() + err = s.store.Shutdown(ctx) + ec.AddWrap(err, "stop lsmkv store") + } + + if s.dynamicVectorIndexDB != nil { + err = s.dynamicVectorIndexDB.Close() + ec.AddWrap(err, "stop dynamic vector index db") + } + + return ec.ToError() +} + +func (s *Shard) preventShutdown() (release func(), err error) { + if s.shutdownRequested.Load() { + return func() {}, errShutdownInProgress + } + s.shutdownLock.RLock() + defer s.shutdownLock.RUnlock() + + if s.shut.Load() { + return func() {}, errAlreadyShutdown + } + + s.refCountAdd() + return func() { s.refCountSub() }, nil +} + +func (s *Shard) refCountAdd() { + s.inUseCounter.Add(1) +} + +func (s *Shard) refCountSub() { + s.inUseCounter.Add(-1) + // if the counter is 0, we can shutdown + if s.inUseCounter.Load() == 0 && s.shutdownRequested.Load() { + s.performShutdown(context.TODO()) + } +} + +// // cleanupPartialInit is called when the shard was only partially initialized. +// // Internally it just uses [Shutdown], but also adds some logging. 
+// func (s *Shard) cleanupPartialInit(ctx context.Context) { +// log := s.index.logger.WithField("action", "cleanup_partial_initialization") +// if err := s.Shutdown(ctx); err != nil { +// log.WithError(err).Error("failed to shutdown store") +// } + +// log.Debug("successfully cleaned up partially initialized shard") +// } + +// func (s *Shard) NotifyReady() { +// s.initStatus() +// s.index.logger. +// WithField("action", "startup"). +// Debugf("shard=%s is ready", s.name) +// } diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_shutdown_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_shutdown_test.go new file mode 100644 index 0000000000000000000000000000000000000000..abe265cefe32b269d3c1ef44a979d88d3be4d7e8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_shutdown_test.go @@ -0,0 +1,230 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +func TestShardShutdownWhenIdle(t *testing.T) { + dirName := t.TempDir() + index, cleanup := initIndexAndPopulate(t, dirName) + defer cleanup() + + var shardName string + index.shards.Range(func(name string, _ ShardLike) error { + shardName = name + return nil + }) + + // use shard + shard, release1, err := index.GetShard(context.Background(), shardName) + require.NoError(t, err) + require.NotNil(t, shard) + require.NotNil(t, release1) + + // use same shard + sameShard, release2, err := index.GetShard(context.Background(), shardName) + require.NoError(t, err) + require.NotNil(t, sameShard) + require.NotNil(t, release2) + + // sanity check, no flags marked + requireShardShutdownRequested(t, shard, false) + requireShardShut(t, shard, false) + + // release shard 2x + release1() + release2() + + // shutdown succeeds, shard idle + err = shard.Shutdown(context.Background()) + require.NoError(t, err) + requireShardShutdownRequested(t, shard, false) + requireShardShut(t, shard, true) +} + +func TestShardShutdownWhenIdleEventually(t *testing.T) { + dirName := t.TempDir() + index, cleanup := initIndexAndPopulate(t, dirName) + defer cleanup() + + var shardName string + index.shards.Range(func(name string, _ ShardLike) 
error { + shardName = name + return nil + }) + + // use shard + shard, release1, err := index.GetShard(context.Background(), shardName) + require.NoError(t, err) + require.NotNil(t, shard) + require.NotNil(t, release1) + + // use same shard + sameShard, release2, err := index.GetShard(context.Background(), shardName) + require.NoError(t, err) + require.NotNil(t, sameShard) + require.NotNil(t, release2) + + // sanity check, no flags marked + requireShardShutdownRequested(t, shard, false) + requireShardShut(t, shard, false) + + // shutdown fails, shard in use 2x + err = shard.Shutdown(context.Background()) + require.ErrorContains(t, err, "still in use") + requireShardShutdownRequested(t, shard, true) + requireShardShut(t, shard, false) + + // getting shard fails, shutdown in progress + sameShardAgain, _, err := index.GetShard(context.Background(), shardName) + require.ErrorIs(t, err, errShutdownInProgress) + require.Nil(t, sameShardAgain) + + // release shard 1x + release1() + + // shutdown still in progress, shard in use 1x + requireShardShutdownRequested(t, shard, true) + requireShardShut(t, shard, false) + + // release shard 1x + release2() + + // shutdown eventually completed, shard idle + requireShardShutdownRequested(t, shard, false) + requireShardShut(t, shard, true) + + // getting shard fails, shutdown completed + sameShardYetAgain, _, err := index.GetShard(context.Background(), shardName) + require.ErrorIs(t, err, errAlreadyShutdown) + require.Nil(t, sameShardYetAgain) +} + +func initIndexAndPopulate(t *testing.T, dirName string) (index *Index, cleanup func()) { + logger, _ := test.NewNullLogger() + className := "Test" + + // create db + shardState := singleShardState() + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: nil}}, + shardState: shardState, + } + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + 
mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + RootPath: dirName, + QueryMaximumResults: 10000, + MaxImportGoroutinesFactor: 1, + TrackVectorDimensions: true, + }, + &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, + &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader, + ) + require.NoError(t, err) + + repo.SetSchemaGetter(schemaGetter) + err = repo.WaitForStartup(testCtx()) + require.NoError(t, err) + + cleanup = func() { repo.Shutdown(context.Background()) } + runCleanup := true // run cleanup if method fails + defer func() { + if runCleanup { + cleanup() + } + }() + + // set schema + class := &models.Class{ + Class: className, + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + InvertedIndexConfig: invertedConfig(), + } + sch := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + migrator := NewMigrator(repo, logger, "node1") + err = 
migrator.AddClass(context.Background(), class) + require.NoError(t, err) + schemaGetter.schema = sch + + // import objects + for i := 0; i < 10; i++ { + v := float32(i) + vec := []float32{v, v + 1, v + 2, v + 3} + + id := strfmt.UUID(uuid.MustParse(fmt.Sprintf("%032d", i)).String()) + obj := &models.Object{Class: className, ID: id} + err := repo.PutObject(context.Background(), obj, vec, nil, nil, nil, 0) + require.NoError(t, err) + } + + index = repo.GetIndex(schema.ClassName(className)) + runCleanup = false // all good, let caller cleanup + return index, cleanup +} + +func requireShardShutdownRequested(t *testing.T, shard ShardLike, expected bool) { + if expected { + require.True(t, shard.(*LazyLoadShard).shard.shutdownRequested.Load(), "shard should be marked for shut down") + } else { + require.False(t, shard.(*LazyLoadShard).shard.shutdownRequested.Load(), "shard should not be marked for shut down") + } +} + +func requireShardShut(t *testing.T, shard ShardLike, expected bool) { + if expected { + require.True(t, shard.(*LazyLoadShard).shard.shut.Load(), "shard should be marked as shut down") + } else { + require.False(t, shard.(*LazyLoadShard).shard.shut.Load(), "shard should not be marked as shut down") + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_skip_vector_reindex_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_skip_vector_reindex_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9d29a1f7066e803df8fa3283266dcc445e584e1d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_skip_vector_reindex_integration_test.go @@ -0,0 +1,1354 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "os" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/entities/vectorindex/common" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/objects" +) + +func TestShard_SkipVectorReindex(t *testing.T) { + ctx := context.Background() + + uuid_ := strfmt.UUID(uuid.NewString()) + origCreateTimeUnix := int64(1704161045) + origUpdateTimeUnix := int64(1704161046) + updCreateTimeUnix := int64(1704161047) + updUpdateTimeUnix := int64(1704161048) + vector := []float32{1, 2, 3} + altVector := []float32{10, 0, -20} + + class := &models.Class{ + Class: "TestClass", + InvertedIndexConfig: &models.InvertedIndexConfig{ + IndexTimestamps: true, + IndexNullState: true, + IndexPropertyLength: true, + UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND, + }, + Properties: []*models.Property{ + { + Name: "texts", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWord, + }, + { + Name: "numbers", + DataType: schema.DataTypeNumberArray.PropString(), + }, + { + Name: "ints", + DataType: schema.DataTypeIntArray.PropString(), + }, + { + Name: "booleans", + DataType: schema.DataTypeBooleanArray.PropString(), + }, + { + Name: "dates", + DataType: schema.DataTypeDateArray.PropString(), + }, + { + Name: "uuids", + DataType: schema.DataTypeUUIDArray.PropString(), + }, + { + Name: "text", + DataType: schema.DataTypeText.PropString(), + Tokenization: 
models.PropertyTokenizationWord, + }, + { + Name: "number", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "int", + DataType: schema.DataTypeInt.PropString(), + }, + { + Name: "boolean", + DataType: schema.DataTypeBoolean.PropString(), + }, + { + Name: "date", + DataType: schema.DataTypeDate.PropString(), + }, + { + Name: "uuid", + DataType: schema.DataTypeUUID.PropString(), + }, + { + Name: "geo", + DataType: schema.DataTypeGeoCoordinates.PropString(), + }, + }, + } + props := make([]string, len(class.Properties)) + for i, prop := range class.Properties { + props[i] = prop.Name + } + + createOrigObj := func() *storobj.Object { + return &storobj.Object{ + MarshallerVersion: 1, + Object: models.Object{ + ID: uuid_, + Class: class.Class, + Properties: map[string]interface{}{ + "texts": []string{ + "aaa", + "bbb", + "ccc", + }, + "numbers": []interface{}{}, + "ints": []float64{ + 101, 101, 101, 101, 101, 101, + 102, + 103, + 104, + }, + "booleans": []bool{ + true, true, true, + false, + }, + "dates": []time.Time{ + mustParseTime("2001-06-01T12:00:00.000000Z"), + mustParseTime("2002-06-02T12:00:00.000000Z"), + }, + // no uuids + "text": "ddd", + // no number + "int": float64(201), + "boolean": false, + "date": mustParseTime("2003-06-01T12:00:00.000000Z"), + // no uuid + "geo": &models.GeoCoordinates{ + Latitude: ptFloat32(1.1), + Longitude: ptFloat32(2.2), + }, + }, + CreationTimeUnix: origCreateTimeUnix, + LastUpdateTimeUnix: origUpdateTimeUnix, + }, + Vector: vector, + } + } + createUpdObj := func() *storobj.Object { + return &storobj.Object{ + MarshallerVersion: 1, + Object: models.Object{ + ID: uuid_, + Class: class.Class, + Properties: map[string]interface{}{ + "texts": []interface{}{}, + // no numbers + "ints": []float64{ + 101, 101, 101, 101, + 103, + 104, + 105, + }, + "booleans": []bool{ + true, true, true, + false, + }, + // no dates + "uuids": []uuid.UUID{ + uuid.MustParse("d726c960-aede-411c-85d3-2c77e9290a6e"), + }, + "text": "", + // no 
number + "int": float64(202), + "boolean": true, + // no date + "uuid": uuid.MustParse("7fabaf01-9e10-458a-acea-cc627376c506"), + "geo": &models.GeoCoordinates{ + Latitude: ptFloat32(1.1), + Longitude: ptFloat32(2.2), + }, + }, + CreationTimeUnix: updCreateTimeUnix, + LastUpdateTimeUnix: updUpdateTimeUnix, + }, + Vector: vector, + } + } + createMergeDoc := func() objects.MergeDocument { + return objects.MergeDocument{ + ID: uuid_, + Class: class.Class, + PrimitiveSchema: map[string]interface{}{ + "texts": []interface{}{}, + "ints": []interface{}{ + float64(101), float64(101), float64(101), float64(101), + float64(103), + float64(104), + float64(105), + }, + "uuids": []interface{}{ + uuid.MustParse("d726c960-aede-411c-85d3-2c77e9290a6e"), + }, + "text": "", + "int": float64(202), + "boolean": true, + "uuid": uuid.MustParse("7fabaf01-9e10-458a-acea-cc627376c506"), + }, + UpdateTime: updUpdateTimeUnix, + PropertiesToDelete: []string{"numbers", "dates", "date"}, + Vector: vector, + } + } + + filterId := filterEqual[string](string(uuid_), schema.DataTypeText, class.Class, "_id") + + filterTextsEqAAA := filterEqual[string]("aaa", schema.DataTypeText, class.Class, "texts") + filterTextsLen3 := filterEqual[int](3, schema.DataTypeInt, class.Class, "len(texts)") + filterTextsLen0 := filterEqual[int](0, schema.DataTypeInt, class.Class, "len(texts)") + filterTextsNotNil := filterNil(false, class.Class, "texts") + filterTextsNil := filterNil(true, class.Class, "texts") + + filterNumbersEq123 := filterEqual[float64](1.23, schema.DataTypeNumber, class.Class, "numbers") + filterNumbersLen1 := filterEqual[int](1, schema.DataTypeInt, class.Class, "len(numbers)") + filterNumbersLen0 := filterEqual[int](0, schema.DataTypeInt, class.Class, "len(numbers)") + filterNumbersNotNil := filterNil(false, class.Class, "numbers") + filterNumbersNil := filterNil(true, class.Class, "numbers") + + filterIntsEq102 := filterEqual[int](102, schema.DataTypeInt, class.Class, "ints") + filterIntsEq105 := 
filterEqual[int](105, schema.DataTypeInt, class.Class, "ints") + filterIntsLen9 := filterEqual[int](9, schema.DataTypeInt, class.Class, "len(ints)") + filterIntsLen7 := filterEqual[int](7, schema.DataTypeInt, class.Class, "len(ints)") + filterIntsNotNil := filterNil(false, class.Class, "ints") + filterIntsNil := filterNil(true, class.Class, "ints") + + filterBoolsEqTrue := filterEqual[bool](true, schema.DataTypeBoolean, class.Class, "booleans") + filterBoolsEqFalse := filterEqual[bool](false, schema.DataTypeBoolean, class.Class, "booleans") + filterBoolsLen4 := filterEqual[int](4, schema.DataTypeInt, class.Class, "len(booleans)") + filterBoolsLen0 := filterEqual[int](0, schema.DataTypeInt, class.Class, "len(booleans)") + filterBoolsNotNil := filterNil(false, class.Class, "booleans") + filterBoolsNil := filterNil(true, class.Class, "booleans") + + filterDatesEq2001 := filterEqual[string]("2001-06-01T12:00:00.000000Z", schema.DataTypeDate, class.Class, "dates") + filterDatesLen2 := filterEqual[int](2, schema.DataTypeInt, class.Class, "len(dates)") + filterDatesLen0 := filterEqual[int](0, schema.DataTypeInt, class.Class, "len(dates)") + filterDatesNotNil := filterNil(false, class.Class, "dates") + filterDatesNil := filterNil(true, class.Class, "dates") + + filterUuidsEqD726 := filterEqual[string]("d726c960-aede-411c-85d3-2c77e9290a6e", schema.DataTypeText, class.Class, "uuids") + filterUuidsLen1 := filterEqual[int](1, schema.DataTypeInt, class.Class, "len(uuids)") + filterUuidsLen0 := filterEqual[int](0, schema.DataTypeInt, class.Class, "len(uuids)") + filterUuidsNotNil := filterNil(false, class.Class, "uuids") + filterUuidsNil := filterNil(true, class.Class, "uuids") + + filterTextEqDDD := filterEqual[string]("ddd", schema.DataTypeText, class.Class, "text") + filterTextLen3 := filterEqual[int](3, schema.DataTypeInt, class.Class, "len(text)") + filterTextLen0 := filterEqual[int](0, schema.DataTypeInt, class.Class, "len(text)") + filterTextNotNil := filterNil(false, 
class.Class, "text") + filterTextNil := filterNil(true, class.Class, "text") + + filterNumberEq123 := filterEqual[float64](1.23, schema.DataTypeNumber, class.Class, "number") + filterNumberNotNil := filterNil(false, class.Class, "number") + filterNumberNil := filterNil(true, class.Class, "number") + + filterIntEq201 := filterEqual[int](201, schema.DataTypeInt, class.Class, "int") + filterIntEq202 := filterEqual[int](202, schema.DataTypeInt, class.Class, "int") + filterIntNotNil := filterNil(false, class.Class, "int") + filterIntNil := filterNil(true, class.Class, "int") + + filterBoolEqFalse := filterEqual[bool](false, schema.DataTypeBoolean, class.Class, "boolean") + filterBoolEqTrue := filterEqual[bool](true, schema.DataTypeBoolean, class.Class, "boolean") + filterBoolNotNil := filterNil(false, class.Class, "boolean") + filterBoolNil := filterNil(true, class.Class, "boolean") + + filterDateEq2003 := filterEqual[string]("2003-06-01T12:00:00.000000Z", schema.DataTypeDate, class.Class, "date") + filterDateNotNil := filterNil(false, class.Class, "date") + filterDateNil := filterNil(true, class.Class, "date") + + filterUuidEq7FAB := filterEqual[string]("7fabaf01-9e10-458a-acea-cc627376c506", schema.DataTypeText, class.Class, "uuid") + filterUuidNotNil := filterNil(false, class.Class, "uuid") + filterUuidNil := filterNil(true, class.Class, "uuid") + + search := func(t *testing.T, shard ShardLike, filter *filters.LocalFilter) []*storobj.Object { + searchLimit := 10 + found, _, err := shard.ObjectSearch(ctx, searchLimit, filter, + nil, nil, nil, additional.Properties{}, props) + require.NoError(t, err) + return found + } + + verifySearchAfterAdd := func(shard ShardLike) func(t *testing.T) { + return func(t *testing.T) { + t.Run("to be found", func(t *testing.T) { + for name, filter := range map[string]*filters.LocalFilter{ + "id": filterId, + + "textsEqAAA": filterTextsEqAAA, + "textsLen3": filterTextsLen3, + "textsNotNil": filterTextsNotNil, + + "numbersLen0": 
filterNumbersLen0, + "numbersNil": filterNumbersNil, + + "intsEq102": filterIntsEq102, + "intsLen9": filterIntsLen9, + "intsNotNil": filterIntsNotNil, + + "boolsEqTrue": filterBoolsEqTrue, + "boolsEqFalse": filterBoolsEqFalse, + "boolsLen4": filterBoolsLen4, + "boolsNotNil": filterBoolsNotNil, + + "datesEq2001": filterDatesEq2001, + "datesLen2": filterDatesLen2, + "datesNotNil": filterDatesNotNil, + + "uuidsLen0": filterUuidsLen0, + "uuidsNil": filterUuidsNil, + + "textEqDDD": filterTextEqDDD, + "textLen3": filterTextLen3, + "textNotNil": filterTextNotNil, + + "numberNil": filterNumberNil, + + "intEq201": filterIntEq201, + "intNotNil": filterIntNotNil, + + "boolEqFalse": filterBoolEqFalse, + "boolNotNil": filterBoolNotNil, + + "dateEq2003": filterDateEq2003, + "dateNotNil": filterDateNotNil, + + "uuidNil": filterUuidNil, + } { + t.Run(name, func(t *testing.T) { + found := search(t, shard, filter) + require.Len(t, found, 1) + require.Equal(t, uuid_, found[0].Object.ID) + }) + } + }) + + t.Run("not to be found", func(t *testing.T) { + for name, filter := range map[string]*filters.LocalFilter{ + "textsLen0": filterTextsLen0, + "textsNil": filterTextsNil, + + "numbersEq123": filterNumbersEq123, + "numbersLen1": filterNumbersLen1, + "numbersNotNil": filterNumbersNotNil, + + "intsEq105": filterIntsEq105, + "intsLen7": filterIntsLen7, + "intsNil": filterIntsNil, + + "boolsLen0": filterBoolsLen0, + "boolsNil": filterBoolsNil, + + "datesLen0": filterDatesLen0, + "datesNil": filterDatesNil, + + "uuidsEqD726": filterUuidsEqD726, + "uuidsLen1": filterUuidsLen1, + "uuidsNotNil": filterUuidsNotNil, + + "textLen0": filterTextLen0, + "textNil": filterTextNil, + + "numberEq123": filterNumberEq123, + "numberNotNil": filterNumberNotNil, + + "intEq202": filterIntEq202, + "intNil": filterIntNil, + + "boolEqTrue": filterBoolEqTrue, + "boolNil": filterBoolNil, + + "dateNil": filterDateNil, + + "uuidEq7FAB": filterUuidEq7FAB, + "uuidNotNil": filterUuidNotNil, + } { + t.Run(name, func(t 
*testing.T) { + found := search(t, shard, filter) + require.Len(t, found, 0) + }) + } + }) + } + } + verifySearchAfterUpdate := func(shard ShardLike) func(t *testing.T) { + return func(t *testing.T) { + t.Run("to be found", func(t *testing.T) { + for name, filter := range map[string]*filters.LocalFilter{ + "id": filterId, + + "textsLen0": filterTextsLen0, + "textsNil": filterTextsNil, + + "numbersLen0": filterNumbersLen0, + "numbersNil": filterNumbersNil, + + "intsEq105": filterIntsEq105, + "intsLen7": filterIntsLen7, + "intsNotNil": filterIntsNotNil, + + "boolsEqTrue": filterBoolsEqTrue, + "boolsEqFalse": filterBoolsEqFalse, + "boolsLen4": filterBoolsLen4, + "boolsNotNil": filterBoolsNotNil, + + "datesLen0": filterDatesLen0, + "datesNil": filterDatesNil, + + "uuidsEqD726": filterUuidsEqD726, + "uuidsLen1": filterUuidsLen1, + "uuidsNotNil": filterUuidsNotNil, + + "textLen0": filterTextLen0, + "textNil": filterTextNil, + + "numberNil": filterNumberNil, + + "intEq202": filterIntEq202, + "intNotNil": filterIntNotNil, + + "boolEqTrue": filterBoolEqTrue, + "boolNotNil": filterBoolNotNil, + + "dateNil": filterDateNil, + + "uuidEq7FAB": filterUuidEq7FAB, + "uuidNotNil": filterUuidNotNil, + } { + t.Run(name, func(t *testing.T) { + found := search(t, shard, filter) + require.Len(t, found, 1) + require.Equal(t, uuid_, found[0].Object.ID) + }) + } + }) + + t.Run("not to be found", func(t *testing.T) { + for name, filter := range map[string]*filters.LocalFilter{ + "textsEqAAA": filterTextsEqAAA, + "textsLen3": filterTextsLen3, + "textsNotNil": filterTextsNotNil, + + "numbersEq123": filterNumbersEq123, + "numbersLen1": filterNumbersLen1, + "numbersNotNil": filterNumbersNotNil, + + "intsEq102": filterIntsEq102, + "intsLen9": filterIntsLen9, + "intsNil": filterIntsNil, + + "boolsLen0": filterBoolsLen0, + "boolsNil": filterBoolsNil, + + "datesEq2001": filterDatesEq2001, + "datesLen2": filterDatesLen2, + "datesNotNil": filterDatesNotNil, + + "uuidsLen0": filterUuidsLen0, + 
"uuidsNil": filterUuidsNil, + + "textEqDDD": filterTextEqDDD, + "textLen3": filterTextLen3, + "textNotNil": filterTextNotNil, + + "numberEq123": filterNumberEq123, + "numberNotNil": filterNumberNotNil, + + "intEq201": filterIntEq201, + "intNil": filterIntNil, + + "boolEqFalse": filterBoolEqFalse, + "boolNil": filterBoolNil, + + "dateEq2003": filterDateEq2003, + "dateNotNil": filterDateNotNil, + + "uuidNil": filterUuidNil, + } { + t.Run(name, func(t *testing.T) { + found := search(t, shard, filter) + require.Len(t, found, 0) + }) + } + }) + } + } + verifyVectorSearch := func(shard ShardLike, vectorToBeFound, vectorNotToBeFound []float32) func(t *testing.T) { + vectorSearchLimit := -1 // negative to limit results by distance + vectorSearchDist := float32(1) + targetVector := "" + + return func(t *testing.T) { + t.Run("to be found", func(t *testing.T) { + require.EventuallyWithT(t, func(collect *assert.CollectT) { + found, _, err := shard.ObjectVectorSearch(ctx, []models.Vector{vectorToBeFound}, []string{targetVector}, + vectorSearchDist, vectorSearchLimit, nil, nil, nil, additional.Properties{}, nil, nil) + if !assert.NoError(collect, err) { + return + } + if !assert.Len(collect, found, 1) { + return + } + assert.Equal(collect, uuid_, found[0].Object.ID) + }, 15*time.Second, 100*time.Millisecond) + }) + + t.Run("not to be found", func(t *testing.T) { + found, _, err := shard.ObjectVectorSearch(ctx, []models.Vector{vectorNotToBeFound}, []string{targetVector}, + vectorSearchDist, vectorSearchLimit, nil, nil, nil, additional.Properties{}, nil, nil) + require.NoError(t, err) + require.Len(t, found, 0) + }) + } + } + + createShard := func(t *testing.T) (ShardLike, *VectorIndexQueue) { + vectorIndexConfig := hnsw.UserConfig{Distance: common.DefaultDistanceMetric} + shard, _ := testShardWithSettings(t, ctx, class, vectorIndexConfig, true, true) + queue, ok := shard.GetVectorIndexQueue("") + require.True(t, ok) + return shard, queue + } + + t.Run("single object", func(t 
*testing.T) { + t.Run("sanity check - search after add", func(t *testing.T) { + shard, queue := createShard(t) + + t.Run("add object", func(t *testing.T) { + err := shard.PutObject(ctx, createOrigObj()) + require.NoError(t, err) + }) + + t.Run("wait for queue to be empty", func(t *testing.T) { + queue.Scheduler().Schedule(context.Background()) + time.Sleep(50 * time.Millisecond) + queue.Wait() + }) + + t.Run("verify initial docID and timestamps", func(t *testing.T) { + expectedNextDocID := uint64(1) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, origCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, origUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after add", verifySearchAfterAdd(shard)) + t.Run("verify vector search after add", verifyVectorSearch(shard, vector, altVector)) + }) + + t.Run("replace with different object, same vector", func(t *testing.T) { + shard, queue := createShard(t) + + t.Run("add object", func(t *testing.T) { + err := shard.PutObject(ctx, createOrigObj()) + require.NoError(t, err) + }) + + t.Run("put object", func(t *testing.T) { + updObj := createUpdObj() + + err := shard.PutObject(ctx, updObj) + require.NoError(t, err) + }) + + t.Run("wait for queue to be empty", func(t *testing.T) { + queue.Scheduler().Schedule(context.Background()) + time.Sleep(50 * time.Millisecond) + queue.Wait() + }) + + t.Run("verify same docID, changed create & update timestamps", func(t *testing.T) { + expectedNextDocID := uint64(1) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, updCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after put", verifySearchAfterUpdate(shard)) + t.Run("verify vector search after put", 
verifyVectorSearch(shard, vector, altVector)) + }) + + t.Run("replace with different object, different vector", func(t *testing.T) { + shard, queue := createShard(t) + + t.Run("add object", func(t *testing.T) { + err := shard.PutObject(ctx, createOrigObj()) + require.NoError(t, err) + }) + + t.Run("put object", func(t *testing.T) { + // overwrite vector in updated object + altUpdObj := createUpdObj() + altUpdObj.Vector = altVector + + err := shard.PutObject(ctx, altUpdObj) + require.NoError(t, err) + }) + + t.Run("wait for queue to be empty", func(t *testing.T) { + queue.Scheduler().Schedule(context.Background()) + time.Sleep(50 * time.Millisecond) + queue.Wait() + }) + + t.Run("verify changed docID, changed create & update timestamps", func(t *testing.T) { + expectedNextDocID := uint64(2) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, updCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after put", verifySearchAfterUpdate(shard)) + t.Run("verify vector search after put", verifyVectorSearch(shard, altVector, vector)) + }) + + t.Run("replace with different object, different geo", func(t *testing.T) { + shard, queue := createShard(t) + + t.Run("add object", func(t *testing.T) { + err := shard.PutObject(ctx, createOrigObj()) + require.NoError(t, err) + }) + + t.Run("put object", func(t *testing.T) { + // overwrite geo in updated object + altUpdObj := createUpdObj() + altUpdObj.Object.Properties.(map[string]interface{})["geo"] = &models.GeoCoordinates{ + Latitude: ptFloat32(3.3), + Longitude: ptFloat32(4.4), + } + + err := shard.PutObject(ctx, altUpdObj) + require.NoError(t, err) + }) + + t.Run("wait for queue to be empty", func(t *testing.T) { + queue.Scheduler().Schedule(context.Background()) + time.Sleep(50 * time.Millisecond) + queue.Wait() + }) + + t.Run("verify changed 
docID, changed create & update timestamps", func(t *testing.T) { + expectedNextDocID := uint64(2) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, updCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after put", verifySearchAfterUpdate(shard)) + t.Run("verify vector search after put", verifyVectorSearch(shard, vector, altVector)) + }) + + t.Run("merge with different object, same vector", func(t *testing.T) { + shard, queue := createShard(t) + + t.Run("add object", func(t *testing.T) { + err := shard.PutObject(ctx, createOrigObj()) + require.NoError(t, err) + }) + + t.Run("merge object", func(t *testing.T) { + mergeDoc := createMergeDoc() + + err := shard.MergeObject(ctx, mergeDoc) + require.NoError(t, err) + }) + + t.Run("wait for queue to be empty", func(t *testing.T) { + queue.Scheduler().Schedule(context.Background()) + time.Sleep(50 * time.Millisecond) + queue.Wait() + }) + + t.Run("verify same docID, changed update timestamp", func(t *testing.T) { + expectedNextDocID := uint64(1) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, origCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after merge", verifySearchAfterUpdate(shard)) + t.Run("verify vector search after merge", verifyVectorSearch(shard, vector, altVector)) + }) + + t.Run("merge with different object, different vector", func(t *testing.T) { + shard, queue := createShard(t) + + t.Run("add object", func(t *testing.T) { + err := shard.PutObject(ctx, createOrigObj()) + require.NoError(t, err) + }) + + t.Run("merge object", func(t *testing.T) { + // overwrite vector in merge doc + altMergeDoc := createMergeDoc() + 
altMergeDoc.Vector = altVector + + err := shard.MergeObject(ctx, altMergeDoc) + require.NoError(t, err) + }) + + t.Run("wait for queue to be empty", func(t *testing.T) { + queue.Scheduler().Schedule(context.Background()) + time.Sleep(50 * time.Millisecond) + queue.Wait() + }) + + t.Run("verify changed docID, changed update timestamp", func(t *testing.T) { + expectedNextDocID := uint64(2) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, origCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after merge", verifySearchAfterUpdate(shard)) + t.Run("verify vector search after merge", verifyVectorSearch(shard, altVector, vector)) + }) + + t.Run("merge with different object, different geo", func(t *testing.T) { + shard, _ := createShard(t) + + t.Run("add object", func(t *testing.T) { + err := shard.PutObject(ctx, createOrigObj()) + require.NoError(t, err) + }) + + t.Run("merge object", func(t *testing.T) { + // overwrite geo in merge doc + mergeDoc := createMergeDoc() + mergeDoc.PrimitiveSchema["geo"] = &models.GeoCoordinates{ + Latitude: ptFloat32(3.3), + Longitude: ptFloat32(4.4), + } + + err := shard.MergeObject(ctx, mergeDoc) + require.NoError(t, err) + }) + + t.Run("verify changed docID, changed update timestamp", func(t *testing.T) { + expectedNextDocID := uint64(2) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, origCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after merge", verifySearchAfterUpdate(shard)) + t.Run("verify vector search after merge", verifyVectorSearch(shard, vector, altVector)) + }) + + t.Run("replace with same object, same vector", func(t *testing.T) { + shard, _ := 
createShard(t) + + t.Run("add object", func(t *testing.T) { + err := shard.PutObject(ctx, createOrigObj()) + require.NoError(t, err) + }) + + t.Run("put object", func(t *testing.T) { + // overwrite timestamps in original object + updObj := createOrigObj() + updObj.Object.CreationTimeUnix = updCreateTimeUnix + updObj.Object.LastUpdateTimeUnix = updUpdateTimeUnix + + err := shard.PutObject(ctx, updObj) + require.NoError(t, err) + }) + + t.Run("verify same docID, same timestamps", func(t *testing.T) { + expectedNextDocID := uint64(1) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, origCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, origUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after put same as add", verifySearchAfterAdd(shard)) + t.Run("verify vector search after put", verifyVectorSearch(shard, vector, altVector)) + }) + + t.Run("replace with same object, different vector", func(t *testing.T) { + shard, _ := createShard(t) + + t.Run("add object", func(t *testing.T) { + err := shard.PutObject(ctx, createOrigObj()) + require.NoError(t, err) + }) + + t.Run("put object", func(t *testing.T) { + // overwrite timestamps and vector in original object + altUpdObj := createOrigObj() + altUpdObj.Object.CreationTimeUnix = updCreateTimeUnix + altUpdObj.Object.LastUpdateTimeUnix = updUpdateTimeUnix + altUpdObj.Vector = altVector + + err := shard.PutObject(ctx, altUpdObj) + require.NoError(t, err) + }) + + t.Run("verify changed docID, changed create & update timestamps", func(t *testing.T) { + expectedNextDocID := uint64(2) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, updCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after put same as 
add", verifySearchAfterAdd(shard)) + t.Run("verify vector search after put", verifyVectorSearch(shard, altVector, vector)) + }) + + t.Run("replace with same object, different geo", func(t *testing.T) { + shard, _ := createShard(t) + + t.Run("add object", func(t *testing.T) { + err := shard.PutObject(ctx, createOrigObj()) + require.NoError(t, err) + }) + + t.Run("put object", func(t *testing.T) { + // overwrite timestamps and geo in original object + updObj := createOrigObj() + updObj.Object.CreationTimeUnix = updCreateTimeUnix + updObj.Object.LastUpdateTimeUnix = updUpdateTimeUnix + updObj.Object.Properties.(map[string]interface{})["geo"] = &models.GeoCoordinates{ + Latitude: ptFloat32(3.3), + Longitude: ptFloat32(4.4), + } + + err := shard.PutObject(ctx, updObj) + require.NoError(t, err) + }) + + t.Run("verify changed docID, changed create & update timestamps", func(t *testing.T) { + expectedNextDocID := uint64(2) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, updCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after put same as add", verifySearchAfterAdd(shard)) + t.Run("verify vector search after put", verifyVectorSearch(shard, vector, altVector)) + }) + + t.Run("merge with same object, same vector", func(t *testing.T) { + shard, _ := createShard(t) + + t.Run("add object", func(t *testing.T) { + err := shard.PutObject(ctx, createOrigObj()) + require.NoError(t, err) + }) + + t.Run("merge object", func(t *testing.T) { + // same values as in original object + mergeDoc := objects.MergeDocument{ + ID: uuid_, + Class: class.Class, + PrimitiveSchema: map[string]interface{}{ + "int": float64(201), + "text": "ddd", + }, + UpdateTime: updUpdateTimeUnix, + Vector: vector, + } + + err := shard.MergeObject(ctx, mergeDoc) + require.NoError(t, err) + }) + + t.Run("verify same docID, 
same timestamps", func(t *testing.T) { + expectedNextDocID := uint64(1) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, origCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, origUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after merge same as add", verifySearchAfterAdd(shard)) + t.Run("verify vector search after merge", verifyVectorSearch(shard, vector, altVector)) + }) + + t.Run("merge with same object, different vector", func(t *testing.T) { + shard, _ := createShard(t) + + t.Run("add object", func(t *testing.T) { + err := shard.PutObject(ctx, createOrigObj()) + require.NoError(t, err) + }) + + t.Run("merge object", func(t *testing.T) { + // same props as in original object, overwrite timestamp and vector + altMergeDoc := objects.MergeDocument{ + ID: uuid_, + Class: class.Class, + PrimitiveSchema: map[string]interface{}{ + "int": float64(201), + "text": "ddd", + }, + UpdateTime: updUpdateTimeUnix, + Vector: altVector, + } + + err := shard.MergeObject(ctx, altMergeDoc) + require.NoError(t, err) + }) + + t.Run("verify changed docID, changed update timestamp", func(t *testing.T) { + expectedNextDocID := uint64(2) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, origCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after merge same as add", verifySearchAfterAdd(shard)) + t.Run("verify vector search after merge", verifyVectorSearch(shard, altVector, vector)) + }) + + t.Run("merge with same object, different geo", func(t *testing.T) { + shard, _ := createShard(t) + + t.Run("add object", func(t *testing.T) { + err := shard.PutObject(ctx, createOrigObj()) + require.NoError(t, err) + }) + + t.Run("merge object", func(t *testing.T) { + // 
overwrite geo and timestamp + mergeDoc := objects.MergeDocument{ + ID: uuid_, + Class: class.Class, + PrimitiveSchema: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptFloat32(3.3), + Longitude: ptFloat32(4.4), + }, + }, + UpdateTime: updUpdateTimeUnix, + } + + err := shard.MergeObject(ctx, mergeDoc) + require.NoError(t, err) + }) + + t.Run("verify changed docID, changed update timestamp", func(t *testing.T) { + expectedNextDocID := uint64(2) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, origCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after merge same as add", verifySearchAfterAdd(shard)) + t.Run("verify vector search after merge", verifyVectorSearch(shard, vector, altVector)) + }) + }) + + t.Run("batch", func(t *testing.T) { + runBatch := func(t *testing.T) { + t.Run("sanity check - search after add", func(t *testing.T) { + shard, queue := createShard(t) + + t.Run("add batch", func(t *testing.T) { + errs := shard.PutObjectBatch(ctx, []*storobj.Object{createOrigObj()}) + for i := range errs { + require.NoError(t, errs[i]) + } + }) + + t.Run("wait for queue to be empty", func(t *testing.T) { + queue.Scheduler().Schedule(context.Background()) + time.Sleep(50 * time.Millisecond) + queue.Wait() + }) + + t.Run("verify initial docID and timestamps", func(t *testing.T) { + expectedNextDocID := uint64(1) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, origCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, origUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after batch", verifySearchAfterAdd(shard)) + t.Run("verify vector search after batch", verifyVectorSearch(shard, vector, altVector)) + }) + + t.Run("replace with 
different object, same vector", func(t *testing.T) { + shard, queue := createShard(t) + + t.Run("add batch", func(t *testing.T) { + errs := shard.PutObjectBatch(ctx, []*storobj.Object{createOrigObj()}) + for i := range errs { + require.NoError(t, errs[i]) + } + }) + + t.Run("wait for queue to be empty", func(t *testing.T) { + queue.Scheduler().Schedule(context.Background()) + time.Sleep(50 * time.Millisecond) + queue.Wait() + }) + + t.Run("add 2nd batch", func(t *testing.T) { + updObj := createUpdObj() + + errs := shard.PutObjectBatch(ctx, []*storobj.Object{updObj}) + for i := range errs { + require.NoError(t, errs[i]) + } + }) + + t.Run("verify same docID, changed create & update timestamps", func(t *testing.T) { + expectedNextDocID := uint64(1) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, updCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after 2nd batch", verifySearchAfterUpdate(shard)) + t.Run("verify vector search after 2nd batch", verifyVectorSearch(shard, vector, altVector)) + }) + + t.Run("replace with different object, different vector", func(t *testing.T) { + shard, queue := createShard(t) + + t.Run("add batch", func(t *testing.T) { + errs := shard.PutObjectBatch(ctx, []*storobj.Object{createOrigObj()}) + for i := range errs { + require.NoError(t, errs[i]) + } + }) + + t.Run("add 2nd batch", func(t *testing.T) { + // overwrite vector in updated object + altUpdObj := createUpdObj() + altUpdObj.Vector = altVector + + errs := shard.PutObjectBatch(ctx, []*storobj.Object{altUpdObj}) + for i := range errs { + require.NoError(t, errs[i]) + } + }) + + t.Run("wait for queue to be empty", func(t *testing.T) { + queue.Scheduler().Schedule(context.Background()) + time.Sleep(50 * time.Millisecond) + queue.Wait() + }) + + t.Run("verify changed docID, changed create & update 
timestamps", func(t *testing.T) { + expectedNextDocID := uint64(2) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, updCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after 2nd batch", verifySearchAfterUpdate(shard)) + t.Run("verify vector search after 2nd batch", verifyVectorSearch(shard, altVector, vector)) + }) + + t.Run("replace with different object, different geo", func(t *testing.T) { + shard, queue := createShard(t) + + t.Run("add batch", func(t *testing.T) { + errs := shard.PutObjectBatch(ctx, []*storobj.Object{createOrigObj()}) + for i := range errs { + require.NoError(t, errs[i]) + } + }) + + t.Run("add 2nd batch", func(t *testing.T) { + // overwrite geo in updated object + altUpdObj := createUpdObj() + altUpdObj.Object.Properties.(map[string]interface{})["geo"] = &models.GeoCoordinates{ + Latitude: ptFloat32(3.3), + Longitude: ptFloat32(4.4), + } + + errs := shard.PutObjectBatch(ctx, []*storobj.Object{altUpdObj}) + for i := range errs { + require.NoError(t, errs[i]) + } + }) + + t.Run("wait for queue to be empty", func(t *testing.T) { + queue.Scheduler().Schedule(context.Background()) + time.Sleep(50 * time.Millisecond) + queue.Wait() + }) + + t.Run("verify changed docID, changed create & update timestamps", func(t *testing.T) { + expectedNextDocID := uint64(2) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, updCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after 2nd batch", verifySearchAfterUpdate(shard)) + t.Run("verify vector search after 2nd batch", verifyVectorSearch(shard, vector, altVector)) + }) + + t.Run("replace with same object, same vector", func(t 
*testing.T) { + shard, queue := createShard(t) + + t.Run("add batch", func(t *testing.T) { + errs := shard.PutObjectBatch(ctx, []*storobj.Object{createOrigObj()}) + for i := range errs { + require.NoError(t, errs[i]) + } + }) + + t.Run("add 2nd batch", func(t *testing.T) { + // overwrite timestamps in original object + updObj := createOrigObj() + updObj.Object.CreationTimeUnix = updCreateTimeUnix + updObj.Object.LastUpdateTimeUnix = updUpdateTimeUnix + + errs := shard.PutObjectBatch(ctx, []*storobj.Object{updObj}) + for i := range errs { + require.NoError(t, errs[i]) + } + }) + + t.Run("wait for queue to be empty", func(t *testing.T) { + queue.Scheduler().Schedule(context.Background()) + time.Sleep(50 * time.Millisecond) + queue.Wait() + }) + + t.Run("verify same docID, same timestamps", func(t *testing.T) { + expectedNextDocID := uint64(1) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, origCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, origUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after 2nd batch same as 1st", verifySearchAfterAdd(shard)) + t.Run("verify vector search after 2nd batch", verifyVectorSearch(shard, vector, altVector)) + }) + + t.Run("replace with same object, different vector", func(t *testing.T) { + shard, queue := createShard(t) + + t.Run("add batch", func(t *testing.T) { + errs := shard.PutObjectBatch(ctx, []*storobj.Object{createOrigObj()}) + for i := range errs { + require.NoError(t, errs[i]) + } + }) + + t.Run("add 2nd batch", func(t *testing.T) { + // overwrite timestamps and vector in original object + altUpdObj := createOrigObj() + altUpdObj.Object.CreationTimeUnix = updCreateTimeUnix + altUpdObj.Object.LastUpdateTimeUnix = updUpdateTimeUnix + altUpdObj.Vector = altVector + + errs := shard.PutObjectBatch(ctx, []*storobj.Object{altUpdObj}) + for i := range errs { + require.NoError(t, errs[i]) + 
} + }) + + t.Run("wait for queue to be empty", func(t *testing.T) { + queue.Scheduler().Schedule(context.Background()) + time.Sleep(50 * time.Millisecond) + queue.Wait() + }) + + t.Run("verify changed docID, changed create & update timestamps", func(t *testing.T) { + expectedNextDocID := uint64(2) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, updCreateTimeUnix, found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after 2nd batch same as 1st", verifySearchAfterAdd(shard)) + t.Run("verify vector search after 2nd batch", verifyVectorSearch(shard, altVector, vector)) + }) + + t.Run("replace with same object, different geo", func(t *testing.T) { + shard, queue := createShard(t) + + t.Run("add batch", func(t *testing.T) { + errs := shard.PutObjectBatch(ctx, []*storobj.Object{createOrigObj()}) + for i := range errs { + require.NoError(t, errs[i]) + } + }) + + t.Run("add 2nd batch", func(t *testing.T) { + // overwrite geo and timestamp + updObj := createOrigObj() + updObj.Object.CreationTimeUnix = updCreateTimeUnix + updObj.Object.LastUpdateTimeUnix = updUpdateTimeUnix + updObj.Object.Properties.(map[string]interface{})["geo"] = &models.GeoCoordinates{ + Latitude: ptFloat32(3.3), + Longitude: ptFloat32(4.4), + } + + errs := shard.PutObjectBatch(ctx, []*storobj.Object{updObj}) + for i := range errs { + require.NoError(t, errs[i]) + } + }) + + t.Run("wait for queue to be empty", func(t *testing.T) { + queue.Scheduler().Schedule(context.Background()) + time.Sleep(50 * time.Millisecond) + queue.Wait() + }) + + t.Run("verify changed docID, changed create & update timestamps", func(t *testing.T) { + expectedNextDocID := uint64(2) + require.Equal(t, expectedNextDocID, shard.Counter().Get()) + + found := search(t, shard, filterId) + require.Len(t, found, 1) + require.Equal(t, updCreateTimeUnix, 
found[0].CreationTimeUnix()) + require.Equal(t, updUpdateTimeUnix, found[0].LastUpdateTimeUnix()) + }) + + t.Run("verify search after 2nd batch same as 1st", verifySearchAfterAdd(shard)) + t.Run("verify vector search after 2nd batch", verifyVectorSearch(shard, vector, altVector)) + }) + } + + t.Run("sync", func(t *testing.T) { + currentIndexing := os.Getenv("ASYNC_INDEXING") + t.Setenv("ASYNC_INDEXING", "") + defer t.Setenv("ASYNC_INDEXING", currentIndexing) + + runBatch(t) + }) + + t.Run("async", func(t *testing.T) { + currentIndexing := os.Getenv("ASYNC_INDEXING") + currentStaleTimeout := os.Getenv("ASYNC_INDEXING_STALE_TIMEOUT") + currentSchedulerInterval := os.Getenv("QUEUE_SCHEDULER_INTERVAL") + t.Setenv("ASYNC_INDEXING", "true") + t.Setenv("ASYNC_INDEXING_STALE_TIMEOUT", "1s") + defer t.Setenv("ASYNC_INDEXING", currentIndexing) + defer t.Setenv("ASYNC_INDEXING_STALE_TIMEOUT", currentStaleTimeout) + defer t.Setenv("QUEUE_SCHEDULER_INTERVAL", currentSchedulerInterval) + + runBatch(t) + }) + }) +} + +func filterEqual[T any](value T, dataType schema.DataType, className, propName string) *filters.LocalFilter { + return &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorEqual, + Value: &filters.Value{ + Value: value, + Type: dataType, + }, + On: &filters.Path{ + Class: schema.ClassName(className), + Property: schema.PropertyName(propName), + }, + }, + } +} + +func filterNil(value bool, className, propName string) *filters.LocalFilter { + return &filters.LocalFilter{ + Root: &filters.Clause{ + Operator: filters.OperatorIsNull, + Value: &filters.Value{ + Value: value, + Type: schema.DataTypeBoolean, + }, + On: &filters.Path{ + Class: schema.ClassName(className), + Property: schema.PropertyName(propName), + }, + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_skip_vector_reindex_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_skip_vector_reindex_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..dcbd9ba3c29676e6bee6c0e8a6730e4227411627 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_skip_vector_reindex_test.go @@ -0,0 +1,986 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "encoding/binary" + "fmt" + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/models" +) + +func TestGeoPropsEqual(t *testing.T) { + type testCase struct { + prevProps map[string]interface{} + nextProps map[string]interface{} + expectedEqual bool + } + + ptrFloat32 := func(f float32) *float32 { + return &f + } + + testCases := []testCase{ + { + prevProps: map[string]interface{}{}, + nextProps: map[string]interface{}{}, + expectedEqual: true, + }, + { + prevProps: map[string]interface{}{ + "notGeo": "abc", + }, + nextProps: map[string]interface{}{ + "notGeo": "def", + }, + expectedEqual: true, + }, + { + prevProps: map[string]interface{}{ + "geo": nil, + }, + nextProps: map[string]interface{}{ + "geo": nil, + }, + expectedEqual: true, + }, + { + prevProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + }, + nextProps: map[string]interface{}{ + "geo": nil, + }, + expectedEqual: false, + }, + { + prevProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + }, + nextProps: map[string]interface{}{}, + expectedEqual: false, + }, + { + prevProps: map[string]interface{}{ + "geo": nil, + }, + nextProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + }, + 
expectedEqual: false, + }, + { + prevProps: map[string]interface{}{}, + nextProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + }, + expectedEqual: false, + }, + { + prevProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + }, + nextProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(-1.23), + Longitude: ptrFloat32(2.34), + }, + }, + expectedEqual: false, + }, + { + prevProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + }, + nextProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(-2.34), + }, + }, + expectedEqual: false, + }, + { + prevProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + }, + nextProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(-1.23), + Longitude: ptrFloat32(-2.34), + }, + }, + expectedEqual: false, + }, + { + prevProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + }, + nextProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + }, + expectedEqual: true, + }, + { + prevProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + }, + nextProps: map[string]interface{}{ + "geo2": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + }, + expectedEqual: false, + }, + { + prevProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + "notGeo": "string", + }, + nextProps: 
map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + "notGeo": "otherString", + }, + expectedEqual: true, + }, + { + prevProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + "geo2": &models.GeoCoordinates{ + Latitude: ptrFloat32(4.56), + Longitude: ptrFloat32(5.67), + }, + }, + nextProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + "geo2": &models.GeoCoordinates{ + Latitude: ptrFloat32(4.56), + Longitude: ptrFloat32(5.67), + }, + }, + expectedEqual: true, + }, + { + prevProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + "geo2": &models.GeoCoordinates{ + Latitude: ptrFloat32(4.56), + Longitude: ptrFloat32(5.67), + }, + }, + nextProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + }, + expectedEqual: false, + }, + { + prevProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + "geo2": &models.GeoCoordinates{ + Latitude: ptrFloat32(4.56), + Longitude: ptrFloat32(5.67), + }, + }, + nextProps: map[string]interface{}{ + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(1.23), + Longitude: ptrFloat32(2.34), + }, + "geo2": &models.GeoCoordinates{ + Latitude: ptrFloat32(4.56), + Longitude: ptrFloat32(-5.67), + }, + }, + expectedEqual: false, + }, + { + prevProps: map[string]interface{}{ + "geoLike": map[string]interface{}{ + "Latitude": ptrFloat32(1.23), + "Longitude": ptrFloat32(2.34), + }, + }, + nextProps: map[string]interface{}{ + "geoLike": map[string]interface{}{ + "Latitude": ptrFloat32(1.23), + "Longitude": ptrFloat32(2.34), + }, + }, + expectedEqual: true, + }, + } + + for i, tc := range testCases { + 
t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + eq := geoPropsEqual(tc.prevProps, tc.nextProps) + + if tc.expectedEqual { + assert.True(t, eq) + } else { + assert.False(t, eq) + } + }) + } +} + +func TestPropsEqual(t *testing.T) { + type testCase struct { + prevProps map[string]interface{} + nextProps map[string]interface{} + expectedEqual bool + } + + _uuid := func(i int) uuid.UUID { + b := [16]byte{} + binary.BigEndian.PutUint64(b[:8], 0) + binary.BigEndian.PutUint64(b[8:], 1234567890+uint64(i)) + return uuid.UUID(b) + } + _uuidAsText := func(i int) string { + u, _ := _uuid(i).MarshalText() + return string(u) + } + _date := func(i int) time.Time { + return time.Unix(int64(1704063600+i), 0) + } + _dateAsText := func(i int) string { + d, _ := _date(i).MarshalText() + return string(d) + } + _text := func(i int) string { + return fmt.Sprintf("text%d", i) + } + ptrFloat32 := func(f float32) *float32 { + return &f + } + + createPrevProps := func(i int) map[string]interface{} { + f := float64(i) + return map[string]interface{}{ + "int": f, + "number": f + 0.5, + "text": _text(i), + "boolean": i%2 == 0, + "uuid": _uuidAsText(i), + "date": _dateAsText(i), + "ints": []float64{f + 1, f + 2, f + 3}, + "numbers": []float64{f + 1.5, f + 2.5, f + 3.5}, + "texts": []string{_text(i + 1), _text(i + 2), _text(i + 3)}, + "booleans": []bool{i%2 != 0, i%2 == 0}, + "uuids": []string{_uuidAsText(i + 1), _uuidAsText(i + 2), _uuidAsText(i + 3)}, + "dates": []string{_dateAsText(i + 1), _dateAsText(i + 2), _dateAsText(i + 3)}, + "phone": &models.PhoneNumber{ + DefaultCountry: "pl", + Input: fmt.Sprintf("%d", 100_000_000+i), + }, + "geo": &models.GeoCoordinates{ + Latitude: ptrFloat32(45.67), + Longitude: ptrFloat32(-12.34), + }, + "object": map[string]interface{}{ + "n_int": f + 10, + "n_number": f + 10.5, + "n_text": _text(i + 10), + "n_boolean": i%2 == 0, + "n_uuid": _uuidAsText(i + 10), + "n_date": _dateAsText(i + 10), + "n_object": map[string]interface{}{ + "nn_int": f + 20, + }, + 
}, + "objects": []interface{}{ + map[string]interface{}{ + "n_ints": []float64{f + 11, f + 12, f + 13}, + "n_numbers": []float64{f + 11.5, f + 12.5, f + 13.5}, + "n_texts": []string{_text(i + 11), _text(i + 12), _text(i + 13)}, + "n_booleans": []bool{i%2 != 0, i%2 == 0}, + "n_uuids": []string{_uuidAsText(i + 11), _uuidAsText(i + 12), _uuidAsText(i + 13)}, + "n_dates": []string{_dateAsText(i + 11), _dateAsText(i + 12), _dateAsText(i + 13)}, + "n_objects": []interface{}{ + map[string]interface{}{ + "nn_ints": []float64{f + 21, f + 22, f + 23}, + }, + }, + }, + }, + } + } + createNextProps := func(i int) map[string]interface{} { + props := createPrevProps(i) + props["uuid"] = _uuid(i) + props["date"] = _date(i) + props["uuids"] = []uuid.UUID{_uuid(i + 1), _uuid(i + 2), _uuid(i + 3)} + props["dates"] = []time.Time{_date(i + 1), _date(i + 2), _date(i + 3)} + + obj := props["object"].(map[string]interface{}) + obj["n_uuid"] = _uuid(i + 10) + obj["n_date"] = _date(i + 10) + + objs0 := props["objects"].([]interface{})[0].(map[string]interface{}) + objs0["n_uuids"] = []uuid.UUID{_uuid(i + 11), _uuid(i + 12), _uuid(i + 13)} + objs0["n_dates"] = []time.Time{_date(i + 11), _date(i + 12), _date(i + 13)} + + return props + } + + prevProps := createPrevProps(1) + nextProps := createNextProps(1) + testCases := []testCase{ + { + prevProps: nil, + nextProps: nil, + expectedEqual: true, + }, + { + prevProps: prevProps, + nextProps: nil, + expectedEqual: false, + }, + { + prevProps: nil, + nextProps: nextProps, + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: nextProps, + expectedEqual: true, + }, + { + prevProps: prevProps, + nextProps: createNextProps(2), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + props["int"] = float64(1000) + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := 
createNextProps(1) + props["number"] = float64(1000.5) + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + props["text"] = _text(1000) + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + props["boolean"] = true + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + props["uuid"] = _uuid(1000) + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + props["date"] = _date(1000) + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + props["ints"] = []float64{1000, 1001} + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + props["numbers"] = []float64{1000.5, 1001.5} + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + props["texts"] = []string{_text(1000), _text(1001)} + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + props["booleans"] = []bool{false, true} + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + props["uuids"] = []uuid.UUID{_uuid(1000), _uuid(1001)} + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + props["dates"] = []time.Time{_date(1000), 
_date(1001)} + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + props["phone"] = &models.PhoneNumber{ + DefaultCountry: "pl", + Input: fmt.Sprintf("%d", 123_456_789), + } + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + props["geo"] = &models.GeoCoordinates{ + Latitude: ptrFloat32(45.67), + Longitude: ptrFloat32(12.34), + } + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + obj := props["object"].(map[string]interface{}) + obj["n_int"] = float64(1000) + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + obj := props["object"].(map[string]interface{}) + obj["n_number"] = float64(1000.5) + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + obj := props["object"].(map[string]interface{}) + obj["n_text"] = _text(1000) + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + obj := props["object"].(map[string]interface{}) + obj["n_boolean"] = true + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + obj := props["object"].(map[string]interface{}) + obj["n_uuid"] = _uuid(1000) + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + obj := props["object"].(map[string]interface{}) + obj["n_date"] = _date(1000) + return props + }(), + expectedEqual: false, + }, + { 
+ prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + obj := props["object"].(map[string]interface{}) + nobj := obj["n_object"].(map[string]interface{}) + nobj["nn_int"] = float64(1000) + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + objs0 := props["objects"].([]interface{})[0].(map[string]interface{}) + objs0["n_ints"] = []float64{1000, 1001} + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + objs0 := props["objects"].([]interface{})[0].(map[string]interface{}) + objs0["n_numbers"] = []float64{1000.5, 1001.5} + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + objs0 := props["objects"].([]interface{})[0].(map[string]interface{}) + objs0["n_texts"] = []string{_text(1000), _text(1001)} + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + objs0 := props["objects"].([]interface{})[0].(map[string]interface{}) + objs0["n_booleans"] = []bool{false, true} + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + objs0 := props["objects"].([]interface{})[0].(map[string]interface{}) + objs0["n_uuids"] = []uuid.UUID{_uuid(1000), _uuid(1001)} + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() map[string]interface{} { + props := createNextProps(1) + objs0 := props["objects"].([]interface{})[0].(map[string]interface{}) + objs0["n_dates"] = []time.Time{_date(1000), _date(1001)} + return props + }(), + expectedEqual: false, + }, + { + prevProps: prevProps, + nextProps: func() 
map[string]interface{} { + props := createNextProps(1) + objs0 := props["objects"].([]interface{})[0].(map[string]interface{}) + nobjs0 := objs0["n_objects"].([]interface{})[0].(map[string]interface{}) + nobjs0["nn_ints"] = []float64{1000, 1001} + return props + }(), + expectedEqual: false, + }, + } + + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + eq := propsEqual(tc.prevProps, tc.nextProps) + + if tc.expectedEqual { + assert.True(t, eq) + } else { + assert.False(t, eq) + } + }) + } +} + +func TestTargetVectorsEqual(t *testing.T) { + vec1 := []float32{1, 2, 3} + vec2 := []float32{2, 3, 4} + vec3 := []float32{3, 4, 5} + vec4 := []float32{4, 5, 6} + + type testCase struct { + prevVecs map[string][]float32 + nextVecs map[string][]float32 + expectedEqual bool + } + + testCases := []testCase{ + { + prevVecs: nil, + nextVecs: nil, + expectedEqual: true, + }, + { + prevVecs: map[string][]float32{}, + nextVecs: nil, + expectedEqual: true, + }, + { + prevVecs: nil, + nextVecs: map[string][]float32{}, + expectedEqual: true, + }, + { + prevVecs: map[string][]float32{}, + nextVecs: map[string][]float32{}, + expectedEqual: true, + }, + { + prevVecs: map[string][]float32{"vec": vec1}, + nextVecs: nil, + expectedEqual: false, + }, + { + prevVecs: nil, + nextVecs: map[string][]float32{"vec": vec1}, + expectedEqual: false, + }, + { + prevVecs: map[string][]float32{"vec": vec1}, + nextVecs: map[string][]float32{}, + expectedEqual: false, + }, + { + prevVecs: map[string][]float32{}, + nextVecs: map[string][]float32{"vec": vec1}, + expectedEqual: false, + }, + { + prevVecs: map[string][]float32{"vec": nil}, + nextVecs: nil, + expectedEqual: true, + }, + { + prevVecs: nil, + nextVecs: map[string][]float32{"vec": nil}, + expectedEqual: true, + }, + { + prevVecs: map[string][]float32{"vec": nil}, + nextVecs: map[string][]float32{}, + expectedEqual: true, + }, + { + prevVecs: map[string][]float32{}, + nextVecs: map[string][]float32{"vec": nil}, + 
expectedEqual: true, + }, + { + prevVecs: map[string][]float32{"vec": vec1}, + nextVecs: map[string][]float32{"vec": nil}, + expectedEqual: false, + }, + { + prevVecs: map[string][]float32{"vec": nil}, + nextVecs: map[string][]float32{"vec": vec1}, + expectedEqual: false, + }, + { + prevVecs: map[string][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3}, + nextVecs: map[string][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3}, + expectedEqual: true, + }, + { + prevVecs: map[string][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3}, + nextVecs: map[string][]float32{"vec1": vec1, "vec2": vec2, "vec4": vec4}, + expectedEqual: false, + }, + { + prevVecs: map[string][]float32{"vec": vec1}, + nextVecs: map[string][]float32{"vec": vec2}, + expectedEqual: false, + }, + { + prevVecs: map[string][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3}, + nextVecs: map[string][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3, "vec4": vec4}, + expectedEqual: false, + }, + { + prevVecs: map[string][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3, "vec4": vec4}, + nextVecs: map[string][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3}, + expectedEqual: false, + }, + } + + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + eq := targetVectorsEqual(tc.prevVecs, tc.nextVecs) + + if tc.expectedEqual { + assert.True(t, eq) + } else { + assert.False(t, eq) + } + }) + } +} + +func TestTargetMultiVectorsEqual(t *testing.T) { + vec1 := [][]float32{{1, 2, 3}} + vec2 := [][]float32{{2, 3, 4}} + vec3 := [][]float32{{3, 4, 5}} + vec4 := [][]float32{{4, 5, 6}} + complexVec1 := [][]float32{{1, 2, 3}, {1, 2, 3}} + complexVec2 := [][]float32{{2, 3, 4}, {2, 3, 4}} + complexVec3 := [][]float32{{3, 4, 5}, {33, 44, 55}, {333, 444, 555}} + complexVec4 := [][]float32{{4, 5, 6}, {44, 5, 6}, {444, 5, 6}, {444, 5555, 6}, {7, 8, 9}} + + type testCase struct { + prevVecs map[string][][]float32 + nextVecs map[string][][]float32 + expectedEqual bool + } + + testCases := 
[]testCase{ + { + prevVecs: nil, + nextVecs: nil, + expectedEqual: true, + }, + { + prevVecs: map[string][][]float32{}, + nextVecs: nil, + expectedEqual: true, + }, + { + prevVecs: nil, + nextVecs: map[string][][]float32{}, + expectedEqual: true, + }, + { + prevVecs: map[string][][]float32{}, + nextVecs: map[string][][]float32{}, + expectedEqual: true, + }, + { + prevVecs: map[string][][]float32{"vec": vec1}, + nextVecs: nil, + expectedEqual: false, + }, + { + prevVecs: map[string][][]float32{"vec": complexVec4}, + nextVecs: nil, + expectedEqual: false, + }, + { + prevVecs: nil, + nextVecs: map[string][][]float32{"vec": vec1}, + expectedEqual: false, + }, + { + prevVecs: map[string][][]float32{"vec": vec1}, + nextVecs: map[string][][]float32{}, + expectedEqual: false, + }, + { + prevVecs: map[string][][]float32{"vec": complexVec4}, + nextVecs: map[string][][]float32{}, + expectedEqual: false, + }, + { + prevVecs: map[string][][]float32{}, + nextVecs: map[string][][]float32{"vec": vec1}, + expectedEqual: false, + }, + { + prevVecs: map[string][][]float32{"vec": nil}, + nextVecs: nil, + expectedEqual: true, + }, + { + prevVecs: nil, + nextVecs: map[string][][]float32{"vec": nil}, + expectedEqual: true, + }, + { + prevVecs: map[string][][]float32{"vec": nil}, + nextVecs: map[string][][]float32{}, + expectedEqual: true, + }, + { + prevVecs: map[string][][]float32{}, + nextVecs: map[string][][]float32{"vec": nil}, + expectedEqual: true, + }, + { + prevVecs: map[string][][]float32{"vec": vec1}, + nextVecs: map[string][][]float32{"vec": nil}, + expectedEqual: false, + }, + { + prevVecs: map[string][][]float32{"vec": nil}, + nextVecs: map[string][][]float32{"vec": vec1}, + expectedEqual: false, + }, + { + prevVecs: map[string][][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3}, + nextVecs: map[string][][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3}, + expectedEqual: true, + }, + { + prevVecs: map[string][][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3}, + 
nextVecs: map[string][][]float32{"vec1": vec1, "vec2": vec2, "vec4": vec4}, + expectedEqual: false, + }, + { + prevVecs: map[string][][]float32{"vec": vec1}, + nextVecs: map[string][][]float32{"vec": vec2}, + expectedEqual: false, + }, + { + prevVecs: map[string][][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3}, + nextVecs: map[string][][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3, "vec4": vec4}, + expectedEqual: false, + }, + { + prevVecs: map[string][][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3, "vec4": vec4}, + nextVecs: map[string][][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3}, + expectedEqual: false, + }, + { + prevVecs: map[string][][]float32{"vec": vec1}, + nextVecs: map[string][][]float32{"vec": complexVec4}, + expectedEqual: false, + }, + { + prevVecs: map[string][][]float32{"vec": complexVec1}, + nextVecs: map[string][][]float32{"vec": complexVec4}, + expectedEqual: false, + }, + { + prevVecs: map[string][][]float32{"vec": complexVec4}, + nextVecs: map[string][][]float32{"vec": complexVec4}, + expectedEqual: true, + }, + { + prevVecs: map[string][][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3, "vec4": complexVec1, "vec5": complexVec2, "vec6": complexVec3, "vec7": complexVec4}, + nextVecs: map[string][][]float32{"vec1": vec1, "vec2": vec2, "vec3": vec3, "vec4": complexVec1, "vec5": complexVec2, "vec6": complexVec3, "vec7": complexVec4}, + expectedEqual: true, + }, + } + + for i, tc := range testCases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + eq := targetMultiVectorsEqual(tc.prevVecs, tc.nextVecs) + + if tc.expectedEqual { + assert.True(t, eq) + } else { + assert.False(t, eq) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_status.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_status.go new file mode 100644 index 0000000000000000000000000000000000000000..71f061a30a43aebb3685fc6cdf1ccbea91977473 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_status.go @@ -0,0 +1,105 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/entities/storagestate" +) + +type ShardStatus struct { + Status storagestate.Status + Reason string +} + +func (s *Shard) GetStatus() storagestate.Status { + s.statusLock.Lock() + defer s.statusLock.Unlock() + + if s.status.Status != storagestate.StatusReady && s.status.Status != storagestate.StatusIndexing { + return s.status.Status + } + + if !s.hasAnyVectorIndex() { + return s.status.Status + } + + status := storagestate.StatusReady + _ = s.ForEachVectorQueue(func(_ string, queue *VectorIndexQueue) error { + if queue.Size() > 0 { + status = storagestate.StatusIndexing + } + return nil + }) + s.status.Status = status + return status +} + +// isReadOnly returns an error if shard is readOnly and nil otherwise +func (s *Shard) isReadOnly() error { + s.statusLock.Lock() + defer s.statusLock.Unlock() + + if s.status.Status == storagestate.StatusReadOnly { + return storagestate.ErrStatusReadOnlyWithReason(s.status.Reason) + } + return nil +} + +func (s *Shard) SetStatusReadonly(reason string) error { + return s.UpdateStatus(storagestate.StatusReadOnly.String(), reason) +} + +func (s *Shard) UpdateStatus(in, reason string) error { + s.statusLock.Lock() + defer s.statusLock.Unlock() + + return s.updateStatusUnlocked(in, reason) +} + +// updateStatusUnlocked updates the status without locking the statusLock. +// Warning: Use UpdateStatus instead. 
+func (s *Shard) updateStatusUnlocked(in, reason string) error { + targetStatus, err := storagestate.ValidateStatus(strings.ToUpper(in)) + if err != nil { + return errors.Wrap(err, in) + } + oldStatus := s.status.Status + s.status.Status = targetStatus + s.status.Reason = reason + + logger := s.index.logger.WithFields(logrus.Fields{ + "action": "update_shard_status", + "class": s.index.Config.ClassName, + "shard": s.name, + "status": targetStatus.String(), + "prev": oldStatus.String(), + "reason": reason, + }) + if err = s.store.UpdateBucketsStatus(targetStatus); err != nil { + logger.WithError(err).Error("shard status change failed") + return err + } + + s.index.metrics.UpdateShardStatus(oldStatus.String(), targetStatus.String()) + + lvl := logrus.DebugLevel + if targetStatus == storagestate.StatusReadOnly { + lvl = logrus.WarnLevel + } + logger.Log(lvl, "shard status changed") + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_test.go new file mode 100644 index 0000000000000000000000000000000000000000..51889373c0c3229d7def530f87fc689223cf5fe1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_test.go @@ -0,0 +1,818 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "crypto/rand" + "encoding/binary" + "encoding/json" + "fmt" + "os" + "path" + "sync" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + hnswindex "github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" + "github.com/weaviate/weaviate/entities/storagestate" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/entities/vectorindex/dynamic" + "github.com/weaviate/weaviate/entities/vectorindex/flat" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func TestShard_UpdateStatus(t *testing.T) { + ctx := testCtx() + className := "TestClass" + shd, idx := testShard(t, ctx, className) + + amount := 10 + + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(shd.Index().Config.RootPath) + + t.Run("insert data into shard", func(t *testing.T) { + for i := 0; i < amount; i++ { + obj := testObject(className) + + err := shd.PutObject(ctx, obj) + require.Nil(t, err) + } + + objs, err := shd.ObjectList(ctx, amount, nil, nil, additional.Properties{}, shd.Index().Config.ClassName) + require.Nil(t, err) + require.Equal(t, amount, len(objs)) + }) + + t.Run("mark shard readonly and fail to insert", func(t *testing.T) { + err := shd.SetStatusReadonly("testing") + require.Nil(t, err) + + err = shd.PutObject(ctx, testObject(className)) + require.Contains(t, err.Error(), storagestate.ErrStatusReadOnly.Error()) + require.Contains(t, err.Error(), "testing") + }) + + t.Run("mark shard ready and 
insert successfully", func(t *testing.T) { + err := shd.UpdateStatus(storagestate.StatusReady.String(), "test ready") + require.Nil(t, err) + + err = shd.PutObject(ctx, testObject(className)) + require.Nil(t, err) + }) + + require.Nil(t, idx.drop()) + require.Nil(t, os.RemoveAll(idx.Config.RootPath)) +} + +func TestShard_ReadOnly_HaltCompaction(t *testing.T) { + amount := 10000 + sizePerValue := 8 + bucketName := "testbucket" + + keys := make([][]byte, amount) + values := make([][]byte, amount) + + shd, idx := testShard(t, context.Background(), "TestClass") + + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(shd.Index().Config.RootPath) + + err := shd.Store().CreateOrLoadBucket(context.Background(), bucketName, + lsmkv.WithMemtableThreshold(1024)) + require.Nil(t, err) + + bucket := shd.Store().Bucket(bucketName) + require.NotNil(t, bucket) + dirName := path.Join(shd.Index().path(), shd.Name(), "lsm", bucketName) + + t.Run("generate random data", func(t *testing.T) { + for i := range keys { + n, err := json.Marshal(i) + require.Nil(t, err) + + keys[i] = n + values[i] = make([]byte, sizePerValue) + rand.Read(values[i]) + } + }) + + t.Run("insert data into bucket", func(t *testing.T) { + for i := range keys { + err := bucket.Put(keys[i], values[i]) + assert.Nil(t, err) + time.Sleep(time.Microsecond) + } + + t.Logf("insertion complete!") + }) + + t.Run("halt compaction with readonly status", func(t *testing.T) { + err := shd.UpdateStatus(storagestate.StatusReadOnly.String(), "test readonly") + require.Nil(t, err) + + // give the status time to propagate + // before grabbing the baseline below + time.Sleep(time.Second) + + // once shard status is set to readonly, + // the number of segment files should + // not change + entries, err := os.ReadDir(dirName) + require.Nil(t, err) + numSegments := len(entries) + + // if the number of segments remain the + // same for 30 seconds, we can be + // reasonably sure that the 
compaction + // process was halted + for i := 0; i < 30; i++ { + entries, err := os.ReadDir(dirName) + require.Nil(t, err) + + require.Equal(t, numSegments, len(entries)) + t.Logf("iteration %d, sleeping", i) + time.Sleep(time.Second) + } + }) + + t.Run("update shard status to ready", func(t *testing.T) { + err := shd.UpdateStatus(storagestate.StatusReady.String(), "test ready") + require.Nil(t, err) + + time.Sleep(time.Second) + }) + + require.Nil(t, idx.drop()) +} + +// tests adding multiple larger batches in parallel using different settings of the goroutine factor. +// In all cases all objects should be added +func TestShard_ParallelBatches(t *testing.T) { + r := getRandomSeed() + batches := make([][]*storobj.Object, 4) + for i := range batches { + batches[i] = createRandomObjects(r, "TestClass", 1000, 4) + } + totalObjects := 1000 * len(batches) + ctx := testCtx() + shd, idx := testShard(t, context.Background(), "TestClass") + + // add batches in parallel + wg := sync.WaitGroup{} + wg.Add(len(batches)) + for _, batch := range batches { + go func(localBatch []*storobj.Object) { + shd.PutObjectBatch(ctx, localBatch) + wg.Done() + }(batch) + } + wg.Wait() + + require.Equal(t, totalObjects, int(shd.Counter().Get())) + require.Nil(t, idx.drop()) +} + +func TestShard_InvalidVectorBatches(t *testing.T) { + ctx := testCtx() + + class := &models.Class{Class: "TestClass"} + + shd, idx := testShardWithSettings(t, ctx, class, hnsw.NewDefaultUserConfig(), false, false) + + testShard(t, context.Background(), class.Class) + + r := getRandomSeed() + + batchSize := 1000 + + validBatch := createRandomObjects(r, class.Class, batchSize, 4) + + shd.PutObjectBatch(ctx, validBatch) + require.Equal(t, batchSize, int(shd.Counter().Get())) + + invalidBatch := createRandomObjects(r, class.Class, batchSize, 5) + + errs := shd.PutObjectBatch(ctx, invalidBatch) + require.Len(t, errs, batchSize) + for _, err := range errs { + require.ErrorContains(t, err, "new node has a vector with length 
5. Existing nodes have vectors with length 4") + } + require.Equal(t, batchSize, int(shd.Counter().Get())) + + require.Nil(t, idx.drop()) +} + +func TestShard_DebugResetVectorIndex(t *testing.T) { + t.Setenv("ASYNC_INDEXING", "true") + t.Setenv("ASYNC_INDEXING_STALE_TIMEOUT", "200ms") + + ctx := testCtx() + className := "TestClass" + shd, idx := testShardWithSettings(t, ctx, &models.Class{Class: className}, hnsw.UserConfig{}, false, true /* withCheckpoints */) + + amount := 1500 + + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(shd.Index().Config.RootPath) + + var objs []*storobj.Object + for i := 0; i < amount; i++ { + obj := testObject(className) + objs = append(objs, obj) + } + + errs := shd.PutObjectBatch(ctx, objs) + for _, err := range errs { + require.Nil(t, err) + } + + // wait for the first batch to be indexed + oldIdx, q := getVectorIndexAndQueue(t, shd, "") + for i := 0; i < 10; i++ { + time.Sleep(500 * time.Millisecond) + if q.Size() <= 500 { + break + } + } + + err := shd.DebugResetVectorIndex(ctx, "") + require.Nil(t, err) + + newIdx, q := getVectorIndexAndQueue(t, shd, "") + + // the new index should be different from the old one. 
+ // pointer comparison is enough here + require.NotEqual(t, oldIdx, newIdx) + + // queue should be empty after reset + require.EqualValues(t, 0, q.Size()) + + // make sure the new index does not contain any of the objects + for _, obj := range objs { + if newIdx.ContainsDoc(obj.DocID) { + t.Fatalf("node %d should not be in the vector index", obj.DocID) + } + } + + require.Nil(t, idx.drop()) + require.Nil(t, os.RemoveAll(idx.Config.RootPath)) +} + +func TestShard_DebugResetVectorIndex_WithTargetVectors(t *testing.T) { + t.Setenv("ASYNC_INDEXING", "true") + t.Setenv("ASYNC_INDEXING_STALE_TIMEOUT", "200ms") + + ctx := testCtx() + className := "TestClass" + shd, idx := testShardWithSettings( + t, + ctx, + &models.Class{Class: className}, + hnsw.UserConfig{}, + false, + true, + func(i *Index) { + i.vectorIndexUserConfigs = make(map[string]schemaConfig.VectorIndexConfig) + i.vectorIndexUserConfigs["foo"] = hnsw.UserConfig{} + }, + ) + + amount := 1500 + + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(shd.Index().Config.RootPath) + + var objs []*storobj.Object + for i := 0; i < amount; i++ { + obj := testObject(className) + obj.Vectors = map[string][]float32{ + "foo": {1, 2, 3}, + } + objs = append(objs, obj) + } + + errs := shd.PutObjectBatch(ctx, objs) + for _, err := range errs { + require.Nil(t, err) + } + + oldIdx, q := getVectorIndexAndQueue(t, shd, "foo") + + // wait for the first batch to be indexed + for i := 0; i < 10; i++ { + time.Sleep(500 * time.Millisecond) + if q.Size() <= 500 { + break + } + } + + err := shd.DebugResetVectorIndex(ctx, "foo") + require.Nil(t, err) + + newIdx, q := getVectorIndexAndQueue(t, shd, "foo") + + // the new index should be different from the old one. 
+ // pointer comparison is enough here + require.NotEqual(t, oldIdx, newIdx) + + // queue should be empty after reset + require.EqualValues(t, 0, q.Size()) + + // make sure the new index does not contain any of the objects + for _, obj := range objs { + if newIdx.ContainsDoc(obj.DocID) { + t.Fatalf("node %d should not be in the vector index", obj.DocID) + } + } + + require.Nil(t, idx.drop()) + require.Nil(t, os.RemoveAll(idx.Config.RootPath)) +} + +func TestShard_RepairIndex(t *testing.T) { + t.Setenv("ASYNC_INDEXING", "true") + t.Setenv("ASYNC_INDEXING_STALE_TIMEOUT", "200ms") + + tests := []struct { + name string + targetVector string + multiVector bool + cfg schemaConfig.VectorIndexConfig + idxOpt func(*Index) + getVectorIndexAndQueue func(ShardLike) (VectorIndex, *VectorIndexQueue) + }{ + { + name: "hnsw", + cfg: hnsw.UserConfig{}, + getVectorIndexAndQueue: func(shd ShardLike) (VectorIndex, *VectorIndexQueue) { + return getVectorIndexAndQueue(t, shd, "") + }, + }, + { + name: "hnsw with target vectors", + targetVector: "foo", + cfg: hnsw.UserConfig{}, + idxOpt: func(i *Index) { + i.vectorIndexUserConfigs = make(map[string]schemaConfig.VectorIndexConfig) + i.vectorIndexUserConfigs["foo"] = hnsw.UserConfig{} + }, + getVectorIndexAndQueue: func(shd ShardLike) (VectorIndex, *VectorIndexQueue) { + return getVectorIndexAndQueue(t, shd, "foo") + }, + }, + { + name: "hnsw with multi vectors", + targetVector: "foo", + multiVector: true, + cfg: hnsw.UserConfig{}, + idxOpt: func(i *Index) { + i.vectorIndexUserConfigs = make(map[string]schemaConfig.VectorIndexConfig) + i.vectorIndexUserConfigs["foo"] = hnsw.UserConfig{ + Multivector: hnsw.MultivectorConfig{ + Enabled: true, + }, + } + }, + getVectorIndexAndQueue: func(shd ShardLike) (VectorIndex, *VectorIndexQueue) { + return getVectorIndexAndQueue(t, shd, "foo") + }, + }, + { + name: "flat", + cfg: flat.NewDefaultUserConfig(), + getVectorIndexAndQueue: func(shd ShardLike) (VectorIndex, *VectorIndexQueue) { + return 
getVectorIndexAndQueue(t, shd, "") + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + className := "TestClass" + var opts []func(*Index) + if test.idxOpt != nil { + opts = append(opts, test.idxOpt) + } + shd, idx := testShardWithSettings(t, ctx, &models.Class{Class: className}, test.cfg, false, true /* withCheckpoints */, opts...) + + amount := 1000 + + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(shd.Index().Config.RootPath) + + var objs []*storobj.Object + for i := 0; i < amount; i++ { + obj := testObject(className) + if test.targetVector != "" { + if test.multiVector { + obj.MultiVectors = map[string][][]float32{ + test.targetVector: {{1, 2, 3}, {4, 5, 6}}, + } + } else { + obj.Vectors = map[string][]float32{ + test.targetVector: {1, 2, 3}, + } + } + } else { + obj.Vector = randVector(3) + } + objs = append(objs, obj) + } + + errs := shd.PutObjectBatch(ctx, objs) + for _, err := range errs { + require.Nil(t, err) + } + + vidx, q := test.getVectorIndexAndQueue(shd) + + // wait for the queue to be empty + for i := 0; i < 20; i++ { + time.Sleep(500 * time.Millisecond) + if q.Size() == 0 { + break + } + } + + // remove some objects from the vector index + for i := 400; i < 600; i++ { + if test.multiVector { + err := vidx.(VectorIndexMulti).DeleteMulti(uint64(i)) + require.NoError(t, err) + } else { + err := vidx.Delete(uint64(i)) + require.NoError(t, err) + } + } + + // remove some objects from the store + bucket := shd.Store().Bucket(helpers.ObjectsBucketLSM) + buf := make([]byte, 8) + for i := 100; i < 300; i++ { + binary.LittleEndian.PutUint64(buf, uint64(i)) + v, err := bucket.GetBySecondary(0, buf) + require.NoError(t, err) + obj, err := storobj.FromBinary(v) + require.NoError(t, err) + idBytes, err := uuid.MustParse(obj.ID().String()).MarshalBinary() + require.NoError(t, err) + err = bucket.Delete(idBytes) + require.NoError(t, err) + } + + 
err := shd.RepairIndex(ctx, test.targetVector) + require.NoError(t, err) + + // wait for the queue to be empty + for i := 0; i < 20; i++ { + time.Sleep(500 * time.Millisecond) + if q.Size() == 0 { + break + } + } + + // wait for the worker to start the indexing + time.Sleep(500 * time.Millisecond) + + // make sure all objects except >= 100 < 300 are back in the vector index + for i := 0; i < amount; i++ { + if i >= 100 && i < 300 { + if vidx.ContainsDoc(uint64(i)) { + t.Fatalf("doc %d should not be in the vector index", i) + } + continue + } + + if !vidx.ContainsDoc(uint64(i)) { + t.Fatalf("doc %d should be in the vector index", i) + } + } + + require.Nil(t, idx.drop()) + require.Nil(t, os.RemoveAll(idx.Config.RootPath)) + }) + } +} + +func TestShard_FillQueue(t *testing.T) { + t.Setenv("ASYNC_INDEXING", "true") + t.Setenv("ASYNC_INDEXING_STALE_TIMEOUT", "200ms") + + tests := []struct { + name string + targetVector string + multiVector bool + cfg schemaConfig.VectorIndexConfig + idxOpt func(*Index) + getVectorIndexAndQueue func(ShardLike) (VectorIndex, *VectorIndexQueue) + }{ + { + name: "hnsw", + cfg: hnsw.UserConfig{}, + getVectorIndexAndQueue: func(shd ShardLike) (VectorIndex, *VectorIndexQueue) { + return getVectorIndexAndQueue(t, shd, "") + }, + }, + { + name: "hnsw with target vectors", + targetVector: "foo", + cfg: hnsw.UserConfig{}, + idxOpt: func(i *Index) { + i.vectorIndexUserConfigs = make(map[string]schemaConfig.VectorIndexConfig) + i.vectorIndexUserConfigs["foo"] = hnsw.UserConfig{} + }, + getVectorIndexAndQueue: func(shd ShardLike) (VectorIndex, *VectorIndexQueue) { + return getVectorIndexAndQueue(t, shd, "foo") + }, + }, + { + name: "hnsw with multi vectors", + targetVector: "foo", + multiVector: true, + cfg: hnsw.UserConfig{}, + idxOpt: func(i *Index) { + i.vectorIndexUserConfigs = make(map[string]schemaConfig.VectorIndexConfig) + i.vectorIndexUserConfigs["foo"] = hnsw.UserConfig{ + Multivector: hnsw.MultivectorConfig{ + Enabled: true, + }, + } + }, 
+ getVectorIndexAndQueue: func(shd ShardLike) (VectorIndex, *VectorIndexQueue) { + return getVectorIndexAndQueue(t, shd, "foo") + }, + }, + { + name: "flat", + cfg: flat.NewDefaultUserConfig(), + getVectorIndexAndQueue: func(shd ShardLike) (VectorIndex, *VectorIndexQueue) { + return getVectorIndexAndQueue(t, shd, "") + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + className := "TestClass" + var opts []func(*Index) + if test.idxOpt != nil { + opts = append(opts, test.idxOpt) + } + shd, idx := testShardWithSettings(t, ctx, &models.Class{Class: className}, test.cfg, false, true /* withCheckpoints */, opts...) + + amount := 1000 + + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(shd.Index().Config.RootPath) + + var objs []*storobj.Object + for i := 0; i < amount; i++ { + obj := testObject(className) + if test.targetVector != "" { + if test.multiVector { + obj.MultiVectors = map[string][][]float32{ + test.targetVector: {{1, 2, 3}, {4, 5, 6}}, + } + } else { + obj.Vectors = map[string][]float32{ + test.targetVector: {1, 2, 3}, + } + } + } else { + obj.Vector = randVector(3) + } + objs = append(objs, obj) + } + + errs := shd.PutObjectBatch(ctx, objs) + for _, err := range errs { + require.Nil(t, err) + } + + vidx, q := test.getVectorIndexAndQueue(shd) + + // wait for the queue to be empty + require.EventuallyWithT(t, func(t *assert.CollectT) { + assert.Zero(t, q.Size()) + }, 5*time.Second, 100*time.Millisecond) + + // remove most of the objects from the vector index + for i := 100; i < amount; i++ { + if test.multiVector { + err := vidx.(VectorIndexMulti).DeleteMulti(uint64(i)) + require.NoError(t, err) + } else { + err := vidx.Delete(uint64(i)) + require.NoError(t, err) + } + } + + // we need to delete tombstones so the vectors with the same doc ids could be inserted + if hnswindex.IsHNSWIndex(vidx) { + err := 
hnswindex.AsHNSWIndex(vidx).CleanUpTombstonedNodes(func() bool { return false }) + require.NoError(t, err) + } + + // refill only subset of the objects + err := shd.FillQueue(test.targetVector, 150) + require.NoError(t, err) + + require.EventuallyWithT(t, func(t *assert.CollectT) { + assert.Zero(t, q.Size()) + }, 5*time.Second, 100*time.Millisecond) + + // wait for the worker to index + time.Sleep(500 * time.Millisecond) + + // make sure all objects except >= 100 < 150 are back in the vector index + for i := 0; i < amount; i++ { + if 100 <= i && i < 150 { + require.Falsef(t, vidx.ContainsDoc(uint64(i)), "doc %d should not be in the vector index", i) + continue + } + require.Truef(t, vidx.ContainsDoc(uint64(i)), "doc %d should be in the vector index", i) + } + + require.Nil(t, idx.drop()) + require.Nil(t, os.RemoveAll(idx.Config.RootPath)) + }) + } +} + +func TestShard_resetDimensionsLSM(t *testing.T) { + ctx := testCtx() + className := "TestClass" + shd, idx := testShard(t, ctx, className) + + amount := 10 + shd.Index().Config.TrackVectorDimensions = true + shd.resetDimensionsLSM(ctx) + + t.Run("count dimensions before insert", func(t *testing.T) { + dims, err := shd.Dimensions(ctx, "") + require.NoError(t, err) + require.Equal(t, 0, dims) + }) + + t.Run("insert data into shard", func(t *testing.T) { + for i := 0; i < amount; i++ { + obj := testObject(className) + obj.Vector = randVector(3) + + err := shd.PutObject(ctx, obj) + require.Nil(t, err) + } + + objs, err := shd.ObjectList(ctx, amount, nil, nil, additional.Properties{}, shd.Index().Config.ClassName) + require.Nil(t, err) + require.Equal(t, amount, len(objs)) + }) + + t.Run("count dimensions", func(t *testing.T) { + dims, err := shd.Dimensions(ctx, "") + require.NoError(t, err) + require.Equal(t, 3*amount, dims) + }) + + t.Run("reset dimensions lsm", func(t *testing.T) { + err := shd.resetDimensionsLSM(ctx) + require.Nil(t, err) + }) + + t.Run("count dimensions after reset", func(t *testing.T) { + dims, err 
:= shd.Dimensions(ctx, "") + require.NoError(t, err) + require.Equal(t, 0, dims) + }) + + t.Run("insert data into shard after reset", func(t *testing.T) { + for i := 0; i < amount; i++ { + obj := testObject(className) + obj.Vector = randVector(3) + + err := shd.PutObject(ctx, obj) + require.Nil(t, err) + } + + objs, err := shd.ObjectList(ctx, amount, nil, nil, additional.Properties{}, shd.Index().Config.ClassName) + require.Nil(t, err) + require.Equal(t, amount, len(objs)) + }) + + t.Run("count dimensions after reset and insert", func(t *testing.T) { + dims, err := shd.Dimensions(ctx, "") + require.NoError(t, err) + require.Equal(t, 3*amount, dims) + }) + + require.Nil(t, idx.drop()) + require.Nil(t, os.RemoveAll(idx.Config.RootPath)) +} + +func TestShard_UpgradeIndex(t *testing.T) { + t.Setenv("ASYNC_INDEXING", "true") + t.Setenv("QUEUE_SCHEDULER_INTERVAL", "1ms") + + cfg := dynamic.NewDefaultUserConfig() + cfg.Threshold = 400 + + ctx := context.Background() + className := "SomeClass" + var opts []func(*Index) + opts = append(opts, func(i *Index) { + i.vectorIndexUserConfig = cfg + }) + + shd, _ := testShardWithSettings(t, ctx, &models.Class{Class: className}, cfg, false, true /* withCheckpoints */, opts...) 
+ + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(shd.Index().Config.RootPath) + + amount := 400 + for i := 0; i < 3; i++ { + objs := make([]*storobj.Object, 0, amount) + for j := 0; j < amount; j++ { + objs = append(objs, &storobj.Object{ + MarshallerVersion: 1, + Object: models.Object{ + ID: strfmt.UUID(uuid.NewString()), + Class: className, + }, + Vector: make([]float32, 1536), + }) + } + + errs := shd.PutObjectBatch(ctx, objs) + for _, err := range errs { + require.Nil(t, err) + } + } + + q, ok := shd.GetVectorIndexQueue("") + require.True(t, ok) + + // wait for the queue to be empty + require.EventuallyWithT(t, func(t *assert.CollectT) { + assert.Zero(t, q.Size()) + }, 300*time.Second, 1*time.Second) +} + +func getVectorIndexAndQueue(t *testing.T, shard ShardLike, targetVector string) (VectorIndex, *VectorIndexQueue) { + idx, vok := shard.GetVectorIndex(targetVector) + q, qok := shard.GetVectorIndexQueue(targetVector) + require.True(t, vok && qok) + return idx, q +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_unit_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_unit_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a09db171fc8e8513b8a8d57160d23304219562f9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_unit_test.go @@ -0,0 +1,136 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestShardPathDimensionsLSM(t *testing.T) { + tests := []struct { + name string + indexPath string + shardName string + expected string + }{ + { + name: "basic path", + indexPath: "/data/index", + shardName: "shard1", + expected: "/data/index/shard1/lsm/dimensions", + }, + { + name: "empty shard name", + indexPath: "/data/index", + shardName: "", + expected: "/data/index/lsm/dimensions", + }, + { + name: "empty index path", + indexPath: "", + shardName: "shard1", + expected: "shard1/lsm/dimensions", + }, + { + name: "both empty", + indexPath: "", + shardName: "", + expected: "lsm/dimensions", + }, + { + name: "relative paths", + indexPath: "data/index", + shardName: "shard1", + expected: "data/index/shard1/lsm/dimensions", + }, + { + name: "with special characters in shard name", + indexPath: "/data/index", + shardName: "shard-1_test", + expected: "/data/index/shard-1_test/lsm/dimensions", + }, + { + name: "with spaces in shard name", + indexPath: "/data/index", + shardName: "shard 1", + expected: "/data/index/shard 1/lsm/dimensions", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := shardPathDimensionsLSM(tt.indexPath, tt.shardName) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestShardPathObjectsLSM(t *testing.T) { + tests := []struct { + name string + indexPath string + shardName string + expected string + }{ + { + name: "basic path", + indexPath: "/data/index", + shardName: "shard1", + expected: "/data/index/shard1/lsm/objects", + }, + { + name: "empty shard name", + indexPath: "/data/index", + shardName: "", + expected: "/data/index/lsm/objects", + }, + { + name: "empty index path", + indexPath: "", + shardName: "shard1", + expected: "shard1/lsm/objects", + }, + { + name: "both empty", + indexPath: "", + shardName: "", + expected: "lsm/objects", + }, + { + name: "relative paths", + 
indexPath: "data/index", + shardName: "shard1", + expected: "data/index/shard1/lsm/objects", + }, + { + name: "with special characters in shard name", + indexPath: "/data/index", + shardName: "shard-1_test", + expected: "/data/index/shard-1_test/lsm/objects", + }, + { + name: "with spaces in shard name", + indexPath: "/data/index", + shardName: "shard 1", + expected: "/data/index/shard 1/lsm/objects", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := shardPathObjectsLSM(tt.indexPath, tt.shardName) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_version.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_version.go new file mode 100644 index 0000000000000000000000000000000000000000..0f56a54292e576de24f8141c4c95ac897bf3b727 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_version.go @@ -0,0 +1,117 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "encoding/binary" + "os" + + "github.com/pkg/errors" +) + +// ShardCodeBaseVersion must be increased whenever there are breaking changes - +// including those that we can handle in a non-breaking way +// the version checker can then decide on init if it should prevent startup +// completely. If it does not prevent startup, but there is still a version +// mismatch, the version can be used to make specific decisions +// +// CHANGELOG +// - Version 1 - Everything up until Weaviate v1.10.1 inclusive +// - Version 2 - Inverted Index is now stored in an always sorted fashion and +// doc ids are stored as BigEndian. 
To make this backward-compatible with v1, +// doc ids need to be read and written as Little Endian. In addition, an +// additional sort step is required in three places: during a MapList call, +// during a Map Cursor and during Map Compactions. BM25 is entirely disabled +// prior to this version +const ( + ShardCodeBaseVersion = uint16(2) + ShardCodeBaseMinimumVersionForStartup = uint16(1) +) + +type shardVersioner struct { + version uint16 + + // we don't need the file after initialization, but still need to track its + // path so we can delete it on .Drop() + path string +} + +func newShardVersioner(baseDir string, dataPresent bool) (*shardVersioner, error) { + sv := &shardVersioner{} + + return sv, sv.init(baseDir, dataPresent) +} + +func (sv *shardVersioner) init(fileName string, dataPresent bool) error { + sv.path = fileName + + f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666) + if err != nil { + return err + } + + stat, err := f.Stat() + if err != nil { + return err + } + + var version uint16 = 1 + if stat.Size() > 0 { + // the file has existed before, we need to initialize with its content + err := binary.Read(f, binary.LittleEndian, &version) + if err != nil { + return errors.Wrap(err, "read initial version from file") + } + } else { + // if the version file does not yet exist, there are two scenarios: + // 1) We are just creating this class, which means its version is + // ShardCodeBaseVersion. 
+ // 2) There is data present, so we must assume it was built with a version + // that did not yet have this versioner present, so we assume it's v1 + if !dataPresent { + version = ShardCodeBaseVersion + } else { + version = 1 + } + + err := binary.Write(f, binary.LittleEndian, &version) + if err != nil { + return errors.Wrap(err, "write version back to file") + } + + if err := f.Close(); err != nil { + return errors.Wrap(err, "close version file") + } + } + + if version < ShardCodeBaseMinimumVersionForStartup { + return errors.Errorf("cannot start up shard: it was built with shard "+ + "version v%d, but this version of Weaviate requires at least shard version v%d", + version, ShardCodeBaseMinimumVersionForStartup) + } + + sv.version = version + + return nil +} + +func (sv *shardVersioner) Drop() error { + err := os.Remove(sv.path) + if err != nil { + return errors.Wrap(err, "drop versioner file") + } + return nil +} + +func (sv *shardVersioner) Version() uint16 { + return sv.version +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_batch_delete.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_batch_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..6225ca0358dbd3e5f71e300c4f7e341584c72752 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_batch_delete.go @@ -0,0 +1,181 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "sync" + "time" + + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/usecases/objects" +) + +// return value map[int]error gives the error for the index as it received it +func (s *Shard) DeleteObjectBatch(ctx context.Context, uuids []strfmt.UUID, deletionTime time.Time, dryRun bool) objects.BatchSimpleObjects { + s.activityTrackerWrite.Add(1) + if err := s.isReadOnly(); err != nil { + return objects.BatchSimpleObjects{ + objects.BatchSimpleObject{Err: err}, + } + } + return newDeleteObjectsBatcher(s).Delete(ctx, uuids, deletionTime, dryRun) +} + +type deleteObjectsBatcher struct { + sync.Mutex + shard ShardLike + objects objects.BatchSimpleObjects +} + +func newDeleteObjectsBatcher(shard ShardLike) *deleteObjectsBatcher { + return &deleteObjectsBatcher{shard: shard} +} + +func (b *deleteObjectsBatcher) Delete(ctx context.Context, uuids []strfmt.UUID, deletionTime time.Time, dryRun bool) objects.BatchSimpleObjects { + b.delete(ctx, uuids, deletionTime, dryRun) + b.flushWALs(ctx) + return b.objects +} + +func (b *deleteObjectsBatcher) delete(ctx context.Context, uuids []strfmt.UUID, deletionTime time.Time, dryRun bool) { + b.objects = b.deleteSingleBatchInLSM(ctx, uuids, deletionTime, dryRun) +} + +func (b *deleteObjectsBatcher) deleteSingleBatchInLSM(ctx context.Context, + batch []strfmt.UUID, deletionTime time.Time, dryRun bool, +) objects.BatchSimpleObjects { + before := time.Now() + defer b.shard.Metrics().BatchDelete(before, "shard_delete_all") + + result := make(objects.BatchSimpleObjects, len(batch)) + objLock := &sync.Mutex{} + + // if the context is expired fail all + if err := ctx.Err(); err != nil { + for i := 
range result { + result[i] = objects.BatchSimpleObject{Err: errors.Wrap(err, "begin batch")} + } + return result + } + + eg := enterrors.NewErrorGroupWrapper(b.shard.Index().logger) + eg.SetLimit(_NUMCPU) // prevent unbounded concurrency + + for j, docID := range batch { + index := j + docID := docID + f := func() error { + // perform delete + obj := b.deleteObjectOfBatchInLSM(ctx, docID, deletionTime, dryRun) + objLock.Lock() + result[index] = obj + objLock.Unlock() + return nil + } + eg.Go(f, index, docID) + } + // safe to ignore error, as the internal routines never return an error + eg.Wait() + + return result +} + +func (b *deleteObjectsBatcher) deleteObjectOfBatchInLSM(ctx context.Context, + uuid strfmt.UUID, deletionTime time.Time, dryRun bool, +) objects.BatchSimpleObject { + before := time.Now() + defer b.shard.Metrics().BatchDelete(before, "shard_delete_individual_total") + if !dryRun { + err := b.shard.batchDeleteObject(ctx, uuid, deletionTime) + return objects.BatchSimpleObject{UUID: uuid, Err: err} + } + + return objects.BatchSimpleObject{UUID: uuid, Err: nil} +} + +func (b *deleteObjectsBatcher) flushWALs(ctx context.Context) { + before := time.Now() + defer b.shard.Metrics().BatchDelete(before, "shard_flush_wals") + + if err := b.shard.Store().WriteWALs(); err != nil { + for i := range b.objects { + b.setErrorAtIndex(err, i) + } + } + + _ = b.shard.ForEachVectorQueue(func(targetVector string, queue *VectorIndexQueue) error { + if err := queue.Flush(); err != nil { + for i := range b.objects { + b.setErrorAtIndex(fmt.Errorf("target vector %s: %w", targetVector, err), i) + } + } + return nil + }) + + if err := b.shard.GetPropertyLengthTracker().Flush(); err != nil { + for i := range b.objects { + b.setErrorAtIndex(err, i) + } + } +} + +func (b *deleteObjectsBatcher) setErrorAtIndex(err error, index int) { + b.Lock() + defer b.Unlock() + b.objects[index].Err = err +} + +func (s *Shard) findDocIDs(ctx context.Context, filters *filters.LocalFilter) 
([]uint64, error) { + allowList, err := inverted.NewSearcher(s.index.logger, s.store, s.index.getSchema.ReadOnlyClass, + nil, s.index.classSearcher, s.index.stopwords, s.versioner.version, s.isFallbackToSearchable, + s.tenant(), s.index.Config.QueryNestedRefLimit, s.bitmapFactory). + DocIDs(ctx, filters, additional.Properties{}, s.index.Config.ClassName) + if err != nil { + return nil, err + } + defer allowList.Close() + return allowList.Slice(), nil +} + +func (s *Shard) FindUUIDs(ctx context.Context, filters *filters.LocalFilter) ([]strfmt.UUID, error) { + docs, err := s.findDocIDs(ctx, filters) + if err != nil { + return nil, err + } + + var ( + uuids = make([]strfmt.UUID, len(docs)) + currIdx = 0 + ) + + for _, doc := range docs { + uuid, err := s.uuidFromDocID(doc) + if err != nil { + // TODO: More than likely this will occur due to an object which has already been deleted. + // However, this is not a guarantee. This can be improved by logging, or handling + // errors other than `id not found` rather than skipping them entirely. + s.index.logger.WithField("op", "shard.find_uuids").WithField("docID", doc).WithError(err).Debug("failed to find UUID for docID") + continue + } + uuids[currIdx] = uuid + currIdx++ + } + return uuids[:currIdx], nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_batch_objects.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_batch_objects.go new file mode 100644 index 0000000000000000000000000000000000000000..72f6f90f7ecab4d8c89f334df06b7f31d3c1aae1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_batch_objects.go @@ -0,0 +1,504 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "os" + "runtime/debug" + "sync" + "time" + + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + entcfg "github.com/weaviate/weaviate/entities/config" + enterrors "github.com/weaviate/weaviate/entities/errors" + entsentry "github.com/weaviate/weaviate/entities/sentry" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/storobj" +) + +// return value map[int]error gives the error for the index as it received it +func (s *Shard) PutObjectBatch(ctx context.Context, + objects []*storobj.Object, +) []error { + if err := s.isReadOnly(); err != nil { + return []error{err} + } + + return s.putBatch(ctx, objects) +} + +// asyncEnabled is a quick and dirty way to create a feature flag for async +// indexing. +func asyncEnabled() bool { + return entcfg.Enabled(os.Getenv("ASYNC_INDEXING")) +} + +// Workers are started with the first batch and keep working as there are objects to add from any batch. Each batch +// adds its jobs (that contain the respective object) to a single queue that is then processed by the workers. +// When the last batch finishes, all workers receive a shutdown signal and exit +func (s *Shard) putBatch(ctx context.Context, + objects []*storobj.Object, +) []error { + s.activityTrackerWrite.Add(1) + if asyncEnabled() { + return s.putBatchAsync(ctx, objects) + } + // Workers are started with the first batch and keep working as there are objects to add from any batch. Each batch + // adds its jobs (that contain the respective object) to a single queue that is then processed by the workers. 
+ // When the last batch finishes, all workers receive a shutdown signal and exit + batcher := newObjectsBatcher(s) + err := batcher.Objects(ctx, objects) + + // block until all objects of batch have been added + batcher.wg.Wait() + s.metrics.VectorIndex(batcher.batchStartTime) + + return err +} + +func (s *Shard) putBatchAsync(ctx context.Context, objects []*storobj.Object) []error { + beforeBatch := time.Now() + defer s.metrics.BatchObject(beforeBatch, len(objects)) + + batcher := newObjectsBatcher(s) + + batcher.init(objects) + batcher.storeInObjectStore(ctx) + batcher.markDeletedInVectorStorage(ctx) + batcher.storeAdditionalStorageWithAsyncQueue(ctx) + batcher.flushWALs(ctx) + + return batcher.errs +} + +// objectsBatcher is a helper type wrapping around an underlying shard that can +// execute objects batch operations on a shard (as opposed to references batch +// operations) +type objectsBatcher struct { + sync.Mutex + shard ShardLike + statuses map[strfmt.UUID]objectInsertStatus + errs []error + duplicates map[int]struct{} + objects []*storobj.Object + wg sync.WaitGroup + batchStartTime time.Time +} + +func newObjectsBatcher(s ShardLike) *objectsBatcher { + return &objectsBatcher{shard: s} +} + +// Objects imports the specified objects in parallel in a batch-fashion +func (ob *objectsBatcher) Objects(ctx context.Context, + objects []*storobj.Object, +) []error { + beforeBatch := time.Now() + defer ob.shard.Metrics().BatchObject(beforeBatch, len(objects)) + + ob.init(objects) + ob.storeInObjectStore(ctx) + ob.markDeletedInVectorStorage(ctx) + ob.storeAdditionalStorageWithWorkers(ctx) + ob.flushWALs(ctx) + return ob.errs +} + +func (ob *objectsBatcher) init(objects []*storobj.Object) { + ob.objects = objects + ob.statuses = map[strfmt.UUID]objectInsertStatus{} + ob.errs = make([]error, len(objects)) + ob.duplicates = findDuplicatesInBatchObjects(objects) +} + +// storeInObjectStore performs all storage operations on the underlying +// key/value store, this is 
they object-by-id store, the docID-lookup tables, +// as well as all inverted indices. +func (ob *objectsBatcher) storeInObjectStore(ctx context.Context) { + beforeObjectStore := time.Now() + + errs := ob.storeSingleBatchInLSM(ctx, ob.objects) + for i, err := range errs { + if err != nil { + ob.setErrorAtIndex(err, i) + } + } + + ob.shard.Metrics().ObjectStore(beforeObjectStore) +} + +func (ob *objectsBatcher) storeSingleBatchInLSM(ctx context.Context, + batch []*storobj.Object, +) []error { + errs := make([]error, len(batch)) + errLock := &sync.Mutex{} + + // if the context is expired fail all + if err := ctx.Err(); err != nil { + for i := range errs { + errs[i] = errors.Wrap(err, "begin batch") + } + return errs + } + + eg := enterrors.NewErrorGroupWrapper(ob.shard.Index().logger) + eg.SetLimit(_NUMCPU) + + for j, object := range batch { + object := object + index := j + f := func() error { + if err := ob.storeObjectOfBatchInLSM(ctx, index, object); err != nil { + errLock.Lock() + errs[index] = err + errLock.Unlock() + } + return nil + } + eg.Go(f) + + } + _ = eg.Wait() // no errors can happen here, this is just for concurrency control + + return errs +} + +func (ob *objectsBatcher) storeObjectOfBatchInLSM(ctx context.Context, + objectIndex int, object *storobj.Object, +) error { + if _, ok := ob.duplicates[objectIndex]; ok { + return nil + } + uuidParsed, err := uuid.Parse(object.ID().String()) + if err != nil { + return errors.Wrap(err, "invalid id") + } + + idBytes, err := uuidParsed.MarshalBinary() + if err != nil { + return err + } + + status, err := ob.shard.putObjectLSM(object, idBytes) + if err != nil { + return err + } + + ob.setStatusForID(status, object.ID()) + + if err := ctx.Err(); err != nil { + return errors.Wrapf(err, "end store object %d of batch", objectIndex) + } + + return nil +} + +// setStatusForID is thread-safe as it uses the underlying mutex to lock the +// statuses map when writing into it +func (ob *objectsBatcher) setStatusForID(status 
objectInsertStatus, id strfmt.UUID) { + ob.Lock() + defer ob.Unlock() + ob.statuses[id] = status +} + +func (ob *objectsBatcher) markDeletedInVectorStorage(ctx context.Context) { + var docIDsToDelete []uint64 + var positions []int + for pos, object := range ob.objects { + status := ob.statuses[object.ID()] + if status.docIDChanged { + docIDsToDelete = append(docIDsToDelete, status.oldDocID) + positions = append(positions, pos) + } + } + + if len(docIDsToDelete) == 0 { + return + } + + _ = ob.shard.ForEachVectorQueue(func(targetVector string, queue *VectorIndexQueue) error { + if err := queue.Delete(docIDsToDelete...); err != nil { + for _, pos := range positions { + ob.setErrorAtIndex(fmt.Errorf("target vector %s: %w", targetVector, err), pos) + } + } + return nil + }) +} + +// storeAdditionalStorageWithWorkers stores the object in all non-key-value +// stores, such as the main vector index as well as the property-specific +// indices, such as the geo-index. +func (ob *objectsBatcher) storeAdditionalStorageWithWorkers(ctx context.Context) { + if ok := ob.checkContext(ctx); !ok { + // if the context is no longer OK, there's no point in continuing - abort + // early + return + } + + ob.batchStartTime = time.Now() + + for i, object := range ob.objects { + status := ob.statuses[object.ID()] + if ob.shouldSkipInAdditionalStorage(i, status) { + continue + } + + ob.wg.Add(1) + ob.shard.addJobToQueue(job{ + object: object, + status: status, + index: i, + ctx: ctx, + batcher: ob, + }) + } +} + +func (ob *objectsBatcher) storeAdditionalStorageWithAsyncQueue(ctx context.Context) { + if ok := ob.checkContext(ctx); !ok { + // if the context is no longer OK, there's no point in continuing - abort + // early + return + } + + ob.batchStartTime = time.Now() + shouldGeoIndex := ob.shard.hasGeoIndex() + + targetVectors := make(map[string][]common.VectorRecord) + for i, object := range ob.objects { + status := ob.statuses[object.ID()] + + if ob.shouldSkipInAdditionalStorage(i, status) 
{ + continue + } + + if shouldGeoIndex { + if err := ob.shard.updatePropertySpecificIndices(ctx, object, status); err != nil { + ob.setErrorAtIndex(errors.Wrap(err, "update prop-specific indices"), i) + continue + } + } + + // skip vector update, as vector was not changed + // https://github.com/weaviate/weaviate/issues/3948 + if status.docIDPreserved { + continue + } + + if len(object.Vector) == 0 && len(object.Vectors) == 0 && len(object.MultiVectors) == 0 { + continue + } + + for targetVector, vector := range object.Vectors { + targetVectors[targetVector] = append(targetVectors[targetVector], &common.Vector[[]float32]{ + ID: status.docID, + Vector: vector, + }) + } + for targetVector, vector := range object.MultiVectors { + targetVectors[targetVector] = append(targetVectors[targetVector], &common.Vector[[][]float32]{ + ID: status.docID, + Vector: vector, + }) + } + + if len(object.Vector) > 0 { + // use empty string for legacy vector, downstream code will handle that appropriately + targetVectors[""] = append(targetVectors[""], &common.Vector[[]float32]{ + ID: status.docID, + Vector: object.Vector, + }) + } + } + + for targetVector, vectors := range targetVectors { + queue, ok := ob.shard.GetVectorIndexQueue(targetVector) + if !ok { + ob.setErrorAtIndex(fmt.Errorf("queue not found for target vector %s", targetVector), 0) + } else { + err := queue.Insert(ctx, vectors...) 
+ if err != nil { + ob.setErrorAtIndex(err, 0) + } + } + } +} + +func (ob *objectsBatcher) shouldSkipInAdditionalStorage(i int, status objectInsertStatus) bool { + if ok := ob.hasErrorAtIndex(i); ok { + // had an error prior, ignore + return true + } + + // object was not changed, skip further updates + // https://github.com/weaviate/weaviate/issues/3949 + if status.skipUpsert { + return true + } + + // no need to lock the mutex for a duplicate check, as we only ever write + // during init() in there - not concurrently + if _, ok := ob.duplicates[i]; ok { + // is a duplicate, ignore + return true + } + + return false +} + +func (ob *objectsBatcher) storeSingleObjectInAdditionalStorage(ctx context.Context, + object *storobj.Object, status objectInsertStatus, index int, +) { + defer func() { + err := recover() + if err != nil { + entsentry.Recover(err) + ob.setErrorAtIndex(fmt.Errorf("an unexpected error occurred: %s", err), index) + fmt.Fprintf(os.Stderr, "panic: %s\n", err) + debug.PrintStack() + } + }() + + if err := ctx.Err(); err != nil { + ob.setErrorAtIndex(errors.Wrap(err, "insert to vector index"), index) + return + } + + if len(object.Vector) > 0 || len(object.Vectors) > 0 || len(object.MultiVectors) > 0 { + // By this time all required deletes (e.g. because of DocID changes) have + // already been grouped and performed in bulk. Only the insertions are + // left. The motivation for this change is explained in + // https://github.com/weaviate/weaviate/pull/2697. + // + // Before this change, two identical batches in sequence would lead to + // massive lock contention in the hnsw index, as each individual delete + // requires a costly RW.Lock() operation which first drains all "readers" + // which represent the regular imports. See "deleteVsInsertLock" inside the + // hnsw store. + // + // With the improved logic, we group all batches up front in a single call, + // so this highly concurrent method no longer needs to compete for those + // expensive locks. 
+ // + // Since this behavior is exclusive to batching, we can no longer call + // shard.updateVectorIndex which would also handle the delete as required + // for a non-batch update. Instead a new method has been introduced that + // ignores deletes. + if len(object.Vectors) > 0 { + if err := ob.shard.updateVectorIndexesIgnoreDelete(ctx, object.Vectors, status); err != nil { + ob.setErrorAtIndex(errors.Wrap(err, "insert to vector index"), index) + return + } + } + if len(object.MultiVectors) > 0 { + if err := ob.shard.updateMultiVectorIndexesIgnoreDelete(ctx, object.MultiVectors, status); err != nil { + ob.setErrorAtIndex(errors.Wrap(err, "insert to multi vector index"), index) + return + } + } + if len(object.Vector) > 0 { + if err := ob.shard.updateVectorIndexIgnoreDelete(ctx, object.Vector, status); err != nil { + ob.setErrorAtIndex(errors.Wrap(err, "insert to vector index"), index) + return + } + } + } + + if err := ob.shard.updatePropertySpecificIndices(ctx, object, status); err != nil { + ob.setErrorAtIndex(errors.Wrap(err, "update prop-specific indices"), index) + return + } +} + +// hasErrorAtIndex is thread-safe as it uses the underlying mutex to lock +// before reading from the errs map +func (ob *objectsBatcher) hasErrorAtIndex(i int) bool { + ob.Lock() + defer ob.Unlock() + return ob.errs[i] != nil +} + +// setErrorAtIndex is thread-safe as it uses the underlying mutex to lock +// writing into the errs map +func (ob *objectsBatcher) setErrorAtIndex(err error, index int) { + ob.Lock() + defer ob.Unlock() + ob.errs[index] = err +} + +// checkContext does nothing if the context is still active. 
But if the context +// has error'd, it marks all objects which have not previously error'd yet with +// the ctx error +func (ob *objectsBatcher) checkContext(ctx context.Context) bool { + if err := ctx.Err(); err != nil { + for i, err := range ob.errs { + if err == nil { + // already has an error, ignore + continue + } + + ob.errs[i] = errors.Wrapf(err, + "inverted indexing complete, about to start vector indexing") + } + + return false + } + + return true +} + +func (ob *objectsBatcher) flushWALs(ctx context.Context) { + if err := ob.shard.Store().WriteWALs(); err != nil { + for i := range ob.objects { + ob.setErrorAtIndex(err, i) + } + } + + _ = ob.shard.ForEachVectorQueue(func(targetVector string, queue *VectorIndexQueue) error { + if err := queue.Flush(); err != nil { + for i := range ob.objects { + ob.setErrorAtIndex(fmt.Errorf("target vector %s: %w", targetVector, err), i) + } + } + return nil + }) + + if err := ob.shard.GetPropertyLengthTracker().Flush(); err != nil { + for i := range ob.objects { + ob.setErrorAtIndex(err, i) + } + } +} + +// returns the originalIndexIDs to be ignored +func findDuplicatesInBatchObjects(in []*storobj.Object) map[int]struct{} { + count := map[strfmt.UUID]int{} + for _, obj := range in { + count[obj.ID()] = count[obj.ID()] + 1 + } + + ignore := map[int]struct{}{} + for i, obj := range in { + if c := count[obj.ID()]; c > 1 { + count[obj.ID()] = c - 1 + ignore[i] = struct{}{} + } + } + + return ignore +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_batch_references.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_batch_references.go new file mode 100644 index 0000000000000000000000000000000000000000..a608997a92a059dab2bfc4551ec5e3f1f4d5ee48 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_batch_references.go @@ -0,0 +1,351 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| 
|\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/objects" +) + +// return value map[int]error gives the error for the index as it received it +func (s *Shard) AddReferencesBatch(ctx context.Context, refs objects.BatchReferences) []error { + s.activityTrackerWrite.Add(1) + if err := s.isReadOnly(); err != nil { + return []error{err} + } + + return newReferencesBatcher(s).References(ctx, refs) +} + +// referencesBatcher is a helper type wrapping around an underlying shard that can +// execute references batch operations on a shard (as opposed to object batch +// operations) +type referencesBatcher struct { + sync.Mutex + shard ShardLike + errs []error + refs objects.BatchReferences +} + +func newReferencesBatcher(s ShardLike) *referencesBatcher { + return &referencesBatcher{ + shard: s, + } +} + +func (b *referencesBatcher) References(ctx context.Context, + refs objects.BatchReferences, +) []error { + b.init(refs) + b.storeInObjectStore(ctx) + b.flushWALs(ctx) + return b.errs +} + +func (b *referencesBatcher) init(refs objects.BatchReferences) { + b.refs = refs + b.errs = make([]error, len(refs)) +} + +func (b *referencesBatcher) storeInObjectStore( + ctx context.Context, +) { + errs := b.storeSingleBatchInLSM(ctx, b.refs) + for i, err := range errs { + if err != nil { + b.setErrorAtIndex(err, i) + } + } + + // adding references can not alter the vector position, so no need to alter + // the vector index +} + +func (b *referencesBatcher) storeSingleBatchInLSM(ctx 
context.Context, batch objects.BatchReferences) []error { + errs := make([]error, len(batch)) + errLock := &sync.Mutex{} + + // if the context is expired fail all + if err := ctx.Err(); err != nil { + for i := range errs { + errs[i] = errors.Wrap(err, "begin batch") + } + return errs + } + + invertedMerger := inverted.NewDeltaMerger() + propsByName, err := b.getSchemaPropsByName() + if err != nil { + for i := range errs { + errs[i] = errors.Wrap(err, "getting schema properties") + } + return errs + } + + // TODO: is there any benefit in having this parallelized? if so, don't forget to lock before assigning errors + // If we want them to run in parallel we need to look individual objects, + // otherwise we have a race inside the merge functions + // wg := &sync.WaitGroup{} + for i, ref := range batch { + // wg.Add(1) + // go func(index int, reference objects.BatchReference) { + // defer wg.Done() + uuidParsed, err := uuid.Parse(ref.From.TargetID.String()) + if err != nil { + errLock.Lock() + errs[i] = errors.Wrap(err, "invalid id") + errLock.Unlock() + continue + } + + idBytes, err := uuidParsed.MarshalBinary() + if err != nil { + errLock.Lock() + errs[i] = err + errLock.Unlock() + continue + } + + mergeDoc := mergeDocFromBatchReference(ref) + res, err := b.shard.mutableMergeObjectLSM(mergeDoc, idBytes) + if err != nil { + errLock.Lock() + errs[i] = err + errLock.Unlock() + continue + } + + prop, ok := propsByName[ref.From.Property.String()] + if !ok { + errLock.Lock() + errs[i] = fmt.Errorf("property '%s' not found in schema", ref.From.Property) + errLock.Unlock() + continue + } + + // generally the batch ref is an append only change which does not alter + // the vector position. 
There is however one inverted index link that needs + // to be cleanup: the ref count + if err := b.analyzeInverted(invertedMerger, res, ref, prop); err != nil { + errLock.Lock() + errs[i] = err + errLock.Unlock() + continue + } + } + + if err := b.writeInverted(invertedMerger.Merge()); err != nil { + for i := range errs { + errs[i] = errors.Wrap(err, "write inverted batch") + } + return errs + } + + return errs +} + +func (b *referencesBatcher) analyzeInverted(invertedMerger *inverted.DeltaMerger, mergeResult mutableMergeResult, ref objects.BatchReference, prop *models.Property) error { + prevProps, err := b.analyzeRef(mergeResult.previous, ref, prop) + if err != nil { + return err + } + + nextProps, err := b.analyzeRef(mergeResult.next, ref, prop) + if err != nil { + return err + } + + delta := inverted.Delta(prevProps, nextProps) + invertedMerger.AddAdditions(delta.ToAdd, mergeResult.status.docID) + invertedMerger.AddDeletions(delta.ToDelete, mergeResult.status.docID) + + return nil +} + +func (b *referencesBatcher) writeInverted(in inverted.DeltaMergeResult) error { + before := time.Now() + if err := b.writeInvertedAdditions(in.Additions); err != nil { + return errors.Wrap(err, "write additions") + } + b.shard.Metrics().InvertedExtend(before, len(in.Additions)) + + before = time.Now() + if err := b.writeInvertedDeletions(in.Deletions); err != nil { + return errors.Wrap(err, "write deletions") + } + b.shard.Metrics().InvertedDeleteDelta(before) + + return nil +} + +// TODO text_rbm_inverted_index unify bucket write +func (b *referencesBatcher) writeInvertedDeletions(in []inverted.MergeProperty) error { + for _, prop := range in { + // in the references batcher we can only ever write ref count entire which + // are guaranteed to be not have a frequency, meaning they will use the + // "Set" strategy in the lsmkv store + if prop.HasFilterableIndex { + bucket := b.shard.Store().Bucket(helpers.BucketFromPropNameLSM(prop.Name)) + if bucket == nil { + return 
errors.Errorf("no bucket for prop '%s' found", prop.Name) + } + + for _, item := range prop.MergeItems { + for _, id := range item.DocIDs { + err := b.shard.deleteFromPropertySetBucket(bucket, id.DocID, item.Data) + if err != nil { + return err + } + } + } + } + } + + return nil +} + +// TODO text_rbm_inverted_index unify bucket write +func (b *referencesBatcher) writeInvertedAdditions(in []inverted.MergeProperty) error { + for _, prop := range in { + // in the references batcher we can only ever write ref count entire which + // are guaranteed to be not have a frequency, meaning they will use the + // "Set" strategy in the lsmkv store + if prop.HasFilterableIndex { + bucket := b.shard.Store().Bucket(helpers.BucketFromPropNameLSM(prop.Name)) + if bucket == nil { + return errors.Errorf("no bucket for prop '%s' found", prop.Name) + } + + for _, item := range prop.MergeItems { + err := b.shard.batchExtendInvertedIndexItemsLSMNoFrequency(bucket, item) + if err != nil { + return err + } + } + } + } + + return nil +} + +func (b *referencesBatcher) analyzeRef(obj *storobj.Object, ref objects.BatchReference, prop *models.Property) ([]inverted.Property, error) { + if prop == nil { + return nil, fmt.Errorf("analyzeRef: property %q not found in schema", ref.From.Property) + } + + props := obj.Properties() + if props == nil { + return nil, nil + } + + propMap, ok := props.(map[string]interface{}) + if !ok { + return nil, nil + } + + var refs models.MultipleRef + refProp, ok := propMap[ref.From.Property.String()] + if !ok { + refs = make(models.MultipleRef, 0) // explicitly mark as length zero + } else { + parsed, ok := refProp.(models.MultipleRef) + if !ok { + return nil, errors.Errorf("prop %s is present, but not a ref, got: %T", + ref.From.Property.String(), refProp) + } + refs = parsed + } + + a := inverted.NewAnalyzer(nil) + + countItems, err := a.RefCount(refs) + if err != nil { + return nil, err + } + + valueItems, err := a.Ref(refs) + if err != nil { + return nil, err + 
} + + return []inverted.Property{{ + Name: helpers.MetaCountProp(ref.From.Property.String()), + Items: countItems, + HasFilterableIndex: inverted.HasFilterableIndexMetaCount && inverted.HasAnyInvertedIndex(prop), + HasSearchableIndex: inverted.HasSearchableIndexMetaCount && inverted.HasAnyInvertedIndex(prop), + HasRangeableIndex: inverted.HasRangeableIndexMetaCount && inverted.HasAnyInvertedIndex(prop), + }, { + Name: ref.From.Property.String(), + Items: valueItems, + HasFilterableIndex: inverted.HasFilterableIndex(prop), + HasSearchableIndex: inverted.HasSearchableIndex(prop), + HasRangeableIndex: inverted.HasRangeableIndex(prop), + }}, nil +} + +func (b *referencesBatcher) setErrorAtIndex(err error, i int) { + b.Lock() + defer b.Unlock() + + err = errors.Wrap(err, "ref batch") + b.errs[i] = err +} + +func mergeDocFromBatchReference(ref objects.BatchReference) objects.MergeDocument { + return objects.MergeDocument{ + Class: ref.From.Class.String(), + ID: ref.From.TargetID, + UpdateTime: time.Now().UnixMilli(), + References: objects.BatchReferences{ref}, + } +} + +func (b *referencesBatcher) flushWALs(ctx context.Context) { + if err := b.shard.Store().WriteWALs(); err != nil { + for i := range b.refs { + b.setErrorAtIndex(err, i) + } + } + + _ = b.shard.ForEachVectorQueue(func(targetVector string, queue *VectorIndexQueue) error { + if err := queue.Flush(); err != nil { + for i := range b.refs { + b.setErrorAtIndex(fmt.Errorf("target vector %s: %w", targetVector, err), i) + } + } + return nil + }) +} + +func (b *referencesBatcher) getSchemaPropsByName() (map[string]*models.Property, error) { + idx := b.shard.Index() + class := idx.getSchema.ReadOnlyClass(idx.Config.ClassName.String()) + if class == nil { + return nil, fmt.Errorf("could not find class %s in schema", idx.Config.ClassName) + } + + propsByName := map[string]*models.Property{} + for _, prop := range class.Properties { + propsByName[prop.Name] = prop + } + return propsByName, nil +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_delete.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..2a2bcf08ed67cc7376484885d41579f2c4f73ad2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_delete.go @@ -0,0 +1,179 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "encoding/binary" + "fmt" + "time" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/entities/storobj" +) + +func (s *Shard) DeleteObject(ctx context.Context, id strfmt.UUID, deletionTime time.Time) error { + if err := s.isReadOnly(); err != nil { + return err + } + + s.asyncReplicationRWMux.RLock() + defer s.asyncReplicationRWMux.RUnlock() + + err := s.waitForMinimalHashTreeInitialization(ctx) + if err != nil { + return err + } + + idBytes, err := uuid.MustParse(id.String()).MarshalBinary() + if err != nil { + return err + } + + bucket := s.store.Bucket(helpers.ObjectsBucketLSM) + + // see comment in shard_write_put.go::putObjectLSM + lock := &s.docIdLock[s.uuidToIdLockPoolId(idBytes)] + + lock.Lock() + defer lock.Unlock() + + existing, err := bucket.Get([]byte(idBytes)) + if err != nil { + return fmt.Errorf("unexpected error on previous lookup: %w", err) + } + + if existing == nil { + // nothing to do + return nil + } + + // we need the doc ID so we can clean up inverted indices currently + // pointing to this object + docID, updateTime, err := storobj.DocIDAndTimeFromBinary(existing) + if err != nil { + return fmt.Errorf("get existing doc id from object binary: %w", 
err) + } + + if deletionTime.IsZero() { + err = bucket.Delete(idBytes) + } else { + err = bucket.DeleteWith(idBytes, deletionTime) + } + if err != nil { + return fmt.Errorf("delete object from bucket: %w", err) + } + + if err = s.mayDeleteObjectHashTree(idBytes, updateTime); err != nil { + return fmt.Errorf("object deletion in hashtree: %w", err) + } + + err = s.cleanupInvertedIndexOnDelete(existing, docID) + if err != nil { + return fmt.Errorf("delete object from bucket: %w", err) + } + + if err = s.store.WriteWALs(); err != nil { + return fmt.Errorf("flush all buffered WALs: %w", err) + } + + err = s.ForEachVectorQueue(func(targetVector string, queue *VectorIndexQueue) error { + if err = queue.Delete(docID); err != nil { + return fmt.Errorf("delete from vector index of vector %q: %w", targetVector, err) + } + return nil + }) + if err != nil { + return err + } + + err = s.ForEachVectorQueue(func(targetVector string, queue *VectorIndexQueue) error { + if err = queue.Flush(); err != nil { + return fmt.Errorf("flush all vector index buffered WALs of vector %q: %w", targetVector, err) + } + return nil + }) + if err != nil { + return err + } + + return nil +} + +func (s *Shard) cleanupInvertedIndexOnDelete(previous []byte, docID uint64) error { + previousObject, err := storobj.FromBinary(previous) + if err != nil { + return fmt.Errorf("unmarshal previous object: %w", err) + } + + previousProps, previousNilProps, err := s.AnalyzeObject(previousObject) + if err != nil { + return fmt.Errorf("analyze previous object: %w", err) + } + + if err = s.subtractPropLengths(previousProps); err != nil { + return fmt.Errorf("subtract prop lengths: %w", err) + } + + err = s.deleteFromInvertedIndicesLSM(previousProps, previousNilProps, docID) + if err != nil { + return fmt.Errorf("put inverted indices props: %w", err) + } + + if s.index.Config.TrackVectorDimensions { + err = previousObject.IterateThroughVectorDimensions(func(targetVector string, dims int) error { + if err = 
s.removeDimensionsLSM(dims, docID, targetVector); err != nil { + return fmt.Errorf("remove dimension tracking for vector %q: %w", targetVector, err) + } + return nil + }) + if err != nil { + return err + } + } + + return nil +} + +func (s *Shard) mayDeleteObjectHashTree(uuidBytes []byte, updateTime int64) error { + if s.hashtree == nil { + return nil + } + + return s.deleteObjectHashTree(uuidBytes, updateTime) +} + +func (s *Shard) deleteObjectHashTree(uuidBytes []byte, updateTime int64) error { + if len(uuidBytes) != 16 { + return fmt.Errorf("invalid object uuid") + } + + if updateTime < 1 { + return fmt.Errorf("invalid object update time") + } + + leaf := s.hashtreeLeafFor(uuidBytes) + + var objectDigest [16 + 8]byte + + copy(objectDigest[:], uuidBytes) + binary.BigEndian.PutUint64(objectDigest[16:], uint64(updateTime)) + + // object deletion is treated as non-existent, + // that because deletion time or tombstone may not be available + + s.hashtree.AggregateLeafWith(leaf, objectDigest[:]) + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_inverted.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_inverted.go new file mode 100644 index 0000000000000000000000000000000000000000..25036cba4a09c65c4606a9bb7616eb2e94f56d18 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_inverted.go @@ -0,0 +1,84 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "fmt" + + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/storobj" +) + +func isPropertyForLength(dt schema.DataType) bool { + switch dt { + case schema.DataTypeInt, schema.DataTypeNumber, schema.DataTypeBoolean, schema.DataTypeDate: + return false + default: + return true + } +} + +func (s *Shard) AnalyzeObject(object *storobj.Object) ([]inverted.Property, []inverted.NilProperty, error) { + c := s.index.getSchema.ReadOnlyClass(object.Class().String()) + if c == nil { + return nil, nil, fmt.Errorf("could not find class %s in schema", object.Class().String()) + } + + var schemaMap map[string]interface{} + + if object.Properties() == nil { + schemaMap = make(map[string]interface{}) + } else { + maybeSchemaMap, ok := object.Properties().(map[string]interface{}) + if !ok { + return nil, nil, fmt.Errorf("expected schema to be map, but got %T", object.Properties()) + } + schemaMap = maybeSchemaMap + } + + // add nil for all properties that are not part of the object so that they can be added to the inverted index for + // the null state (if enabled) + var nilProps []inverted.NilProperty + if s.index.invertedIndexConfig.IndexNullState { + for _, prop := range c.Properties { + dt := schema.DataType(prop.DataType[0]) + // some datatypes are not added to the inverted index, so we can skip them here + if dt == schema.DataTypeGeoCoordinates || dt == schema.DataTypePhoneNumber || dt == schema.DataTypeBlob { + continue + } + + // Add props as nil props if + // 1. They are not in the schema map ( == nil) + // 2. 
Their inverted index is enabled + _, ok := schemaMap[prop.Name] + if !ok && inverted.HasAnyInvertedIndex(prop) { + nilProps = append(nilProps, inverted.NilProperty{ + Name: prop.Name, + AddToPropertyLength: isPropertyForLength(dt), + }) + } + } + } + + if s.index.invertedIndexConfig.IndexTimestamps { + if schemaMap == nil { + schemaMap = make(map[string]interface{}) + } + schemaMap[filters.InternalPropCreationTimeUnix] = object.Object.CreationTimeUnix + schemaMap[filters.InternalPropLastUpdateTimeUnix] = object.Object.LastUpdateTimeUnix + } + + props, err := inverted.NewAnalyzer(s.isFallbackToSearchable).Object(schemaMap, c.Properties, object.ID()) + return props, nilProps, err +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_inverted_lsm.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_inverted_lsm.go new file mode 100644 index 0000000000000000000000000000000000000000..9ff5a6a4f23a6ca3eaf6054ae82809cf11b46429 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_inverted_lsm.go @@ -0,0 +1,382 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "encoding/binary" + "fmt" + "math" + "sync/atomic" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/errorcompounder" +) + +func (s *Shard) extendInvertedIndicesLSM(props []inverted.Property, nilProps []inverted.NilProperty, + docID uint64, +) error { + for _, prop := range props { + if err := s.addToPropertyValueIndex(docID, prop); err != nil { + return err + } + + // add non-nil properties to the null-state inverted index, but skip internal properties (__meta_count, _id etc) + if isMetaCountProperty(prop) || isInternalProperty(prop) { + continue + } + + // properties where defining a length does not make sense (floats etc.) have a negative entry as length + if s.index.invertedIndexConfig.IndexPropertyLength && prop.Length >= 0 { + if err := s.addToPropertyLengthIndex(prop.Name, docID, prop.Length); err != nil { + return errors.Wrap(err, "add indexed property length") + } + } + + if s.index.invertedIndexConfig.IndexNullState { + if err := s.addToPropertyNullIndex(prop.Name, docID, prop.Length == 0); err != nil { + return errors.Wrap(err, "add indexed null state") + } + } + } + + // add nil properties to the nullstate and property length inverted index + for _, nilProperty := range nilProps { + if s.index.invertedIndexConfig.IndexPropertyLength && nilProperty.AddToPropertyLength { + if err := s.addToPropertyLengthIndex(nilProperty.Name, docID, 0); err != nil { + return errors.Wrap(err, "add indexed property length") + } + } + + if s.index.invertedIndexConfig.IndexNullState { + if err := s.addToPropertyNullIndex(nilProperty.Name, docID, true); err != nil { + return errors.Wrap(err, "add indexed null state") + } + } + } + + return nil +} + +func (s *Shard) addToPropertyValueIndex(docID uint64, 
property inverted.Property) error { + if property.HasFilterableIndex { + bucketValue := s.store.Bucket(helpers.BucketFromPropNameLSM(property.Name)) + if bucketValue == nil { + return errors.Errorf("no bucket for prop '%s' found", property.Name) + } + + for _, item := range property.Items { + key := item.Data + if err := s.addToPropertySetBucket(bucketValue, docID, key); err != nil { + return errors.Wrapf(err, "failed adding to prop '%s' value bucket", property.Name) + } + } + } + + if property.HasSearchableIndex { + bucketValue := s.store.Bucket(helpers.BucketSearchableFromPropNameLSM(property.Name)) + if bucketValue == nil { + return errors.Errorf("no bucket searchable for prop '%s' found", property.Name) + } + propLen := float32(0) + + if bucketValue.Strategy() == lsmkv.StrategyInverted { + // Iterating over all items to calculate the property length, which is the sum of all term frequencies + for _, item := range property.Items { + propLen += item.TermFrequency + } + } else { + // This is the old way of calculating the property length, which counts terms that show up multiple times only once, + // which is not standard for BM25 + propLen = float32(len(property.Items)) + } + for _, item := range property.Items { + key := item.Data + pair := s.pairPropertyWithFrequency(docID, item.TermFrequency, propLen) + if err := s.addToPropertyMapBucket(bucketValue, pair, key); err != nil { + return errors.Wrapf(err, "failed adding to prop '%s' value bucket", property.Name) + } + } + } + + if property.HasRangeableIndex { + bucketValue := s.store.Bucket(helpers.BucketRangeableFromPropNameLSM(property.Name)) + if bucketValue == nil { + return errors.Errorf("no bucket rangeable for prop '%s' found", property.Name) + } + + for _, item := range property.Items { + key := item.Data + if err := s.addToPropertyRangeBucket(bucketValue, docID, key); err != nil { + return errors.Wrapf(err, "failed adding to prop '%s' value bucket", property.Name) + } + } + } + + if err := 
s.onAddToPropertyValueIndex(docID, &property); err != nil { + return err + } + + return nil +} + +func (s *Shard) addToPropertyLengthIndex(propName string, docID uint64, length int) error { + bucketLength := s.store.Bucket(helpers.BucketFromPropNameLengthLSM(propName)) + if bucketLength == nil { + return errors.Errorf("no bucket for prop '%s' length found", propName) + } + + key, err := bucketKeyPropertyLength(length) + if err != nil { + return errors.Wrapf(err, "failed creating key for prop '%s' length", propName) + } + if err := s.addToPropertySetBucket(bucketLength, docID, key); err != nil { + return errors.Wrapf(err, "failed adding to prop '%s' length bucket", propName) + } + return nil +} + +func (s *Shard) addToPropertyNullIndex(propName string, docID uint64, isNull bool) error { + bucketNull := s.store.Bucket(helpers.BucketFromPropNameNullLSM(propName)) + if bucketNull == nil { + return errors.Errorf("no bucket for prop '%s' null found", propName) + } + + key, err := bucketKeyPropertyNull(isNull) + if err != nil { + return errors.Wrapf(err, "failed creating key for prop '%s' null", propName) + } + if err := s.addToPropertySetBucket(bucketNull, docID, key); err != nil { + return errors.Wrapf(err, "failed adding to prop '%s' null bucket", propName) + } + return nil +} + +func (s *Shard) pairPropertyWithFrequency(docID uint64, freq, propLen float32) lsmkv.MapPair { + // 8 bytes for doc id, 4 bytes for frequency, 4 bytes for prop term length + buf := make([]byte, 16) + + // Shard Index version 2 requires BigEndian for sorting, if the shard was + // built prior assume it uses LittleEndian + if s.versioner.Version() < 2 { + binary.LittleEndian.PutUint64(buf[0:8], docID) + } else { + binary.BigEndian.PutUint64(buf[0:8], docID) + } + binary.LittleEndian.PutUint32(buf[8:12], math.Float32bits(freq)) + binary.LittleEndian.PutUint32(buf[12:16], math.Float32bits(propLen)) + + return lsmkv.MapPair{ + Key: buf[:8], + Value: buf[8:], + } +} + +func (s *Shard) 
addToPropertyMapBucket(bucket *lsmkv.Bucket, pair lsmkv.MapPair, key []byte) error { + lsmkv.MustBeExpectedStrategy(bucket.Strategy(), lsmkv.StrategyMapCollection, lsmkv.StrategyInverted) + + return bucket.MapSet(key, pair) +} + +func (s *Shard) addToPropertySetBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error { + lsmkv.MustBeExpectedStrategy(bucket.Strategy(), lsmkv.StrategySetCollection, lsmkv.StrategyRoaringSet) + + if bucket.Strategy() == lsmkv.StrategySetCollection { + docIDBytes := make([]byte, 8) + binary.LittleEndian.PutUint64(docIDBytes, docID) + + return bucket.SetAdd(key, [][]byte{docIDBytes}) + } + + return bucket.RoaringSetAddOne(key, docID) +} + +func (s *Shard) addToPropertyRangeBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error { + lsmkv.MustBeExpectedStrategy(bucket.Strategy(), lsmkv.StrategyRoaringSetRange) + + if len(key) != 8 { + return fmt.Errorf("shard: invalid value length %d, should be 8 bytes", len(key)) + } + + return bucket.RoaringSetRangeAdd(binary.BigEndian.Uint64(key), docID) +} + +func (s *Shard) batchExtendInvertedIndexItemsLSMNoFrequency(b *lsmkv.Bucket, + item inverted.MergeItem, +) error { + if b.Strategy() != lsmkv.StrategySetCollection && b.Strategy() != lsmkv.StrategyRoaringSet { + panic("prop has no frequency, but bucket does not have 'Set' nor 'RoaringSet' strategy") + } + + if b.Strategy() == lsmkv.StrategyRoaringSet { + docIDs := make([]uint64, len(item.DocIDs)) + for i, idTuple := range item.DocIDs { + docIDs[i] = idTuple.DocID + } + return b.RoaringSetAddList(item.Data, docIDs) + } + + docIDs := make([][]byte, len(item.DocIDs)) + for i, idTuple := range item.DocIDs { + docIDs[i] = make([]byte, 8) + binary.LittleEndian.PutUint64(docIDs[i], idTuple.DocID) + } + + return b.SetAdd(item.Data, docIDs) +} + +func (s *Shard) SetPropertyLengths(props []inverted.Property) error { + for _, prop := range props { + if !prop.HasSearchableIndex { + continue + } + + if err := 
s.GetPropertyLengthTracker().TrackProperty(prop.Name, float32(len(prop.Items))); err != nil { + return err + } + + } + + return nil +} + +func (s *Shard) subtractPropLengths(props []inverted.Property) error { + for _, prop := range props { + if !prop.HasSearchableIndex { + continue + } + + if err := s.GetPropertyLengthTracker().UnTrackProperty(prop.Name, float32(len(prop.Items))); err != nil { + return err + } + + } + + return nil +} + +func (s *Shard) extendDimensionTrackerLSM( + dimLength int, docID uint64, targetVector string, +) error { + return s.addToDimensionBucket(dimLength, docID, targetVector, false) +} + +var uniqueCounter atomic.Uint64 + +// GenerateUniqueString generates a random string of the specified length +func GenerateUniqueString(length int) (string, error) { + uniqueCounter.Add(1) + return fmt.Sprintf("%v", uniqueCounter.Load()), nil +} + +// Empty the dimensions bucket, quickly and efficiently +func (s *Shard) resetDimensionsLSM(ctx context.Context) error { + // Load the current one, or an empty one if it doesn't exist + err := s.store.CreateOrLoadBucket(ctx, + helpers.DimensionsBucketLSM, + s.memtableDirtyConfig(), + lsmkv.WithStrategy(lsmkv.StrategyMapCollection), + lsmkv.WithPread(s.index.Config.AvoidMMap), + lsmkv.WithAllocChecker(s.index.allocChecker), + lsmkv.WithMaxSegmentSize(s.index.Config.MaxSegmentSize), + lsmkv.WithMinMMapSize(s.index.Config.MinMMapSize), + lsmkv.WithMinWalThreshold(s.index.Config.MaxReuseWalSize), + lsmkv.WithWriteSegmentInfoIntoFileName(s.index.Config.SegmentInfoIntoFileNameEnabled), + lsmkv.WithWriteMetadata(s.index.Config.WriteMetadataFilesEnabled), + s.segmentCleanupConfig(), + ) + if err != nil { + return fmt.Errorf("create dimensions bucket: %w", err) + } + + // Fetch the actual bucket + b := s.store.Bucket(helpers.DimensionsBucketLSM) + if b == nil { + return errors.Errorf("resetDimensionsLSM: no bucket dimensions") + } + + // Create random bucket name + name, err := GenerateUniqueString(32) + if err != nil 
{ + return errors.Wrap(err, "generate unique bucket name") + } + + // Create a new bucket with the unique name + err = s.createDimensionsBucket(context.Background(), name) + if err != nil { + return errors.Wrap(err, "create temporary dimensions bucket") + } + + // Replace the old bucket with the new one + err = s.store.ReplaceBuckets(context.Background(), helpers.DimensionsBucketLSM, name) + if err != nil { + return errors.Wrap(err, "replace dimensions bucket") + } + + return nil +} + +// Key (target vector name and dimensionality) | Value Doc IDs +// targetVector,128 | 1,2,4,5,17 +// targetVector,128 | 1,2,4,5,17, Tombstone 4, +func (s *Shard) removeDimensionsLSM( + dimLength int, docID uint64, targetVector string, +) error { + return s.addToDimensionBucket(dimLength, docID, targetVector, true) +} + +func (s *Shard) addToDimensionBucket( + dimLength int, docID uint64, vecName string, tombstone bool, +) error { + err := s.addDimensionsProperty(context.Background()) + if err != nil { + return errors.Wrap(err, "add dimensions property") + } + b := s.store.Bucket(helpers.DimensionsBucketLSM) + if b == nil { + return errors.Errorf("add dimension bucket: no bucket dimensions") + } + + tv := []byte(vecName) + // 8 bytes for doc id (map key) + // 4 bytes for dim count (row key) + // len(vecName) bytes for vector name (prefix of row key) + buf := make([]byte, 12+len(tv)) + binary.LittleEndian.PutUint64(buf[:8], docID) + binary.LittleEndian.PutUint32(buf[8+len(tv):], uint32(dimLength)) + copy(buf[8:], tv) + + return b.MapSet(buf[8:], lsmkv.MapPair{ + Key: buf[:8], + Value: []byte{}, + Tombstone: tombstone, + }) +} + +func (s *Shard) onAddToPropertyValueIndex(docID uint64, property *inverted.Property) error { + ec := errorcompounder.New() + for i := range s.callbacksAddToPropertyValueIndex { + ec.Add(s.callbacksAddToPropertyValueIndex[i](s, docID, property)) + } + return ec.ToError() +} + +func isMetaCountProperty(property inverted.Property) bool { + return 
len(property.Name) > 12 && property.Name[len(property.Name)-12:] == "__meta_count" +} + +func isInternalProperty(property inverted.Property) bool { + return property.Name[0] == '_' +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_inverted_lsm_delete.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_inverted_lsm_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..074c68b5971849172ff88986d10f9d7ca542bf58 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_inverted_lsm_delete.go @@ -0,0 +1,190 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "encoding/binary" + "fmt" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/entities/errorcompounder" +) + +func (s *Shard) deleteFromInvertedIndicesLSM(props []inverted.Property, nilProps []inverted.NilProperty, + docID uint64, +) error { + for _, prop := range props { + if prop.HasFilterableIndex { + bucket := s.store.Bucket(helpers.BucketFromPropNameLSM(prop.Name)) + if bucket == nil { + return fmt.Errorf("no bucket for prop '%s' found", prop.Name) + } + + for _, item := range prop.Items { + if err := s.deleteFromPropertySetBucket(bucket, docID, item.Data); err != nil { + return errors.Wrapf(err, "delete item '%s' from index", + string(item.Data)) + } + } + } + + if prop.HasSearchableIndex { + bucket := s.store.Bucket(helpers.BucketSearchableFromPropNameLSM(prop.Name)) + if bucket == nil { + return fmt.Errorf("no bucket searchable for prop '%s' found", prop.Name) + } 
+ + for _, item := range prop.Items { + if err := s.deleteInvertedIndexItemWithFrequencyLSM(bucket, item, + docID); err != nil { + return errors.Wrapf(err, "delete item '%s' from index", + string(item.Data)) + } + } + } + + if prop.HasRangeableIndex { + bucket := s.store.Bucket(helpers.BucketRangeableFromPropNameLSM(prop.Name)) + if bucket == nil { + return fmt.Errorf("no bucket rangeable for prop %q found", prop.Name) + } + for _, item := range prop.Items { + if err := s.deleteFromPropertyRangeBucket(bucket, docID, item.Data); err != nil { + return errors.Wrapf(err, "delete item '%s' from index", + string(item.Data)) + } + } + } + + if err := s.onDeleteFromPropertyValueIndex(docID, &prop); err != nil { + return err + } + + // add non-nil properties to the null-state inverted index, but skip internal properties (__meta_count, _id etc) + if isMetaCountProperty(prop) || isInternalProperty(prop) { + continue + } + + // properties where defining a length does not make sense (floats etc.) have a negative entry as length + if s.index.invertedIndexConfig.IndexPropertyLength && prop.Length >= 0 { + if err := s.deleteFromPropertyLengthIndex(prop.Name, docID, prop.Length); err != nil { + return errors.Wrap(err, "add indexed property length") + } + } + + if s.index.invertedIndexConfig.IndexNullState { + if err := s.deleteFromPropertyNullIndex(prop.Name, docID, prop.Length == 0); err != nil { + return errors.Wrap(err, "add indexed null state") + } + } + } + + // remove nil properties from the nullstate and property length inverted index + for _, nilProperty := range nilProps { + if s.index.invertedIndexConfig.IndexPropertyLength && nilProperty.AddToPropertyLength { + if err := s.deleteFromPropertyLengthIndex(nilProperty.Name, docID, 0); err != nil { + return errors.Wrap(err, "add indexed property length") + } + } + + if s.index.invertedIndexConfig.IndexNullState { + if err := s.deleteFromPropertyNullIndex(nilProperty.Name, docID, true); err != nil { + return errors.Wrap(err, 
"add indexed null state") + } + } + } + + return nil +} + +func (s *Shard) deleteInvertedIndexItemWithFrequencyLSM(bucket *lsmkv.Bucket, + item inverted.Countable, docID uint64, +) error { + lsmkv.MustBeExpectedStrategy(bucket.Strategy(), lsmkv.StrategyMapCollection, lsmkv.StrategyInverted) + + docIDBytes := make([]byte, 8) + // Shard Index version 2 requires BigEndian for sorting, if the shard was + // built prior assume it uses LittleEndian + if s.versioner.Version() < 2 { + binary.LittleEndian.PutUint64(docIDBytes, docID) + } else { + binary.BigEndian.PutUint64(docIDBytes, docID) + } + + return bucket.MapDeleteKey(item.Data, docIDBytes) +} + +func (s *Shard) deleteFromPropertyLengthIndex(propName string, docID uint64, length int) error { + bucketLength := s.store.Bucket(helpers.BucketFromPropNameLengthLSM(propName)) + if bucketLength == nil { + return errors.Errorf("no bucket for prop '%s' length found", propName) + } + + key, err := bucketKeyPropertyLength(length) + if err != nil { + return errors.Wrapf(err, "failed creating key for prop '%s' length", propName) + } + if err := s.deleteFromPropertySetBucket(bucketLength, docID, key); err != nil { + return errors.Wrapf(err, "failed adding to prop '%s' length bucket", propName) + } + return nil +} + +func (s *Shard) deleteFromPropertyNullIndex(propName string, docID uint64, isNull bool) error { + bucketNull := s.store.Bucket(helpers.BucketFromPropNameNullLSM(propName)) + if bucketNull == nil { + return errors.Errorf("no bucket for prop '%s' null found", propName) + } + + key, err := bucketKeyPropertyNull(isNull) + if err != nil { + return errors.Wrapf(err, "failed creating key for prop '%s' null", propName) + } + if err := s.deleteFromPropertySetBucket(bucketNull, docID, key); err != nil { + return errors.Wrapf(err, "failed adding to prop '%s' null bucket", propName) + } + return nil +} + +func (s *Shard) deleteFromPropertySetBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error { + 
lsmkv.MustBeExpectedStrategy(bucket.Strategy(), lsmkv.StrategySetCollection, lsmkv.StrategyRoaringSet) + + if bucket.Strategy() == lsmkv.StrategySetCollection { + docIDBytes := make([]byte, 8) + binary.LittleEndian.PutUint64(docIDBytes, docID) + + return bucket.SetDeleteSingle(key, docIDBytes) + } + + return bucket.RoaringSetRemoveOne(key, docID) +} + +func (s *Shard) deleteFromPropertyRangeBucket(bucket *lsmkv.Bucket, docID uint64, key []byte) error { + lsmkv.MustBeExpectedStrategy(bucket.Strategy(), lsmkv.StrategyRoaringSetRange) + + if len(key) != 8 { + return fmt.Errorf("shard: invalid value length %d, should be 8 bytes", len(key)) + } + + return bucket.RoaringSetRangeRemove(binary.BigEndian.Uint64(key), docID) +} + +func (s *Shard) onDeleteFromPropertyValueIndex(docID uint64, property *inverted.Property) error { + ec := errorcompounder.New() + for i := range s.callbacksRemoveFromPropertyValueIndex { + ec.Add(s.callbacksRemoveFromPropertyValueIndex[i](s, docID, property)) + } + return ec.ToError() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_merge.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_merge.go new file mode 100644 index 0000000000000000000000000000000000000000..7b253ee888a8f6daef3e53d7be044e7e90b692ab --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_merge.go @@ -0,0 +1,370 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/objects" +) + +var errObjectNotFound = errors.New("object not found") + +func (s *Shard) MergeObject(ctx context.Context, merge objects.MergeDocument) error { + s.activityTrackerWrite.Add(1) + if err := s.isReadOnly(); err != nil { + return err + } + + for targetVector, vector := range merge.Vectors { + // validation needs to happen before any changes are done. Otherwise, insertion is aborted somewhere in-between. + vectorIndex, ok := s.GetVectorIndex(targetVector) + if !ok { + return errors.Errorf("validate vector index for update of %v for target vector %s: vector index not found", merge.ID, targetVector) + } + switch v := vector.(type) { + case []float32: + err := vectorIndex.ValidateBeforeInsert(v) + if err != nil { + return errors.Wrapf(err, "validate vector index for update of %v for target vector %s", merge.ID, targetVector) + } + case [][]float32: + err := vectorIndex.(VectorIndexMulti).ValidateMultiBeforeInsert(v) + if err != nil { + return errors.Wrapf(err, "validate multi vector index for update of %v for target vector %s", merge.ID, targetVector) + } + default: + return errors.Errorf("validate vector index for update of %v for target vector %s: unrecongnized vector type: %T", merge.ID, targetVector, vector) + } + } + + if len(merge.Vector) > 0 { + vectorIndex, ok := s.GetVectorIndex("") + if !ok { + return errors.Errorf("validate vector index for update of %v for vector: vector index not found", merge.ID) + } + + // validation needs to happen before any changes are done. Otherwise, insertion is aborted somewhere in-between. 
+ err := vectorIndex.ValidateBeforeInsert(merge.Vector) + if err != nil { + return errors.Wrapf(err, "validate vector index for update of %v", merge.ID) + } + } + + idBytes, err := uuid.MustParse(merge.ID.String()).MarshalBinary() + if err != nil { + return err + } + + return s.merge(ctx, idBytes, merge) +} + +func (s *Shard) merge(ctx context.Context, idBytes []byte, doc objects.MergeDocument) error { + obj, status, err := s.mergeObjectInStorage(doc, idBytes) + if err != nil { + return err + } + + // object was not changed, no further updates are required + // https://github.com/weaviate/weaviate/issues/3949 + if status.skipUpsert { + return nil + } + + for targetVector, vector := range obj.Vectors { + if err = s.updateVectorIndex(ctx, vector, status, targetVector); err != nil { + return errors.Wrapf(err, "update vector index for target vector %s", targetVector) + } + } + for targetVector, vector := range obj.MultiVectors { + if err = s.updateMultiVectorIndex(ctx, vector, status, targetVector); err != nil { + return errors.Wrapf(err, "update multi vector index for target vector %s", targetVector) + } + } + + if s.hasLegacyVectorIndex() { + if err = s.updateVectorIndex(ctx, obj.Vector, status, ""); err != nil { + return errors.Wrap(err, "update vector index") + } + } + + if err := s.updatePropertySpecificIndices(ctx, obj, status); err != nil { + return errors.Wrap(err, "update property-specific indices") + } + + if err := s.store.WriteWALs(); err != nil { + return errors.Wrap(err, "flush all buffered WALs") + } + + return nil +} + +func (s *Shard) mergeObjectInStorage(merge objects.MergeDocument, + idBytes []byte, +) (*storobj.Object, objectInsertStatus, error) { + bucket := s.store.Bucket(helpers.ObjectsBucketLSM) + + var prevObj, obj *storobj.Object + var status objectInsertStatus + + // see comment in shard_write_put.go::putObjectLSM + lock := &s.docIdLock[s.uuidToIdLockPoolId(idBytes)] + + // wrapped in function to handle lock/unlock + if err := func() error { 
+ s.asyncReplicationRWMux.RLock() + defer s.asyncReplicationRWMux.RUnlock() + + err := s.waitForMinimalHashTreeInitialization(context.Background()) + if err != nil { + return err + } + + lock.Lock() + defer lock.Unlock() + + prevObj, err = fetchObject(bucket, idBytes) + if err != nil { + return errors.Wrap(err, "get bucket") + } + + if prevObj == nil { + return errObjectNotFound + } + + obj, _, err = s.mergeObjectData(prevObj, merge) + if err != nil { + return errors.Wrap(err, "merge object data") + } + + status, err = s.determineInsertStatus(prevObj, obj) + if err != nil { + return errors.Wrap(err, "check insert/update status") + } + + obj.DocID = status.docID + if status.skipUpsert { + return nil + } + + objBytes, err := obj.MarshalBinary() + if err != nil { + return errors.Wrapf(err, "marshal object %s to binary", obj.ID()) + } + + if err := s.upsertObjectDataLSM(bucket, idBytes, objBytes, status.docID); err != nil { + return errors.Wrap(err, "upsert object data") + } + + if err := s.mayUpsertObjectHashTree(obj, idBytes, status); err != nil { + return errors.Wrap(err, "object merge in hashtree") + } + + return nil + }(); err != nil { + return nil, objectInsertStatus{}, err + } else if status.skipUpsert { + return obj, status, nil + } + + if err := s.updateInvertedIndexLSM(obj, status, prevObj); err != nil { + return nil, status, errors.Wrap(err, "update inverted indices") + } + + return obj, status, nil +} + +// mutableMergeObjectLSM is a special version of mergeObjectInTx where no doc +// id increases will be made, but instead the old doc ID will be re-used. This +// is only possible if the following two conditions are met: +// +// 1. We only add to the inverted index, but there is nothing which requires +// cleaning up. 
Example `name: "John"` is updated to `name: "John Doe"`, +// this is valid because we only add new entry for "Doe", but do not alter +// the existing entry for "John" +// An invalid update would be `name:"John"` is updated to `name:"Diane"`, +// this would require a cleanup for the existing link from "John" to this +// doc id, which is not possible. The only way to clean up is to increase +// the doc id and delete all entries for the old one +// +// 2. The vector position is not altered. Vector Indices cannot be mutated +// therefore a vector update would not be reflected +// +// The above makes this a perfect candidate for a batch reference update as +// this alters neither the vector position, nor does it remove anything from +// the inverted index +func (s *Shard) mutableMergeObjectLSM(merge objects.MergeDocument, + idBytes []byte, +) (mutableMergeResult, error) { + bucket := s.store.Bucket(helpers.ObjectsBucketLSM) + out := mutableMergeResult{} + + s.asyncReplicationRWMux.RLock() + defer s.asyncReplicationRWMux.RUnlock() + + err := s.waitForMinimalHashTreeInitialization(context.Background()) + if err != nil { + return out, err + } + + // see comment in shard_write_put.go::putObjectLSM + lock := &s.docIdLock[s.uuidToIdLockPoolId(idBytes)] + lock.Lock() + defer lock.Unlock() + + prevObj, err := fetchObject(bucket, idBytes) + if err != nil { + return out, err + } + + if prevObj == nil { + uid := uuid.UUID{} + uid.UnmarshalBinary(idBytes) + return out, fmt.Errorf("object with id %s not found", uid) + } + + obj, notEmptyPrevObj, err := s.mergeObjectData(prevObj, merge) + if err != nil { + return out, errors.Wrap(err, "merge object data") + } + + out.next = obj + out.previous = notEmptyPrevObj + + status, err := s.determineMutableInsertStatus(prevObj, obj) + if err != nil { + return out, errors.Wrap(err, "check insert/update status") + } + out.status = status + + obj.DocID = status.docID // is not changed + objBytes, err := obj.MarshalBinary() + if err != nil { + 
return out, errors.Wrapf(err, "marshal object %s to binary", obj.ID()) + } + + if err := s.upsertObjectDataLSM(bucket, idBytes, objBytes, status.docID); err != nil { + return out, errors.Wrap(err, "upsert object data") + } + + if err := s.mayUpsertObjectHashTree(obj, idBytes, status); err != nil { + return out, fmt.Errorf("object merge in hashtree: %w", err) + } + + // do not updated inverted index, since this requires delta analysis, which + // must be done by the caller! + + return out, nil +} + +type mutableMergeResult struct { + next *storobj.Object + previous *storobj.Object + status objectInsertStatus +} + +func (s *Shard) mergeObjectData(prevObj *storobj.Object, + merge objects.MergeDocument, +) (*storobj.Object, *storobj.Object, error) { + if prevObj == nil { + s.index.logger.WithField("id", merge.ID).Error("resurrecting a zombie object") + // DocID must be overwritten after status check, simply set to initial + // value + prevObj = storobj.New(0) + prevObj.SetClass(merge.Class) + prevObj.SetID(merge.ID) + } + + return mergeProps(prevObj, merge), prevObj, nil +} + +func mergeProps(previous *storobj.Object, + merge objects.MergeDocument, +) *storobj.Object { + next := previous.DeepCopyDangerous() + properties, ok := next.Properties().(map[string]interface{}) + if !ok || properties == nil { + properties = map[string]interface{}{} + } + + // remove properties from object that have been set to nil + for _, propToDelete := range merge.PropertiesToDelete { + delete(properties, propToDelete) + } + + for propName, value := range merge.PrimitiveSchema { + // for primitive props, we simply need to overwrite + properties[propName] = value + } + + for _, ref := range merge.References { + propName := ref.From.Property.String() + prop := properties[propName] + propParsed, ok := prop.(models.MultipleRef) + if !ok { + propParsed = models.MultipleRef{} + } + propParsed = append(propParsed, ref.To.SingleRef()) + properties[propName] = propParsed + } + + if merge.Vector == 
nil { + next.Vector = previous.Vector + } else { + next.Vector = merge.Vector + } + + if len(merge.Vectors) == 0 { + next.Vectors = previous.Vectors + next.MultiVectors = previous.MultiVectors + } else { + next.Vectors = vectorsAsMap(merge.Vectors) + next.MultiVectors = multiVectorsAsMap(merge.Vectors) + } + + next.Object.LastUpdateTimeUnix = merge.UpdateTime + next.SetProperties(properties) + + return next +} + +func vectorsAsMap(in models.Vectors) map[string][]float32 { + if len(in) > 0 { + out := make(map[string][]float32) + for targetVector, vector := range in { + if v, ok := vector.([]float32); ok { + out[targetVector] = v + } + } + return out + } + return nil +} + +func multiVectorsAsMap(in models.Vectors) map[string][][]float32 { + if len(in) > 0 { + out := make(map[string][][]float32) + for targetVector, vector := range in { + if v, ok := vector.([][]float32); ok { + out[targetVector] = v + } + } + return out + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_put.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_put.go new file mode 100644 index 0000000000000000000000000000000000000000..914117c3b6c4945348a9bf16e0bff7939bfda946 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/shard_write_put.go @@ -0,0 +1,785 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "reflect" + "time" + + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/inverted" + "github.com/weaviate/weaviate/adapters/repos/db/lsmkv" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/storobj" +) + +func (s *Shard) PutObject(ctx context.Context, object *storobj.Object) error { + s.activityTrackerWrite.Add(1) + if err := s.isReadOnly(); err != nil { + return err + } + uid, err := uuid.MustParse(object.ID().String()).MarshalBinary() + if err != nil { + return err + } + return s.putOne(ctx, uid, object) +} + +func (s *Shard) putOne(ctx context.Context, uuid []byte, object *storobj.Object) error { + status, err := s.putObjectLSM(object, uuid) + if err != nil { + return errors.Wrap(err, "store object in LSM store") + } + + // object was not changed, no further updates are required + // https://github.com/weaviate/weaviate/issues/3949 + if status.skipUpsert { + return nil + } + + for targetVector, vector := range object.Vectors { + if err := s.updateVectorIndex(ctx, vector, status, targetVector); err != nil { + return errors.Wrapf(err, "update vector index for target vector %s", targetVector) + } + } + for targetVector, multiVector := range object.MultiVectors { + if err := s.updateMultiVectorIndex(ctx, multiVector, status, targetVector); err != nil { + return errors.Wrapf(err, "update multi vector index for target vector %s", targetVector) + } + } + + if s.hasLegacyVectorIndex() { + if err := s.updateVectorIndex(ctx, object.Vector, status, ""); err != nil { + return errors.Wrap(err, "update vector index") + } + } + + if err := s.updatePropertySpecificIndices(ctx, object, status); err != nil { 
+ return errors.Wrap(err, "update property-specific indices") + } + + if err := s.store.WriteWALs(); err != nil { + return errors.Wrap(err, "flush all buffered WALs") + } + + if err := s.GetPropertyLengthTracker().Flush(); err != nil { + return errors.Wrap(err, "flush prop length tracker to disk") + } + + return nil +} + +// as the name implies this method only performs the insertions, but completely +// ignores any deletes. It thus assumes that the caller has already taken care +// of all the deletes in another way +func (s *Shard) updateVectorIndexIgnoreDelete(ctx context.Context, vector []float32, + status objectInsertStatus, +) error { + // vector was not changed, object was not changed or changed without changing vector + // https://github.com/weaviate/weaviate/issues/3948 + // https://github.com/weaviate/weaviate/issues/3949 + if status.docIDPreserved || status.skipUpsert { + return nil + } + + // vector is now optional as of + // https://github.com/weaviate/weaviate/issues/1800 + if len(vector) == 0 { + return nil + } + + if queue, ok := s.GetVectorIndexQueue(""); ok { + if err := queue.Insert(ctx, &common.Vector[[]float32]{ID: status.docID, Vector: vector}); err != nil { + return errors.Wrapf(err, "insert doc id %d to vector index", status.docID) + } + } + + return nil +} + +// as the name implies this method only performs the insertions, but completely +// ignores any deletes. 
It thus assumes that the caller has already taken care +// of all the deletes in another way +func (s *Shard) updateVectorIndexesIgnoreDelete(ctx context.Context, + vectors map[string][]float32, status objectInsertStatus, +) error { + // vector was not changed, object was not changed or changed without changing vector + // https://github.com/weaviate/weaviate/issues/3948 + // https://github.com/weaviate/weaviate/issues/3949 + if status.docIDPreserved || status.skipUpsert { + return nil + } + + // vector is now optional as of + // https://github.com/weaviate/weaviate/issues/1800 + if len(vectors) == 0 { + return nil + } + + for targetVector, vector := range vectors { + if q, ok := s.GetVectorIndexQueue(targetVector); ok { + if err := q.Insert(ctx, &common.Vector[[]float32]{ID: status.docID, Vector: vector}); err != nil { + return errors.Wrapf(err, "insert doc id %d to vector index for target vector %s", status.docID, targetVector) + } + } + } + + return nil +} + +// this method implements the same logic as updateVectorIndexesIgnoreDelete but +// supports multi vectors +func (s *Shard) updateMultiVectorIndexesIgnoreDelete(ctx context.Context, + multiVectors map[string][][]float32, status objectInsertStatus, +) error { + // vector was not changed, object was not changed or changed without changing vector + // https://github.com/weaviate/weaviate/issues/3948 + // https://github.com/weaviate/weaviate/issues/3949 + if status.docIDPreserved || status.skipUpsert { + return nil + } + + // vector is now optional as of + // https://github.com/weaviate/weaviate/issues/1800 + if len(multiVectors) == 0 { + return nil + } + + for targetVector, vector := range multiVectors { + if q, ok := s.GetVectorIndexQueue(targetVector); ok { + if err := q.Insert(ctx, &common.Vector[[][]float32]{ID: status.docID, Vector: vector}); err != nil { + return errors.Wrapf(err, "insert doc id %d to multi vector index for target vector %s", status.docID, targetVector) + } + } + } + + return nil +} + 
+func (s *Shard) updateVectorIndex(ctx context.Context, vector []float32, + status objectInsertStatus, targetVector string, +) error { + return updateVectorInVectorIndex(ctx, s, targetVector, vector, status) +} + +func (s *Shard) updateMultiVectorIndex(ctx context.Context, vector [][]float32, + status objectInsertStatus, targetVector string, +) error { + return updateVectorInVectorIndex(ctx, s, targetVector, vector, status) +} + +func fetchObject(bucket *lsmkv.Bucket, idBytes []byte) (*storobj.Object, error) { + objBytes, err := bucket.Get(idBytes) + if err != nil { + return nil, err + } + if len(objBytes) == 0 { + return nil, nil + } + + obj, err := storobj.FromBinary(objBytes) + if err != nil { + return nil, err + } + + return obj, nil +} + +func (s *Shard) putObjectLSM(obj *storobj.Object, idBytes []byte, +) (status objectInsertStatus, err error) { + before := time.Now() + defer s.metrics.PutObject(before) + + for targetVector, vector := range obj.Vectors { + if vectorIndex, ok := s.GetVectorIndex(targetVector); ok { + if err := vectorIndex.ValidateBeforeInsert(vector); err != nil { + return status, errors.Wrapf(err, "Validate vector index %s for target vector %s", targetVector, obj.ID()) + } + } + } + + for targetVector, vector := range obj.MultiVectors { + if vectorIndex, ok := s.GetVectorIndex(targetVector); ok { + if err := vectorIndex.(VectorIndexMulti).ValidateMultiBeforeInsert(vector); err != nil { + return status, errors.Wrapf(err, "Validate vector index %s for target multi vector %s", targetVector, obj.ID()) + } + } + } + + if len(obj.Vector) > 0 && s.hasLegacyVectorIndex() { + // validation needs to happen before any changes are done. Otherwise, insertion is aborted somewhere in-between. 
+ if index, ok := s.GetVectorIndex(""); ok { + if err = index.ValidateBeforeInsert(obj.Vector); err != nil { + return status, errors.Wrapf(err, "Validate vector index for %s", obj.ID()) + } + } + } + + bucket := s.store.Bucket(helpers.ObjectsBucketLSM) + var prevObj *storobj.Object + + // First the object bucket is checked if an object with the same uuid is alreadypresent, + // to determine if it is insert or an update. + // Afterwards the bucket is updated. To avoid races, only one goroutine can do this at once. + lock := &s.docIdLock[s.uuidToIdLockPoolId(idBytes)] + + // wrapped in function to handle lock/unlock + if err := func() error { + s.asyncReplicationRWMux.RLock() + defer s.asyncReplicationRWMux.RUnlock() + + err := s.waitForMinimalHashTreeInitialization(context.Background()) + if err != nil { + return err + } + + lock.Lock() + defer lock.Unlock() + + before = time.Now() + prevObj, err = fetchObject(bucket, idBytes) + if err != nil { + return err + } + + status, err = s.determineInsertStatus(prevObj, obj) + if err != nil { + return err + } + s.metrics.PutObjectDetermineStatus(before) + + obj.DocID = status.docID + if status.skipUpsert { + return nil + } + + objBinary, err := obj.MarshalBinary() + if err != nil { + return errors.Wrapf(err, "marshal object %s to binary", obj.ID()) + } + + before = time.Now() + if err := s.upsertObjectDataLSM(bucket, idBytes, objBinary, status.docID); err != nil { + return errors.Wrap(err, "upsert object data") + } + s.metrics.PutObjectUpsertObject(before) + + if err := s.mayUpsertObjectHashTree(obj, idBytes, status); err != nil { + return errors.Wrap(err, "object creation in hashtree") + } + + return nil + }(); err != nil { + return objectInsertStatus{}, err + } else if status.skipUpsert { + return status, nil + } + + before = time.Now() + if err := s.updateInvertedIndexLSM(obj, status, prevObj); err != nil { + return objectInsertStatus{}, errors.Wrap(err, "update inverted indices") + } + 
s.metrics.PutObjectUpdateInverted(before) + + return status, nil +} + +func (s *Shard) mayUpsertObjectHashTree(object *storobj.Object, uuidBytes []byte, status objectInsertStatus) error { + if s.hashtree == nil { + return nil + } + + return s.upsertObjectHashTree(object, uuidBytes, status) +} + +func (s *Shard) upsertObjectHashTree(object *storobj.Object, uuidBytes []byte, status objectInsertStatus) error { + if len(uuidBytes) != 16 { + return fmt.Errorf("invalid object uuid") + } + + if object.Object.LastUpdateTimeUnix < 1 { + return fmt.Errorf("invalid object last update time") + } + + leaf := s.hashtreeLeafFor(uuidBytes) + + var objectDigest [16 + 8]byte + copy(objectDigest[:], uuidBytes) + + if status.oldUpdateTime > 0 { + // Given only latest object version is maintained, previous registration is erased + binary.BigEndian.PutUint64(objectDigest[16:], uint64(status.oldUpdateTime)) + s.hashtree.AggregateLeafWith(leaf, objectDigest[:]) + } + + binary.BigEndian.PutUint64(objectDigest[16:], uint64(object.Object.LastUpdateTimeUnix)) + s.hashtree.AggregateLeafWith(leaf, objectDigest[:]) + + return nil +} + +func (s *Shard) hashtreeLeafFor(uuidBytes []byte) uint64 { + hashtreeHeight := s.asyncReplicationConfig.hashtreeHeight + + if hashtreeHeight == 0 { + return 0 + } + + return binary.BigEndian.Uint64(uuidBytes[:8]) >> (64 - hashtreeHeight) +} + +type objectInsertStatus struct { + docID uint64 + docIDChanged bool + oldDocID uint64 + oldUpdateTime int64 + // docID was not changed, although object itself did. DocID can be preserved if + // object's vector remain the same, allowing to omit vector index update which is time + // consuming operation. New object is saved and inverted indexes updated if required. + docIDPreserved bool + // object was not changed, all properties and additional properties are the same as in + // the one already stored. No object update, inverted indexes update and vector index + // update is required. 
+ skipUpsert bool +} + +// to be called with the current contents of a row, if the row is empty (i.e. +// didn't exist before), we will get a new docID from the central counter. +// Otherwise, we will reuse the previous docID and mark this as an update +func (s *Shard) determineInsertStatus(prevObj, nextObj *storobj.Object) (objectInsertStatus, error) { + var out objectInsertStatus + + if prevObj == nil { + docID, err := s.counter.GetAndInc() + if err != nil { + return out, errors.Wrap(err, "initial doc id: get new doc id from counter") + } + out.docID = docID + return out, nil + } + + out.oldDocID = prevObj.DocID + out.oldUpdateTime = prevObj.LastUpdateTimeUnix() + + // If object was not changed (props and additional props of prev and next objects are the same) + // skip updates of object, inverted indexes and vector index. + // https://github.com/weaviate/weaviate/issues/3949 + // + // If object was changed (props or additional props of prev and next objects differ) + // update objects and inverted indexes, skip update of vector index. + // https://github.com/weaviate/weaviate/issues/3948 + // + // Due to geo index's (using HNSW vector index) requirement new docID for delete+insert + // (delete initially adds tombstone, which "overwrite" following insert of the same docID) + // any update of geo property needs new docID for updating geo index. + if preserve, skip := compareObjsForInsertStatus(prevObj, nextObj); preserve || skip { + out.docID = prevObj.DocID + out.docIDPreserved = preserve + out.skipUpsert = skip + return out, nil + } + + docID, err := s.counter.GetAndInc() + if err != nil { + return out, errors.Wrap(err, "doc id update: get new doc id from counter") + } + out.docID = docID + out.docIDChanged = true + + return out, nil +} + +// determineMutableInsertStatus is a special version of determineInsertStatus +// where it does not alter the doc id if one already exists. 
Calling this +// method only makes sense under very special conditions, such as those +// outlined in mutableMergeObjectInTx +func (s *Shard) determineMutableInsertStatus(previous, next *storobj.Object) (objectInsertStatus, error) { + var out objectInsertStatus + + if previous == nil { + docID, err := s.counter.GetAndInc() + if err != nil { + return out, errors.Wrap(err, "initial doc id: get new doc id from counter") + } + out.docID = docID + return out, nil + } + + out.docID = previous.DocID + out.oldUpdateTime = previous.LastUpdateTimeUnix() + + // we are planning on mutating and thus not altering the doc id + return out, nil +} + +func (s *Shard) upsertObjectDataLSM(bucket *lsmkv.Bucket, id []byte, data []byte, + docID uint64, +) error { + keyBuf := bytes.NewBuffer(nil) + err := binary.Write(keyBuf, binary.LittleEndian, &docID) + if err != nil { + return fmt.Errorf("write doc id to buffer: %w", err) + } + docIDBytes := keyBuf.Bytes() + + return bucket.Put(id, data, + lsmkv.WithSecondaryKey(helpers.ObjectsBucketLSMDocIDSecondaryIndex, docIDBytes), + ) +} + +func (s *Shard) updateInvertedIndexLSM(object *storobj.Object, + status objectInsertStatus, prevObject *storobj.Object, +) error { + props, nilprops, err := s.AnalyzeObject(object) + if err != nil { + return errors.Wrap(err, "analyze next object") + } + + var prevProps []inverted.Property + var prevNilprops []inverted.NilProperty + + if prevObject != nil { + prevProps, prevNilprops, err = s.AnalyzeObject(prevObject) + if err != nil { + return fmt.Errorf("analyze previous object: %w", err) + } + } + + // if object updated (with or without docID changed) + if status.docIDChanged || status.docIDPreserved { + if err := s.subtractPropLengths(prevProps); err != nil { + s.index.logger.WithField("action", "subtractPropLengths").WithError(err).Error("could not subtract prop lengths") + } + } + + if err := s.SetPropertyLengths(props); err != nil { + return errors.Wrap(err, "store field length values for props") + } + + 
var propsToAdd []inverted.Property + var propsToDel []inverted.Property + var nilpropsToAdd []inverted.NilProperty + var nilpropsToDel []inverted.NilProperty + + // determine only changed properties to avoid unnecessary updates of inverted indexes + if status.docIDPreserved { + delta := inverted.DeltaSkipSearchable(prevProps, props, s.getSearchableBlockmaxProperties()) + + propsToAdd = delta.ToAdd + propsToDel = delta.ToDelete + deltaNil := inverted.DeltaNil(prevNilprops, nilprops) + nilpropsToAdd = deltaNil.ToAdd + nilpropsToDel = deltaNil.ToDelete + } else { + propsToAdd = inverted.DedupItems(props) + propsToDel = inverted.DedupItems(prevProps) + nilpropsToAdd = nilprops + nilpropsToDel = prevNilprops + } + + if prevObject != nil { + // TODO: metrics + if err := s.deleteFromInvertedIndicesLSM(propsToDel, nilpropsToDel, status.oldDocID); err != nil { + return fmt.Errorf("delete inverted indices props: %w", err) + } + if s.index.Config.TrackVectorDimensions { + err = prevObject.IterateThroughVectorDimensions(func(targetVector string, dims int) error { + if err = s.removeDimensionsLSM(dims, status.oldDocID, targetVector); err != nil { + return fmt.Errorf("remove dimension tracking for vector %q: %w", targetVector, err) + } + return nil + }) + if err != nil { + return err + } + } + } + + before := time.Now() + if err := s.extendInvertedIndicesLSM(propsToAdd, nilpropsToAdd, status.docID); err != nil { + return fmt.Errorf("put inverted indices props: %w", err) + } + s.metrics.InvertedExtend(before, len(propsToAdd)) + + if s.index.Config.TrackVectorDimensions { + err = object.IterateThroughVectorDimensions(func(targetVector string, dims int) error { + if err = s.extendDimensionTrackerLSM(dims, status.docID, targetVector); err != nil { + return fmt.Errorf("add dimension tracking for vector %q: %w", targetVector, err) + } + return nil + }) + if err != nil { + return err + } + } + + return nil +} + +func compareObjsForInsertStatus(prevObj, nextObj *storobj.Object) 
(preserve, skip bool) { + prevProps, ok := prevObj.Object.Properties.(map[string]interface{}) + if !ok { + return false, false + } + nextProps, ok := nextObj.Object.Properties.(map[string]interface{}) + if !ok { + return false, false + } + if !geoPropsEqual(prevProps, nextProps) { + return false, false + } + if !common.VectorsEqual(prevObj.Vector, nextObj.Vector) { + return false, false + } + if !targetVectorsEqual(prevObj.Vectors, nextObj.Vectors) { + return false, false + } + if !targetMultiVectorsEqual(prevObj.MultiVectors, nextObj.MultiVectors) { + return false, false + } + if !addPropsEqual(prevObj.Object.Additional, nextObj.Object.Additional) { + return true, false + } + if !propsEqual(prevProps, nextProps) { + return true, false + } + return false, true +} + +func geoPropsEqual(prevProps, nextProps map[string]interface{}) bool { + geoPropsCompared := map[string]struct{}{} + + for name, prevVal := range prevProps { + switch prevGeoVal := prevVal.(type) { + case *models.GeoCoordinates: + nextVal, ok := nextProps[name] + if !ok { + // matching prop does not exist in next + return false + } + + switch nextGeoVal := nextVal.(type) { + case *models.GeoCoordinates: + if !reflect.DeepEqual(prevGeoVal, nextGeoVal) { + // matching geo props in prev and next differ + return false + } + default: + // matching prop in next is not geo + return false + } + geoPropsCompared[name] = struct{}{} + } + } + + for name, nextVal := range nextProps { + switch nextVal.(type) { + case *models.GeoCoordinates: + if _, ok := geoPropsCompared[name]; !ok { + // matching geo prop does not exist in prev + return false + } + } + } + + return true +} + +func timeToString(t time.Time) string { + if b, err := t.MarshalText(); err == nil { + return string(b) + } + return "" +} + +func uuidToString(u uuid.UUID) string { + if b, err := u.MarshalText(); err == nil { + return string(b) + } + return "" +} + +func targetVectorsEqual(prevTargetVectors, nextTargetVectors map[string][]float32) bool { + 
return targetVectorsEqualCheck(prevTargetVectors, nextTargetVectors, common.VectorsEqual) +} + +func targetMultiVectorsEqual(prevTargetVectors, nextTargetVectors map[string][][]float32) bool { + return targetVectorsEqualCheck(prevTargetVectors, nextTargetVectors, common.MultiVectorsEqual) +} + +func targetVectorsEqualCheck[T []float32 | [][]float32](prevTargetVectors, nextTargetVectors map[string]T, + vectorsEqual func(vecA, vecB T) bool, +) bool { + if len(prevTargetVectors) == 0 && len(nextTargetVectors) == 0 { + return true + } + + visited := map[string]struct{}{} + for vecName, vec := range prevTargetVectors { + if !vectorsEqual(vec, nextTargetVectors[vecName]) { + return false + } + visited[vecName] = struct{}{} + } + for vecName, vec := range nextTargetVectors { + if _, ok := visited[vecName]; !ok { + if !vectorsEqual(vec, prevTargetVectors[vecName]) { + return false + } + } + } + + return true +} + +func addPropsEqual(prevAddProps, nextAddProps models.AdditionalProperties) bool { + return reflect.DeepEqual(prevAddProps, nextAddProps) +} + +func propsEqual(prevProps, nextProps map[string]interface{}) bool { + if len(prevProps) != len(nextProps) { + return false + } + + for name := range nextProps { + if _, ok := prevProps[name]; !ok { + return false + } + + switch nextVal := nextProps[name].(type) { + case time.Time: + if timeToString(nextVal) != prevProps[name] { + return false + } + + case []time.Time: + prevVal, ok := prevProps[name].([]string) + if !ok { + return false + } + if len(nextVal) != len(prevVal) { + return false + } + for i := range nextVal { + if timeToString(nextVal[i]) != prevVal[i] { + return false + } + } + + case uuid.UUID: + if uuidToString(nextVal) != prevProps[name] { + return false + } + + case []uuid.UUID: + prevVal, ok := prevProps[name].([]string) + if !ok { + return false + } + if len(nextVal) != len(prevVal) { + return false + } + for i := range nextVal { + if uuidToString(nextVal[i]) != prevVal[i] { + return false + } + } + + 
case map[string]interface{}: // data type "object" + prevVal, ok := prevProps[name].(map[string]interface{}) + if !ok { + return false + } + if !propsEqual(prevVal, nextVal) { + return false + } + + case []interface{}: // data type "objects" + prevVal, ok := prevProps[name].([]interface{}) + if !ok { + return false + } + if len(nextVal) != len(prevVal) { + return false + } + for i := range nextVal { + nextValI, ok := nextVal[i].(map[string]interface{}) + if !ok { + return false + } + prevValI, ok := prevVal[i].(map[string]interface{}) + if !ok { + return false + } + if !propsEqual(prevValI, nextValI) { + return false + } + } + + default: + if !reflect.DeepEqual(nextProps[name], prevProps[name]) { + return false + } + } + } + + return true +} + +func updateVectorInVectorIndex[T dto.Embedding](ctx context.Context, shard *Shard, targetVector string, vector T, + status objectInsertStatus, +) error { + queue, ok := shard.GetVectorIndexQueue(targetVector) + if !ok { + return fmt.Errorf("vector index not found for %s", targetVector) + } + + // even if no vector is provided in an update, we still need + // to delete the previous vector from the index, if it + // exists. otherwise, the associated doc id is left dangling, + // resulting in failed attempts to merge an object on restarts. 
+ if status.docIDChanged { + if err := queue.Delete(status.oldDocID); err != nil { + return errors.Wrapf(err, "delete doc id %d from vector index", status.oldDocID) + } + } + + // vector was not changed, object was updated without changing docID + // https://github.com/weaviate/weaviate/issues/3948 + if status.docIDPreserved { + return nil + } + + // vector is now optional as of + // https://github.com/weaviate/weaviate/issues/1800 + if len(vector) == 0 { + return nil + } + + if err := queue.Insert(ctx, &common.Vector[T]{ID: status.docID, Vector: vector}); err != nil { + return errors.Wrapf(err, "insert doc id %d to vector index", status.docID) + } + + if err := queue.Flush(); err != nil { + return errors.Wrap(err, "flush all vector index buffered WALs") + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sortby_distances.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sortby_distances.go new file mode 100644 index 0000000000000000000000000000000000000000..51b2ef6bdad83009b365f915baff4f1861a2e3d3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sortby_distances.go @@ -0,0 +1,48 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "sort" + + "github.com/weaviate/weaviate/entities/storobj" +) + +type sortByDistances struct { + objects []*storobj.Object + scores []float32 +} + +func (sbd *sortByDistances) Len() int { + return len(sbd.objects) +} + +func (sbd *sortByDistances) Less(i, j int) bool { + return sbd.scores[i] < sbd.scores[j] +} + +func (sbd *sortByDistances) Swap(i, j int) { + sbd.scores[i], sbd.scores[j] = sbd.scores[j], sbd.scores[i] + sbd.objects[i], sbd.objects[j] = sbd.objects[j], sbd.objects[i] +} + +type sortObjectsByDistance struct{} + +func newDistancesSorter() *sortObjectsByDistance { + return &sortObjectsByDistance{} +} + +func (s *sortObjectsByDistance) sort(objects []*storobj.Object, distances []float32) ([]*storobj.Object, []float32) { + sbd := &sortByDistances{objects, distances} + sort.Sort(sbd) + return sbd.objects, sbd.scores +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sortby_id.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sortby_id.go new file mode 100644 index 0000000000000000000000000000000000000000..d403a17b19807ba263dfe517848f52fa231acd99 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sortby_id.go @@ -0,0 +1,51 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "sort" + + "github.com/weaviate/weaviate/entities/storobj" +) + +type sortByID struct { + objects []*storobj.Object + scores []float32 +} + +func (s *sortByID) Swap(i, j int) { + if len(s.objects) == len(s.scores) { + s.scores[i], s.scores[j] = s.scores[j], s.scores[i] + } + s.objects[i], s.objects[j] = s.objects[j], s.objects[i] +} + +func (s *sortByID) Less(i, j int) bool { + return s.objects[i].ID() < s.objects[j].ID() +} + +func (s *sortByID) Len() int { + return len(s.objects) +} + +type sortObjectsByID struct{} + +func newIDSorter() *sortObjectsByID { + return &sortObjectsByID{} +} + +func (s *sortObjectsByID) sort(objects []*storobj.Object, scores []float32, +) ([]*storobj.Object, []float32) { + sbd := &sortByID{objects, scores} + sort.Sort(sbd) + return sbd.objects, sbd.scores +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sortby_scores.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sortby_scores.go new file mode 100644 index 0000000000000000000000000000000000000000..1f8d629b9e572a9f9a16ce0f02c60e5c327d0d24 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sortby_scores.go @@ -0,0 +1,51 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "sort" + + "github.com/weaviate/weaviate/entities/storobj" +) + +// sortByScores aka RankedResults implements sort.Interface, allowing +// results aggregated from multiple shards to be +// sorted according to their BM25 ranking +type sortByScores struct { + objects []*storobj.Object + scores []float32 +} + +func (r *sortByScores) Swap(i, j int) { + r.objects[i], r.objects[j] = r.objects[j], r.objects[i] + r.scores[i], r.scores[j] = r.scores[j], r.scores[i] +} + +func (r *sortByScores) Less(i, j int) bool { + return r.scores[i] > r.scores[j] +} + +func (r *sortByScores) Len() int { + return len(r.scores) +} + +type sortObjectsByScore struct{} + +func newScoresSorter() *sortObjectsByScore { + return &sortObjectsByScore{} +} + +func (s *sortObjectsByScore) sort(objects []*storobj.Object, scores []float32) ([]*storobj.Object, []float32) { + sbd := &sortByScores{objects, scores} + sort.Sort(sbd) + return sbd.objects, sbd.scores +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/sortby_scores_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sortby_scores_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2057f80bee5a1012d20ff3e2f62129f2d906f6d9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/sortby_scores_test.go @@ -0,0 +1,76 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "testing" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/storobj" +) + +func Test_SortBy_Scores(t *testing.T) { + type testcase struct { + testName string + givenObjects []*storobj.Object + givenScores []float32 + expectedOrder []string + } + + tests := []testcase{ + { + testName: "with multiple results", + givenObjects: []*storobj.Object{ + {Object: models.Object{ID: strfmt.UUID("40d3be3e-2ecc-49c8-b37c-d8983164848b")}}, + {Object: models.Object{ID: strfmt.UUID("31bdf9ef-d1c0-4b43-8331-1a89a48c1d2b")}}, + {Object: models.Object{ID: strfmt.UUID("4432797a-ef18-429f-83dc-d971dd9e4dd0")}}, + {Object: models.Object{ID: strfmt.UUID("8ef8c6fd-93b5-4452-b3c3-cef1cd0a18ed")}}, + {Object: models.Object{ID: strfmt.UUID("d79f0d2d-ebc5-4dad-b3df-323bc1e6f183")}}, + }, + givenScores: []float32{12, 34, 100, 43, 2}, + expectedOrder: []string{ + "4432797a-ef18-429f-83dc-d971dd9e4dd0", + "8ef8c6fd-93b5-4452-b3c3-cef1cd0a18ed", + "31bdf9ef-d1c0-4b43-8331-1a89a48c1d2b", + "40d3be3e-2ecc-49c8-b37c-d8983164848b", + "d79f0d2d-ebc5-4dad-b3df-323bc1e6f183", + }, + }, + { + testName: "with a single result", + givenObjects: []*storobj.Object{ + {Object: models.Object{ID: strfmt.UUID("4a483f11-7b2f-452b-be49-f7844dbc5693")}}, + }, + givenScores: []float32{1}, + expectedOrder: []string{ + "4a483f11-7b2f-452b-be49-f7844dbc5693", + }, + }, + { + testName: "with no results", + givenObjects: []*storobj.Object{}, + givenScores: []float32{}, + expectedOrder: []string{}, + }, + } + + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + objects, _ := newScoresSorter().sort(test.givenObjects, test.givenScores) + for i := range objects { + assert.Equal(t, test.expectedOrder[i], objects[i].ID().String()) + } + }) + } +} diff --git 
a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_distance_query_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_distance_query_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e94abe1103b1a51630208a772fb7366fc4f3b3cd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_distance_query_integration_test.go @@ -0,0 +1,237 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "testing" + + schemaUC "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/cluster" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + flatent "github.com/weaviate/weaviate/entities/vectorindex/flat" + + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +func TestVectorDistanceQuery(t *testing.T) { + logger, _ := test.NewNullLogger() + dirName := t.TempDir() + shardState := singleShardState() + mockSchemaReader := schemaUC.NewMockSchemaReader(t) + mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe() + mockSchemaReader.EXPECT().Read(mock.Anything, 
mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error { + class := &models.Class{Class: className} + return readFunc(class, shardState) + }).Maybe() + mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t) + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe() + mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe() + mockNodeSelector := cluster.NewMockNodeSelector(t) + mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe() + mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe() + repo, err := New(logger, "node1", Config{ + MemtablesFlushDirtyAfter: 60, + RootPath: dirName, + QueryMaximumResults: 10, + MaxImportGoroutinesFactor: 1, + DisableLazyLoadShards: true, // need access to the shard directly to convert UUIDs to docIds + }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(), + mockNodeSelector, mockSchemaReader, mockReplicationFSMReader) + require.Nil(t, err) + + class := &models.Class{ + Class: "Test", + InvertedIndexConfig: invertedConfig(), + VectorConfig: map[string]models.VectorConfig{ + "custom1": {VectorIndexConfig: hnsw.UserConfig{}}, + "custom2": {VectorIndexType: "hnsw", VectorIndexConfig: hnsw.UserConfig{}}, + "custom3": {VectorIndexType: "flat", VectorIndexConfig: flatent.UserConfig{}}, + //"custom4": {VectorIndexType: "dynamic", VectorIndexConfig: dynamicent.UserConfig{}}, // async only + }, + Properties: []*models.Property{}, + } + schemaGetter := &fakeSchemaGetter{ + schema: schema.Schema{Objects: &models.Schema{Classes: []*models.Class{class}}}, + shardState: shardState, + } 
+ repo.SetSchemaGetter(schemaGetter) + migrator := NewMigrator(repo, logger, "node1") + + require.Nil(t, + migrator.AddClass(context.Background(), class)) + // update schema getter so it's in sync with class + schemaGetter.schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + } + + ids := make([]strfmt.UUID, 5) + for i := range ids { + uid := uuid.New() + ids[i] = strfmt.UUID(uid.String()) + } + + vectors := [][]float32{ + {1, 0, 0, 0}, + {0, 1, 0, 0}, + {0, 0, 1, 0}, + {0, 0, 0, 1}, + } + index := repo.GetIndex(schema.ClassName(class.Class)) + + var shards []ShardLike + index.shards.Range(func(_ string, shard ShardLike) error { + shards = append(shards, shard) + return nil + }) + + t.Run("error cases", func(t *testing.T) { + require.Nil(t, repo.PutObject( + context.Background(), + &models.Object{ID: ids[0], Class: class.Class}, + nil, + map[string][]float32{"custom1": vectors[0], "custom2": vectors[1], "custom3": vectors[2]}, + nil, + nil, + 0), + ) + require.Nil(t, err) + + docId, err := docIdFromUUID(shards[0].(*Shard), ids[0]) + require.Nil(t, err) + + _, err = shards[0].VectorDistanceForQuery( + context.Background(), + docId, + []models.Vector{vectors[1], vectors[2], vectors[3]}, + []string{"custom1", "custom2"}, + ) + require.NotNil(t, err) + + _, err = shards[0].VectorDistanceForQuery( + context.Background(), + docId, + []models.Vector{}, + []string{}, + ) + require.NotNil(t, err) + + _, err = shards[0].VectorDistanceForQuery( + context.Background(), + docId, + []models.Vector{vectors[1], vectors[2]}, + []string{"custom1", "doesNotExist"}) + require.NotNil(t, err) + + _, err = shards[0].VectorDistanceForQuery( + context.Background(), + docId, + []models.Vector{vectors[1], []float32{1, 0}}, + []string{"custom1", "custom2"}) + require.NotNil(t, err) + }) + + t.Run("object with all vectors", func(t *testing.T) { + require.Nil(t, repo.PutObject( + context.Background(), + &models.Object{ID: ids[1], Class: class.Class}, + 
nil, + map[string][]float32{"custom1": vectors[0], "custom2": vectors[1], "custom3": vectors[2]}, + nil, + nil, + 0), + ) + docId, err := docIdFromUUID(shards[0].(*Shard), ids[1]) + require.Nil(t, err) + + distances, err := shards[0].VectorDistanceForQuery( + context.Background(), + docId, + []models.Vector{vectors[1], vectors[2], vectors[3]}, + []string{"custom1", "custom2", "custom3"}) + require.Nil(t, err) + require.Len(t, distances, 3) + require.Equal(t, float32(1), distances[0]) + require.Equal(t, float32(1), distances[1]) + require.Equal(t, float32(1), distances[2]) + }) + + t.Run("Missing one vector", func(t *testing.T) { + require.Nil(t, repo.PutObject( + context.Background(), + &models.Object{ID: ids[2], Class: class.Class}, + nil, + map[string][]float32{"custom1": vectors[0], "custom2": vectors[1]}, + nil, + nil, + 0), + ) + + docId, err := docIdFromUUID(shards[0].(*Shard), ids[2]) + require.Nil(t, err) + + // querying for existing target vectors works + distances, err := shards[0].VectorDistanceForQuery( + context.Background(), + docId, + []models.Vector{vectors[1], vectors[2]}, + + []string{"custom1", "custom2"}) + require.Nil(t, err) + require.Len(t, distances, 2) + require.Equal(t, float32(1), distances[0]) + require.Equal(t, float32(1), distances[1]) + + // error for non-existing target vector + _, err = shards[0].VectorDistanceForQuery( + context.Background(), + docId, + []models.Vector{vectors[1], vectors[2]}, + []string{"custom1", "custom3"}) + require.NotNil(t, err) + }) +} + +func docIdFromUUID(s *Shard, id strfmt.UUID) (uint64, error) { + idBytes, err := uuid.MustParse(id.String()).MarshalBinary() + if err != nil { + return 0, err + } + docIdBytes, err := s.store.Bucket(helpers.ObjectsBucketLSM).Get(idBytes) + if err != nil { + return 0, err + } + + docId, err := storobj.DocIDFromBinary(docIdBytes) + if err != nil { + return 0, err + } + return docId, nil +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_index.go 
b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_index.go new file mode 100644 index 0000000000000000000000000000000000000000..417f947cc58e6fb5aff698d6107abab7e84a1e97 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_index.go @@ -0,0 +1,65 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + + "github.com/weaviate/weaviate/adapters/repos/db/helpers" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/adapters/repos/db/vector/compressionhelpers" + schemaConfig "github.com/weaviate/weaviate/entities/schema/config" +) + +// VectorIndex is anything that indexes vectors efficiently. For an example +// look at ./vector/hnsw/index.go +type VectorIndex interface { + Type() common.IndexType + Add(ctx context.Context, id uint64, vector []float32) error + AddBatch(ctx context.Context, ids []uint64, vector [][]float32) error + Delete(id ...uint64) error + SearchByVector(ctx context.Context, vector []float32, k int, allow helpers.AllowList) ([]uint64, []float32, error) + SearchByVectorDistance(ctx context.Context, vector []float32, dist float32, + maxLimit int64, allow helpers.AllowList) ([]uint64, []float32, error) + UpdateUserConfig(updated schemaConfig.VectorIndexConfig, callback func()) error + Drop(ctx context.Context) error + Shutdown(ctx context.Context) error + Flush() error + SwitchCommitLogs(ctx context.Context) error + ListFiles(ctx context.Context, basePath string) ([]string, error) + PostStartup() + Compressed() bool + Multivector() bool + ValidateBeforeInsert(vector []float32) error + // ContainsDoc returns true if the index has indexed document with a given id. 
+ // It must return false if the document does not exist, or has a tombstone. + ContainsDoc(docID uint64) bool + // Iterate over all indexed document ids in the index. + // Consistency or order is not guaranteed, as the index may be concurrently modified. + // If the callback returns false, the iteration will stop. + Iterate(fn func(docID uint64) bool) + QueryVectorDistancer(queryVector []float32) common.QueryVectorDistancer + // CompressionStats returns the compression statistics for this index + CompressionStats() compressionhelpers.CompressionStats +} + +// VectorIndexMulti is a VectorIndex that supports multi-vector indexing. +type VectorIndexMulti interface { + AddMulti(ctx context.Context, docId uint64, vector [][]float32) error + AddMultiBatch(ctx context.Context, docIds []uint64, vectors [][][]float32) error + DeleteMulti(id ...uint64) error + SearchByMultiVector(ctx context.Context, vector [][]float32, k int, allow helpers.AllowList) ([]uint64, []float32, error) + SearchByMultiVectorDistance(ctx context.Context, vector [][]float32, dist float32, + maxLimit int64, allow helpers.AllowList) ([]uint64, []float32, error) + QueryMultiVectorDistancer(queryVector [][]float32) common.QueryVectorDistancer + ValidateMultiBeforeInsert(vector [][]float32) error +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_index_queue.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_index_queue.go new file mode 100644 index 0000000000000000000000000000000000000000..2f3d74f366171203443dbffe9b37529ca7e792e9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_index_queue.go @@ -0,0 +1,499 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "context" + "encoding/binary" + "fmt" + "math" + "os" + "path/filepath" + "strconv" + "sync/atomic" + "time" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/adapters/repos/db/queue" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/entities/dto" +) + +const ( + vectorIndexQueueInsertOp uint8 = iota + 1 + vectorIndexQueueDeleteOp + vectorIndexQueueMultiInsertOp + vectorIndexQueueMultiDeleteOp +) + +type VectorIndexQueue struct { + *queue.DiskQueue + + asyncEnabled bool + shard *Shard + scheduler *queue.Scheduler + metrics *VectorIndexQueueMetrics + + // tracks the dimensions of the vectors in the queue + dims atomic.Int32 + // If positive, accumulates vectors in a batch before indexing them. + // Otherwise, the batch size is determined by the size of a chunk file + // (typically 10MB worth of vectors). + // Batch size is not guaranteed to match this value exactly. + batchSize int + + vectorIndex VectorIndex +} + +func NewVectorIndexQueue( + shard *Shard, + targetVector string, + index VectorIndex, +) (*VectorIndexQueue, error) { + viq := VectorIndexQueue{ + shard: shard, + scheduler: shard.scheduler, + asyncEnabled: asyncEnabled(), + } + viq.vectorIndex = index + + logger := shard.index.logger.WithField("component", "vector_index_queue"). + WithField("shard_id", shard.ID()). 
+ WithField("target_vector", targetVector) + + staleTimeout, _ := time.ParseDuration(os.Getenv("ASYNC_INDEXING_STALE_TIMEOUT")) + batchSize, _ := strconv.Atoi(os.Getenv("ASYNC_INDEXING_BATCH_SIZE")) + if batchSize > 0 { + viq.batchSize = batchSize + } + + viq.metrics = NewVectorIndexQueueMetrics(logger, shard.promMetrics, shard.index.Config.ClassName.String(), shard.Name(), targetVector) + + q, err := queue.NewDiskQueue( + queue.DiskQueueOptions{ + ID: fmt.Sprintf("vector_index_queue_%s_%s", shard.ID(), shard.vectorIndexID(targetVector)), + Logger: logger, + Scheduler: shard.scheduler, + Dir: filepath.Join(shard.path(), fmt.Sprintf("%s.queue.d", shard.vectorIndexID(targetVector))), + TaskDecoder: &vectorIndexQueueDecoder{ + q: &viq, + }, + OnBatchProcessed: viq.OnBatchProcessed, + StaleTimeout: staleTimeout, + Metrics: viq.metrics.QueueMetrics(), + }, + ) + if err != nil { + return nil, errors.Wrap(err, "failed to create vector index queue") + } + viq.DiskQueue = q + + if viq.asyncEnabled { + err = q.Init() + if err != nil { + return nil, errors.Wrap(err, "failed to initialize vector index queue") + } + + shard.scheduler.RegisterQueue(&viq) + } + + return &viq, nil +} + +func (iq *VectorIndexQueue) Close() error { + if iq == nil { + // the queue is nil when the shard is not fully initialized + return nil + } + + return iq.DiskQueue.Close() +} + +func (iq *VectorIndexQueue) Insert(ctx context.Context, vectors ...common.VectorRecord) error { + if !iq.asyncEnabled { + return common.AddVectorsToIndex(ctx, vectors, iq.vectorIndex) + } + + start := time.Now() + defer iq.metrics.Insert(start, len(vectors)) + + var buf []byte + var err error + + for _, v := range vectors { + // validate vector + if err := v.Validate(iq.vectorIndex); err != nil { + return errors.Wrap(err, "failed to validate") + } + + // if the index is still empty, ensure the first batch is consistent + // by keeping track of the dimensions of the vectors. 
+ if !iq.dims.CompareAndSwap(0, int32(v.Len())) { + if iq.dims.Load() != int32(v.Len()) { + return errors.Errorf("inconsistent vector lengths: %d != %d", v.Len(), iq.dims.Load()) + } + } + + // encode vector + buf = buf[:0] + buf, err = encodeVector(buf, v) + if err != nil { + return errors.Wrap(err, "failed to encode record") + } + + err = iq.DiskQueue.Push(buf) + if err != nil { + return errors.Wrap(err, "failed to push record to queue") + } + } + + return nil +} + +// DequeueBatch dequeues a batch of tasks from the queue. +// If the queue is configured to accumulate vectors in a batch, it will dequeue +// tasks until the target batch size is reached. +// Otherwise, dequeues a single chunk file worth of tasks. +func (iq *VectorIndexQueue) DequeueBatch() (*queue.Batch, error) { + if iq.batchSize <= 0 { + return iq.DiskQueue.DequeueBatch() + } + + var batches []*queue.Batch + var taskCount int + + for { + batch, err := iq.DiskQueue.DequeueBatch() + if err != nil { + return nil, err + } + + if batch == nil { + break + } + + batches = append(batches, batch) + + taskCount += len(batch.Tasks) + if taskCount >= iq.batchSize { + break + } + } + + if len(batches) == 0 { + return nil, nil + } + + return queue.MergeBatches(batches...), nil +} + +func (iq *VectorIndexQueue) Delete(ids ...uint64) error { + if !iq.asyncEnabled { + return iq.vectorIndex.Delete(ids...) + } + + if iq.vectorIndex.Multivector() { + return iq.delete(vectorIndexQueueMultiDeleteOp, ids...) + } + return iq.delete(vectorIndexQueueDeleteOp, ids...) 
+} + +func (iq *VectorIndexQueue) delete(deleteOperation uint8, ids ...uint64) error { + start := time.Now() + defer iq.metrics.Delete(start, len(ids)) + + var buf []byte + + for _, id := range ids { + buf = buf[:0] + // write the operation + buf = append(buf, deleteOperation) + // write the id + buf = binary.BigEndian.AppendUint64(buf, id) + + err := iq.DiskQueue.Push(buf) + if err != nil { + return errors.Wrap(err, "failed to push record to queue") + } + } + + return nil +} + +func (iq *VectorIndexQueue) Flush() error { + if iq == nil { + // the queue is nil when the shard is not fully initialized + return nil + } + + if !iq.asyncEnabled { + return iq.vectorIndex.Flush() + } + + return iq.DiskQueue.Flush() +} + +func (iq *VectorIndexQueue) BeforeSchedule() (skip bool) { + return iq.checkCompressionSettings() +} + +// Flush the vector index after a batch is processed. +func (iq *VectorIndexQueue) OnBatchProcessed() { + if err := iq.vectorIndex.Flush(); err != nil { + iq.Logger.WithError(err).Error("failed to flush vector index") + } +} + +type upgradableIndexer interface { + Upgraded() bool + Upgrade(callback func()) error + ShouldUpgrade() (bool, int) + AlreadyIndexed() uint64 +} + +// triggers compression if the index is ready to be upgraded +func (iq *VectorIndexQueue) checkCompressionSettings() (skip bool) { + ci, ok := iq.vectorIndex.(upgradableIndexer) + if !ok { + return false + } + + shouldUpgrade, shouldUpgradeAt := ci.ShouldUpgrade() + if !shouldUpgrade || ci.Upgraded() { + return false + } + + if ci.AlreadyIndexed() > uint64(shouldUpgradeAt) { + iq.scheduler.PauseQueue(iq.DiskQueue.ID()) + + err := ci.Upgrade(func() { + iq.scheduler.ResumeQueue(iq.DiskQueue.ID()) + }) + if err != nil { + iq.DiskQueue.Logger.WithError(err).Error("failed to upgrade vector index") + } + + return true + } + + return false +} + +// ResetWith resets the queue with the given vector index. +// The queue must be paused before calling this method. 
+func (iq *VectorIndexQueue) ResetWith(vidx VectorIndex) { + iq.vectorIndex = vidx +} + +type vectorIndexQueueDecoder struct { + q *VectorIndexQueue +} + +func (v *vectorIndexQueueDecoder) DecodeTask(data []byte) (queue.Task, error) { + op := data[0] + data = data[1:] + + switch op { + case vectorIndexQueueInsertOp: + // decode id + id := binary.BigEndian.Uint64(data) + data = data[8:] + + // decode array size on 2 bytes + alen := binary.BigEndian.Uint16(data) + data = data[2:] + + // decode vector + vec := make([]float32, alen) + for i := 0; i < int(alen); i++ { + bits := binary.BigEndian.Uint32(data) + vec[i] = math.Float32frombits(bits) + data = data[4:] + } + + return &Task[[]float32]{ + op: op, + id: uint64(id), + vector: vec, + idx: v.q.vectorIndex, + }, nil + case vectorIndexQueueDeleteOp: + // decode id + id := binary.BigEndian.Uint64(data) + + return &Task[[]float32]{ + op: op, + id: uint64(id), + idx: v.q.vectorIndex, + }, nil + case vectorIndexQueueMultiInsertOp: + // decode id + id := binary.BigEndian.Uint64(data) + data = data[8:] + + // decode array size on 2 bytes + alen := binary.BigEndian.Uint16(data) + data = data[2:] + + // decode vector + multiVec := make([][]float32, alen) + for i := 0; i < int(alen); i++ { + alenvec := binary.BigEndian.Uint16(data) + data = data[2:] + vec := make([]float32, int(alenvec)) + for j := 0; j < int(alenvec); j++ { + bits := binary.BigEndian.Uint32(data) + vec[j] = math.Float32frombits(bits) + data = data[4:] + } + multiVec[i] = vec + } + + return &Task[[][]float32]{ + op: op, + id: uint64(id), + vector: multiVec, + idx: v.q.vectorIndex, + }, nil + case vectorIndexQueueMultiDeleteOp: + // decode id + id := binary.BigEndian.Uint64(data) + + return &Task[[][]float32]{ + op: op, + id: uint64(id), + idx: v.q.vectorIndex, + }, nil + } + + return nil, errors.Errorf("unknown operation: %d", op) +} + +type Task[T dto.Embedding] struct { + op uint8 + id uint64 + vector T + idx VectorIndex +} + +func (t *Task[T]) Op() uint8 { 
+ return t.op +} + +func (t *Task[T]) Key() uint64 { + return t.id +} + +func (t *Task[T]) Execute(ctx context.Context) error { + if ctx.Err() != nil { + return ctx.Err() + } + + switch t.op { + case vectorIndexQueueInsertOp: + return t.idx.Add(ctx, t.id, any(t.vector).([]float32)) + case vectorIndexQueueMultiInsertOp: + return t.idx.(VectorIndexMulti).AddMulti(ctx, t.id, any(t.vector).([][]float32)) + case vectorIndexQueueDeleteOp, vectorIndexQueueMultiDeleteOp: + return t.idx.Delete(t.id) + } + + return errors.Errorf("unknown operation: %d", t.Op()) +} + +func (t *Task[T]) NewGroup(op uint8, tasks ...queue.Task) queue.Task { + ids := make([]uint64, len(tasks)) + vectors := make([]T, len(tasks)) + + for i, task := range tasks { + t := task.(*Task[T]) + ids[i] = t.id + vectors[i] = t.vector + } + + return &TaskGroup[T]{ + op: op, + ids: ids, + vectors: vectors, + idx: t.idx, + } +} + +type TaskGroup[T dto.Embedding] struct { + op uint8 + ids []uint64 + vectors []T + idx VectorIndex +} + +func (t *TaskGroup[T]) Op() uint8 { + return t.op +} + +func (t *TaskGroup[T]) Key() uint64 { + return t.ids[0] +} + +func (t *TaskGroup[T]) Execute(ctx context.Context) error { + if ctx.Err() != nil { + return ctx.Err() + } + + switch t.op { + case vectorIndexQueueInsertOp: + return t.idx.AddBatch(ctx, t.ids, any(t.vectors).([][]float32)) + case vectorIndexQueueMultiInsertOp: + return t.idx.(VectorIndexMulti).AddMultiBatch(ctx, t.ids, any(t.vectors).([][][]float32)) + case vectorIndexQueueDeleteOp, vectorIndexQueueMultiDeleteOp: + return t.idx.Delete(t.ids...) + } + + return errors.Errorf("unknown operation: %d", t.Op()) +} + +func encodeVector(buf []byte, vectorRec common.VectorRecord) ([]byte, error) { + switch v := vectorRec.(type) { + case *common.Vector[[]float32]: + // write the operation first + buf = append(buf, vectorIndexQueueInsertOp) + // put multi or normal vector operation header! 
+ buf = binary.BigEndian.AppendUint64(buf, v.ID) + // write the vector + buf = binary.BigEndian.AppendUint16(buf, uint16(len(v.Vector))) + for _, v := range v.Vector { + buf = binary.BigEndian.AppendUint32(buf, math.Float32bits(v)) + } + return buf, nil + case *common.Vector[[][]float32]: + // write the operation first + buf = append(buf, vectorIndexQueueMultiInsertOp) + // put multi or normal vector operation header! + buf = binary.BigEndian.AppendUint64(buf, v.ID) + // write the vector + buf = binary.BigEndian.AppendUint16(buf, uint16(len(v.Vector))) + for _, v := range v.Vector { + buf = binary.BigEndian.AppendUint16(buf, uint16(len(v))) + for _, v := range v { + buf = binary.BigEndian.AppendUint32(buf, math.Float32bits(v)) + } + } + return buf, nil + default: + return nil, errors.Errorf("unrecognized vector type: %T", vectorRec) + } +} + +// compile time check for TaskGrouper interface +var ( + _ = queue.TaskGrouper(new(Task[[]float32])) + _ = queue.TaskGrouper(new(Task[[][]float32])) +) diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_index_queue_metrics.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_index_queue_metrics.go new file mode 100644 index 0000000000000000000000000000000000000000..8bcd7859debaa5184feb62f90450f945de5e3c4d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_index_queue_metrics.go @@ -0,0 +1,125 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package db + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/adapters/repos/db/queue" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type VectorIndexQueueMetrics struct { + logger *logrus.Entry + baseMetrics *monitoring.PrometheusMetrics + monitoring bool + insertCount prometheus.Counter + deleteCount prometheus.Counter + grouped bool + className string + shardName string + targetVector string +} + +func NewVectorIndexQueueMetrics( + logger logrus.FieldLogger, prom *monitoring.PrometheusMetrics, + className, shardName string, targetVector string, +) *VectorIndexQueueMetrics { + m := &VectorIndexQueueMetrics{ + logger: logger.WithField("monitoring", "index_queue"), + className: className, + shardName: shardName, + targetVector: targetVector, + } + + if prom == nil { + return m + } + + m.baseMetrics = prom + + if prom.Group { + m.className = "n/a" + m.shardName = "n/a" + m.targetVector = "n/a" + m.grouped = true + } + + m.monitoring = true + + m.insertCount = prom.VectorIndexQueueInsertCount.With(prometheus.Labels{ + "class_name": m.className, + "shard_name": m.shardName, + "target_vector": m.targetVector, + }) + m.deleteCount = prom.VectorIndexQueueDeleteCount.With(prometheus.Labels{ + "class_name": m.className, + "shard_name": m.shardName, + "target_vector": m.targetVector, + }) + + return m +} + +func (m *VectorIndexQueueMetrics) QueueMetrics() *queue.Metrics { + return queue.NewMetrics( + m.logger, + m.baseMetrics, + prometheus.Labels{ + "class_name": m.className, + "shard_name": m.shardName, + }, + ) +} + +func (m *VectorIndexQueueMetrics) DeleteShardLabels(class, shard string) { + if !m.monitoring { + return + } + + if m.grouped { + // never delete the shared label, only individual ones + return + } + + m.baseMetrics.DeleteShard(class, shard) +} + +func (m *VectorIndexQueueMetrics) Insert(start time.Time, count int) { + 
took := time.Since(start) + m.logger.WithField("action", "insert"). + WithField("batch_size", count). + WithField("took", took). + Tracef("push insert operations to vector index queue took %s", took) + + if !m.monitoring { + return + } + + m.insertCount.Add(float64(count)) +} + +func (m *VectorIndexQueueMetrics) Delete(start time.Time, count int) { + took := time.Since(start) + m.logger.WithField("action", "delete"). + WithField("batch_size", count). + WithField("took", took). + Tracef("push delete operations to vector index queue took %s", took) + + if !m.monitoring { + return + } + + m.deleteCount.Add(float64(count)) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_index_queue_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_index_queue_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f47a72f4217dd13c7700d58d4049713a77c038d1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/db/vector_index_queue_test.go @@ -0,0 +1,76 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest + +package db + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/adapters/repos/db/vector/common" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func TestVectorIndexQueueBatchSize(t *testing.T) { + t.Setenv("ASYNC_INDEXING", "true") + os.Setenv("ASYNC_INDEXING_BATCH_SIZE", "6000") + os.Setenv("ASYNC_INDEXING_STALE_TIMEOUT", "1ms") + + ctx := context.Background() + className := "TestClass" + shd, _ := testShardWithSettings(t, ctx, &models.Class{Class: className}, hnsw.UserConfig{}, false, true) + + defer func(path string) { + err := os.RemoveAll(path) + if err != nil { + fmt.Println(err) + } + }(shd.Index().Config.RootPath) + + count := 10_000 + + v := make([]float32, 1000) + + var vectors []common.VectorRecord + for i := range count { + vectors = append(vectors, &common.Vector[[]float32]{ + ID: uint64(i), + Vector: v, + }) + } + + q, ok := shd.GetVectorIndexQueue("") + require.True(t, ok) + + // ensure the queue doesn't get scheduled + q.Pause() + + err := q.Insert(ctx, vectors...) 
+ require.NoError(t, err) + + // wait for the batch to be stale + time.Sleep(100 * time.Millisecond) + + b, err := q.DequeueBatch() + require.NoError(t, err) + require.NotNil(t, b) + require.Equal(t, len(b.Tasks), 7836) + size := len(b.Tasks) + b.Done() + require.EqualValues(t, 10000-size, q.Size()) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/modules/modules.go b/platform/dbops/binaries/weaviate-src/adapters/repos/modules/modules.go new file mode 100644 index 0000000000000000000000000000000000000000..b6ad27289ce4182a9caf8caa020b5f981048908a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/modules/modules.go @@ -0,0 +1,139 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modulestorage + +import ( + "fmt" + "os" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/moduletools" + bolt "go.etcd.io/bbolt" +) + +type Repo struct { + logger logrus.FieldLogger + baseDir string + db *bolt.DB +} + +func NewRepo(baseDir string, logger logrus.FieldLogger) (*Repo, error) { + r := &Repo{ + baseDir: baseDir, + logger: logger, + } + + err := r.init() + return r, err +} + +func (r *Repo) DBPath() string { + return fmt.Sprintf("%s/modules.db", r.baseDir) +} + +func (r *Repo) DataPath() string { + return r.baseDir +} + +func (r *Repo) init() error { + if err := os.MkdirAll(r.baseDir, 0o777); err != nil { + return errors.Wrapf(err, "create root path directory at %s", r.baseDir) + } + + boltdb, err := bolt.Open(r.DBPath(), 0o600, nil) + if err != nil { + return errors.Wrapf(err, "open bolt at %s", r.DBPath()) + } + + r.db = boltdb + + return nil +} + +type storageBucket struct { + bucketKey []byte + repo *Repo +} + +func (r *Repo) 
Storage(bucketName string) (moduletools.Storage, error) { + storage := &storageBucket{ + bucketKey: []byte(bucketName), + repo: r, + } + + err := storage.init() + return storage, err +} + +func (s *storageBucket) init() error { + return s.repo.db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucketIfNotExists(s.bucketKey); err != nil { + return errors.Wrapf(err, "create module storage bucket '%s'", + string(s.bucketKey)) + } + + return nil + }) +} + +func (s *storageBucket) Put(key, value []byte) error { + return s.repo.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket(s.bucketKey) + if b == nil { + return errors.Errorf("no bucket for key %s found", string(s.bucketKey)) + } + + if err := b.Put(key, value); err != nil { + return errors.Wrapf(err, "put value for key %s", string(key)) + } + + return nil + }) +} + +func (s *storageBucket) Get(key []byte) ([]byte, error) { + var out []byte + err := s.repo.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(s.bucketKey) + if b == nil { + return errors.Errorf("no bucket for key %s found", string(s.bucketKey)) + } + + out = b.Get(key) + return nil + }) + + return out, err +} + +func (s *storageBucket) Scan(scan moduletools.ScanFn) error { + err := s.repo.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(s.bucketKey) + + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + ok, err := scan(k, v) + if err != nil { + return errors.Wrapf(err, "read item %q", string(k)) + } + + if !ok { + break + } + } + + return nil + }) + + return err +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/modules/modules_integration_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/modules/modules_integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..480747771e472b8b69846e3d2a9d7c41a31dc1ca --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/modules/modules_integration_test.go @@ -0,0 +1,190 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ 
___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build integrationTest +// +build integrationTest + +package modulestorage + +import ( + "crypto/rand" + "fmt" + "math/big" + "os" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func mustRandIntn(max int64) int { + randInt, err := rand.Int(rand.Reader, big.NewInt(max)) + if err != nil { + panic(fmt.Sprintf("mustRandIntn error: %v", err)) + } + return int(randInt.Int64()) +} + +func Test_ModuleStorage(t *testing.T) { + dirName := fmt.Sprintf("./generated_testdata/%d", mustRandIntn(10000000)) + os.MkdirAll(dirName, 0o777) + defer func() { + err := os.RemoveAll(dirName) + fmt.Println(err) + }() + + logger, _ := test.NewNullLogger() + + r, err := NewRepo(dirName, logger) + require.Nil(t, err) + + module1, err := r.Storage("my-module") + require.Nil(t, err) + module2, err := r.Storage("my-other-module") + require.Nil(t, err) + + t.Run("storing two k/v pairs for each bucket", func(t *testing.T) { + err := module1.Put([]byte("module1-key1"), []byte("module1-value1")) + require.Nil(t, err) + err = module1.Put([]byte("module1-key2"), []byte("module1-value2")) + require.Nil(t, err) + err = module2.Put([]byte("module2-key1"), []byte("module2-value1")) + require.Nil(t, err) + err = module2.Put([]byte("module2-key2"), []byte("module2-value2")) + require.Nil(t, err) + }) + + t.Run("retrieving values across buckets and keys", func(t *testing.T) { + var v []byte + var err error + + // on module 1 bucket + v, err = module1.Get([]byte("module1-key1")) + require.Nil(t, err) + assert.Equal(t, []byte("module1-value1"), v) + + v, err = module1.Get([]byte("module1-key2")) + require.Nil(t, err) + assert.Equal(t, []byte("module1-value2"), v) + + v, 
err = module1.Get([]byte("module2-key1")) + require.Nil(t, err) + assert.Equal(t, []byte(nil), v) + + v, err = module1.Get([]byte("module2-key2")) + require.Nil(t, err) + assert.Equal(t, []byte(nil), v) + + // on module 2 bucket + v, err = module2.Get([]byte("module1-key1")) + require.Nil(t, err) + assert.Equal(t, []byte(nil), v) + + v, err = module2.Get([]byte("module1-key2")) + require.Nil(t, err) + assert.Equal(t, []byte(nil), v) + + v, err = module2.Get([]byte("module2-key1")) + require.Nil(t, err) + assert.Equal(t, []byte("module2-value1"), v) + + v, err = module2.Get([]byte("module2-key2")) + require.Nil(t, err) + assert.Equal(t, []byte("module2-value2"), v) + }) + + t.Run("scanning all k/v for a bucket", func(t *testing.T) { + t.Run("module1 - full range", func(t *testing.T) { + var ( + keys [][]byte + values [][]byte + ) + expectedKeys := [][]byte{ + []byte("module1-key1"), + []byte("module1-key2"), + } + expectedValues := [][]byte{ + []byte("module1-value1"), + []byte("module1-value2"), + } + + err := module1.Scan(func(k, v []byte) (bool, error) { + keys = append(keys, k) + values = append(values, v) + return true, nil + }) + + require.Nil(t, err) + + assert.Equal(t, expectedKeys, keys) + assert.Equal(t, expectedValues, values) + }) + + t.Run("module2 - full range", func(t *testing.T) { + var ( + keys [][]byte + values [][]byte + ) + expectedKeys := [][]byte{ + []byte("module2-key1"), + []byte("module2-key2"), + } + expectedValues := [][]byte{ + []byte("module2-value1"), + []byte("module2-value2"), + } + + err := module2.Scan(func(k, v []byte) (bool, error) { + keys = append(keys, k) + values = append(values, v) + return true, nil + }) + + require.Nil(t, err) + + assert.Equal(t, expectedKeys, keys) + assert.Equal(t, expectedValues, values) + }) + + t.Run("module2 - stop after single row", func(t *testing.T) { + var ( + keys [][]byte + values [][]byte + ) + expectedKeys := [][]byte{ + []byte("module2-key1"), + } + expectedValues := [][]byte{ + 
[]byte("module2-value1"), + } + + err := module2.Scan(func(k, v []byte) (bool, error) { + keys = append(keys, k) + values = append(values, v) + return false, nil + }) + + require.Nil(t, err) + + assert.Equal(t, expectedKeys, keys) + assert.Equal(t, expectedValues, values) + }) + + t.Run("module2 - with scan error", func(t *testing.T) { + err := module2.Scan(func(k, v []byte) (bool, error) { + return false, fmt.Errorf("oops") + }) + + assert.Equal(t, "read item \"module2-key1\": oops", err.Error()) + }) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/schema/store.go b/platform/dbops/binaries/weaviate-src/adapters/repos/schema/store.go new file mode 100644 index 0000000000000000000000000000000000000000..b71b98b3027b1319cbad2dfbe42db4f9011a8dd0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/schema/store.go @@ -0,0 +1,517 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path" + "time" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/cluster/types" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" + ucs "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" + bolt "go.etcd.io/bbolt" +) + +var ( + // old keys are still needed for migration + schemaBucket = []byte("schema") + schemaKey = []byte("schema") + // static keys + keyMetaClass = []byte{eTypeMeta, 0} + keyShardingState = []byte{eTypeSharingState, 0} + keyConfig = []byte{eTypeConfig, 0} + _Version int = 2 +) + +// constant to encode the type of entry in the DB +const ( + eTypeConfig byte = 1 + eTypeClass byte = 2 + eTypeShard byte = 4 + eTypeMeta byte = 5 + eTypeSharingState byte = 15 +) + +const ( + // BoltDBTimeout is the timeout for acquiring file lock when opening BoltDB + BoltDBTimeout = 5 * time.Second +) + +// config configuration specific the stored schema +type config struct { + Version int + // add more fields +} + +/* +Store is responsible for storing and persisting the schema in a structured manner. +It ensures that each class has a dedicated bucket, which includes metadata, and sharding state. + +Schema Structure: + - Config: contains metadata related to parsing the schema + - Nested buckets for each class + +Schema Structure for a class Bucket: + - Metadata contains models.Class + - Sharding state without shards + - Class shards: individual shard associated with the sharding state + +By organizing the schema in this manner, it facilitates efficient management of class specific data during runtime. 
+In addition, old schema are backed up and migrated to the new structure for a seamless transitions +*/ +type store struct { + version int // schema version + homeDir string // home directory of schema files + log logrus.FieldLogger + db *bolt.DB +} + +// NewStore returns a new schema repository. Call the Open() method to open the underlying DB. +// To free the resources, call the Close() method. +func NewStore(homeDir string, logger logrus.FieldLogger) *store { + return &store{ + version: _Version, + homeDir: homeDir, + log: logger, + } +} + +func initBoltDB(filePath string, version int, cfg *config) (*bolt.DB, error) { + db, err := bolt.Open(filePath, 0o600, &bolt.Options{Timeout: BoltDBTimeout}) + if err != nil { + return nil, fmt.Errorf("open %q: %w", filePath, err) + } + root := func(tx *bolt.Tx) error { + b, err := tx.CreateBucket(schemaBucket) + // A new bucket has been created + if err == nil { + *cfg = config{Version: version} + return saveConfig(b, *cfg) + } + // load existing bucket + b = tx.Bucket(schemaBucket) + if b == nil { + return fmt.Errorf("retrieve existing bucket %q", schemaBucket) + } + // read config: config exists since version 2 + data := b.Get(keyConfig) + if len(data) > 0 { + if err := json.Unmarshal(data, &cfg); err != nil { + return fmt.Errorf("cannot read config: %w", err) + } + } + return nil + } + + return db, db.Update(root) +} + +// Open the underlying DB +// Deprecated: instead schema now is persistent via RAFT +// see : cluster package +// Load and save are left to support backward compatibility +func (r *store) Open() (err error) { + if err := os.MkdirAll(r.homeDir, 0o777); err != nil { + return fmt.Errorf("create root directory %q: %w", r.homeDir, err) + } + cfg := config{} + path := path.Join(r.homeDir, "schema.db") + boltDB, err := initBoltDB(path, r.version, &cfg) + if err != nil { + return fmt.Errorf("init bolt_db: %w", err) + } + defer func() { + if err != nil { + boltDB.Close() + } + }() + r.db = boltDB + if cfg.Version < 
r.version { + if err := r.migrate(path, cfg.Version, r.version); err != nil { + return fmt.Errorf("migrate: %w", err) + } + } + if cfg.Version > r.version { + return fmt.Errorf("schema version %d higher than %d", cfg.Version, r.version) + } + return err +} + +// Close the underlying DB +func (r *store) Close() { + r.db.Close() +} + +// migrate from old to new schema +// It will back up the old schema file if it exists +func (r *store) migrate(filePath string, from, to int) (err error) { + r.log.Infof("schema migration from v%d to v%d process has started", from, to) + defer func() { + if err == nil { + r.log.Infof("successfully completed schema migration from v%d to v%d", from, to) + } + }() + state, err := r.loadSchemaV1() + if err != nil { + return fmt.Errorf("load old schema: %w", err) + } + if state != nil { + // create backupPath by copying file + backupPath := fmt.Sprintf("%s_v%d.bak", filePath, from) + if err := copyFile(backupPath, filePath); err != nil { + return fmt.Errorf("schema backup: %w", err) + } + + // write new schema + f := func(tx *bolt.Tx) error { + b := tx.Bucket(schemaBucket) + if err := saveConfig(b, config{Version: to}); err != nil { + return err + } + b.Delete(schemaKey) // remove old schema + return r.saveAllTx(context.Background(), b, *state)(tx) + } + if err := r.db.Update(f); err != nil { + os.Remove(backupPath) + return fmt.Errorf("convert to new schema: %w", err) + } + } + return nil +} + +// saveSchemaV1 might be needed to migrate from v2 to v0 +func (r *store) saveSchemaV1(schema ucs.State) error { + schemaJSON, err := json.Marshal(schema) + if err != nil { + return fmt.Errorf("marshal schema state to json: %w", err) + } + + return r.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(schemaBucket) + return b.Put(schemaKey, schemaJSON) + }) +} + +// loadSchemaV1 is needed to migrate from v0 to v2 +func (r *store) loadSchemaV1() (*ucs.State, error) { + var schemaJSON []byte + r.db.View(func(tx *bolt.Tx) error { + b := 
tx.Bucket(schemaBucket) + schemaJSON = b.Get(schemaKey) + return nil + }) + + if len(schemaJSON) == 0 { + return nil, nil + } + + var state ucs.State + err := json.Unmarshal(schemaJSON, &state) + if err != nil { + return nil, fmt.Errorf("parse schema state from JSON: %w", err) + } + + return &state, nil +} + +func (r *store) updateClass(b *bolt.Bucket, data ucs.ClassPayload) error { + // remove old shards + if data.ReplaceShards { + cursor := b.Cursor() // b.Put before + for key, _ := cursor.First(); key != nil; { + if key[0] == eTypeShard { + b.Delete(key) + } + key, _ = cursor.Next() + } + } + if data.Metadata != nil { + if err := b.Put(keyMetaClass, data.Metadata); err != nil { + return err + } + } + + if data.ShardingState != nil { + if err := b.Put(keyShardingState, data.ShardingState); err != nil { + return err + } + } + + return appendShards(b, data.Shards, make([]byte, 1, 68)) +} + +// Load loads the complete schema from the persistent storage +func (r *store) Load(ctx context.Context) (ucs.State, error) { + state := ucs.NewState(32) + for data := range r.load(ctx) { + if data.Error != nil { + return state, data.Error + } + cls := models.Class{Class: string(data.Name)} + ss := sharding.State{} + + if err := json.Unmarshal(data.Metadata, &cls); err != nil { + return state, fmt.Errorf("unmarshal class %q", cls.Class) + } + if err := json.Unmarshal(data.ShardingState, &ss); err != nil { + return state, fmt.Errorf("unmarshal sharding state for class %q size %d", + cls.Class, len(data.ShardingState)) + } + if n := len(data.Shards); n > 0 { + ss.Physical = make(map[string]sharding.Physical, n) + } + for _, shard := range data.Shards { + phy := sharding.Physical{} + name := string(shard.Key) + if err := json.Unmarshal(shard.Value, &phy); err != nil { + return state, fmt.Errorf("unmarshal shard %q for class %q", name, cls.Class) + } + ss.Physical[name] = phy + } + state.ObjectSchema.Classes = append(state.ObjectSchema.Classes, &cls) + state.ShardingState[cls.Class] 
= &ss + } + return state, nil +} + +func (r *store) load(ctx context.Context) <-chan ucs.ClassPayload { + ch := make(chan ucs.ClassPayload, 1) + f := func(tx *bolt.Tx) (err error) { + root := tx.Bucket(schemaBucket) + rootCursor := root.Cursor() + for cls, _ := rootCursor.First(); cls != nil; { + if cls[0] != eTypeClass { + cls, _ = rootCursor.Next() + continue + } + if err := ctx.Err(); err != nil { + ch <- ucs.ClassPayload{Error: err} + return err + } + b := root.Bucket(cls) + if b == nil { + err := fmt.Errorf("class not found") + ch <- ucs.ClassPayload{Error: err} + return err + } + x := ucs.ClassPayload{ + Name: string(cls[1:]), + Shards: make([]ucs.KeyValuePair, 0, 32), + } + cursor := b.Cursor() + for key, value := cursor.First(); key != nil; { + if bytes.Equal(key, keyMetaClass) { + x.Metadata = value + } else if bytes.Equal(key, keyShardingState) { + x.ShardingState = value + } else { + x.Shards = append(x.Shards, ucs.KeyValuePair{Key: string(key[1:]), Value: value}) + } + key, value = cursor.Next() + } + ch <- x + cls, _ = rootCursor.Next() + } + return nil + } + enterrors.GoWrapper(func() { + defer close(ch) + r.db.View(f) + }, r.log) + return ch +} + +// Save saves the complete schema to the persistent storage +func (r *store) Save(ctx context.Context, ss ucs.State) error { + if (ss.ObjectSchema == nil || len(ss.ObjectSchema.Classes) == 0) && + len(ss.ShardingState) == 0 { + return nil // empty schema nothing to store + } + + if ss.ObjectSchema == nil || + len(ss.ObjectSchema.Classes) == 0 || + len(ss.ShardingState) == 0 { + return fmt.Errorf("inconsistent schema: missing required fields") + } + + currState, err := r.Load(ctx) + if err != nil { + return fmt.Errorf("load existing schema state: %w", err) + } + // If the store already contains the same contents as the incoming + // schema state, we don't need to delete and re-put all schema contents. + // Doing so can cause a very high MTTR when the number of tenants is on + // the order of 100k+. 
+ // + // Here we have to check equality with rough equivalency checks, because + // there is currently no way to make a comparison at the byte-level + // + // See: https://github.com/weaviate/weaviate/issues/4634 + if currState.EqualEnough(&ss) { + return nil + } + + r.log.WithField("action", "save_schema"). + Infof("Current schema state outdated, updating schema store") + + f := func(tx *bolt.Tx) error { + root := tx.Bucket(schemaBucket) + return r.saveAllTx(ctx, root, ss)(tx) + } + + err = r.db.Update(f) + if err == nil { + r.log.WithField("action", "save_schema"). + Infof("Schema store successfully updated") + } + + return err +} + +func (r *store) saveAllTx(ctx context.Context, root *bolt.Bucket, ss ucs.State) func(tx *bolt.Tx) error { + return func(tx *bolt.Tx) error { + rootCursor := root.Cursor() + for cls, _ := rootCursor.First(); cls != nil; { + if cls[0] == eTypeClass { + err := root.DeleteBucket(cls) + if err != nil && !errors.Is(err, bolt.ErrBucketNotFound) { + return err + } + } + cls, _ = rootCursor.Next() + } + for _, cls := range ss.ObjectSchema.Classes { + if err := ctx.Err(); err != nil { + return fmt.Errorf("context for class %q: %w", cls.Class, err) + } + sharding := ss.ShardingState[cls.Class] + payload, err := createClassPayload(cls, sharding) + if err != nil { + return fmt.Errorf("create payload for class %q: %w", cls.Class, err) + } + b, err := root.CreateBucket(encodeClassName(cls.Class)) + if err != nil { + return fmt.Errorf("create bucket for class %q: %w", cls.Class, err) + } + if err := r.updateClass(b, payload); err != nil { + return fmt.Errorf("update bucket %q: %w", cls.Class, err) + } + r.log.WithField("action", "update_schema_store"). + Debugf("Class updated: %s", cls.Class) + } + + r.log.WithField("action", "update_schema_store"). 
+ Debug("All classes updated") + + return nil + } +} + +func saveConfig(root *bolt.Bucket, cfg config) error { + data, err := json.Marshal(&cfg) + if err != nil { + return fmt.Errorf("marshal config: %w", err) + } + if err := root.Put(keyConfig, data); err != nil { + return fmt.Errorf("write config: %w", err) + } + return nil +} + +func appendShards(b *bolt.Bucket, shards []ucs.KeyValuePair, key []byte) error { + key[0] = eTypeShard + for _, pair := range shards { + kLen := len(pair.Key) + 1 + key = append(key, pair.Key...) + if err := b.Put(key[:kLen], pair.Value); err != nil { + return err + } + key = key[:1] + } + return nil +} + +func encodeClassName(name string) []byte { + len := len(name) + 1 + buf := make([]byte, 1, len) + buf[0] = eTypeClass + buf = append(buf, name...) + return buf[:len] +} + +func copyFile(dst, src string) error { + data, err := os.ReadFile(src) + if err != nil { + return err + } + return os.WriteFile(dst, data, 0o644) +} + +func createClassPayload(class *models.Class, + shardingState *sharding.State, +) (pl ucs.ClassPayload, err error) { + pl.Name = class.Class + if pl.Metadata, err = json.Marshal(class); err != nil { + return pl, fmt.Errorf("marshal class %q metadata: %w", pl.Name, err) + } + if shardingState != nil { + ss := *shardingState + pl.Shards = make([]ucs.KeyValuePair, len(ss.Physical)) + i := 0 + for name, shard := range ss.Physical { + data, err := json.Marshal(shard) + if err != nil { + return pl, fmt.Errorf("marshal shard %q metadata: %w", name, err) + } + pl.Shards[i] = ucs.KeyValuePair{Key: name, Value: data} + i++ + } + ss.Physical = nil + if pl.ShardingState, err = json.Marshal(&ss); err != nil { + return pl, fmt.Errorf("marshal class %q sharding state: %w", pl.Name, err) + } + } + return pl, nil +} + +func (r *store) LoadLegacySchema() (map[string]types.ClassState, error) { + res := make(map[string]types.ClassState) + legacySchema, err := r.Load(context.Background()) + if err != nil { + return res, fmt.Errorf("could 
not load legacy schema: %w", err) + } + for _, c := range legacySchema.ObjectSchema.Classes { + res[c.Class] = types.ClassState{Class: *c, Shards: *legacySchema.ShardingState[c.Class]} + } + return res, nil +} + +func (r *store) SaveLegacySchema(cluster map[string]types.ClassState) error { + states := ucs.NewState(len(cluster)) + + for _, s := range cluster { + currState := s // new var to avoid passing pointer to s + states.ObjectSchema.Classes = append(states.ObjectSchema.Classes, &currState.Class) + states.ShardingState[s.Class.Class] = &currState.Shards + } + + return r.Save(context.Background(), states) +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/schema/store_test.go b/platform/dbops/binaries/weaviate-src/adapters/repos/schema/store_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1df93d8f9fed18934ae49a27e0b4ec290bb60edc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/schema/store_test.go @@ -0,0 +1,219 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package schema + +import ( + "context" + "fmt" + "reflect" + "testing" + + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/cluster/types" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + + ucs "github.com/weaviate/weaviate/usecases/schema" + "github.com/weaviate/weaviate/usecases/sharding" +) + +func TestSaveAndLoadSchema(t *testing.T) { + var ( + ctx = context.Background() + logger, _ = test.NewNullLogger() + dirName = t.TempDir() + ) + + schema := ucs.NewState(2) + addClass(&schema, "C1", 0, 1, 0) + addClass(&schema, "C2", 0, 3, 3) + + // Save the schema + repo, _ := newRepo(dirName, 0, logger) + defer repo.Close() + + cs := map[string]types.ClassState{} + for _, s := range schema.ObjectSchema.Classes { + cs[s.Class] = types.ClassState{ + Class: *s, + Shards: *schema.ShardingState[s.Class], + } + } + + if err := repo.SaveLegacySchema(cs); err != nil { + t.Fatalf("save all schema: %v", err) + } + + // Load the schema + loadedSchema, err := repo.Load(ctx) + if err != nil { + t.Fatalf("load schema: %v", err) + } + + // Assert that the loaded schema is the same as the saved schema + + if !reflect.DeepEqual(schema.ObjectSchema, loadedSchema.ObjectSchema) { + t.Errorf("loaded schema does not match saved schema") + } + if !reflect.DeepEqual(schema.ShardingState, loadedSchema.ShardingState) { + t.Errorf("loaded sharding state does not match saved sharding state") + } +} + +func TestRepositoryMigrate(t *testing.T) { + var ( + ctx = context.Background() + logger, _ = test.NewNullLogger() + dirName = t.TempDir() + canceledCtx, cancel = context.WithCancel(ctx) + ) + cancel() + schema := ucs.NewState(3) + addClass(&schema, "C1", 0, 1, 0) + addClass(&schema, "C2", 0, 3, 3) + t.Run("SaveOldSchema", func(t *testing.T) { + repo, _ := newRepo(dirName, 0, logger) + defer repo.Close() + if err := 
repo.saveSchemaV1(schema); err != nil { + t.Fatalf("save all schema: %v", err) + } + }) + t.Run("LoadOldchema", func(t *testing.T) { + repo, err := newRepo(dirName, -1, logger) + if err != nil { + t.Fatalf("create new repo %v", err) + } + defer repo.Close() + + _, err = repo.Load(canceledCtx) + assert.ErrorIs(t, err, context.Canceled) + + state, err := repo.Load(ctx) + assert.Nil(t, err) + assert.Equal(t, schema, state) + }) + t.Run("LoadSchema", func(t *testing.T) { + repo, err := newRepo(dirName, -1, logger) + if err != nil { + t.Fatalf("create new repo %v", err) + } + defer repo.Close() + + state, err := repo.Load(ctx) + assert.Nil(t, err) + assert.Equal(t, schema, state) + }) + + t.Run("LoadSchemaWithHigherVersion", func(t *testing.T) { + _, err := newRepo(dirName, 1, logger) + assert.NotNil(t, err) + }) +} + +func TestRepositorySaveLoad(t *testing.T) { + var ( + ctx = context.Background() + canceledCtx, cancel = context.WithCancel(ctx) + logger, _ = test.NewNullLogger() + dirName = t.TempDir() + ) + cancel() + repo, err := newRepo(dirName, -1, logger) + if err != nil { + t.Fatalf("create new repo: %v", err) + } + // load empty schema + res, err := repo.Load(ctx) + if err != nil { + t.Fatalf("loading schema from empty file: %v", err) + } + if len(res.ShardingState) != 0 || len(res.ObjectSchema.Classes) != 0 { + t.Fatalf("expected empty schema got %v", res) + } + + // save and load non empty schema + schema := ucs.NewState(3) + addClass(&schema, "C1", 0, 1, 0) + addClass(&schema, "C2", 0, 3, 3) + err = repo.Save(canceledCtx, schema) + assert.ErrorIs(t, err, context.Canceled) + + if err = repo.Save(ctx, schema); err != nil { + t.Fatalf("save schema: %v", err) + } + if err = repo.Save(ctx, schema); err != nil { + t.Fatalf("save schema: %v", err) + } + + res, err = repo.Load(context.Background()) + if err != nil { + t.Fatalf("load schema: %v", err) + } + assert.Equal(t, schema, res) +} + +func createClass(name string, start, nProps, nShards int) (models.Class, 
sharding.State) { + cls := models.Class{Class: name} + for i := start; i < start+nProps; i++ { + prop := models.Property{ + Name: fmt.Sprintf("property-%d", i), + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + } + cls.Properties = append(cls.Properties, &prop) + } + ss := sharding.State{IndexID: name} + ss.Physical = createShards(start, nShards, models.TenantActivityStatusHOT) + + return cls, ss +} + +func createShards(start, nShards int, activityStatus string) map[string]sharding.Physical { + if nShards < 1 { + return nil + } + + shards := make(map[string]sharding.Physical, nShards) + for i := start; i < start+nShards; i++ { + name := fmt.Sprintf("shard-%d", i) + node := fmt.Sprintf("node-%d", i) + shards[name] = sharding.Physical{ + Name: name, + BelongsToNodes: []string{node}, + Status: activityStatus, + } + } + return shards +} + +func addClass(schema *ucs.State, name string, start, nProps, nShards int) (*models.Class, *sharding.State) { + cls, ss := createClass(name, start, nProps, nShards) + if schema.ObjectSchema == nil { + schema.ObjectSchema = &models.Schema{} + } + if schema.ShardingState == nil { + schema.ShardingState = make(map[string]*sharding.State) + } + schema.ObjectSchema.Classes = append(schema.ObjectSchema.Classes, &cls) + schema.ShardingState[name] = &ss + return &cls, &ss +} + +func newRepo(homeDir string, version int, logger logrus.FieldLogger) (*store, error) { + r := NewStore(homeDir, logger) + if version > -1 { + r.version = version + } + return r, r.Open() +} diff --git a/platform/dbops/binaries/weaviate-src/adapters/repos/transactions/store.go b/platform/dbops/binaries/weaviate-src/adapters/repos/transactions/store.go new file mode 100644 index 0000000000000000000000000000000000000000..ba92af90f9300944e7ad9c07b6e1446f3bbf9349 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/adapters/repos/transactions/store.go @@ -0,0 +1,148 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// 
\ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package txstore + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/usecases/cluster" + "go.etcd.io/bbolt" +) + +var txBucket = []byte("transactions") + +type Store struct { + db *bbolt.DB + log logrus.FieldLogger + homeDir string + unmarshaller unmarshalFn +} + +func NewStore(homeDir string, logger logrus.FieldLogger) *Store { + return &Store{ + homeDir: homeDir, + log: logger, + } +} + +func (s *Store) SetUmarshalFn(fn unmarshalFn) { + s.unmarshaller = fn +} + +func (s *Store) Open() error { + if err := os.MkdirAll(s.homeDir, 0o777); err != nil { + return fmt.Errorf("create root directory %q: %w", s.homeDir, err) + } + + path := path.Join(s.homeDir, "tx.db") + boltDB, err := initBoltDB(path) + if err != nil { + return fmt.Errorf("init bolt_db: %w", err) + } + + s.db = boltDB + + return nil +} + +func (s *Store) StoreTx(ctx context.Context, tx *cluster.Transaction) error { + data, err := json.Marshal(txWrapper{ + ID: tx.ID, + Payload: tx.Payload, + Type: tx.Type, + }) + if err != nil { + return fmt.Errorf("marshal tx: %w", err) + } + + return s.db.Update(func(boltTx *bbolt.Tx) error { + b := boltTx.Bucket(txBucket) + return b.Put([]byte(tx.ID), data) + }) +} + +func (s *Store) DeleteTx(ctx context.Context, txId string) error { + return s.db.Update(func(boltTx *bbolt.Tx) error { + b := boltTx.Bucket(txBucket) + return b.Delete([]byte(txId)) + }) +} + +func (s *Store) IterateAll(ctx context.Context, + cb func(tx *cluster.Transaction), +) error { + return s.db.View(func(boltTx *bbolt.Tx) error { + b := boltTx.Bucket(txBucket) + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + var txWrap txWrapperRead + if err := json.Unmarshal(v, 
&txWrap); err != nil { + return err + } + + tx := cluster.Transaction{ + ID: txWrap.ID, + Type: txWrap.Type, + } + + pl, err := s.unmarshaller(tx.Type, txWrap.Payload) + if err != nil { + return err + } + + tx.Payload = pl + + cb(&tx) + + } + return nil + }) +} + +func (s *Store) Close() error { + return nil +} + +func initBoltDB(filePath string) (*bbolt.DB, error) { + db, err := bbolt.Open(filePath, 0o600, nil) + if err != nil { + return nil, fmt.Errorf("open %q: %w", filePath, err) + } + + root := func(tx *bbolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(txBucket) + return err + } + + return db, db.Update(root) +} + +type txWrapper struct { + ID string `json:"id"` + Payload any `json:"payload"` + Type cluster.TransactionType `json:"type"` +} + +// delayed unmarshalling of the payload, so we can inject a specific +// marshaller +type txWrapperRead struct { + ID string `json:"id"` + Payload json.RawMessage `json:"payload"` + Type cluster.TransactionType `json:"type"` +} + +type unmarshalFn func(txType cluster.TransactionType, payload json.RawMessage) (any, error) diff --git a/platform/dbops/binaries/weaviate-src/ci/docker_report.md.tpl b/platform/dbops/binaries/weaviate-src/ci/docker_report.md.tpl new file mode 100644 index 0000000000000000000000000000000000000000..071062fc132f38dc83a1b851e4473b19f9c34a20 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/ci/docker_report.md.tpl @@ -0,0 +1,47 @@ +## Docker Preview Image :whale: + +A preview docker image for this branch is available with the following tag: + +``` +$PREVIEW_TAG +``` + +A semver compliant docker image tag for this branch is available with the following tag: + +``` +$PREVIEW_SEMVER_TAG +``` + +## Use at your own risk :warning: + +Preview builds make no promises about stability or feature completeness. Use them at your own risk. A preview build is not generated if tests failed, so they have at least passed the common test suite. 
They may or may not have been subjected to the asynchronous stress test and chaos pipelines. + +## Usage :newspaper: + +### Docker-compose + +You can obtain a [docker-compose.yaml here](https://weaviate.io/developers/weaviate/current/installation/docker-compose.html#configurator) and configure it to your liking. Then make sure to set `services.weaviate.image` to `$FIRST_TAG`. For example, like so: + +```yaml +services: + weaviate: + image: $FIRST_TAG +``` + +### Helm / Kubernetes + +To use this preview image with Helm/Kubernetes, set `image.tag` to `$TAG_ONLY` in your `values.yaml`. For example, like so: + +```yaml +image: + tag: $TAG_ONLY +``` + +### Chaos-Pipeline + +To use this build in a chaos pipeline, change the `WEAVIATE_VERSION` in `.github/workflows/tests.yaml` to `$TAG_ONLY`, for example, like so: + +```yaml +env: + WEAVIATE_VERSION: $TAG_ONLY +``` diff --git a/platform/dbops/binaries/weaviate-src/ci/generate_docker_report.sh b/platform/dbops/binaries/weaviate-src/ci/generate_docker_report.sh new file mode 100644 index 0000000000000000000000000000000000000000..a74565bfe43f090521dbaf387d2a57b9180db083 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/ci/generate_docker_report.sh @@ -0,0 +1,27 @@ +set -e + +cd "${0%/*}" + +function generate_report() { + # Handle both single-line and multi-line tags + echo "PREVIEW_TAG=$PREVIEW_TAG" + if [ -z "$PREVIEW_TAG" ]; then + echo "No preview tags found" + return + fi + + echo "PREVIEW_SEMVER_TAG=$PREVIEW_SEMVER_TAG" + if [ -z "$PREVIEW_SEMVER_TAG" ]; then + echo "No semver tags found" + return + fi + + # Extract first tag for examples (works for both single and multi-line) + export FIRST_TAG=$(echo "$PREVIEW_TAG" | head -n 1) + export TAG_ONLY="$(echo "$FIRST_TAG" | cut -d ':' -f 2)" + + # Generate report using the template + envsubst < docker_report.md.tpl >> "$GITHUB_STEP_SUMMARY" +} + +generate_report diff --git a/platform/dbops/binaries/weaviate-src/ci/push_docker.sh 
b/platform/dbops/binaries/weaviate-src/ci/push_docker.sh new file mode 100644 index 0000000000000000000000000000000000000000..25c43648e0f0cda876a58271bfbf08e327e9f12b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/ci/push_docker.sh @@ -0,0 +1,125 @@ +#!/usr/bin/env bash + +set -euo pipefail + +DOCKER_REPO_WEAVIATE="semitechnologies/weaviate" + +only_build_amd64=false +only_build_arm64=false +while [[ "$#" -gt 0 ]]; do + case $1 in + --amd64-only) only_build_amd64=true;; + --arm64-only) only_build_arm64=true;; + --help|-h) printf '%s\n' \ + "Options:"\ + "--amd64-only"\ + "--arm64-only"\ + "--help | -h"; exit 1;; + *) echo "Unknown parameter passed: $1"; exit 1 ;; + esac + shift +done + +function release() { + DOCKER_REPO=$DOCKER_REPO_WEAVIATE + + # for multi-platform build + if [ "$only_build_amd64" == "false" ] && [ "$only_build_arm64" == "false" ]; then + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + fi + + docker buildx create --use + + # nightly tag was added to be pushed on merges to main branch, latest tag is used to get latest released version + tag_latest="${DOCKER_REPO}:latest" + tag_exact= + tag_preview= + tag_preview_semver= + tag_nightly= + + git_revision=$(echo "$GITHUB_SHA" | cut -c1-7) + git_branch="$GITHUB_HEAD_REF" + build_user="ci" + build_date=$(date -u +"%Y-%m-%dT%H:%M:%SZ") + + # Determine architecture and platform + arch="" + if $only_build_amd64; then + build_platform="linux/amd64" + arch="amd64" + elif $only_build_arm64; then + build_platform="linux/arm64" + arch="arm64" + else + build_platform="linux/amd64,linux/arm64" + fi + + weaviate_version="$(jq -r '.info.version' < openapi-specs/schema.json)" + if [ "$GITHUB_REF_TYPE" == "tag" ]; then + if [ "$GITHUB_REF_NAME" != "v$weaviate_version" ]; then + echo "The release tag ($GITHUB_REF_NAME) and Weaviate version (v$weaviate_version) are not equal! Can't release." 
+ return 1 + fi + tag_exact="${DOCKER_REPO}:${weaviate_version}" + git_branch="$GITHUB_REF_NAME" + else + if [ -n "$arch" ]; then + tag_preview_semver="${DOCKER_REPO}:${weaviate_version}-${git_revision}.${arch}" + else + tag_preview_semver="${DOCKER_REPO}:${weaviate_version}-${git_revision}" + fi + pr_title="$(echo -n "$PR_TITLE" | tr '[:upper:]' '[:lower:]' | tr -c -s '[:alnum:]' '-' | sed 's/-$//g')" + if [ "$pr_title" == "" ]; then + git_branch="$GITHUB_REF_NAME" + branch_name="$(echo -n $GITHUB_REF_NAME | sed 's/\//-/g')" + tag_preview="${DOCKER_REPO}:${branch_name}-${git_revision}" + weaviate_version="${branch_name}-${git_revision}" + git_branch="$GITHUB_HEAD_REF" + if [ "$branch_name" == "main" ]; then + tag_nightly="${DOCKER_REPO}:nightly" + fi + else + if [ -n "$arch" ]; then + tag_preview="${DOCKER_REPO}:preview-${pr_title}-${git_revision}.${arch}" + else + tag_preview="${DOCKER_REPO}:preview-${pr_title}-${git_revision}" + fi + weaviate_version="preview-${pr_title}-${git_revision}" + fi + fi + + args=("--build-arg=GIT_REVISION=$git_revision" + "--build-arg=GIT_BRANCH=$git_branch" + "--build-arg=BUILD_USER=$build_user" + "--build-arg=BUILD_DATE=$build_date" + "--build-arg=CGO_ENABLED=0" # Force-disable CGO for cross-compilation - Fixes segmentation faults on arm64 (https://docs.docker.com/docker-hub/image-library/trusted-content/#alpine-images) + "--platform=$build_platform" + "--target=weaviate" + "--push") + + if [ -n "$tag_exact" ]; then + # exact tag on main + args+=("-t=$tag_exact") + args+=("-t=$tag_latest") + fi + if [ -n "$tag_preview" ]; then + # preview tag on PR builds + args+=("-t=$tag_preview") + args+=("-t=$tag_preview_semver") + if [ -n "$tag_nightly" ]; then + args+=("-t=$tag_nightly") + fi + fi + + docker buildx build "${args[@]}" . 
|| exit 1 + + if [ -n "$tag_preview" ]; then + echo "PREVIEW_TAG=$tag_preview" >> "$GITHUB_OUTPUT" + echo "PREVIEW_SEMVER_TAG=$tag_preview_semver" >> "$GITHUB_OUTPUT" + elif [ -n "$tag_exact" ]; then + echo "PREVIEW_TAG=$tag_exact" >> "$GITHUB_OUTPUT" + echo "PREVIEW_SEMVER_TAG=$tag_exact" >> "$GITHUB_OUTPUT" + fi +} + +release diff --git a/platform/dbops/binaries/weaviate-src/client/weaviate_client.go b/platform/dbops/binaries/weaviate-src/client/weaviate_client.go new file mode 100644 index 0000000000000000000000000000000000000000..2049119a15f421562e733040918b6b6907d9200e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/client/weaviate_client.go @@ -0,0 +1,193 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go-swagger; DO NOT EDIT. + +package client + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/client/authz" + "github.com/weaviate/weaviate/client/backups" + "github.com/weaviate/weaviate/client/batch" + "github.com/weaviate/weaviate/client/classifications" + "github.com/weaviate/weaviate/client/cluster" + "github.com/weaviate/weaviate/client/distributed_tasks" + "github.com/weaviate/weaviate/client/graphql" + "github.com/weaviate/weaviate/client/meta" + "github.com/weaviate/weaviate/client/nodes" + "github.com/weaviate/weaviate/client/objects" + "github.com/weaviate/weaviate/client/operations" + "github.com/weaviate/weaviate/client/replication" + "github.com/weaviate/weaviate/client/schema" + "github.com/weaviate/weaviate/client/users" + "github.com/weaviate/weaviate/client/well_known" +) + +// Default weaviate HTTP client. +var Default = NewHTTPClient(nil) + +const ( + // DefaultHost is the default Host + // found in Meta (info) section of spec file + DefaultHost string = "localhost" + // DefaultBasePath is the default BasePath + // found in Meta (info) section of spec file + DefaultBasePath string = "/v1" +) + +// DefaultSchemes are the default schemes found in Meta (info) section of spec file +var DefaultSchemes = []string{"https"} + +// NewHTTPClient creates a new weaviate HTTP client. +func NewHTTPClient(formats strfmt.Registry) *Weaviate { + return NewHTTPClientWithConfig(formats, nil) +} + +// NewHTTPClientWithConfig creates a new weaviate HTTP client, +// using a customizable transport config. 
+func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *Weaviate { + // ensure nullable parameters have default + if cfg == nil { + cfg = DefaultTransportConfig() + } + + // create transport and client + transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes) + return New(transport, formats) +} + +// New creates a new weaviate client +func New(transport runtime.ClientTransport, formats strfmt.Registry) *Weaviate { + // ensure nullable parameters have default + if formats == nil { + formats = strfmt.Default + } + + cli := new(Weaviate) + cli.Transport = transport + cli.Authz = authz.New(transport, formats) + cli.Backups = backups.New(transport, formats) + cli.Batch = batch.New(transport, formats) + cli.Classifications = classifications.New(transport, formats) + cli.Cluster = cluster.New(transport, formats) + cli.DistributedTasks = distributed_tasks.New(transport, formats) + cli.Graphql = graphql.New(transport, formats) + cli.Meta = meta.New(transport, formats) + cli.Nodes = nodes.New(transport, formats) + cli.Objects = objects.New(transport, formats) + cli.Operations = operations.New(transport, formats) + cli.Replication = replication.New(transport, formats) + cli.Schema = schema.New(transport, formats) + cli.Users = users.New(transport, formats) + cli.WellKnown = well_known.New(transport, formats) + return cli +} + +// DefaultTransportConfig creates a TransportConfig with the +// default settings taken from the meta section of the spec file. +func DefaultTransportConfig() *TransportConfig { + return &TransportConfig{ + Host: DefaultHost, + BasePath: DefaultBasePath, + Schemes: DefaultSchemes, + } +} + +// TransportConfig contains the transport related info, +// found in the meta section of the spec file. +type TransportConfig struct { + Host string + BasePath string + Schemes []string +} + +// WithHost overrides the default host, +// provided by the meta section of the spec file. 
+func (cfg *TransportConfig) WithHost(host string) *TransportConfig { + cfg.Host = host + return cfg +} + +// WithBasePath overrides the default basePath, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig { + cfg.BasePath = basePath + return cfg +} + +// WithSchemes overrides the default schemes, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig { + cfg.Schemes = schemes + return cfg +} + +// Weaviate is a client for weaviate +type Weaviate struct { + Authz authz.ClientService + + Backups backups.ClientService + + Batch batch.ClientService + + Classifications classifications.ClientService + + Cluster cluster.ClientService + + DistributedTasks distributed_tasks.ClientService + + Graphql graphql.ClientService + + Meta meta.ClientService + + Nodes nodes.ClientService + + Objects objects.ClientService + + Operations operations.ClientService + + Replication replication.ClientService + + Schema schema.ClientService + + Users users.ClientService + + WellKnown well_known.ClientService + + Transport runtime.ClientTransport +} + +// SetTransport changes the transport on the client and all its subresources +func (c *Weaviate) SetTransport(transport runtime.ClientTransport) { + c.Transport = transport + c.Authz.SetTransport(transport) + c.Backups.SetTransport(transport) + c.Batch.SetTransport(transport) + c.Classifications.SetTransport(transport) + c.Cluster.SetTransport(transport) + c.DistributedTasks.SetTransport(transport) + c.Graphql.SetTransport(transport) + c.Meta.SetTransport(transport) + c.Nodes.SetTransport(transport) + c.Objects.SetTransport(transport) + c.Operations.SetTransport(transport) + c.Replication.SetTransport(transport) + c.Schema.SetTransport(transport) + c.Users.SetTransport(transport) + c.WellKnown.SetTransport(transport) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/backoff.go 
b/platform/dbops/binaries/weaviate-src/cluster/backoff.go new file mode 100644 index 0000000000000000000000000000000000000000..fa2e7d43d92908f73053734d4380d2c0b466c1c3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/backoff.go @@ -0,0 +1,45 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "time" + + "github.com/cenkalti/backoff/v4" +) + +// backoffConfig creates a backoff configuration based on the election timeout. +// The initial interval is set to 1/20th of the election timeout, and the max interval +// is set to the election timeout itself. This ensures that retries are aggressive +// enough to detect leader changes quickly while not overwhelming the system. +// With exponential backoff, each retry doubles the previous interval, but is capped at the max interval. 
+// For example, with a 1s election timeout: +// - Initial interval: 50ms (1/20th of election timeout) +// - Max interval: 1s (election timeout) +// - Max retries: 10 +// If electionTimeout = 1s → max time ≈ 5.55s (raft default) +// If electionTimeout = 2s → max time ≈ 11.1s +// If electionTimeout = 5s → max time ≈ 27.75s +func backoffConfig(ctx context.Context, electionTimeout time.Duration) backoff.BackOffContext { + initialInterval := electionTimeout / 20 + return backoff.WithContext( + backoff.WithMaxRetries( + backoff.NewExponentialBackOff( + backoff.WithInitialInterval(initialInterval), + backoff.WithMaxInterval(electionTimeout), + ), + 10, + ), + ctx, + ) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/backoff_test.go b/platform/dbops/binaries/weaviate-src/cluster/backoff_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2f66d42ec800c718edbbbaed47d8021819cd5f78 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/backoff_test.go @@ -0,0 +1,170 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "testing" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/stretchr/testify/assert" +) + +func TestBackoffConfig(t *testing.T) { + tests := []struct { + name string + electionTimeout time.Duration + wantInitial time.Duration + wantMax time.Duration + }{ + { + name: "1 second election timeout", + electionTimeout: time.Second, + wantInitial: time.Millisecond * 50, // 1/20th of 1s + wantMax: time.Second, // same as election timeout + }, + { + name: "2 second election timeout", + electionTimeout: time.Second * 2, + wantInitial: time.Millisecond * 100, // 1/20th of 2s + wantMax: time.Second * 2, // same as election timeout + }, + { + name: "500ms election timeout", + electionTimeout: time.Millisecond * 500, + wantInitial: time.Millisecond * 25, // 1/20th of 500ms + wantMax: time.Millisecond * 500, // same as election timeout + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + bo := backoffConfig(ctx, tt.electionTimeout) + + // Test initial interval with larger delta to account for randomization + firstInterval := bo.NextBackOff() + // Allow up to 50% variation for initial interval + assert.InDelta(t, float64(tt.wantInitial), float64(firstInterval), float64(tt.wantInitial)*0.5) + + // Collect all intervals + var intervals []time.Duration + for i := 0; i < 15; i++ { // Try more than max retries to ensure we get all intervals + next := bo.NextBackOff() + if next == backoff.Stop { + break + } + intervals = append(intervals, next) + } + + // Verify we got at least 9 intervals (the backoff library might stop at 9) + assert.GreaterOrEqual(t, len(intervals), 9) + assert.LessOrEqual(t, len(intervals), 10) + + // Verify max interval is within 50% of expected + maxInterval := intervals[len(intervals)-1] + assert.InDelta(t, float64(tt.wantMax), float64(maxInterval), float64(tt.wantMax)*0.5) + + // Verify overall pattern: + // 1. 
All intervals should be between initial and max + // 2. Later intervals should generally be larger than earlier ones + minAllowed := float64(tt.wantInitial) * 0.5 // Allow 50% below initial + maxAllowed := float64(tt.wantMax) * float64(1.5) // Allow 50% above max + + // Track the running average to detect general increase + var sum float64 + var count int + + for i, interval := range intervals { + duration := float64(interval) + + // Check bounds + assert.GreaterOrEqual(t, duration, minAllowed, "interval %d too small", i) + assert.LessOrEqual(t, duration, maxAllowed, "interval %d too large", i) + + // Update running average + sum += duration + count++ + avg := sum / float64(count) + + // After first few intervals, check that we're generally increasing + if i > 2 { + // Current interval should be at least 80% of the average so far + // This allows for some variation while ensuring general increase + assert.GreaterOrEqual(t, duration, avg*0.8, + "interval %d (%v) too small compared to average so far (%v)", + i, interval, time.Duration(avg)) + } + } + + // Verify context is properly set + assert.Equal(t, ctx, bo.Context()) + }) + } +} + +func TestBackoffBehavior(t *testing.T) { + ctx := context.Background() + electionTimeout := time.Second + bo := backoffConfig(ctx, electionTimeout) + + // Test that intervals increase exponentially but are capped at max interval + intervals := make([]time.Duration, 0) + for i := 0; i < 15; i++ { // Try more than max retries to verify capping + next := bo.NextBackOff() + if next == backoff.Stop { + break + } + intervals = append(intervals, next) + } + + // Should have between 9 and 10 retries + assert.GreaterOrEqual(t, len(intervals), 9) + assert.LessOrEqual(t, len(intervals), 10) + + // Verify overall pattern using the same approach as TestBackoffConfig + minAllowed := float64(time.Millisecond * 25) // 50% of initial (50ms) + maxAllowed := float64(time.Second) * float64(1.5) // 50% above max (1s) + + var sum float64 + var count int + 
+ for i, interval := range intervals { + duration := float64(interval) + + // Check bounds + assert.GreaterOrEqual(t, duration, minAllowed, "interval %d too small", i) + assert.LessOrEqual(t, duration, maxAllowed, "interval %d too large", i) + + // Update running average + sum += duration + count++ + avg := sum / float64(count) + + // After first few intervals, check that we're generally increasing + if i > 2 { + assert.GreaterOrEqual(t, duration, avg*0.8, + "interval %d (%v) too small compared to average so far (%v)", + i, interval, time.Duration(avg)) + } + } + + // Verify total time is within reasonable bounds + totalTime := time.Duration(0) + for _, interval := range intervals { + totalTime += interval + } + expectedTotal := time.Millisecond * 5550 // 50 + 100 + 200 + 400 + 800 + 1000*5 + // Allow 50% variation for total time due to randomization + assert.InDelta(t, float64(expectedTotal), float64(totalTime), float64(expectedTotal)*0.5) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/log.go b/platform/dbops/binaries/weaviate-src/cluster/log.go new file mode 100644 index 0000000000000000000000000000000000000000..ac8a29a412d2d0548076998c9462eeb92c76aaae --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/log.go @@ -0,0 +1,48 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "errors" + "fmt" + + "github.com/hashicorp/raft" +) + +func (st *Store) LastAppliedCommand() (uint64, error) { + if st.logStore == nil { + return 0, fmt.Errorf("log store can't be nil") + } + + first, err := st.logStore.FirstIndex() + if err != nil { + return 0, fmt.Errorf("first index: %w", err) + } + last, err := st.logStore.LastIndex() + if err != nil { + return 0, fmt.Errorf("last index: %w", err) + } + if last == 0 { + return 0, nil + } + var rLog raft.Log + for ; last >= first; last-- { + err := st.logStore.GetLog(last, &rLog) + if err != nil && !errors.Is(err, raft.ErrLogNotFound) { + return 0, fmt.Errorf("get log at index %d: %w", last, err) + } + if rLog.Type == raft.LogCommand { + return last, nil + } + } + return 0, nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft.go b/platform/dbops/binaries/weaviate-src/cluster/raft.go new file mode 100644 index 0000000000000000000000000000000000000000..700ae1a3515c084b0d67ece7ee394b44b09ef96e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft.go @@ -0,0 +1,94 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "time" + + "github.com/weaviate/weaviate/cluster/replication" + + "github.com/sirupsen/logrus" + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/schema" + "github.com/weaviate/weaviate/usecases/cluster" +) + +// Raft abstracts away the Raft store, providing clients with an interface that encompasses all query & write operations. +// It ensures that these operations are executed on the current leader, regardless of the specific leader in the cluster. 
+// If current node is the leader, then changes will be applied on the local node and bypass any networking requests. +type Raft struct { + nodeSelector cluster.NodeSelector + store *Store + cl client + log *logrus.Logger +} + +// client to communicate with remote services +type client interface { + Apply(ctx context.Context, leaderAddr string, req *cmd.ApplyRequest) (*cmd.ApplyResponse, error) + Query(ctx context.Context, leaderAddr string, req *cmd.QueryRequest) (*cmd.QueryResponse, error) + Remove(ctx context.Context, leaderAddress string, req *cmd.RemovePeerRequest) (*cmd.RemovePeerResponse, error) + Join(ctx context.Context, leaderAddr string, req *cmd.JoinPeerRequest) (*cmd.JoinPeerResponse, error) +} + +func NewRaft(selector cluster.NodeSelector, store *Store, client client) *Raft { + return &Raft{nodeSelector: selector, store: store, cl: client, log: store.log} +} + +// Open opens this store service and marked as such. +// It constructs a new Raft node using the provided configuration. 
+// If there is any old state, such as snapshots, logs, peers, etc., all of those will be restored +func (s *Raft) Open(ctx context.Context, db schema.Indexer) error { + s.log.Info("starting raft sub-system ...") + s.store.SetDB(db) + return s.store.Open(ctx) +} + +func (s *Raft) Close(ctx context.Context) (err error) { + s.log.Info("shutting down raft sub-system ...") + + // non-voter can be safely removed, as they don't partake in RAFT elections + if !s.store.IsVoter() { + s.log.Info("removing this node from cluster prior to shutdown ...") + if err := s.Remove(ctx, s.store.ID()); err != nil { + s.log.WithError(err).Error("remove this node from cluster") + } else { + s.log.Info("successfully removed this node from the cluster.") + } + } + return s.store.Close(ctx) +} + +func (s *Raft) Ready() bool { + return s.store.Ready() +} + +func (s *Raft) SchemaReader() schema.SchemaReader { + return s.store.SchemaReader() +} + +func (s *Raft) WaitUntilDBRestored(ctx context.Context, period time.Duration, close chan struct{}) error { + return s.store.WaitToRestoreDB(ctx, period, close) +} + +func (s *Raft) WaitForUpdate(ctx context.Context, schemaVersion uint64) error { + return s.store.WaitForAppliedIndex(ctx, time.Millisecond*50, schemaVersion) +} + +func (s *Raft) NodeSelector() cluster.NodeSelector { + return s.nodeSelector +} + +func (s *Raft) ReplicationFsm() *replication.ShardReplicationFSM { + return s.store.replicationManager.GetReplicationFSM() +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft_alias_apply_endpoints.go b/platform/dbops/binaries/weaviate-src/cluster/raft_alias_apply_endpoints.go new file mode 100644 index 0000000000000000000000000000000000000000..43166a2736d05f4307fe0865c7c8d9aa07b8bfe4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_alias_apply_endpoints.go @@ -0,0 +1,89 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// 
\_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "fmt" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/schema" + "github.com/weaviate/weaviate/entities/models" + gproto "google.golang.org/protobuf/proto" +) + +func (s *Raft) CreateAlias(ctx context.Context, alias string, class *models.Class) (uint64, error) { + if alias == "" { + return 0, fmt.Errorf("empty alias name: %w", schema.ErrBadRequest) + } + + if class == nil { + return 0, fmt.Errorf("class does not exist: %w", schema.ErrBadRequest) + } + + if class.Class == "" { + return 0, fmt.Errorf("empty class name: %w", schema.ErrBadRequest) + } + + req := cmd.CreateAliasRequest{Collection: class.Class, Alias: alias} + subCommand, err := gproto.Marshal(&req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_CREATE_ALIAS, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) ReplaceAlias(ctx context.Context, alias *models.Alias, newClass *models.Class) (uint64, error) { + if alias == nil { + return 0, fmt.Errorf("empty alias: %w", schema.ErrBadRequest) + } + + if newClass == nil { + return 0, fmt.Errorf("class does not exist: %w", schema.ErrBadRequest) + } + + if newClass.Class == "" { + return 0, fmt.Errorf("empty class name: %w", schema.ErrBadRequest) + } + + req := cmd.ReplaceAliasRequest{Collection: newClass.Class, Alias: alias.Alias} + subCommand, err := gproto.Marshal(&req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_REPLACE_ALIAS, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) DeleteAlias(ctx context.Context, alias string) (uint64, error) { + if alias == "" { + return 0, 
fmt.Errorf("empty alias name: %w", schema.ErrBadRequest) + } + + req := cmd.DeleteAliasRequest{Alias: alias} + subCommand, err := gproto.Marshal(&req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_DELETE_ALIAS, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft_alias_query_endpoints.go b/platform/dbops/binaries/weaviate-src/cluster/raft_alias_query_endpoints.go new file mode 100644 index 0000000000000000000000000000000000000000..d4dc0f2a2bbb2093e84181e837a79db5da04a81b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_alias_query_endpoints.go @@ -0,0 +1,108 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/getsentry/sentry-go" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/entities/models" + entSentry "github.com/weaviate/weaviate/entities/sentry" +) + +func (s *Raft) GetAlias(ctx context.Context, aliasName string) (*models.Alias, error) { + if entSentry.Enabled() { + transaction := sentry.StartSpan(ctx, "grpc.client", + sentry.WithTransactionName("raft.query.alias"), + sentry.WithDescription("Query the alias"), + ) + ctx = transaction.Context() + defer transaction.Finish() + } + req := cmd.QueryResolveAliasRequest{ + Alias: aliasName, + } + + subCommand, err := json.Marshal(&req) + if err != nil { + return nil, fmt.Errorf("marshal request: %w", err) + } + + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_RESOLVE_ALIAS, + SubCommand: subCommand, + } + + queryResp, err := s.Query(ctx, command) + if err != nil { 
+ return nil, fmt.Errorf("failed to execute query: %w", err) + } + + // Unmarshal the response + resp := cmd.QueryResolveAliasResponse{} + err = json.Unmarshal(queryResp.Payload, &resp) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal query result: %w", err) + } + alias := &models.Alias{ + Alias: aliasName, + Class: resp.Class, + } + + return alias, nil +} + +func (s *Raft) GetAliases(ctx context.Context, alias string, class *models.Class) ([]*models.Alias, error) { + if entSentry.Enabled() { + transaction := sentry.StartSpan(ctx, "grpc.client", + sentry.WithTransactionName("raft.query.aliases"), + sentry.WithDescription("Query the aliases"), + ) + ctx = transaction.Context() + defer transaction.Finish() + } + req := cmd.QueryGetAliasesRequest{} + if alias != "" { + req.Alias = alias + } + if class != nil { + req.Class = class.Class + } + subCommand, err := json.Marshal(&req) + if err != nil { + return nil, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_ALIASES, + SubCommand: subCommand, + } + queryResp, err := s.Query(ctx, command) + if err != nil { + return nil, fmt.Errorf("failed to execute query: %w", err) + } + + // Unmarshal the response + resp := cmd.QueryGetAliasesResponse{} + err = json.Unmarshal(queryResp.Payload, &resp) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal query result: %w", err) + } + aliases := []*models.Alias{} + for alias, className := range resp.Aliases { + aliases = append(aliases, &models.Alias{Alias: alias, Class: className}) + } + return aliases, nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft_apply_endpoints.go b/platform/dbops/binaries/weaviate-src/cluster/raft_apply_endpoints.go new file mode 100644 index 0000000000000000000000000000000000000000..0d81dbe6aafd499d4f59640bbaa22ff65f5df029 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_apply_endpoints.go @@ -0,0 +1,332 @@ +// _ _ +// __ _____ __ ___ 
___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/cenkalti/backoff/v4" + "github.com/hashicorp/raft" + "github.com/prometheus/client_golang/prometheus" + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/schema" + "github.com/weaviate/weaviate/cluster/types" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/monitoring" + "github.com/weaviate/weaviate/usecases/sharding" + "google.golang.org/protobuf/proto" +) + +func (s *Raft) AddClass(ctx context.Context, cls *models.Class, ss *sharding.State) (uint64, error) { + if cls == nil || cls.Class == "" { + return 0, fmt.Errorf("nil class or empty class name: %w", schema.ErrBadRequest) + } + + req := cmd.AddClassRequest{Class: cls, State: ss} + subCommand, err := json.Marshal(&req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_ADD_CLASS, + Class: cls.Class, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) UpdateClass(ctx context.Context, cls *models.Class, ss *sharding.State) (uint64, error) { + if cls == nil || cls.Class == "" { + return 0, fmt.Errorf("nil class or empty class name: %w", schema.ErrBadRequest) + } + req := cmd.UpdateClassRequest{Class: cls, State: ss} + subCommand, err := json.Marshal(&req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_UPDATE_CLASS, + Class: cls.Class, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) DeleteClass(ctx context.Context, name string) (uint64, error) { + 
command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_DELETE_CLASS, + Class: name, + } + return s.Execute(ctx, command) +} + +func (s *Raft) RestoreClass(ctx context.Context, cls *models.Class, ss *sharding.State) (uint64, error) { + if cls == nil || cls.Class == "" { + return 0, fmt.Errorf("nil class or empty class name: %w", schema.ErrBadRequest) + } + req := cmd.AddClassRequest{Class: cls, State: ss} + subCommand, err := json.Marshal(&req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_RESTORE_CLASS, + Class: cls.Class, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) AddProperty(ctx context.Context, class string, props ...*models.Property) (uint64, error) { + for _, p := range props { + if p == nil || p.Name == "" || class == "" { + return 0, fmt.Errorf("empty property or empty class name: %w", schema.ErrBadRequest) + } + } + req := cmd.AddPropertyRequest{Properties: props} + subCommand, err := json.Marshal(&req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_ADD_PROPERTY, + Class: class, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) AddReplicaToShard(ctx context.Context, class, shard, targetNode string) (uint64, error) { + if class == "" || shard == "" || targetNode == "" { + return 0, fmt.Errorf("empty class or shard or sourceNode or targetNode: %w", schema.ErrBadRequest) + } + req := cmd.AddReplicaToShard{ + Class: class, + Shard: shard, + TargetNode: targetNode, + } + subCommand, err := json.Marshal(&req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_ADD_REPLICA_TO_SHARD, + Class: req.Class, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) DeleteReplicaFromShard(ctx context.Context, 
class, shard, targetNode string) (uint64, error) { + if class == "" || shard == "" || targetNode == "" { + return 0, fmt.Errorf("empty class or shard or sourceNode or targetNode: %w", schema.ErrBadRequest) + } + req := cmd.DeleteReplicaFromShard{ + Class: class, + Shard: shard, + TargetNode: targetNode, + } + subCommand, err := json.Marshal(&req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_DELETE_REPLICA_FROM_SHARD, + Class: req.Class, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) ReplicationAddReplicaToShard(ctx context.Context, class, shard, targetNode string, opId uint64) (uint64, error) { + if class == "" || shard == "" || targetNode == "" { + return 0, fmt.Errorf("empty class or shard or sourceNode or targetNode: %w", schema.ErrBadRequest) + } + req := cmd.ReplicationAddReplicaToShard{ + Class: class, + Shard: shard, + TargetNode: targetNode, + OpId: opId, + } + subCommand, err := json.Marshal(&req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_REPLICATION_REPLICATE_ADD_REPLICA_TO_SHARD, + Class: req.Class, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) SyncShard(ctx context.Context, collection, shard, nodeId string) (uint64, error) { + if collection == "" || shard == "" || nodeId == "" { + return 0, fmt.Errorf("empty class or shard or sourceNode or targetNode: %w", schema.ErrBadRequest) + } + req := cmd.SyncShardRequest{ + Collection: collection, + Shard: shard, + NodeId: nodeId, + } + subCommand, err := json.Marshal(&req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_REPLICATION_REPLICATE_SYNC_SHARD, + Class: req.Collection, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) 
UpdateShardStatus(ctx context.Context, class, shard, status string) (uint64, error) { + if class == "" || shard == "" { + return 0, fmt.Errorf("empty class or shard: %w", schema.ErrBadRequest) + } + req := cmd.UpdateShardStatusRequest{Class: class, Shard: shard, Status: status} + subCommand, err := json.Marshal(&req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_UPDATE_SHARD_STATUS, + Class: req.Class, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) AddTenants(ctx context.Context, class string, req *cmd.AddTenantsRequest) (uint64, error) { + if class == "" || req == nil { + return 0, fmt.Errorf("empty class name or nil request: %w", schema.ErrBadRequest) + } + subCommand, err := proto.Marshal(req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_ADD_TENANT, + Class: class, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) UpdateTenants(ctx context.Context, class string, req *cmd.UpdateTenantsRequest) (uint64, error) { + if class == "" || req == nil { + return 0, fmt.Errorf("empty class name or nil request: %w", schema.ErrBadRequest) + } + subCommand, err := proto.Marshal(req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_UPDATE_TENANT, + Class: class, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) DeleteTenants(ctx context.Context, class string, req *cmd.DeleteTenantsRequest) (uint64, error) { + if class == "" || req == nil { + return 0, fmt.Errorf("empty class name or nil request: %w", schema.ErrBadRequest) + } + subCommand, err := proto.Marshal(req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_DELETE_TENANT, + 
Class: class, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) UpdateTenantsProcess(ctx context.Context, class string, req *cmd.TenantProcessRequest) (uint64, error) { + if class == "" || req == nil { + return 0, fmt.Errorf("empty class name or nil request: %w", schema.ErrBadRequest) + } + subCommand, err := proto.Marshal(req) + if err != nil { + return 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_TENANT_PROCESS, + Class: class, + SubCommand: subCommand, + } + return s.Execute(ctx, command) +} + +func (s *Raft) StoreSchemaV1() error { + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_STORE_SCHEMA_V1, + } + _, err := s.Execute(context.Background(), command) + return err +} + +func (s *Raft) Execute(ctx context.Context, req *cmd.ApplyRequest) (uint64, error) { + t := prometheus.NewTimer( + monitoring.GetMetrics().SchemaWrites.WithLabelValues( + req.Type.String(), + )) + defer t.ObserveDuration() + + var schemaVersion uint64 + err := backoff.Retry(func() error { + var err error + + // Validate the apply first + if _, ok := cmd.ApplyRequest_Type_name[int32(req.Type.Number())]; !ok { + err = types.ErrUnknownCommand + // This is an invalid apply command, don't retry + return backoff.Permanent(err) + } + + // We are the leader, let's apply + if s.store.IsLeader() { + schemaVersion, err = s.store.Execute(req) + // We might fail due to leader not found as we are losing or transferring leadership, retry + if errors.Is(err, raft.ErrNotLeader) || errors.Is(err, raft.ErrLeadershipLost) { + return err + } + return backoff.Permanent(err) + } + + leader := s.store.Leader() + if leader == "" { + err = s.leaderErr() + s.log.Warnf("apply: could not find leader: %s", err) + return err + } + + var resp *cmd.ApplyResponse + resp, err = s.cl.Apply(ctx, leader, req) + if err != nil { + // Don't retry if the actual apply to the leader failed, we have retry at the network layer 
already + return backoff.Permanent(err) + } + schemaVersion = resp.Version + return nil + // pass in the election timeout after applying multiplier + }, backoffConfig(ctx, s.store.raftConfig().ElectionTimeout)) + + return schemaVersion, err +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft_cluster_endpoints.go b/platform/dbops/binaries/weaviate-src/cluster/raft_cluster_endpoints.go new file mode 100644 index 0000000000000000000000000000000000000000..3096c05f1ae54c0a73a8ae54df9c49f9d75cd5bb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_cluster_endpoints.go @@ -0,0 +1,101 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "slices" + + "github.com/sirupsen/logrus" + cmd "github.com/weaviate/weaviate/cluster/proto/api" +) + +// LeaderWithID is used to return the current leader address and ID of the cluster. +// It may return empty strings if there is no current leader or the leader is unknown. +func (s *Raft) LeaderWithID() (string, string) { + addr, id := s.store.LeaderWithID() + return string(addr), string(id) +} + +// StorageCandidates return the nodes in the raft configuration or memberlist storage nodes +// based on the current configuration of the cluster if it does have MetadataVoterOnly nodes. 
+func (s *Raft) StorageCandidates() []string { + if s.store.raft == nil { + // get candidates from memberlist + return s.nodeSelector.StorageCandidates() + } + + var ( + existedRaftCandidates []string + raftStorageCandidates []string + memStorageCandidates = s.nodeSelector.StorageCandidates() + nonStorageCandidates = s.nodeSelector.NonStorageNodes() + ) + + for _, server := range s.store.raft.GetConfiguration().Configuration().Servers { + existedRaftCandidates = append(existedRaftCandidates, string(server.ID)) + } + + // filter non storage candidates + for _, c := range existedRaftCandidates { + if slices.Contains(nonStorageCandidates, c) { + continue + } + raftStorageCandidates = append(raftStorageCandidates, c) + } + + if len(memStorageCandidates) > len(raftStorageCandidates) { + // if memberlist has more nodes then use it instead + // this case could happen if we have MetaVoterOnly Nodes + // in the RAFT config + return memStorageCandidates + } + + return s.nodeSelector.SortCandidates(raftStorageCandidates) +} + +func (s *Raft) Join(ctx context.Context, id, addr string, voter bool) error { + s.log.WithFields(logrus.Fields{ + "id": id, + "address": addr, + "voter": voter, + }).Debug("membership.join") + if s.store.IsLeader() { + return s.store.Join(id, addr, voter) + } + leader := s.store.Leader() + if leader == "" { + return s.leaderErr() + } + req := &cmd.JoinPeerRequest{Id: id, Address: addr, Voter: voter} + _, err := s.cl.Join(ctx, leader, req) + return err +} + +func (s *Raft) Remove(ctx context.Context, id string) error { + s.log.WithField("id", id).Debug("membership.remove") + if s.store.IsLeader() { + return s.store.Remove(id) + } + leader := s.store.Leader() + if leader == "" { + return s.leaderErr() + } + req := &cmd.RemovePeerRequest{Id: id} + _, err := s.cl.Remove(ctx, leader, req) + return err +} + +func (s *Raft) Stats() map[string]any { + s.log.Debug("membership.stats") + return s.store.Stats() +} diff --git 
a/platform/dbops/binaries/weaviate-src/cluster/raft_distributed_tasks_apply_endpoints.go b/platform/dbops/binaries/weaviate-src/cluster/raft_distributed_tasks_apply_endpoints.go new file mode 100644 index 0000000000000000000000000000000000000000..7eac4e90960b1f2b653dae1844ac08f9a220a970 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_distributed_tasks_apply_endpoints.go @@ -0,0 +1,119 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "encoding/json" + "fmt" + "time" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" +) + +func (s *Raft) AddDistributedTask(ctx context.Context, namespace, taskID string, taskPayload any) error { + payloadBytes, err := json.Marshal(taskPayload) + if err != nil { + return fmt.Errorf("failed to marshal task payload: %w", err) + } + + req := cmd.AddDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + Payload: payloadBytes, + SubmittedAtUnixMillis: time.Now().UnixMilli(), + } + subCommand, err := json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_DISTRIBUTED_TASK_ADD, + SubCommand: subCommand, + } + if _, err = s.Execute(ctx, command); err != nil { + return fmt.Errorf("executing command: %w", err) + } + return nil +} + +func (s *Raft) RecordDistributedTaskNodeCompletion(ctx context.Context, namespace, taskID string, version uint64) error { + return s.recordDistributedTaskNodeCompletion(ctx, namespace, taskID, version, nil) +} + +func (s *Raft) RecordDistributedTaskNodeFailure(ctx context.Context, namespace, taskID string, version uint64, failureReason string) error { + return s.recordDistributedTaskNodeCompletion(ctx, 
namespace, taskID, version, &failureReason) +} + +func (s *Raft) recordDistributedTaskNodeCompletion(ctx context.Context, namespace, taskID string, version uint64, failureReason *string) error { + req := cmd.RecordDistributedTaskNodeCompletionRequest{ + Namespace: namespace, + Id: taskID, + Version: version, + NodeId: s.nodeSelector.LocalName(), + Error: failureReason, + FinishedAtUnixMillis: time.Now().UnixMilli(), + } + subCommand, err := json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_DISTRIBUTED_TASK_RECORD_NODE_COMPLETED, + SubCommand: subCommand, + } + if _, err = s.Execute(ctx, command); err != nil { + return fmt.Errorf("executing command: %w", err) + } + return nil +} + +func (s *Raft) CancelDistributedTask(ctx context.Context, namespace, taskID string, taskVersion uint64) error { + req := cmd.CancelDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + Version: taskVersion, + CancelledAtUnixMillis: time.Now().UnixMilli(), + } + subCommand, err := json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_DISTRIBUTED_TASK_CANCEL, + SubCommand: subCommand, + } + if _, err = s.Execute(ctx, command); err != nil { + return fmt.Errorf("executing command: %w", err) + } + return nil +} + +func (s *Raft) CleanUpDistributedTask(ctx context.Context, namespace, taskID string, taskVersion uint64) error { + req := cmd.CleanUpDistributedTaskRequest{ + Namespace: namespace, + Id: taskID, + Version: taskVersion, + } + subCommand, err := json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_DISTRIBUTED_TASK_CLEAN_UP, + SubCommand: subCommand, + } + if _, err = s.Execute(ctx, command); err != nil { + return fmt.Errorf("executing command: %w", err) + } + return nil +} diff --git 
a/platform/dbops/binaries/weaviate-src/cluster/raft_distributed_tasks_query_endpoints.go b/platform/dbops/binaries/weaviate-src/cluster/raft_distributed_tasks_query_endpoints.go new file mode 100644 index 0000000000000000000000000000000000000000..9f02a3af2088fc759d4c5866dcd051091fe6652b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_distributed_tasks_query_endpoints.go @@ -0,0 +1,38 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/weaviate/weaviate/cluster/distributedtask" + cmd "github.com/weaviate/weaviate/cluster/proto/api" +) + +func (s *Raft) ListDistributedTasks(ctx context.Context) (map[string][]*distributedtask.Task, error) { + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_DISTRIBUTED_TASK_LIST, + } + queryResp, err := s.Query(ctx, command) + if err != nil { + return nil, fmt.Errorf("failed to execute query: %w", err) + } + + response := distributedtask.ListDistributedTasksResponse{} + if err = json.Unmarshal(queryResp.Payload, &response); err != nil { + return nil, fmt.Errorf("failed to unmarshal query result: %w", err) + } + + return response.Tasks, nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft_dynuser_apply_endpoints.go b/platform/dbops/binaries/weaviate-src/cluster/raft_dynuser_apply_endpoints.go new file mode 100644 index 0000000000000000000000000000000000000000..4326e2e8107f6e1a585756611706eb9fee0160ca --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_dynuser_apply_endpoints.go @@ -0,0 +1,148 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| 
\_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" +) + +func (s *Raft) CreateUser(userId, secureHash, userIdentifier, apiKeyFirstLetters string, createdAt time.Time) error { + req := cmd.CreateUsersRequest{ + UserId: userId, + SecureHash: secureHash, + UserIdentifier: userIdentifier, + CreatedAt: createdAt, + ApiKeyFirstLetters: apiKeyFirstLetters, + Version: cmd.DynUserLatestCommandPolicyVersion, + } + subCommand, err := json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_UPSERT_USER, + SubCommand: subCommand, + } + if _, err := s.Execute(context.Background(), command); err != nil { + return err + } + return nil +} + +func (s *Raft) CreateUserWithKey(userId, apiKeyFirstLetters string, weakHash [sha256.Size]byte, createdAt time.Time) error { + req := cmd.CreateUserWithKeyRequest{ + UserId: userId, + CreatedAt: createdAt, + ApiKeyFirstLetters: apiKeyFirstLetters, + WeakHash: weakHash, + Version: cmd.DynUserLatestCommandPolicyVersion, + } + subCommand, err := json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_CREATE_USER_WITH_KEY, + SubCommand: subCommand, + } + if _, err := s.Execute(context.Background(), command); err != nil { + return err + } + return nil +} + +func (s *Raft) RotateKey(userId, apiKeyFirstLetters, secureHash, oldIdentifier, newIdentifier string) error { + req := cmd.RotateUserApiKeyRequest{ + UserId: userId, + ApiKeyFirstLetters: apiKeyFirstLetters, + SecureHash: secureHash, + OldIdentifier: oldIdentifier, + NewIdentifier: newIdentifier, + Version: cmd.DynUserLatestCommandPolicyVersion, + } + subCommand, err := 
json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_ROTATE_USER_API_KEY, + SubCommand: subCommand, + } + if _, err := s.Execute(context.Background(), command); err != nil { + return err + } + return nil +} + +func (s *Raft) DeleteUser(userId string) error { + req := cmd.DeleteUsersRequest{ + UserId: userId, + Version: cmd.DynUserLatestCommandPolicyVersion, + } + subCommand, err := json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_DELETE_USER, + SubCommand: subCommand, + } + if _, err := s.Execute(context.Background(), command); err != nil { + return err + } + return nil +} + +func (s *Raft) ActivateUser(userId string) error { + req := cmd.ActivateUsersRequest{ + UserId: userId, + Version: cmd.DynUserLatestCommandPolicyVersion, + } + subCommand, err := json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_ACTIVATE_USER, + SubCommand: subCommand, + } + if _, err := s.Execute(context.Background(), command); err != nil { + return err + } + return nil +} + +func (s *Raft) DeactivateUser(userId string, revokeKey bool) error { + req := cmd.SuspendUserRequest{ + UserId: userId, + RevokeKey: revokeKey, + Version: cmd.DynUserLatestCommandPolicyVersion, + } + subCommand, err := json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_SUSPEND_USER, + SubCommand: subCommand, + } + if _, err := s.Execute(context.Background(), command); err != nil { + return err + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft_dynuser_query_endpoints.go b/platform/dbops/binaries/weaviate-src/cluster/raft_dynuser_query_endpoints.go new file mode 100644 index 
0000000000000000000000000000000000000000..37ea8d102ed08c7b0e4058701ec848ec476a3c06 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_dynuser_query_endpoints.go @@ -0,0 +1,77 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "encoding/json" + "fmt" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" +) + +func (s *Raft) GetUsers(userIds ...string) (map[string]*apikey.User, error) { + req := cmd.QueryGetUsersRequest{ + UserIds: userIds, + } + + subCommand, err := json.Marshal(&req) + if err != nil { + return nil, fmt.Errorf("marshal request: %w", err) + } + + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_USERS, + SubCommand: subCommand, + } + queryResp, err := s.Query(context.Background(), command) + if err != nil { + return nil, fmt.Errorf("failed to execute query: %w", err) + } + + response := cmd.QueryGetUsersResponse{} + err = json.Unmarshal(queryResp.Payload, &response) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal query result: %w", err) + } + + return response.Users, nil +} + +func (s *Raft) CheckUserIdentifierExists(userIdentifier string) (bool, error) { + req := cmd.QueryUserIdentifierExistsRequest{ + UserIdentifier: userIdentifier, + } + + subCommand, err := json.Marshal(&req) + if err != nil { + return false, fmt.Errorf("marshal request: %w", err) + } + + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_USER_IDENTIFIER_EXISTS, + SubCommand: subCommand, + } + queryResp, err := s.Query(context.Background(), command) + if err != nil { + return false, fmt.Errorf("failed to execute query: %w", err) + } + + response := 
cmd.QueryUserIdentifierExistsResponse{} + err = json.Unmarshal(queryResp.Payload, &response) + if err != nil { + return false, fmt.Errorf("failed to unmarshal query result: %w", err) + } + + return response.Exists, nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft_query_endpoints.go b/platform/dbops/binaries/weaviate-src/cluster/raft_query_endpoints.go new file mode 100644 index 0000000000000000000000000000000000000000..4a2a8edebf7cda0ed1d5b1f4a30644b097074c81 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_query_endpoints.go @@ -0,0 +1,397 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "encoding/json" + "fmt" + "slices" + + "github.com/cenkalti/backoff/v4" + "github.com/getsentry/sentry-go" + "github.com/prometheus/client_golang/prometheus" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/schema" + "github.com/weaviate/weaviate/entities/models" + entSentry "github.com/weaviate/weaviate/entities/sentry" + "github.com/weaviate/weaviate/entities/versioned" + "github.com/weaviate/weaviate/usecases/monitoring" + "github.com/weaviate/weaviate/usecases/sharding" +) + +// QueryReadOnlyClass will verify that class is non empty and then build a Query that will be directed to the leader to +// ensure we will read the class with strong consistency +func (s *Raft) QueryReadOnlyClasses(classes ...string) (map[string]versioned.Class, error) { + ctx := context.Background() + if entSentry.Enabled() { + transaction := sentry.StartSpan(ctx, "grpc.client", + sentry.WithTransactionName("raft.query.read_only_classes"), + sentry.WithDescription("Query class schema"), + ) + transaction.SetData("classes", classes) + 
ctx = transaction.Context() + defer transaction.Finish() + } + if len(classes) == 0 { + return nil, fmt.Errorf("empty classes names: %w", schema.ErrBadRequest) + } + + // remove dedup and empty + slices.Sort(classes) + classes = slices.Compact(classes) + if len(classes) == 0 { + return map[string]versioned.Class{}, fmt.Errorf("empty classes names: %w", schema.ErrBadRequest) + } + + if len(classes) > 1 && classes[0] == "" { + classes = classes[1:] + } + + // Build the query and execute it + req := cmd.QueryReadOnlyClassesRequest{Classes: classes} + subCommand, err := json.Marshal(&req) + if err != nil { + return map[string]versioned.Class{}, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_CLASSES, + SubCommand: subCommand, + } + queryResp, err := s.Query(ctx, command) + if err != nil { + return map[string]versioned.Class{}, fmt.Errorf("failed to execute query: %w", err) + } + + // Empty payload doesn't unmarshal to an empty struct and will instead result in an error. + // We have an empty payload when the requested class if not present in the schema. + // In that case return a nil pointer and no error. 
+ if len(queryResp.Payload) == 0 { + return nil, nil + } + + // Unmarshal the response + resp := cmd.QueryReadOnlyClassResponse{} + err = json.Unmarshal(queryResp.Payload, &resp) + if err != nil { + return map[string]versioned.Class{}, fmt.Errorf("failed to unmarshal query result: %w", err) + } + return resp.Classes, nil +} + +// QuerySchema build a Query to read the schema that will be directed to the leader to ensure we will read the class +// with strong consistency +func (s *Raft) QuerySchema() (models.Schema, error) { + ctx := context.Background() + if entSentry.Enabled() { + transaction := sentry.StartSpan(ctx, "grpc.client", + sentry.WithTransactionName("raft.query.schema"), + sentry.WithDescription("Query the schema"), + ) + ctx = transaction.Context() + defer transaction.Finish() + } + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_SCHEMA, + } + queryResp, err := s.Query(ctx, command) + if err != nil { + return models.Schema{}, fmt.Errorf("failed to execute query: %w", err) + } + + // Unmarshal the response + resp := cmd.QuerySchemaResponse{} + err = json.Unmarshal(queryResp.Payload, &resp) + if err != nil { + return models.Schema{}, fmt.Errorf("failed to unmarshal query result: %w", err) + } + return resp.Schema, nil +} + +// QueryCollectionsCount build a Query to read the schema that will be directed to the leader to ensure we will read the class +// with strong consistency +func (s *Raft) QueryCollectionsCount() (int, error) { + ctx := context.Background() + if entSentry.Enabled() { + transaction := sentry.StartSpan(ctx, "grpc.client", + sentry.WithTransactionName("raft.query.collections.count"), + sentry.WithDescription("Query the collections count"), + ) + ctx = transaction.Context() + defer transaction.Finish() + } + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_COLLECTIONS_COUNT, + } + queryResp, err := s.Query(ctx, command) + if err != nil { + return 0, fmt.Errorf("failed to execute query: %w", err) + } + + // 
Unmarshal the response + resp := cmd.QueryCollectionsCountResponse{} + err = json.Unmarshal(queryResp.Payload, &resp) + if err != nil { + return 0, fmt.Errorf("failed to unmarshal query result: %w", err) + } + return resp.Count, nil +} + +// QueryTenants build a Query to read the tenants of a given class that will be directed to the leader to ensure we +// will read the class with strong consistency +func (s *Raft) QueryTenants(class string, tenants []string) ([]*models.Tenant, uint64, error) { + ctx := context.Background() + if entSentry.Enabled() { + transaction := sentry.StartSpan(ctx, "grpc.client", + sentry.WithTransactionName("raft.query.tenants"), + sentry.WithDescription("Query the status of tenants in a given class"), + ) + transaction.SetData("class", class) + transaction.SetData("tenants", tenants) + ctx = transaction.Context() + defer transaction.Finish() + } + // Build the query and execute it + req := cmd.QueryTenantsRequest{Class: class, Tenants: tenants} + subCommand, err := json.Marshal(&req) + if err != nil { + return []*models.Tenant{}, 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_TENANTS, + SubCommand: subCommand, + } + queryResp, err := s.Query(ctx, command) + if err != nil { + return []*models.Tenant{}, 0, fmt.Errorf("failed to execute query: %w", err) + } + + // Unmarshal the response + resp := cmd.QueryTenantsResponse{} + err = json.Unmarshal(queryResp.Payload, &resp) + if err != nil { + return []*models.Tenant{}, 0, fmt.Errorf("failed to unmarshal query result: %w", err) + } + + return resp.Tenants, resp.ShardVersion, nil +} + +// QueryShardOwner build a Query to read the tenants of a given class that will be directed to the leader to ensure we +// will read the tenant with strong consistency and return the shard owner node +func (s *Raft) QueryShardOwner(class, shard string) (string, uint64, error) { + ctx := context.Background() + if entSentry.Enabled() { + transaction := 
sentry.StartSpan(ctx, "grpc.client", + sentry.WithTransactionName("raft.query.shard_owner"), + sentry.WithDescription("Query the owner of a given shard in a given class"), + ) + transaction.SetData("class", class) + transaction.SetData("shard", shard) + ctx = transaction.Context() + defer transaction.Finish() + } + // Build the query and execute it + req := cmd.QueryShardOwnerRequest{Class: class, Shard: shard} + subCommand, err := json.Marshal(&req) + if err != nil { + return "", 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_SHARD_OWNER, + SubCommand: subCommand, + } + queryResp, err := s.Query(ctx, command) + if err != nil { + return "", 0, fmt.Errorf("failed to execute query: %w", err) + } + + // Unmarshal the response + resp := cmd.QueryShardOwnerResponse{} + err = json.Unmarshal(queryResp.Payload, &resp) + if err != nil { + return "", 0, fmt.Errorf("failed to unmarshal query result: %w", err) + } + + return resp.Owner, resp.ShardVersion, nil +} + +// QueryTenantsShards build a Query to read the tenants and their activity status of a given class. 
+// The request will be directed to the leader to ensure we will read the tenant with strong consistency and return the +// shard owner node +func (s *Raft) QueryTenantsShards(class string, tenants ...string) (map[string]string, uint64, error) { + ctx := context.Background() + if entSentry.Enabled() { + transaction := sentry.StartSpan(ctx, "grpc.client", + sentry.WithTransactionName("raft.query.tenants_shards"), + sentry.WithDescription("Query the tenants of a given class"), + ) + transaction.SetData("class", class) + transaction.SetData("tenants", tenants) + ctx = transaction.Context() + defer transaction.Finish() + } + // Build the query and execute it + req := cmd.QueryTenantsShardsRequest{Class: class, Tenants: tenants} + subCommand, err := json.Marshal(&req) + if err != nil { + return nil, 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_TENANTS_SHARDS, + SubCommand: subCommand, + } + queryResp, err := s.Query(ctx, command) + if err != nil { + return nil, 0, fmt.Errorf("failed to execute query: %w", err) + } + + // Unmarshal the response + resp := cmd.QueryTenantsShardsResponse{} + err = json.Unmarshal(queryResp.Payload, &resp) + if err != nil { + return nil, 0, fmt.Errorf("failed to unmarshal query result: %w", err) + } + + return resp.TenantsActivityStatus, resp.SchemaVersion, nil +} + +// QueryShardingState build a Query to read the sharding state of a given class. +// The request will be directed to the leader to ensure we will read the shard state with strong consistency and return the +// state and it's version. 
+func (s *Raft) QueryShardingState(class string) (*sharding.State, uint64, error) { + ctx := context.Background() + if entSentry.Enabled() { + transaction := sentry.StartSpan(ctx, "grpc.client", + sentry.WithTransactionName("raft.query.sharding_state"), + sentry.WithDescription("Query the sharding state of a given class"), + ) + transaction.SetData("class", class) + ctx = transaction.Context() + defer transaction.Finish() + } + // Build the query and execute it + req := cmd.QueryShardingStateRequest{Class: class} + subCommand, err := json.Marshal(&req) + if err != nil { + return nil, 0, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_SHARDING_STATE, + SubCommand: subCommand, + } + queryResp, err := s.Query(ctx, command) + if err != nil { + return nil, 0, fmt.Errorf("failed to execute query: %w", err) + } + + // Unmarshal the response + resp := cmd.QueryShardingStateResponse{} + err = json.Unmarshal(queryResp.Payload, &resp) + if err != nil { + return nil, 0, fmt.Errorf("failed to unmarshal query result: %w", err) + } + + return resp.State, resp.Version, nil +} + +// QueryClassVersions returns the current version of the requested classes. 
+func (s *Raft) QueryClassVersions(classes ...string) (map[string]uint64, error) { + ctx := context.Background() + if entSentry.Enabled() { + transaction := sentry.StartSpan(ctx, "grpc.client", + sentry.WithTransactionName("raft.query.class_versions"), + sentry.WithDescription("Query class versions"), + ) + transaction.SetData("classes", classes) + ctx = transaction.Context() + defer transaction.Finish() + } + if len(classes) == 0 { + return nil, fmt.Errorf("empty classes names: %w", schema.ErrBadRequest) + } + + // remove dedup and empty + slices.Sort(classes) + classes = slices.Compact(classes) + if len(classes) == 0 { + return map[string]uint64{}, fmt.Errorf("empty classes names: %w", schema.ErrBadRequest) + } + + if len(classes) > 1 && classes[0] == "" { + classes = classes[1:] + } + + // Build the query and execute it + req := cmd.QueryClassVersionsRequest{Classes: classes} + subCommand, err := json.Marshal(&req) + if err != nil { + return map[string]uint64{}, fmt.Errorf("marshal request: %w", err) + } + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_CLASS_VERSIONS, + SubCommand: subCommand, + } + queryResp, err := s.Query(ctx, command) + if err != nil { + return map[string]uint64{}, fmt.Errorf("failed to execute query: %w", err) + } + + // Empty payload doesn't unmarshal to an empty struct and will instead result in an error. + // We have an empty payload when the requested class if not present in the schema. + // In that case return a nil pointer and no error. 
+ if len(queryResp.Payload) == 0 { + return nil, nil + } + + // Unmarshal the response + resp := cmd.QueryClassVersionsResponse{} + err = json.Unmarshal(queryResp.Payload, &resp) + if err != nil { + return map[string]uint64{}, fmt.Errorf("failed to unmarshal query result: %w", err) + } + return resp.Classes, nil +} + +// Query receives a QueryRequest and ensure it is executed on the leader and returns the related QueryResponse +// If any error happens it returns it +func (s *Raft) Query(ctx context.Context, req *cmd.QueryRequest) (*cmd.QueryResponse, error) { + t := prometheus.NewTimer( + monitoring.GetMetrics().SchemaReadsLeader.WithLabelValues( + req.Type.String(), + )) + defer t.ObserveDuration() + + if s.store.IsLeader() { + return s.store.Query(req) + } + + // find out who the leader is + var leader string + if err := backoff.Retry(func() error { + if leader = s.store.Leader(); leader == "" { + return s.leaderErr() + } + + return nil + // pass in the election timeout after applying multiplier + }, backoffConfig(ctx, s.store.raftConfig().ElectionTimeout)); err != nil { + s.log.Warnf("query: failed to find leader after retries: %s", err) + return &cmd.QueryResponse{}, err + } + + resp, err := s.cl.Query(ctx, leader, req) + if err != nil { + s.log.WithField("leader", leader).Errorf("query: failed to query leader: %s", err) + } + return resp, err +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft_rbac_apply_endpoints.go b/platform/dbops/binaries/weaviate-src/cluster/raft_rbac_apply_endpoints.go new file mode 100644 index 0000000000000000000000000000000000000000..bb902071cbbd2420aa91583364f52196bcb9413e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_rbac_apply_endpoints.go @@ -0,0 +1,126 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. 
All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "encoding/json" + "fmt" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/schema" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +func (s *Raft) UpdateRolesPermissions(roles map[string][]authorization.Policy) error { + return s.upsertRolesPermissions(roles, false) +} + +func (s *Raft) CreateRolesPermissions(roles map[string][]authorization.Policy) error { + return s.upsertRolesPermissions(roles, true) +} + +func (s *Raft) upsertRolesPermissions(roles map[string][]authorization.Policy, roleCreation bool) error { + if len(roles) == 0 { + return fmt.Errorf("no roles to create: %w", schema.ErrBadRequest) + } + + req := cmd.CreateRolesRequest{Roles: roles, Version: cmd.RBACLatestCommandPolicyVersion, RoleCreation: roleCreation} + subCommand, err := json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_UPSERT_ROLES_PERMISSIONS, + SubCommand: subCommand, + } + if _, err := s.Execute(context.Background(), command); err != nil { + return err + } + return nil +} + +func (s *Raft) DeleteRoles(names ...string) error { + if len(names) == 0 { + return fmt.Errorf("no roles to delete: %w", schema.ErrBadRequest) + } + req := cmd.DeleteRolesRequest{Roles: names} + subCommand, err := json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_DELETE_ROLES, + SubCommand: subCommand, + } + if _, err := s.Execute(context.Background(), command); err != nil { + return err + } + return nil +} + +func (s *Raft) RemovePermissions(role string, permissions []*authorization.Policy) error { + if role == "" { + return fmt.Errorf("no roles to remove permissions from: %w", schema.ErrBadRequest) + } + req := cmd.RemovePermissionsRequest{Role: 
role, Permissions: permissions, Version: cmd.RBACLatestCommandPolicyVersion} + subCommand, err := json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_REMOVE_PERMISSIONS, + SubCommand: subCommand, + } + if _, err := s.Execute(context.Background(), command); err != nil { + return err + } + return nil +} + +func (s *Raft) AddRolesForUser(user string, roles []string) error { + if len(roles) == 0 { + return fmt.Errorf("no roles to assign: %w", schema.ErrBadRequest) + } + req := cmd.AddRolesForUsersRequest{User: user, Roles: roles, Version: cmd.RBACAssignRevokeLatestCommandPolicyVersion} + subCommand, err := json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_ADD_ROLES_FOR_USER, + SubCommand: subCommand, + } + if _, err := s.Execute(context.Background(), command); err != nil { + return err + } + return nil +} + +func (s *Raft) RevokeRolesForUser(user string, roles ...string) error { + if len(roles) == 0 { + return fmt.Errorf("no roles to revoke: %w", schema.ErrBadRequest) + } + req := cmd.RevokeRolesForUserRequest{User: user, Roles: roles, Version: cmd.RBACAssignRevokeLatestCommandPolicyVersion} + subCommand, err := json.Marshal(&req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &cmd.ApplyRequest{ + Type: cmd.ApplyRequest_TYPE_REVOKE_ROLES_FOR_USER, + SubCommand: subCommand, + } + if _, err := s.Execute(context.Background(), command); err != nil { + return err + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft_rbac_query_endpoints.go b/platform/dbops/binaries/weaviate-src/cluster/raft_rbac_query_endpoints.go new file mode 100644 index 0000000000000000000000000000000000000000..451560c1a41f014817937ac1b04f1a2f85a9b49f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_rbac_query_endpoints.go 
@@ -0,0 +1,170 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/weaviate/weaviate/usecases/auth/authentication" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +func (s *Raft) GetRoles(names ...string) (map[string][]authorization.Policy, error) { + req := cmd.QueryGetRolesRequest{ + Roles: names, + } + + subCommand, err := json.Marshal(&req) + if err != nil { + return nil, fmt.Errorf("marshal request: %w", err) + } + + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_ROLES, + SubCommand: subCommand, + } + queryResp, err := s.Query(context.Background(), command) + if err != nil { + return nil, fmt.Errorf("failed to execute query: %w", err) + } + + response := cmd.QueryGetRolesResponse{} + err = json.Unmarshal(queryResp.Payload, &response) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal query result: %w", err) + } + + return response.Roles, nil +} + +func (s *Raft) GetUsersOrGroupsWithRoles(isGroup bool, authType authentication.AuthType) ([]string, error) { + req := cmd.QueryGetAllUsersOrGroupsWithRolesRequest{ + IsGroup: isGroup, + AuthType: authType, + } + + subCommand, err := json.Marshal(&req) + if err != nil { + return nil, fmt.Errorf("marshal request: %w", err) + } + + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_USERS_OR_GROUPS_WITH_ROLES, + SubCommand: subCommand, + } + queryResp, err := s.Query(context.Background(), command) + if err != nil { + return nil, fmt.Errorf("failed to execute query: %w", err) + } + + response := cmd.QueryGetAllUsersOrGroupsWithRolesResponse{} + err = json.Unmarshal(queryResp.Payload, 
&response) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal query result: %w", err) + } + + return response.UsersOrGroups, nil +} + +func (s *Raft) GetRolesForUserOrGroup(user string, authType authentication.AuthType, isGroup bool) (map[string][]authorization.Policy, error) { + req := cmd.QueryGetRolesForUserOrGroupRequest{ + User: user, + UserType: authType, + IsGroup: isGroup, + } + + subCommand, err := json.Marshal(&req) + if err != nil { + return nil, fmt.Errorf("marshal request: %w", err) + } + + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_ROLES_FOR_USER, + SubCommand: subCommand, + } + queryResp, err := s.Query(context.Background(), command) + if err != nil { + return nil, fmt.Errorf("failed to execute query: %w", err) + } + + response := cmd.QueryGetRolesForUserOrGroupResponse{} + err = json.Unmarshal(queryResp.Payload, &response) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal query result: %w", err) + } + + return response.Roles, nil +} + +func (s *Raft) GetUsersOrGroupForRole(role string, authType authentication.AuthType, isGroup bool) ([]string, error) { + req := cmd.QueryGetUsersForRoleRequest{ + Role: role, + UserType: authType, + IsGroup: isGroup, + } + + subCommand, err := json.Marshal(&req) + if err != nil { + return nil, fmt.Errorf("marshal request: %w", err) + } + + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_USERS_FOR_ROLE, + SubCommand: subCommand, + } + queryResp, err := s.Query(context.Background(), command) + if err != nil { + return nil, fmt.Errorf("failed to execute query: %w", err) + } + + response := cmd.QueryGetUsersForRoleResponse{} + err = json.Unmarshal(queryResp.Payload, &response) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal query result: %w", err) + } + + return response.Users, nil +} + +// HasPermission returns consistent permissions check by asking the leader +func (s *Raft) HasPermission(roleName string, permission *authorization.Policy) 
(bool, error) { + req := cmd.QueryHasPermissionRequest{ + Role: roleName, + Permission: permission, + } + + subCommand, err := json.Marshal(&req) + if err != nil { + return false, fmt.Errorf("marshal request: %w", err) + } + + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_HAS_PERMISSION, + SubCommand: subCommand, + } + queryResp, err := s.Query(context.Background(), command) + if err != nil { + return false, fmt.Errorf("failed to execute query: %w", err) + } + + response := cmd.QueryHasPermissionResponse{} + err = json.Unmarshal(queryResp.Payload, &response) + if err != nil { + return false, fmt.Errorf("failed to unmarshal query result: %w", err) + } + + return response.HasPermission, nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft_replication_apply_endpoints.go b/platform/dbops/binaries/weaviate-src/cluster/raft_replication_apply_endpoints.go new file mode 100644 index 0000000000000000000000000000000000000000..55004732e53da475ab215cd25ff36a0d1e2d325b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_replication_apply_endpoints.go @@ -0,0 +1,364 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/replication" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" +) + +func (s *Raft) ReplicationReplicateReplica(ctx context.Context, uuid strfmt.UUID, sourceNode string, sourceCollection string, sourceShard string, targetNode string, transferType string) error { + req := &api.ReplicationReplicateShardRequest{ + Version: api.ReplicationCommandVersionV0, + SourceNode: sourceNode, + SourceCollection: sourceCollection, + SourceShard: sourceShard, + TargetNode: targetNode, + Uuid: uuid, + TransferType: transferType, + } + + if err := replication.ValidateReplicationReplicateShard(s.SchemaReader(), req); err != nil { + return fmt.Errorf("%w: %w", replicationTypes.ErrInvalidRequest, err) + } + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + return err + } + return nil +} + +func (s *Raft) ReplicationUpdateReplicaOpStatus(ctx context.Context, id uint64, state api.ShardReplicationState) error { + req := &api.ReplicationUpdateOpStateRequest{ + Version: api.ReplicationCommandVersionV0, + Id: id, + State: state, + } + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE_UPDATE_STATE, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + return err + } + return nil +} + +func (s *Raft) ReplicationRegisterError(ctx context.Context, id uint64, errorToRegister string) error { + req := 
&api.ReplicationRegisterErrorRequest{ + Version: api.ReplicationCommandVersionV0, + Id: id, + Error: errorToRegister, + TimeUnixMs: time.Now().UnixMilli(), + } + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE_REGISTER_ERROR, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + return err + } + return nil +} + +func (s *Raft) ReplicationCancellationComplete(ctx context.Context, id uint64) error { + req := &api.ReplicationCancellationCompleteRequest{ + Version: api.ReplicationCommandVersionV0, + Id: id, + } + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE_CANCELLATION_COMPLETE, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + return err + } + return nil +} + +func (s *Raft) CancelReplication(ctx context.Context, uuid strfmt.UUID) error { + req := &api.ReplicationCancelRequest{ + Version: api.ReplicationCommandVersionV0, + Uuid: uuid, + } + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE_CANCEL, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + if strings.Contains(err.Error(), replicationTypes.ErrReplicationOperationNotFound.Error()) { + return fmt.Errorf("execute cancel replication: %w", replicationTypes.ErrReplicationOperationNotFound) + } + if strings.Contains(err.Error(), replicationTypes.ErrCancellationImpossible.Error()) { + return fmt.Errorf("execute cancel replication: %w", replicationTypes.ErrCancellationImpossible) + } + return err + } + return nil +} + +func (s *Raft) DeleteReplication(ctx context.Context, uuid strfmt.UUID) 
error { + req := &api.ReplicationDeleteRequest{ + Version: api.ReplicationCommandVersionV0, + Uuid: uuid, + } + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE_DELETE, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + if strings.Contains(err.Error(), replicationTypes.ErrReplicationOperationNotFound.Error()) { + return fmt.Errorf("execute delete replication: %w", replicationTypes.ErrReplicationOperationNotFound) + } + if strings.Contains(err.Error(), replicationTypes.ErrDeletionImpossible.Error()) { + return fmt.Errorf("execute delete replication: %w", replicationTypes.ErrDeletionImpossible) + } + return err + } + return nil +} + +func (s *Raft) ReplicationRemoveReplicaOp(ctx context.Context, id uint64) error { + req := &api.ReplicationRemoveOpRequest{ + Version: api.ReplicationCommandVersionV0, + Id: id, + } + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE_REMOVE, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + return err + } + return nil +} + +func (s *Raft) ForceDeleteAllReplications(ctx context.Context) error { + req := &api.ReplicationForceDeleteAllRequest{} + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_ALL, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + return err + } + return nil +} + +func (s *Raft) ForceDeleteReplicationsByCollection(ctx context.Context, collection string) error { + req := &api.ReplicationForceDeleteByCollectionRequest{ + Collection: collection, + } + + subCommand, err := 
json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_COLLECTION, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + return err + } + return nil +} + +func (s *Raft) ForceDeleteReplicationsByCollectionAndShard(ctx context.Context, collection, shard string) error { + req := &api.ReplicationForceDeleteByCollectionAndShardRequest{ + Collection: collection, + Shard: shard, + } + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_COLLECTION_AND_SHARD, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + return err + } + return nil +} + +func (s *Raft) ForceDeleteReplicationsByTargetNode(ctx context.Context, node string) error { + req := &api.ReplicationForceDeleteByTargetNodeRequest{ + Node: node, + } + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_TARGET_NODE, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + return err + } + return nil +} + +func (s *Raft) ForceDeleteReplicationByUuid(ctx context.Context, uuid strfmt.UUID) error { + req := &api.ReplicationForceDeleteByUuidRequest{ + Uuid: uuid, + } + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_UUID, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + return err + } + return nil +} + +func (s *Raft) DeleteAllReplications(ctx context.Context) error { + req := 
&api.ReplicationDeleteAllRequest{ + Version: api.ReplicationCommandVersionV0, + } + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE_DELETE_ALL, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + return err + } + return nil +} + +func (s *Raft) DeleteReplicationsByCollection(ctx context.Context, collection string) error { + req := &api.ReplicationsDeleteByCollectionRequest{ + Version: api.ReplicationCommandVersionV0, + Collection: collection, + } + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE_DELETE_BY_COLLECTION, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + return err + } + return nil +} + +func (s *Raft) DeleteReplicationsByTenants(ctx context.Context, collection string, tenants []string) error { + req := &api.ReplicationsDeleteByTenantsRequest{ + Version: api.ReplicationCommandVersionV0, + Collection: collection, + Tenants: tenants, + } + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: api.ApplyRequest_TYPE_REPLICATION_REPLICATE_DELETE_BY_TENANTS, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + return err + } + return nil +} + +func (s *Raft) ReplicationStoreSchemaVersion(ctx context.Context, id uint64, schemaVersion uint64) error { + req := &api.ReplicationStoreSchemaVersionRequest{ + Version: api.ReplicationCommandVersionV0, + SchemaVersion: schemaVersion, + Id: id, + } + + subCommand, err := json.Marshal(req) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + command := &api.ApplyRequest{ + Type: 
api.ApplyRequest_TYPE_REPLICATION_REGISTER_SCHEMA_VERSION, + SubCommand: subCommand, + } + if _, err := s.Execute(ctx, command); err != nil { + return err + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft_replication_query_endpoints.go b/platform/dbops/binaries/weaviate-src/cluster/raft_replication_query_endpoints.go new file mode 100644 index 0000000000000000000000000000000000000000..a4cbd03695cb69b79005db35151cf94320911bb4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_replication_query_endpoints.go @@ -0,0 +1,272 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/cluster/proto/api" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/cluster/types" +) + +func (s *Raft) GetReplicationDetailsByReplicationId(ctx context.Context, uuid strfmt.UUID) (api.ReplicationDetailsResponse, error) { + request := &api.ReplicationDetailsRequest{ + Uuid: uuid, + } + + subCommand, err := json.Marshal(request) + if err != nil { + return api.ReplicationDetailsResponse{}, fmt.Errorf("marshal request: %w", err) + } + + command := &api.QueryRequest{ + Type: api.QueryRequest_TYPE_GET_REPLICATION_DETAILS, + SubCommand: subCommand, + } + + queryResponse, err := s.Query(ctx, command) + if err != nil { + if strings.Contains(err.Error(), replicationTypes.ErrReplicationOperationNotFound.Error()) { + return api.ReplicationDetailsResponse{}, fmt.Errorf("%w: %w", types.ErrNotFound, replicationTypes.ErrReplicationOperationNotFound) + } + return api.ReplicationDetailsResponse{}, fmt.Errorf("failed to execute 
query: %w", err) + } + + response := api.ReplicationDetailsResponse{} + err = json.Unmarshal(queryResponse.Payload, &response) + if err != nil { + return api.ReplicationDetailsResponse{}, fmt.Errorf("failed to unmarshal query response: %w", err) + } + + return response, nil +} + +func (s *Raft) GetReplicationDetailsByCollection(ctx context.Context, collection string) ([]api.ReplicationDetailsResponse, error) { + request := &api.ReplicationDetailsRequestByCollection{ + Collection: collection, + } + + subCommand, err := json.Marshal(request) + if err != nil { + return []api.ReplicationDetailsResponse{}, fmt.Errorf("marshal request: %w", err) + } + + command := &api.QueryRequest{ + Type: api.QueryRequest_TYPE_GET_REPLICATION_DETAILS_BY_COLLECTION, + SubCommand: subCommand, + } + + queryResponse, err := s.Query(ctx, command) + if err != nil { + if strings.Contains(err.Error(), replicationTypes.ErrReplicationOperationNotFound.Error()) { + return []api.ReplicationDetailsResponse{}, fmt.Errorf("%w: %w", types.ErrNotFound, replicationTypes.ErrReplicationOperationNotFound) + } + return []api.ReplicationDetailsResponse{}, fmt.Errorf("failed to execute query: %w", err) + } + + response := []api.ReplicationDetailsResponse{} + err = json.Unmarshal(queryResponse.Payload, &response) + if err != nil { + return []api.ReplicationDetailsResponse{}, fmt.Errorf("failed to unmarshal query response: %w", err) + } + + return response, nil +} + +func (s *Raft) GetReplicationDetailsByCollectionAndShard(ctx context.Context, collection string, shard string) ([]api.ReplicationDetailsResponse, error) { + request := &api.ReplicationDetailsRequestByCollectionAndShard{ + Collection: collection, + Shard: shard, + } + + subCommand, err := json.Marshal(request) + if err != nil { + return []api.ReplicationDetailsResponse{}, fmt.Errorf("marshal request: %w", err) + } + + command := &api.QueryRequest{ + Type: api.QueryRequest_TYPE_GET_REPLICATION_DETAILS_BY_COLLECTION_AND_SHARD, + SubCommand: 
subCommand, + } + + queryResponse, err := s.Query(ctx, command) + if err != nil { + if strings.Contains(err.Error(), replicationTypes.ErrReplicationOperationNotFound.Error()) { + return []api.ReplicationDetailsResponse{}, fmt.Errorf("%w: %w", types.ErrNotFound, replicationTypes.ErrReplicationOperationNotFound) + } + return []api.ReplicationDetailsResponse{}, fmt.Errorf("failed to execute query: %w", err) + } + + response := []api.ReplicationDetailsResponse{} + err = json.Unmarshal(queryResponse.Payload, &response) + if err != nil { + return []api.ReplicationDetailsResponse{}, fmt.Errorf("failed to unmarshal query response: %w", err) + } + + return response, nil +} + +func (s *Raft) GetReplicationDetailsByTargetNode(ctx context.Context, node string) ([]api.ReplicationDetailsResponse, error) { + request := &api.ReplicationDetailsRequestByTargetNode{ + Node: node, + } + + subCommand, err := json.Marshal(request) + if err != nil { + return []api.ReplicationDetailsResponse{}, fmt.Errorf("marshal request: %w", err) + } + + command := &api.QueryRequest{ + Type: api.QueryRequest_TYPE_GET_REPLICATION_DETAILS_BY_TARGET_NODE, + SubCommand: subCommand, + } + + queryResponse, err := s.Query(ctx, command) + if err != nil { + if strings.Contains(err.Error(), replicationTypes.ErrReplicationOperationNotFound.Error()) { + return []api.ReplicationDetailsResponse{}, fmt.Errorf("%w: %w", types.ErrNotFound, replicationTypes.ErrReplicationOperationNotFound) + } + return []api.ReplicationDetailsResponse{}, fmt.Errorf("failed to execute query: %w", err) + } + + response := []api.ReplicationDetailsResponse{} + err = json.Unmarshal(queryResponse.Payload, &response) + if err != nil { + return []api.ReplicationDetailsResponse{}, fmt.Errorf("failed to unmarshal query response: %w", err) + } + + return response, nil +} + +func (s *Raft) GetAllReplicationDetails(ctx context.Context) ([]api.ReplicationDetailsResponse, error) { + command := &api.QueryRequest{ + Type: 
api.QueryRequest_TYPE_GET_ALL_REPLICATION_DETAILS, + } + + queryResponse, err := s.Query(ctx, command) + if err != nil { + if strings.Contains(err.Error(), replicationTypes.ErrReplicationOperationNotFound.Error()) { + return []api.ReplicationDetailsResponse{}, fmt.Errorf("%w: %w", types.ErrNotFound, replicationTypes.ErrReplicationOperationNotFound) + } + return []api.ReplicationDetailsResponse{}, fmt.Errorf("failed to execute query: %w", err) + } + + response := []api.ReplicationDetailsResponse{} + err = json.Unmarshal(queryResponse.Payload, &response) + if err != nil { + return []api.ReplicationDetailsResponse{}, fmt.Errorf("failed to unmarshal query response: %w", err) + } + + return response, nil +} + +func (s *Raft) QueryShardingStateByCollection(ctx context.Context, collection string) (api.ShardingState, error) { + request := &api.ReplicationQueryShardingStateByCollectionRequest{ + Collection: collection, + } + + subCommand, err := json.Marshal(request) + if err != nil { + return api.ShardingState{}, fmt.Errorf("marshal request: %w", err) + } + + command := &api.QueryRequest{ + Type: api.QueryRequest_TYPE_GET_SHARDING_STATE_BY_COLLECTION, + SubCommand: subCommand, + } + + queryResponse, err := s.Query(ctx, command) + if err != nil { + if strings.Contains(err.Error(), replicationTypes.ErrNotFound.Error()) { + return api.ShardingState{}, fmt.Errorf("%w: %w", types.ErrNotFound, replicationTypes.ErrNotFound) + } + return api.ShardingState{}, fmt.Errorf("failed to execute query: %w", err) + } + + response := api.ShardingState{} + err = json.Unmarshal(queryResponse.Payload, &response) + if err != nil { + return api.ShardingState{}, fmt.Errorf("failed to unmarshal query response: %w", err) + } + + return response, nil +} + +func (s *Raft) QueryShardingStateByCollectionAndShard(ctx context.Context, collection string, shard string) (api.ShardingState, error) { + request := &api.ReplicationQueryShardingStateByCollectionAndShardRequest{ + Collection: collection, + Shard: 
shard, + } + + subCommand, err := json.Marshal(request) + if err != nil { + return api.ShardingState{}, fmt.Errorf("marshal request: %w", err) + } + + command := &api.QueryRequest{ + Type: api.QueryRequest_TYPE_GET_SHARDING_STATE_BY_COLLECTION_AND_SHARD, + SubCommand: subCommand, + } + + queryResponse, err := s.Query(ctx, command) + if err != nil { + if strings.Contains(err.Error(), replicationTypes.ErrNotFound.Error()) { + return api.ShardingState{}, fmt.Errorf("%w: %w", types.ErrNotFound, replicationTypes.ErrNotFound) + } + return api.ShardingState{}, fmt.Errorf("failed to execute query: %w", err) + } + + response := api.ShardingState{} + err = json.Unmarshal(queryResponse.Payload, &response) + if err != nil { + return api.ShardingState{}, fmt.Errorf("failed to unmarshal query response: %w", err) + } + + return response, nil +} + +func (s *Raft) ReplicationGetReplicaOpStatus(ctx context.Context, id uint64) (api.ShardReplicationState, error) { + request := &api.ReplicationOperationStateRequest{ + Id: id, + } + + subCommand, err := json.Marshal(request) + if err != nil { + return "", fmt.Errorf("marshal request: %w", err) + } + + command := &api.QueryRequest{ + Type: api.QueryRequest_TYPE_GET_REPLICATION_OPERATION_STATE, + SubCommand: subCommand, + } + + queryResponse, err := s.Query(ctx, command) + if err != nil { + if strings.Contains(err.Error(), replicationTypes.ErrReplicationOperationNotFound.Error()) { + return "", fmt.Errorf("%w: %w", types.ErrNotFound, replicationTypes.ErrReplicationOperationNotFound) + } + return "", fmt.Errorf("failed to execute query: %w", err) + } + + response := api.ReplicationOperationStateResponse{} + err = json.Unmarshal(queryResponse.Payload, &response) + if err != nil { + return "", fmt.Errorf("failed to unmarshal query response: %w", err) + } + + return response.State, nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft_snapshot_test.go b/platform/dbops/binaries/weaviate-src/cluster/raft_snapshot_test.go new 
file mode 100644 index 0000000000000000000000000000000000000000..36f35a4002cc5713199605d8c7f4c6c6a695f631 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_snapshot_test.go @@ -0,0 +1,152 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/utils" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/cluster/mocks" + "github.com/weaviate/weaviate/usecases/fakes" + "github.com/weaviate/weaviate/usecases/sharding" +) + +// TestSnapshotRestoreSchemaOnly ensures that when restoring a snapshot we correctly restore the state of the schema +// without impacting the underlying database if it has integrated changes already +func TestSnapshotRestoreSchemaOnly(t *testing.T) { + ctx := context.Background() + m := NewMockStore(t, "Node-1", utils.MustGetFreeTCPPort()) + addr := fmt.Sprintf("%s:%d", m.cfg.Host, m.cfg.RaftPort) + srv := NewRaft(mocks.NewMockNodeSelector(), m.store, nil) + + // Open + m.indexer.On("Open", Anything).Return(nil) + assert.Nil(t, srv.Open(ctx, m.indexer)) + + // Ensure Raft starts and a leader is elected + assert.Nil(t, srv.store.Notify(m.cfg.NodeID, addr)) + assert.Nil(t, srv.WaitUntilDBRestored(ctx, time.Second*1, make(chan struct{}))) + assert.True(t, tryNTimesWithWait(10, time.Millisecond*200, srv.Ready)) + tryNTimesWithWait(20, time.Millisecond*100, srv.store.IsLeader) + assert.True(t, srv.store.IsLeader()) + + // DeleteClass + 
m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.indexer.On("DeleteClass", Anything).Return(nil) + m.replicationFSM.On("DeleteReplicationsByCollection", Anything).Return(nil) + _, err := srv.DeleteClass(ctx, "C") + assert.Nil(t, err) + + // Add a class C with a tenant T0 with state S0 + m.indexer.On("AddClass", Anything).Return(nil) + m.parser.On("ParseClass", mock.Anything).Return(nil) + cls := &models.Class{ + Class: "C", + MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: true}, + } + // Get a shema reader to verify our schema operation are working + schemaReader := srv.SchemaReader() + ss := &sharding.State{PartitioningEnabled: true, Physical: map[string]sharding.Physical{"T0": {Name: "T0", Status: "S0"}}} + _, err = srv.AddClass(ctx, cls, ss) + assert.Nil(t, err) + assert.Equal(t, schemaReader.ClassEqual(cls.Class), cls.Class) + assert.Equal(t, "S0", getTenantStatus(t, schemaReader, cls.Class, "T0")) + + // Create a snapshot here with the class and the tenant existing + assert.Nil(t, srv.store.raft.Barrier(2*time.Second).Error()) + assert.Nil(t, srv.store.raft.Snapshot().Error()) + + m.indexer.On("DeleteTenants", Anything, Anything).Return(nil) + m.replicationFSM.On("DeleteReplicationsByTenants", Anything, Anything).Return(nil) + // Now let's drop the tenant T0 (this will be a log entry and not included in the snapshot) + _, err = srv.DeleteTenants(ctx, cls.Class, &api.DeleteTenantsRequest{Tenants: []string{"T0"}}) + require.NoError(t, err) + + // Now re-add the tenant T0 with state S1 + m.indexer.On("AddTenants", Anything, Anything).Return(nil) + _, err = srv.AddTenants(ctx, cls.Class, &api.AddTenantsRequest{ + ClusterNodes: []string{"Node-1"}, + Tenants: []*api.Tenant{{Name: "T0", Status: "S1"}}, + }) + require.NoError(t, err) + assert.Equal(t, "S1", getTenantStatus(t, schemaReader, cls.Class, "T0")) + + // close service + m.indexer.On("Close", Anything).Return(nil) + assert.Nil(t, srv.Close(ctx)) + m.indexer.AssertExpectations(t) + + // 
Create a new FSM that will restore from it's state from the disk (using snapshot and logs) + s := NewFSM(m.cfg, nil, nil, prometheus.NewPedanticRegistry()) + m.store = &s + // We refresh the mock schema to ensure that we can assert no calls except Open are sent to the database + m.indexer = fakes.NewMockSchemaExecutor() + // NewRaft will try to restore from any snapshot it can find on disk + srv = NewRaft(mocks.NewMockNodeSelector(), m.store, nil) + // Ensure raft starts and a leader is elected + m.indexer.On("Open", Anything).Return(nil) + // shall be called because of restoring from snapshot + m.indexer.On("TriggerSchemaUpdateCallbacks").Return().Once() + assert.Nil(t, srv.Open(ctx, m.indexer)) + assert.Nil(t, srv.store.Notify(m.cfg.NodeID, addr)) + assert.Nil(t, srv.WaitUntilDBRestored(ctx, time.Second*1, make(chan struct{}))) + assert.True(t, tryNTimesWithWait(10, time.Millisecond*200, srv.Ready)) + tryNTimesWithWait(20, time.Millisecond*100, srv.store.IsLeader) + + // Ensure that the class has been restored and that the tenant is present with the right state + schemaReader = srv.SchemaReader() + assert.Equal(t, cls.Class, schemaReader.ClassEqual(cls.Class)) + assert.Equal(t, "S1", getTenantStatus(t, schemaReader, cls.Class, "T0")) + + // Ensure there was no supplementary call to the underlying DB as we were just recovering the schema + m.indexer.AssertExpectations(t) +} + +func getTenantStatus(t *testing.T, schemaReader interface{}, className, tenantName string) string { + type schemaReaderWithRead interface { + Read(className string, readerFunc func(*models.Class, *sharding.State) error) error + } + + reader, ok := schemaReader.(schemaReaderWithRead) + if !ok { + t.Fatalf("schemaReader does not have Read method") + } + + var tenantStatus string + + err := reader.Read(className, func(_ *models.Class, state *sharding.State) error { + if state == nil { + return fmt.Errorf("no sharding state found for class %s", className) + } + + physical, exists := 
state.Physical[tenantName] + if !exists { + return fmt.Errorf("tenant %s not found in class %s", tenantName, className) + } + + tenantStatus = physical.Status + return nil + }) + + require.NoError(t, err) + return tenantStatus +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft_test.go b/platform/dbops/binaries/weaviate-src/cluster/raft_test.go new file mode 100644 index 0000000000000000000000000000000000000000..de658da8049acc48950c58d062882e2f54949c64 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_test.go @@ -0,0 +1,465 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "fmt" + "strconv" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/raft" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + command "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/schema" + "github.com/weaviate/weaviate/cluster/types" + "github.com/weaviate/weaviate/cluster/utils" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/cluster/mocks" + "github.com/weaviate/weaviate/usecases/sharding" +) + +func TestRaftEndpoints(t *testing.T) { + ctx := context.Background() + m := NewMockStore(t, "Node-1", utils.MustGetFreeTCPPort()) + addr := fmt.Sprintf("%s:%d", m.cfg.Host, m.cfg.RaftPort) + m.indexer.On("Open", Anything).Return(nil) + m.indexer.On("Close", Anything).Return(nil) + m.indexer.On("AddClass", Anything).Return(nil) + m.indexer.On("RestoreClassDir", Anything).Return(nil) + m.indexer.On("UpdateClass", Anything).Return(nil) + m.indexer.On("DeleteClass", 
Anything).Return(nil) + m.indexer.On("AddProperty", Anything, Anything).Return(nil) + m.indexer.On("UpdateShardStatus", Anything).Return(nil) + m.indexer.On("AddTenants", Anything, Anything).Return(nil) + m.indexer.On("UpdateTenants", Anything, Anything).Return(nil) + m.indexer.On("DeleteTenants", Anything, Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.indexer.On("AddReplicaToShard", Anything, Anything, Anything).Return(nil) + m.indexer.On("DeleteReplicaFromShard", Anything, Anything, Anything).Return(nil) + + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.parser.On("ParseClassUpdate", mock.Anything, mock.Anything).Return(mock.Anything, nil) + + srv := NewRaft(mocks.NewMockNodeSelector(), m.store, nil) + + // LeaderNotFound + _, err := srv.Execute(ctx, &command.ApplyRequest{}) + assert.ErrorIs(t, err, types.ErrLeaderNotFound) + assert.ErrorIs(t, srv.Join(ctx, m.store.cfg.NodeID, addr, true), types.ErrLeaderNotFound) + assert.ErrorIs(t, srv.Remove(ctx, m.store.cfg.NodeID), types.ErrLeaderNotFound) + + // Deadline exceeded while waiting for DB to be restored + func() { + ctx, cancel := context.WithTimeout(ctx, time.Millisecond*30) + defer cancel() + assert.ErrorIs(t, srv.WaitUntilDBRestored(ctx, 5*time.Millisecond, make(chan struct{})), context.DeadlineExceeded) + }() + + // Open + defer srv.Close(ctx) + assert.Nil(t, srv.Open(ctx, m.indexer)) + + // node lose leadership after service call + assert.ErrorIs(t, srv.store.Join(m.store.cfg.NodeID, addr, true), types.ErrNotLeader) + assert.ErrorIs(t, srv.store.Remove(m.store.cfg.NodeID), types.ErrNotLeader) + + // Connect + assert.Nil(t, srv.store.Notify(m.cfg.NodeID, addr)) + + assert.Nil(t, srv.WaitUntilDBRestored(ctx, time.Second*1, make(chan struct{}))) + assert.True(t, tryNTimesWithWait(10, time.Millisecond*200, srv.Ready)) + tryNTimesWithWait(20, time.Millisecond*100, srv.store.IsLeader) + assert.True(t, srv.store.IsLeader()) + schemaReader := srv.SchemaReader() + 
assert.Equal(t, schemaReader.Len(), 0) + + // AddClass + _, err = srv.AddClass(ctx, nil, nil) + assert.ErrorIs(t, err, schema.ErrBadRequest) + assert.Equal(t, schemaReader.ClassEqual("C"), "") + + cls := &models.Class{ + Class: "C", + MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: true}, + } + ss := &sharding.State{PartitioningEnabled: true, Physical: map[string]sharding.Physical{"T0": {Name: "T0"}}} + version0, err := srv.AddClass(ctx, cls, ss) + assert.Nil(t, err) + assert.Equal(t, schemaReader.ClassEqual("C"), "C") + + // Add same class again + _, err = srv.AddClass(ctx, cls, ss) + assert.Error(t, err) + assert.Equal(t, "class name C already exists", err.Error()) + + // Add similar class + _, err = srv.AddClass(ctx, &models.Class{Class: "c"}, ss) + assert.ErrorIs(t, err, schema.ErrClassExists) + + // QueryReadOnlyClass + readOnlyVClass, err := srv.QueryReadOnlyClasses(cls.Class) + assert.NoError(t, err) + assert.NotNil(t, readOnlyVClass[cls.Class].Class) + assert.Equal(t, cls, readOnlyVClass[cls.Class].Class) + + // QueryClassVersions + classVersions, err := srv.QueryClassVersions(cls.Class) + assert.NoError(t, err) + assert.Equal(t, readOnlyVClass[cls.Class].Version, classVersions[cls.Class]) + + // QuerySchema + getSchema, err := srv.QuerySchema() + assert.NoError(t, err) + assert.NotNil(t, getSchema) + assert.Equal(t, models.Schema{Classes: []*models.Class{readOnlyVClass[cls.Class].Class}}, getSchema) + + // QueryTenants all + getTenantsAll, _, err := srv.QueryTenants(cls.Class, []string{}) + assert.NoError(t, err) + assert.NotNil(t, getTenantsAll) + assert.Equal(t, []*models.Tenant{{ + Name: "T0", + ActivityStatus: models.TenantActivityStatusHOT, + }}, getTenantsAll) + + // QueryTenants one + getTenantsOne, _, err := srv.QueryTenants(cls.Class, []string{"T0"}) + assert.NoError(t, err) + assert.NotNil(t, getTenantsOne) + assert.Equal(t, []*models.Tenant{{ + Name: "T0", + ActivityStatus: models.TenantActivityStatusHOT, + }}, getTenantsOne) + + // 
QueryTenants one + getTenantsNone, _, err := srv.QueryTenants(cls.Class, []string{"T"}) + assert.NoError(t, err) + assert.NotNil(t, getTenantsNone) + assert.Equal(t, []*models.Tenant{}, getTenantsNone) + + // Query ShardTenant + getTenantShards, _, err := srv.QueryTenantsShards(cls.Class, "T0") + for tenant, status := range getTenantShards { + assert.Nil(t, err) + assert.Equal(t, "T0", tenant) + assert.Equal(t, models.TenantActivityStatusHOT, status) + } + + // QueryShardOwner - Err + _, _, err = srv.QueryShardOwner(cls.Class, "T0") + assert.NotNil(t, err) + + // QueryShardOwner + srv.UpdateClass(ctx, cls, &sharding.State{Physical: map[string]sharding.Physical{"T0": {BelongsToNodes: []string{"N0"}}}}) + getShardOwner, _, err := srv.QueryShardOwner(cls.Class, "T0") + assert.Nil(t, err) + assert.Equal(t, "N0", getShardOwner) + // Verify that updating with nil sharding state does not change the sharding state + srv.UpdateClass(ctx, cls, nil) + getShardOwner, _, err = srv.QueryShardOwner(cls.Class, "T0") + assert.Nil(t, err) + assert.Equal(t, "N0", getShardOwner) + + // QueryShardingState + shardingState := &sharding.State{Physical: map[string]sharding.Physical{"T0": {BelongsToNodes: []string{"N0"}}}, ReplicationFactor: 2} + srv.UpdateClass(ctx, cls, shardingState) + getShardingState, _, err := srv.QueryShardingState(cls.Class) + assert.Nil(t, err) + assert.Equal(t, shardingState, getShardingState) + + // UpdateClass + info := schema.ClassInfo{ + Exists: true, + MultiTenancy: models.MultiTenancyConfig{Enabled: true}, + ReplicationFactor: 1, + Tenants: 1, + } + _, err = srv.UpdateClass(ctx, nil, nil) + assert.ErrorIs(t, err, schema.ErrBadRequest) + cls.MultiTenancyConfig = &models.MultiTenancyConfig{Enabled: true} + cls.ReplicationConfig = &models.ReplicationConfig{Factor: 1} + ss.Physical = map[string]sharding.Physical{"T0": {Name: "T0"}} + version, err := srv.UpdateClass(ctx, cls, nil) + info.ClassVersion = version + info.ShardVersion = version0 + assert.Nil(t, err) + 
assert.Nil(t, srv.store.WaitForAppliedIndex(ctx, time.Millisecond*10, version)) + assert.Equal(t, info, schemaReader.ClassInfo("C")) + assert.ErrorIs(t, srv.store.WaitForAppliedIndex(ctx, time.Millisecond*10, srv.store.lastAppliedIndex.Load()+1), types.ErrDeadlineExceeded) + + // DeleteClass + m.replicationFSM.EXPECT().DeleteReplicationsByCollection(Anything).Return(nil).Times(2) + _, err = srv.DeleteClass(ctx, "X") + assert.Nil(t, err) + _, err = srv.DeleteClass(ctx, "C") + assert.Nil(t, err) + assert.Equal(t, schema.ClassInfo{}, schemaReader.ClassInfo("C")) + + // RestoreClass + _, err = srv.RestoreClass(ctx, nil, nil) + assert.ErrorIs(t, err, schema.ErrBadRequest) + version, err = srv.RestoreClass(ctx, cls, ss) + assert.Nil(t, err) + info.ClassVersion = version + info.ShardVersion = version + assert.Equal(t, info, schemaReader.ClassInfo("C")) + + // AddProperty + _, err = srv.AddProperty(ctx, "C", nil) + assert.ErrorIs(t, err, schema.ErrBadRequest) + _, err = srv.AddProperty(ctx, "", &models.Property{Name: "P1"}) + assert.ErrorIs(t, err, schema.ErrBadRequest) + version, err = srv.AddProperty(ctx, "C", &models.Property{Name: "P1"}) + assert.Nil(t, err) + info.ClassVersion = version + info.Properties = 1 + assert.Equal(t, info, schemaReader.ClassInfo("C")) + + // UpdateStatus + _, err = srv.UpdateShardStatus(ctx, "", "A", "ACTIVE") + assert.ErrorIs(t, err, schema.ErrBadRequest) + _, err = srv.UpdateShardStatus(ctx, "C", "", "ACTIVE") + assert.ErrorIs(t, err, schema.ErrBadRequest) + _, err = srv.UpdateShardStatus(ctx, "C", "A", "ACTIVE") + assert.Nil(t, err) + + // AddTenants + _, err = srv.AddTenants(ctx, "", &command.AddTenantsRequest{}) + assert.ErrorIs(t, err, schema.ErrBadRequest) + version, err = srv.AddTenants(ctx, "C", &command.AddTenantsRequest{ + ClusterNodes: []string{"Node-1"}, + Tenants: []*command.Tenant{nil, {Name: "T2", Status: "S1"}, nil}, + }) + assert.Nil(t, err) + info.ShardVersion = version + info.Tenants += 1 + assert.Equal(t, info, 
schemaReader.ClassInfo("C")) + + // AddReplicaToShard + _, err = srv.AddReplicaToShard(ctx, "", "", "") + assert.ErrorIs(t, err, schema.ErrBadRequest) + version, err = srv.AddReplicaToShard(ctx, "C", "T2", "Node-2") + assert.Nil(t, err) + info.ClassVersion = version + assert.Equal(t, info, schemaReader.ClassInfo("C")) + ss, err = readShardingState(schemaReader, "C") + require.Nil(t, err) + assert.Equal(t, []string{"Node-1", "Node-2"}, ss.Physical["T2"].BelongsToNodes) + + // DeleteReplicaFromShard + _, err = srv.DeleteReplicaFromShard(ctx, "", "", "") + assert.ErrorIs(t, err, schema.ErrBadRequest) + version, err = srv.DeleteReplicaFromShard(ctx, "C", "T2", "Node-2") + assert.Nil(t, err) + info.ClassVersion = version + assert.Equal(t, info, schemaReader.ClassInfo("C")) + ss, err = readShardingState(schemaReader, "C") + require.Nil(t, err) + assert.Equal(t, []string{"Node-1"}, ss.Physical["T2"].BelongsToNodes) + + // SyncShard with active tenant + _, err = srv.SyncShard(ctx, "", "", "") + assert.ErrorIs(t, err, schema.ErrBadRequest) + m.indexer.On("ShutdownShard", mock.Anything, mock.Anything).Return(nil).Times(0) + m.indexer.On("LoadShard", "C", "A").Return(nil).Times(1) + _, err = srv.SyncShard(ctx, "C", "A", "Node-1") + assert.Nil(t, err) + + // SyncShard with inactive tenant + _, err = srv.UpdateShardStatus(ctx, "C", "A", "INACTIVE") + assert.Nil(t, err) + + _, err = srv.SyncShard(ctx, "", "", "") + assert.ErrorIs(t, err, schema.ErrBadRequest) + m.indexer.On("ShutdownShard", "C", "A").Return(nil).Times(1) + m.indexer.On("LoadShard", mock.Anything, mock.Anything).Return(nil).Times(0) + _, err = srv.SyncShard(ctx, "C", "A", "Node-1") + assert.Nil(t, err) + + _, err = srv.UpdateShardStatus(ctx, "C", "A", "ACTIVE") + assert.Nil(t, err) + + // SyncShard with absent tenant + _, err = srv.SyncShard(ctx, "", "", "") + assert.ErrorIs(t, err, schema.ErrBadRequest) + m.indexer.On("ShutdownShard", "C", "T0").Return(nil).Times(1) + m.indexer.On("LoadShard", mock.Anything, 
mock.Anything).Return(nil).Times(0) + _, err = srv.SyncShard(ctx, "C", "T0", "Node-1") + assert.Nil(t, err) + + // Add single-tenant collection + cls = &models.Class{ + Class: "D", + } + ss = &sharding.State{PartitioningEnabled: false, Physical: map[string]sharding.Physical{"S0": {Name: "S0"}}} + _, err = srv.AddClass(ctx, cls, ss) + assert.Nil(t, err) + assert.Equal(t, schemaReader.ClassEqual("D"), "D") + + // SyncShard with ST collection and present shard + m.indexer.On("ShutdownShard", mock.Anything, mock.Anything).Return(nil).Times(0) + m.indexer.On("LoadShard", "D", "S0").Return(nil).Times(1) + _, err = srv.SyncShard(ctx, "D", "S0", "Node-1") + assert.Nil(t, err) + + // SyncShard with ST collection and absent shard + m.indexer.On("ShutdownShard", "D", "S0").Return(nil).Times(1) + m.indexer.On("LoadShard", mock.Anything, mock.Anything).Return(nil).Times(0) + _, err = srv.SyncShard(ctx, "D", "S0", "Node-1") + assert.Nil(t, err) + + // UpdateTenants + _, err = srv.UpdateTenants(ctx, "", &command.UpdateTenantsRequest{}) + assert.ErrorIs(t, err, schema.ErrBadRequest) + _, err = srv.UpdateTenants(ctx, "C", &command.UpdateTenantsRequest{Tenants: []*command.Tenant{{Name: "T2", Status: "S2"}}}) + assert.Nil(t, err) + + // DeleteTenants + m.replicationFSM.EXPECT().DeleteReplicationsByTenants(Anything, Anything).Return(nil) + _, err = srv.DeleteTenants(ctx, "", &command.DeleteTenantsRequest{}) + assert.ErrorIs(t, err, schema.ErrBadRequest) + version, err = srv.DeleteTenants(ctx, "C", &command.DeleteTenantsRequest{Tenants: []string{"T0", "Tn"}}) + assert.Nil(t, err) + info.Tenants -= 1 + info.ShardVersion = version + assert.Equal(t, info, schemaReader.ClassInfo("C")) + ss, err = readShardingState(schemaReader, "C") + require.Nil(t, err) + assert.Equal(t, "S2", ss.Physical["T2"].Status) + + // Self Join + assert.Nil(t, srv.Join(ctx, m.store.cfg.NodeID, addr, true)) + assert.True(t, srv.store.IsLeader()) + assert.Nil(t, srv.Join(ctx, m.store.cfg.NodeID, addr, false)) + 
assert.True(t, srv.store.IsLeader()) + assert.ErrorContains(t, srv.Remove(ctx, m.store.cfg.NodeID), "configuration") + assert.True(t, srv.store.IsLeader()) + + // Stats + stats := srv.Stats() + // stats:raft_state + assert.Equal(t, "Leader", stats["raft"].(map[string]string)["state"]) + // stats:leader_address + leaderAddress := string(stats["leader_address"].(raft.ServerAddress)) + splitAddress := strings.Split(leaderAddress, ":") + assert.Len(t, splitAddress, 2) + ipAddress, portStr := splitAddress[0], splitAddress[1] + assert.Equal(t, "127.0.0.1", ipAddress) + port, err := strconv.Atoi(portStr) + if err != nil { + t.Errorf("Port should have been parsable as an int but was: %v", portStr) + } + assert.GreaterOrEqual(t, port, 0) + // stats:leader_id + leaderID := string(stats["leader_id"].(raft.ServerID)) + assert.Equal(t, m.store.cfg.NodeID, leaderID) + + // create snapshot + assert.Nil(t, srv.store.raft.Barrier(2*time.Second).Error()) + assert.Nil(t, srv.store.raft.Snapshot().Error()) + + // restore from snapshot + assert.Nil(t, srv.Close(ctx)) + + s := NewFSM(m.cfg, nil, nil, prometheus.NewPedanticRegistry()) + m.store = &s + srv = NewRaft(mocks.NewMockNodeSelector(), m.store, nil) + assert.Nil(t, srv.Open(ctx, m.indexer)) + assert.Nil(t, srv.store.Notify(m.cfg.NodeID, addr)) + assert.Nil(t, srv.WaitUntilDBRestored(ctx, time.Second*1, make(chan struct{}))) + assert.True(t, tryNTimesWithWait(10, time.Millisecond*200, srv.Ready)) + tryNTimesWithWait(20, time.Millisecond*100, srv.store.IsLeader) + schemaReader = srv.SchemaReader() + assert.Equal(t, info, schemaReader.ClassInfo("C")) +} + +func TestRaftStoreInit(t *testing.T) { + var ( + ctx = context.Background() + m = NewMockStore(t, "Node-1", 9093) + store = m.store + addr = fmt.Sprintf("%s:%d", m.cfg.Host, m.cfg.RaftPort) + ) + + // NotOpen + assert.ErrorIs(t, store.Join(m.store.cfg.NodeID, addr, true), types.ErrNotOpen) + assert.ErrorIs(t, store.Remove(m.store.cfg.NodeID), types.ErrNotOpen) + assert.ErrorIs(t, 
store.Notify(m.store.cfg.NodeID, addr), types.ErrNotOpen) + + // Already Open + store.open.Store(true) + assert.Nil(t, store.Open(ctx)) + + // notify non voter + store.cfg.BootstrapExpect = 0 + assert.Nil(t, store.Notify("A", "localhost:123")) + + // not enough voter + store.cfg.BootstrapExpect = 2 + assert.Nil(t, store.Notify("A", "localhost:123")) +} + +func TestRaftClose(t *testing.T) { + ctx := context.Background() + m := NewMockStore(t, "Node-1", utils.MustGetFreeTCPPort()) + addr := fmt.Sprintf("%s:%d", m.cfg.Host, m.cfg.RaftPort) + s := NewFSM(m.cfg, nil, nil, prometheus.NewPedanticRegistry()) + m.store = &s + srv := NewRaft(mocks.NewMockNodeSelector(), m.store, nil) + m.indexer.On("Open", mock.Anything).Return(nil) + assert.Nil(t, srv.Open(ctx, m.indexer)) + assert.Nil(t, srv.store.Notify(m.cfg.NodeID, addr)) + close := make(chan struct{}) + go func() { + time.Sleep(time.Second) + close <- struct{}{} + }() + now := time.Now() + assert.Nil(t, srv.WaitUntilDBRestored(ctx, time.Second*10, close)) + after := time.Now() + assert.Less(t, after.Sub(now), 2*time.Second) +} + +func TestRaftPanics(t *testing.T) { + m := NewMockStore(t, "Node-1", 9091) + + // Assert Correct Response Type + ret := m.store.Apply(&raft.Log{Type: raft.LogNoop}) + resp, ok := ret.(Response) + assert.True(t, ok) + assert.Equal(t, resp, Response{}) + + // Not a Valid Payload + assert.Panics(t, func() { m.store.Apply(&raft.Log{Data: []byte("a")}) }) + + // Cannot Open File Store + m.indexer.On("Open", mock.Anything).Return(errAny) + assert.Panics(t, func() { m.store.openDatabase(context.TODO()) }) +} + +func readShardingState(schemaReader schema.SchemaReader, className string) (*sharding.State, error) { + var result *sharding.State + err := schemaReader.Read(className, func(_ *models.Class, state *sharding.State) error { + stateCopy := state.DeepCopy() + result = &stateCopy + return nil + }) + return result, err +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/raft_utils.go 
b/platform/dbops/binaries/weaviate-src/cluster/raft_utils.go new file mode 100644 index 0000000000000000000000000000000000000000..a70bd0258723da56a2c8a91f81e18fad705fff1b --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/raft_utils.go @@ -0,0 +1,35 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "fmt" + "strings" + + "github.com/weaviate/weaviate/cluster/types" +) + +// leaderErr decorates ErrLeaderNotFound by distinguishing between +// normal election happening and there is no leader been chosen yet +// and if it can't reach the other nodes either for intercluster +// communication issues or other nodes were down. +func (s *Raft) leaderErr() error { + if s.store.raftResolver != nil && len(s.store.raftResolver.NotResolvedNodes()) > 0 { + var nodes []string + for n := range s.store.raftResolver.NotResolvedNodes() { + nodes = append(nodes, string(n)) + } + + return fmt.Errorf("%w, can not resolve nodes [%s]", types.ErrLeaderNotFound, strings.Join(nodes, ",")) + } + return types.ErrLeaderNotFound +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/service.go b/platform/dbops/binaries/weaviate-src/cluster/service.go new file mode 100644 index 0000000000000000000000000000000000000000..6457e21fc8a8094177f2a77c46195893fe0cef05 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/service.go @@ -0,0 +1,256 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/hashicorp/raft" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/cluster/bootstrap" + "github.com/weaviate/weaviate/cluster/fsm" + "github.com/weaviate/weaviate/cluster/replication" + "github.com/weaviate/weaviate/cluster/replication/metrics" + "github.com/weaviate/weaviate/cluster/resolver" + "github.com/weaviate/weaviate/cluster/rpc" + "github.com/weaviate/weaviate/cluster/schema" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +const ( + // TODO: consider exposing these as settings + shardReplicationEngineBufferSize = 16 + fsmOpProducerPollingInterval = 5 * time.Second + replicationEngineShutdownTimeout = 20 * time.Second + replicationOperationTimeout = 24 * time.Hour + catchUpInterval = 5 * time.Second +) + +// Service class serves as the primary entry point for the Raft layer, managing and coordinating +// the key functionalities of the distributed consensus protocol. +type Service struct { + *Raft + + replicationEngine *replication.ShardReplicationEngine + raftAddr string + config *Config + + rpcClient *rpc.Client + rpcServer *rpc.Server + logger *logrus.Logger + + // closing channels + cancelReplicationEngine context.CancelFunc + closeBootstrapper chan struct{} + closeOnFSMCaughtUp chan struct{} + closeWaitForDB chan struct{} +} + +// New returns a Service configured with cfg. The service will initialize internals gRPC api & clients to other cluster +// nodes. +// Raft store will be initialized and ready to be started. To start the service call Open(). 
+func New(cfg Config, authZController authorization.Controller, snapshotter fsm.Snapshotter, svrMetrics *monitoring.GRPCServerMetrics) *Service { + rpcListenAddress := fmt.Sprintf("%s:%d", cfg.Host, cfg.RPCPort) + raftAdvertisedAddress := fmt.Sprintf("%s:%d", cfg.Host, cfg.RaftPort) + client := rpc.NewClient(resolver.NewRpc(cfg.IsLocalHost, cfg.RPCPort), cfg.RaftRPCMessageMaxSize, cfg.SentryEnabled, cfg.Logger) + + fsm := NewFSM(cfg, authZController, snapshotter, prometheus.DefaultRegisterer) + raft := NewRaft(cfg.NodeSelector, &fsm, client) + fsmOpProducer := replication.NewFSMOpProducer( + cfg.Logger, + fsm.replicationManager.GetReplicationFSM(), + fsmOpProducerPollingInterval, + cfg.NodeSelector.LocalName(), + ) + replicaCopyOpConsumer := replication.NewCopyOpConsumer( + cfg.Logger, + raft, + cfg.ReplicaCopier, + cfg.NodeSelector.LocalName(), + &backoff.StopBackOff{}, + replication.NewOpsCache(), + replicationOperationTimeout, + cfg.ReplicationEngineMaxWorkers, + cfg.ReplicaMovementMinimumAsyncWait, + metrics.NewReplicationEngineOpsCallbacks(prometheus.DefaultRegisterer), + raft.SchemaReader(), + ) + replicationEngine := replication.NewShardReplicationEngine( + cfg.Logger, + cfg.NodeSelector.LocalName(), + fsmOpProducer, + replicaCopyOpConsumer, + shardReplicationEngineBufferSize, + cfg.ReplicationEngineMaxWorkers, + replicationEngineShutdownTimeout, + metrics.NewReplicationEngineCallbacks(prometheus.DefaultRegisterer), + ) + svr := rpc.NewServer(&fsm, raft, rpcListenAddress, cfg.RaftRPCMessageMaxSize, cfg.SentryEnabled, svrMetrics, cfg.Logger) + + return &Service{ + Raft: raft, + replicationEngine: replicationEngine, + raftAddr: raftAdvertisedAddress, + config: &cfg, + rpcClient: client, + rpcServer: svr, + logger: cfg.Logger, + closeBootstrapper: make(chan struct{}), + closeOnFSMCaughtUp: make(chan struct{}), + closeWaitForDB: make(chan struct{}), + } +} + +func (c *Service) onFSMCaughtUp(ctx context.Context) { + if c.config.ReplicaMovementDisabled { + return 
+ } + + ticker := time.NewTicker(catchUpInterval) + defer ticker.Stop() + for { + select { + case <-c.closeOnFSMCaughtUp: + return + case <-ticker.C: + if c.Raft.store.FSMHasCaughtUp() { + c.logger.Infof("Metadata FSM reported caught up, starting replication engine") + engineCtx, engineCancel := context.WithCancel(ctx) + c.cancelReplicationEngine = engineCancel + enterrors.GoWrapper(func() { + // The context is cancelled by the engine itself when it is stopped + if err := c.replicationEngine.Start(engineCtx); err != nil { + if !errors.Is(err, context.Canceled) { + c.logger.WithError(err).Error("replication engine failed to start after FSM caught up") + } + } + }, c.logger) + return + } + } + } +} + +// Open internal RPC service to handle node communication, +// bootstrap the Raft node, and restore the database state +func (c *Service) Open(ctx context.Context, db schema.Indexer) error { + c.logger.WithField("servers", c.config.NodeNameToPortMap).Info("open cluster service") + if err := c.rpcServer.Open(); err != nil { + return fmt.Errorf("start rpc service: %w", err) + } + + if err := c.Raft.Open(ctx, db); err != nil { + return fmt.Errorf("open raft store: %w", err) + } + + hasState, err := raft.HasExistingState(c.Raft.store.logCache, c.Raft.store.logStore, c.Raft.store.snapshotStore) + if err != nil { + return err + } + c.log.WithField("hasState", hasState).Info("raft init") + + // If we have a state in raft, we only want to re-join the nodes in raft_join list to ensure that we update the + // configuration with our current ip. + // If we have no state, we want to do the bootstrap procedure where we will try to join a cluster or notify other + // peers that we are ready to form a new cluster. 
+ bootstrapCtx, bCancel := context.WithTimeout(ctx, c.config.BootstrapTimeout) + defer bCancel() + if hasState { + joiner := bootstrap.NewJoiner(c.rpcClient, c.config.NodeID, c.raftAddr, c.config.Voter) + err = backoff.Retry(func() error { + joinNodes := bootstrap.ResolveRemoteNodes(c.config.NodeSelector, c.config.NodeNameToPortMap) + _, err := joiner.Do(bootstrapCtx, c.logger, joinNodes) + return err + }, backoff.WithContext(backoff.NewConstantBackOff(1*time.Second), bootstrapCtx)) + if err != nil { + return fmt.Errorf("could not join raft join list: %w. Weaviate detected this node to have state stored. If the DB is still loading up we will hit this timeout. You can try increasing/setting RAFT_BOOTSTRAP_TIMEOUT env variable to a higher value", err) + } + } else { + bs := bootstrap.NewBootstrapper( + c.rpcClient, + c.config.NodeID, + c.raftAddr, + c.config.Voter, + c.config.NodeSelector, + c.Raft.Ready, + ) + if err := bs.Do( + bootstrapCtx, + c.config.NodeNameToPortMap, + c.logger, + c.closeBootstrapper); err != nil { + return fmt.Errorf("bootstrap: %w", err) + } + } + + if err := c.WaitUntilDBRestored(ctx, 10*time.Second, c.closeWaitForDB); err != nil { + return fmt.Errorf("restore database: %w", err) + } + + enterrors.GoWrapper(func() { + c.onFSMCaughtUp(ctx) + }, c.logger) + return nil +} + +// Close closes the raft service and frees all allocated ressources. Internal RAFT store will be closed and if +// leadership is assumed it will be transferred to another node. gRPC server and clients will also be closed. 
+func (c *Service) Close(ctx context.Context) error { + enterrors.GoWrapper(func() { + c.closeBootstrapper <- struct{}{} + c.closeWaitForDB <- struct{}{} + c.closeOnFSMCaughtUp <- struct{}{} + }, c.logger) + + if !c.config.ReplicaMovementDisabled { + c.logger.Info("closing replication engine ...") + if c.cancelReplicationEngine != nil { + c.cancelReplicationEngine() + } + c.replicationEngine.Stop() + } + + c.logger.Info("closing raft FSM store ...") + if err := c.Raft.Close(ctx); err != nil { + return err + } + + c.logger.Info("closing raft-rpc client ...") + c.rpcClient.Close() + + c.logger.Info("closing raft-rpc server ...") + c.rpcServer.Close() + return nil +} + +// Ready returns or not whether the node is ready to accept requests. +func (c *Service) Ready() bool { + return c.Raft.Ready() +} + +// LeaderWithID is used to return the current leader address and ID of the cluster. +// It may return empty strings if there is no current leader or the leader is unknown. +func (c *Service) LeaderWithID() (string, string) { + return c.Raft.LeaderWithID() +} + +func (c *Service) StorageCandidates() []string { + return c.Raft.StorageCandidates() +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/store.go b/platform/dbops/binaries/weaviate-src/cluster/store.go new file mode 100644 index 0000000000000000000000000000000000000000..6d4082bb353d5497504db6735f2c1fef702bcbb9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/store.go @@ -0,0 +1,918 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "errors" + "fmt" + "net" + "os" + "path/filepath" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/raft" + raftbolt "github.com/hashicorp/raft-boltdb/v2" + "github.com/jonboulle/clockwork" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/cluster/distributedtask" + "github.com/weaviate/weaviate/cluster/dynusers" + "github.com/weaviate/weaviate/cluster/fsm" + "github.com/weaviate/weaviate/cluster/log" + rbacRaft "github.com/weaviate/weaviate/cluster/rbac" + "github.com/weaviate/weaviate/cluster/replication" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + "github.com/weaviate/weaviate/cluster/resolver" + "github.com/weaviate/weaviate/cluster/schema" + "github.com/weaviate/weaviate/cluster/types" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/usecases/auth/authentication/apikey" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac" + "github.com/weaviate/weaviate/usecases/cluster" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/config/runtime" +) + +const ( + + // tcpMaxPool controls how many connections we will pool + tcpMaxPool = 3 + + // tcpTimeout is used to apply I/O deadlines. For InstallSnapshot, we multiply + // the timeout by (SnapshotSize / TimeoutScale). + tcpTimeout = 10 * time.Second + + raftDBName = "raft.db" + + // logCacheCapacity is the maximum number of logs to cache in-memory. + // This is used to reduce disk I/O for the recently committed entries. 
+ logCacheCapacity = 512 + + nRetainedSnapShots = 3 +) + +type Config struct { + // WorkDir is the directory RAFT will use to store config & snapshot + WorkDir string + // NodeID is this node id + NodeID string + // Host is this node host name + Host string + // RaftPort is used by internal RAFT communication + RaftPort int + // RPCPort is used by weaviate internal gRPC communication + RPCPort int + // RaftRPCMessageMaxSize is the maximum message sized allowed on the internal RPC communication + // TODO: Remove Raft prefix to avoid confusion between RAFT and RPC. + RaftRPCMessageMaxSize int + + // NodeNameToPortMap maps server names to port numbers + NodeNameToPortMap map[string]int + + // Raft leader election related settings + + // HeartbeatTimeout specifies the time in follower state without contact + // from a leader before we attempt an election. + HeartbeatTimeout time.Duration + // ElectionTimeout specifies the time in candidate state without contact + // from a leader before we attempt an election. + ElectionTimeout time.Duration + // LeaderLeaseTimeout specifies the time in leader state without contact + // from a follower before we attempt an election. + LeaderLeaseTimeout time.Duration + // TimeoutsMultiplier is the multiplier for the timeout values for + // raft election, heartbeat, and leader lease + TimeoutsMultiplier int + + // Raft snapshot related settings + + // SnapshotThreshold controls how many outstanding logs there must be before + // we perform a snapshot. This is to prevent excessive snapshotting by + // replaying a small set of logs instead. The value passed here is the initial + // setting used. This can be tuned during operation using ReloadConfig. + SnapshotThreshold uint64 + + // SnapshotInterval controls how often we check if we should perform a + // snapshot. We randomly stagger between this value and 2x this value to avoid + // the entire cluster from performing a snapshot at once. 
The value passed + // here is the initial setting used. This can be tuned during operation using + // ReloadConfig. + SnapshotInterval time.Duration + + // TrailingLogs controls how many logs we leave after a snapshot. This is used + // so that we can quickly replay logs on a follower instead of being forced to + // send an entire snapshot. The value passed here is the initial setting used. + // This can be tuned during operation using ReloadConfig. + TrailingLogs uint64 + + // Cluster bootstrap related settings + + // BootstrapTimeout is the time a node will notify other node that it is ready to bootstrap a cluster if it can't + // find a an existing cluster to join + BootstrapTimeout time.Duration + // BootstrapExpect is the number of nodes this cluster expect to receive a notify from to start bootstrapping a + // cluster + BootstrapExpect int + + // ConsistencyWaitTimeout is the duration we will wait for a schema version to land on that node + ConsistencyWaitTimeout time.Duration + // NodeSelector is the memberlist interface to RAFT + NodeSelector cluster.NodeSelector + Logger *logrus.Logger + Voter bool + + // MetadataOnlyVoters configures the voters to store metadata exclusively, without storing any other data + MetadataOnlyVoters bool + + // DB is the interface to the weaviate database. 
It is necessary so that schema changes are reflected to the DB + DB schema.Indexer + // Parser parses class field after deserialization + Parser schema.Parser + // LoadLegacySchema is responsible for loading old schema from boltDB + LoadLegacySchema schema.LoadLegacySchema + // SaveLegacySchema is responsible for loading new schema into boltDB + SaveLegacySchema schema.SaveLegacySchema + // IsLocalHost only required when running Weaviate from the console in localhost + IsLocalHost bool + + // SentryEnabled configures the sentry integration to add internal middlewares to rpc client/server to set spans & + // capture traces + SentryEnabled bool + + // EnableOneNodeRecovery enables the actually one node recovery logic to avoid it running all the time when + // unnecessary + EnableOneNodeRecovery bool + // ForceOneNodeRecovery will force the single node recovery routine to run. This is useful if the cluster has + // committed wrong peer configuration entry that makes it unable to obtain a quorum to start. + // WARNING: This should be run on *actual* one node cluster only. + ForceOneNodeRecovery bool + + // AuthzController to manage RBAC commands and apply it to casbin + AuthzController authorization.Controller + AuthNConfig config.Authentication + RBAC *rbac.Manager + + DynamicUserController *apikey.DBUser + + // ReplicaCopier copies shard replicas between nodes + ReplicaCopier replicationTypes.ReplicaCopier + + // ReplicationEngineMaxWorkers is the maximum number of workers for the replication engine + ReplicationEngineMaxWorkers int + + // DistributedTasks is the configuration for the distributed task manager. + DistributedTasks config.DistributedTasksConfig + + ReplicaMovementDisabled bool + + // ReplicaMovementMinimumAsyncWait is the minimum time bound that replica movement operations will wait before + // async replication can complete. 
+ ReplicaMovementMinimumAsyncWait *runtime.DynamicValue[time.Duration] +} + +// Store is the implementation of RAFT on this local node. It will handle the local schema and RAFT operations (startup, +// bootstrap, snapshot, etc...). It ensures that a raft cluster is setup with remote node on start (either recovering +// from old state, or bootstrap itself based on the provided configuration). +type Store struct { + cfg Config + // log is a shorthand to the logger passed in the config to reduce the amount of indirection when logging in the + // code + log *logrus.Logger + + // open is set on opening the store + open atomic.Bool + // dbLoaded is set when the DB is loaded at startup + dbLoaded atomic.Bool + + // raft implementation from external library + raft *raft.Raft + raftResolver types.RaftResolver + raftTransport *raft.NetworkTransport + + // applyTimeout timeout limit the amount of time raft waits for a command to be applied + applyTimeout time.Duration + + // raft snapshot store + snapshotStore *raft.FileSnapshotStore + + // raft log store + logStore *raftbolt.BoltStore + + // raft log cache + logCache *raft.LogCache + + // cluster bootstrap related attributes + bootstrapMutex sync.Mutex + candidates map[string]string + // bootstrapped is set once the node has either bootstrapped or recovered from RAFT log entries + bootstrapped atomic.Bool + + // schemaManager is responsible for applying changes committed by RAFT to the schema representation & querying the + // schema + schemaManager *schema.SchemaManager + + // authZManager is responsible for applying/querying changes committed by RAFT to the rbac representation + authZManager *rbacRaft.Manager + + // authZManager is responsible for applying/querying changes committed by RAFT to the rbac representation + dynUserManager *dynusers.Manager + + // replicationManager is responsible for applying/querying the replication FSM used to handle replication operations + replicationManager *replication.Manager + + // 
distributedTaskManager is responsible for applying/querying the distributed task FSM used to handle distributed tasks. + distributedTasksManager *distributedtask.Manager + + // lastAppliedIndexToDB represents the index of the last applied command when the store is opened. + lastAppliedIndexToDB atomic.Uint64 + // lastAppliedIndex index of latest update to the store + lastAppliedIndex atomic.Uint64 + + // snapshotter is the snapshotter for the store + snapshotter fsm.Snapshotter + + // authZController is the authz controller for the store + authZController authorization.Controller + + metrics *storeMetrics +} + +// storeMetrics exposes RAFT store related prometheus metrics +type storeMetrics struct { + applyDuration prometheus.Histogram + applyFailures prometheus.Counter + + // raftLastAppliedIndex represents current applied index of a raft cluster in local node. + // This includes every commands including config changes + raftLastAppliedIndex prometheus.Gauge + + // fsmLastAppliedIndex represents current applied index of cluster store FSM in local node. + // This includes commands without config changes + fsmLastAppliedIndex prometheus.Gauge + + // fsmStartupAppliedIndex represents previous applied index of the cluster store FSM in local node + // that any restart would try to catch up + fsmStartupAppliedIndex prometheus.Gauge +} + +// newStoreMetrics cretes and registers the store related metrics on +// given prometheus registry. 
+func newStoreMetrics(nodeID string, reg prometheus.Registerer) *storeMetrics { + r := promauto.With(reg) + return &storeMetrics{ + applyDuration: r.NewHistogram(prometheus.HistogramOpts{ + Name: "weaviate_cluster_store_fsm_apply_duration_seconds", + Help: "Time to apply cluster store FSM state in local node", + ConstLabels: prometheus.Labels{"nodeID": nodeID}, + Buckets: prometheus.ExponentialBuckets(0.001, 5, 5), // 1ms, 5ms, 25ms, 125ms, 625ms + }), + applyFailures: r.NewCounter(prometheus.CounterOpts{ + Name: "weaviate_cluster_store_fsm_apply_failures_total", + Help: "Total failure count of cluster store FSM state apply in local node", + ConstLabels: prometheus.Labels{"nodeID": nodeID}, + }), + raftLastAppliedIndex: r.NewGauge(prometheus.GaugeOpts{ + Name: "weaviate_cluster_store_raft_last_applied_index", + Help: "Current applied index of a raft cluster in local node. This includes every commands including config changes", + ConstLabels: prometheus.Labels{"nodeID": nodeID}, + }), + fsmLastAppliedIndex: r.NewGauge(prometheus.GaugeOpts{ + Name: "weaviate_cluster_store_fsm_last_applied_index", + Help: "Current applied index of cluster store FSM in local node. 
This includes commands without config changes", + ConstLabels: prometheus.Labels{"nodeID": nodeID}, + }), + fsmStartupAppliedIndex: r.NewGauge(prometheus.GaugeOpts{ + Name: "weaviate_cluster_store_fsm_startup_applied_index", + Help: "Previous applied index of the cluster store FSM in local node that any restart would try to catch up", + ConstLabels: prometheus.Labels{"nodeID": nodeID}, + }), + } +} + +func NewFSM(cfg Config, authZController authorization.Controller, snapshotter fsm.Snapshotter, reg prometheus.Registerer) Store { + schemaManager := schema.NewSchemaManager(cfg.NodeID, cfg.DB, cfg.Parser, reg, cfg.Logger) + replicationManager := replication.NewManager(schemaManager.NewSchemaReader(), reg) + schemaManager.SetReplicationFSM(replicationManager.GetReplicationFSM()) + + return Store{ + cfg: cfg, + log: cfg.Logger, + candidates: make(map[string]string, cfg.BootstrapExpect), + applyTimeout: time.Second * 20, + raftResolver: resolver.NewRaft(resolver.RaftConfig{ + ClusterStateReader: cfg.NodeSelector, + RaftPort: cfg.RaftPort, + IsLocalHost: cfg.IsLocalHost, + NodeNameToPortMap: cfg.NodeNameToPortMap, + }), + schemaManager: schemaManager, + snapshotter: snapshotter, + authZController: authZController, + authZManager: rbacRaft.NewManager(cfg.RBAC, cfg.AuthNConfig, snapshotter, cfg.Logger), + dynUserManager: dynusers.NewManager(cfg.DynamicUserController, cfg.Logger), + replicationManager: replicationManager, + distributedTasksManager: distributedtask.NewManager(distributedtask.ManagerParameters{ + Clock: clockwork.NewRealClock(), + CompletedTaskTTL: cfg.DistributedTasks.CompletedTaskTTL, + }), + metrics: newStoreMetrics(cfg.NodeID, reg), + } +} + +func (st *Store) IsVoter() bool { return st.cfg.Voter } +func (st *Store) ID() string { return st.cfg.NodeID } + +// lastIndex returns the last index in stable storage, +// either from the last log or from the last snapshot. 
+// this method work as a protection from applying anything was applied to the db +// by checking either raft or max(snapshot, log store) instead the db will catchup +func (st *Store) lastIndex() uint64 { + if st.raft != nil { + return st.raft.AppliedIndex() + } + + l, err := st.LastAppliedCommand() + if err != nil { + panic(fmt.Sprintf("read log last command: %s", err.Error())) + } + return max(lastSnapshotIndex(st.snapshotStore), l) +} + +// Open opens this store and marked as such. +// It constructs a new Raft node using the provided configuration. +// If there is any old state, such as snapshots, logs, peers, etc., all of those will be restored. +func (st *Store) Open(ctx context.Context) (err error) { + if st.open.Load() { // store already opened + return nil + } + defer func() { st.open.Store(err == nil) }() + + if err := st.init(); err != nil { + return fmt.Errorf("initialize raft store: %w", err) + } + + li := st.lastIndex() + st.lastAppliedIndexToDB.Store(li) + st.metrics.fsmStartupAppliedIndex.Set(float64(li)) + + // we have to open the DB before constructing new raft in case of restore calls + st.openDatabase(ctx) + + st.log.WithFields(logrus.Fields{ + "name": st.cfg.NodeID, + "metadata_only_voters": st.cfg.MetadataOnlyVoters, + }).Info("construct a new raft node") + st.raft, err = raft.NewRaft(st.raftConfig(), st, st.logCache, st.logStore, st.snapshotStore, st.raftTransport) + if err != nil { + return fmt.Errorf("raft.NewRaft %v %w", st.raftTransport.LocalAddr(), err) + } + + // Only if node recovery is enabled will we check if we are either forcing it or automating the detection of a one + // node cluster + if st.cfg.EnableOneNodeRecovery && (st.cfg.ForceOneNodeRecovery || (st.cfg.BootstrapExpect == 1 && len(st.candidates) < 2)) { + if err := st.recoverSingleNode(st.cfg.ForceOneNodeRecovery); err != nil { + return err + } + } + + snapIndex := lastSnapshotIndex(st.snapshotStore) + if st.lastAppliedIndexToDB.Load() == 0 && snapIndex == 0 { + // if empty 
node report ready + st.dbLoaded.Store(true) + } + + st.lastAppliedIndex.Store(st.raft.AppliedIndex()) + + st.log.WithFields(logrus.Fields{ + "raft_applied_index": st.raft.AppliedIndex(), + "raft_last_index": st.raft.LastIndex(), + "last_store_applied_index_on_start": st.lastAppliedIndexToDB.Load(), + "last_snapshot_index": snapIndex, + }).Info("raft node constructed") + + // There's no hard limit on the migration, so it should take as long as necessary. + // However, we believe that 1 day should be more than sufficient. + f := func() { st.onLeaderFound(time.Hour * 24) } + enterrors.GoWrapper(f, st.log) + return nil +} + +func (st *Store) init() error { + var err error + if err := os.MkdirAll(st.cfg.WorkDir, 0o755); err != nil { + return fmt.Errorf("mkdir %s: %w", st.cfg.WorkDir, err) + } + + // log store + st.logStore, err = raftbolt.NewBoltStore(filepath.Join(st.cfg.WorkDir, raftDBName)) + if err != nil { + return fmt.Errorf("bolt db: %w", err) + } + + // log cache + st.logCache, err = raft.NewLogCache(logCacheCapacity, st.logStore) + if err != nil { + return fmt.Errorf("log cache: %w", err) + } + + // file snapshot store + st.snapshotStore, err = raft.NewFileSnapshotStore(st.cfg.WorkDir, nRetainedSnapShots, st.log.Out) + if err != nil { + return fmt.Errorf("file snapshot store: %w", err) + } + + // tcp transport + address := fmt.Sprintf("%s:%d", st.cfg.Host, st.cfg.RaftPort) + tcpAddr, err := net.ResolveTCPAddr("tcp", address) + if err != nil { + return fmt.Errorf("net.resolve tcp address=%v: %w", address, err) + } + + st.raftTransport, err = st.raftResolver.NewTCPTransport(address, tcpAddr, tcpMaxPool, tcpTimeout, st.log) + if err != nil { + return fmt.Errorf("raft transport address=%v tcpAddress=%v maxPool=%v timeOut=%v: %w", address, tcpAddr, tcpMaxPool, tcpTimeout, err) + } + st.log.WithFields(logrus.Fields{ + "address": address, + "tcpMaxPool": tcpMaxPool, + "tcpTimeout": tcpTimeout, + }).Info("tcp transport") + + return err +} + +// onLeaderFound execute 
specific tasks when the leader is detected +func (st *Store) onLeaderFound(timeout time.Duration) { + t := time.NewTicker(time.Second) + defer t.Stop() + for range t.C { + + if leader := st.Leader(); leader != "" { + st.log.WithField("address", leader).Info("current Leader") + } else { + continue + } + + // migrate from old non raft schema to the new schema + migrate := func() error { + legacySchema, err := st.cfg.LoadLegacySchema() + if err != nil { + return fmt.Errorf("load schema: %w", err) + } + + // If the legacy schema is empty we can abort early + if len(legacySchema) == 0 { + st.log.Info("legacy schema is empty, nothing to migrate") + return nil + } + + // serialize snapshot + b, c, err := schema.LegacySnapshot(st.cfg.NodeID, legacySchema) + if err != nil { + return fmt.Errorf("create snapshot: %w", err) + } + b.Index = st.raft.LastIndex() + b.Term = 1 + if err := st.raft.Restore(b, c, timeout); err != nil { + return fmt.Errorf("raft restore: %w", err) + } + return nil + } + + // Only leader can restore the old schema + if st.IsLeader() && st.schemaManager.NewSchemaReader().Len() == 0 && st.cfg.LoadLegacySchema != nil { + st.log.Info("starting migration from old schema") + if err := migrate(); err != nil { + st.log.WithError(err).Error("migrate from old schema") + } else { + st.log.Info("migration from the old schema has been successfully completed") + } + } + return + } +} + +// StoreSchemaV1() is responsible for saving new schema (RAFT) to boltDB +func (st *Store) StoreSchemaV1() error { + return st.cfg.SaveLegacySchema(st.schemaManager.NewSchemaReader().States()) +} + +func (st *Store) Close(ctx context.Context) error { + if !st.open.Load() { + return nil + } + + // transfer leadership: it stops accepting client requests, ensures + // the target server is up to date and initiates the transfer + if st.IsLeader() { + st.log.Info("transferring leadership to another server") + if err := st.raft.LeadershipTransfer().Error(); err != nil { + 
st.log.WithError(err).Error("transferring leadership") + } else { + st.log.Info("successfully transferred leadership to another server") + } + } + + if err := st.raft.Shutdown().Error(); err != nil { + return err + } + + st.open.Store(false) + + st.log.Info("closing raft-net ...") + if err := st.raftTransport.Close(); err != nil { + // it's not that fatal if we weren't able to close + // the transport, that's why just warn + st.log.WithError(err).Warn("close raft-net") + } + + st.log.Info("closing log store ...") + if err := st.logStore.Close(); err != nil { + return fmt.Errorf("close log store: %w", err) + } + + st.log.Info("closing data store ...") + if err := st.schemaManager.Close(ctx); err != nil { + return fmt.Errorf(" close database: %w", err) + } + + return nil +} + +func (st *Store) SetDB(db schema.Indexer) { st.schemaManager.SetIndexer(db) } + +func (st *Store) Ready() bool { + return st.open.Load() && st.dbLoaded.Load() && st.Leader() != "" +} + +// WaitToLoadDB waits for the DB to be loaded. The DB might be first loaded +// after RAFT is in a healthy state, which is when the leader has been elected and there +// is consensus on the log. 
+func (st *Store) WaitToRestoreDB(ctx context.Context, period time.Duration, close chan struct{}) error { + t := time.NewTicker(period) + defer t.Stop() + for { + select { + case <-close: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-t.C: + if st.dbLoaded.Load() { + return nil + } else { + st.log.Info("waiting for database to be restored") + } + } + } +} + +// WaitForAppliedIndex waits until the update with the given version is propagated to this follower node +func (st *Store) WaitForAppliedIndex(ctx context.Context, period time.Duration, version uint64) error { + if idx := st.lastAppliedIndex.Load(); idx >= version { + return nil + } + ctx, cancel := context.WithTimeout(ctx, st.cfg.ConsistencyWaitTimeout) + defer cancel() + ticker := time.NewTicker(period) + defer ticker.Stop() + var idx uint64 + for { + select { + case <-ctx.Done(): + return fmt.Errorf("%w: version got=%d want=%d", types.ErrDeadlineExceeded, idx, version) + case <-ticker.C: + if idx = st.lastAppliedIndex.Load(); idx >= version { + return nil + } else { + st.log.WithFields(logrus.Fields{ + "got": idx, + "want": version, + }).Debug("wait for update version") + } + } + } +} + +// IsLeader returns whether this node is the leader of the cluster +func (st *Store) IsLeader() bool { + return st.raft != nil && st.raft.State() == raft.Leader +} + +// SchemaReader returns a SchemaReader from the underlying schema manager using a wait function that will make it wait +// for a raft log entry to be applied in the FSM Store before authorizing the read to continue. +func (st *Store) SchemaReader() schema.SchemaReader { + f := func(ctx context.Context, version uint64) error { + return st.WaitForAppliedIndex(ctx, time.Millisecond*50, version) + } + return st.schemaManager.NewSchemaReaderWithWaitFunc(f) +} + +// Stats returns internal statistics from this store, for informational/debugging purposes only. +// +// The statistics directly from raft are nested under the "raft" key. 
If the raft statistics are +// not yet available, then the "raft" key will not exist. +// See https://pkg.go.dev/github.com/hashicorp/raft#Raft.Stats for the default raft stats. +// +// The values of "leader_address" and "leader_id" are the respective address/ID for the current +// leader of the cluster. They may be empty strings if there is no current leader or the leader is +// unknown. +// +// The value of "ready" indicates whether this store is ready, see Store.Ready. +// +// The value of "is_voter" indicates whether this store is a voter, see Store.IsVoter. +// +// The value of "open" indicates whether this store is open, see Store.open. +// +// The value of "bootstrapped" indicates whether this store has completed bootstrapping, +// see Store.bootstrapped. +// +// The value of "candidates" is a map[string]string of the current candidates IDs/addresses, +// see Store.candidates. +// +// The value of "last_store_log_applied_index" is the index of the last applied command found when +// the store was opened, see Store.lastAppliedIndexToDB. +// +// The value of "last_applied_index" is the index of the latest update to the store, +// see Store.lastAppliedIndex. +// +// The value of "db_loaded" indicates whether the DB has finished loading, see Store.dbLoaded. +// +// Since this is for information/debugging we want to avoid enforcing unnecessary restrictions on +// what can go in these stats, thus we're returning map[string]any. However, any values added to +// this map should be able to be JSON encoded. 
+func (st *Store) Stats() map[string]any { + stats := make(map[string]any) + + // Add custom stats for this store + currentLeaderAddress, currentLeaderID := st.LeaderWithID() + stats["id"] = st.cfg.NodeID + stats["leader_address"] = currentLeaderAddress + stats["leader_id"] = currentLeaderID + stats["ready"] = st.Ready() + stats["is_voter"] = st.IsVoter() + stats["open"] = st.open.Load() + stats["bootstrapped"] = st.bootstrapped.Load() + stats["candidates"] = st.candidates + stats["last_store_log_applied_index"] = st.lastAppliedIndexToDB.Load() + stats["last_applied_index"] = st.lastIndex() + stats["db_loaded"] = st.dbLoaded.Load() + + // If the raft stats exist, add them as a nested map + if st.raft != nil { + stats["raft"] = st.raft.Stats() + // add the servers information + var servers []map[string]any + if cf := st.raft.GetConfiguration(); cf.Error() == nil { + servers = make([]map[string]any, len(cf.Configuration().Servers)) + for i, server := range cf.Configuration().Servers { + servers[i] = map[string]any{ + "id": server.ID, + "address": server.Address, + "suffrage": server.Suffrage, + } + } + stats["raft_latest_configuration_servers"] = servers + } + } + + return stats +} + +// Leader is used to return the current leader address. +// It may return empty strings if there is no current leader or the leader is unknown. 
+func (st *Store) Leader() string { + if st.raft == nil { + return "" + } + add, _ := st.raft.LeaderWithID() + return string(add) +} + +func (st *Store) LeaderWithID() (raft.ServerAddress, raft.ServerID) { + if st.raft == nil { + return "", "" + } + return st.raft.LeaderWithID() +} + +func (st *Store) assertFuture(fut raft.IndexFuture) error { + if err := fut.Error(); err != nil && errors.Is(err, raft.ErrNotLeader) { + return types.ErrNotLeader + } else { + return err + } +} + +func (st *Store) raftConfig() *raft.Config { + cfg := raft.DefaultConfig() + // If the TimeoutsMultiplier is set, use it to multiply the timeout values + // This is used to speed up the raft election, heartbeat, and leader lease + // in a multi-node cluster. + // the default value is 1 + // for production requirement,it's recommended to set it to 5 + // this in order to tolerate the network delay and avoid extensive leader election triggered more frequently + // example : https://developer.hashicorp.com/consul/docs/reference/architecture/server#production-server-requirements + timeOutMultiplier := 1 + if st.cfg.TimeoutsMultiplier > 1 { + timeOutMultiplier = st.cfg.TimeoutsMultiplier + } + if st.cfg.HeartbeatTimeout > 0 { + cfg.HeartbeatTimeout = st.cfg.HeartbeatTimeout + } + if st.cfg.ElectionTimeout > 0 { + cfg.ElectionTimeout = st.cfg.ElectionTimeout + } + if st.cfg.LeaderLeaseTimeout > 0 { + cfg.LeaderLeaseTimeout = st.cfg.LeaderLeaseTimeout + } + if st.cfg.SnapshotInterval > 0 { + cfg.SnapshotInterval = st.cfg.SnapshotInterval + } + if st.cfg.SnapshotThreshold > 0 { + cfg.SnapshotThreshold = st.cfg.SnapshotThreshold + } + if st.cfg.TrailingLogs > 0 { + cfg.TrailingLogs = st.cfg.TrailingLogs + } + cfg.HeartbeatTimeout *= time.Duration(timeOutMultiplier) + cfg.ElectionTimeout *= time.Duration(timeOutMultiplier) + cfg.LeaderLeaseTimeout *= time.Duration(timeOutMultiplier) + cfg.LocalID = raft.ServerID(st.cfg.NodeID) + cfg.LogLevel = st.cfg.Logger.GetLevel().String() + cfg.NoLegacyTelemetry 
= true + + logger := log.NewHCLogrusLogger("raft", st.log) + cfg.Logger = logger + + return cfg +} + +func (st *Store) openDatabase(ctx context.Context) { + if st.dbLoaded.Load() { + return + } + + if st.cfg.MetadataOnlyVoters { + st.log.Info("Not loading local DB as the node is metadata only") + } else { + st.log.Info("loading local db") + if err := st.schemaManager.Load(ctx, st.cfg.NodeID); err != nil { + st.log.WithError(err).Error("cannot restore database") + panic("error restoring database") + } + st.log.Info("local DB successfully loaded") + } + + st.log.WithField("n", st.schemaManager.NewSchemaReader().Len()).Info("schema manager loaded") +} + +// reloadDBFromSchema() it will be called from two places Restore(), Apply() +// on constructing raft.NewRaft(..) the raft lib. will +// call Restore() first to restore from snapshots if there is any and +// then later will call Apply() on any new committed log +func (st *Store) reloadDBFromSchema() { + if !st.cfg.MetadataOnlyVoters { + st.schemaManager.ReloadDBFromSchema() + } else { + st.log.Info("skipping reload DB from schema as the node is metadata only") + } + st.dbLoaded.Store(true) + + // in this path it means it was called from Apply() + // or forced Restore() + if st.raft != nil { + // we don't update lastAppliedIndexToDB if not a restore + return + } + + // restore requests from snapshots before init new RAFT node + lastLogApplied, err := st.LastAppliedCommand() + if err != nil { + st.log.WithField("error", err).Warn("can't detect the last applied command, setting the lastLogApplied to 0") + } + + val := max(lastSnapshotIndex(st.snapshotStore), lastLogApplied) + st.lastAppliedIndexToDB.Store(val) + st.metrics.fsmStartupAppliedIndex.Set(float64(val)) +} + +func (st *Store) FSMHasCaughtUp() bool { + return st.lastAppliedIndex.Load() >= st.lastAppliedIndexToDB.Load() +} + +type Response struct { + Error error + Version uint64 +} + +var _ raft.FSM = &Store{} + +func lastSnapshotIndex(snapshotStore 
*raft.FileSnapshotStore) uint64 { + if snapshotStore == nil { + return 0 + } + + ls, err := snapshotStore.List() + if err != nil || len(ls) == 0 { + return 0 + } + return ls[0].Index +} + +// recoverSingleNode is used to manually force a new configuration in order to +// recover from a loss of quorum where the current configuration cannot be +// WARNING! This operation implicitly commits all entries in the Raft log, so +// in general this is an extremely unsafe operation and that's why it's made to be +// used in a single cluster node. +// for more details see : https://github.com/hashicorp/raft/blob/main/api.go#L279 +func (st *Store) recoverSingleNode(force bool) error { + if !force && (st.cfg.BootstrapExpect > 1 || len(st.candidates) > 1) { + return fmt.Errorf("bootstrap expect %v, candidates %v, "+ + "can't perform auto recovery in multi node cluster", st.cfg.BootstrapExpect, st.candidates) + } + servers := st.raft.GetConfiguration().Configuration().Servers + // nothing to do here, wasn't a single node + if !force && len(servers) != 1 { + st.log.WithFields(logrus.Fields{ + "servers_from_previous_configuration": servers, + "candidates": st.candidates, + }).Warn("didn't perform cluster recovery") + return nil + } + + exNode := servers[0] + newNode := raft.Server{ + ID: raft.ServerID(st.cfg.NodeID), + Address: raft.ServerAddress(fmt.Sprintf("%s:%d", st.cfg.Host, st.cfg.RPCPort)), + Suffrage: raft.Voter, + } + + // same node nothing to do here + if !force && (exNode.ID == newNode.ID && exNode.Address == newNode.Address) { + return nil + } + + st.log.WithFields(logrus.Fields{ + "action": "raft_cluster_recovery", + "existed_single_cluster_node": exNode, + "new_single_cluster_node": newNode, + }).Info("perform cluster recovery") + + fut := st.raft.Shutdown() + if err := fut.Error(); err != nil { + return err + } + + recoveryConfig := st.cfg + // Force the recovery to be metadata only and un-assign the associated DB to ensure no DB operations are made during + // the 
restore to avoid any data change. + recoveryConfig.MetadataOnlyVoters = true + recoveryConfig.DB = nil + // we don't use actual registry here, because we don't want to register metrics, it's already registered + // in actually FSM and this is FSM is temporary for recovery. + tempFSM := NewFSM(recoveryConfig, st.authZController, st.snapshotter, prometheus.NewPedanticRegistry()) + if err := raft.RecoverCluster(st.raftConfig(), + &tempFSM, + st.logCache, + st.logStore, + st.snapshotStore, + st.raftTransport, + raft.Configuration{Servers: []raft.Server{newNode}}); err != nil { + return err + } + + var err error + st.raft, err = raft.NewRaft(st.raftConfig(), st, st.logCache, st.logStore, st.snapshotStore, st.raftTransport) + if err != nil { + return fmt.Errorf("raft.NewRaft %v %w", st.raftTransport.LocalAddr(), err) + } + + if exNode.ID == newNode.ID { + // no node name change needed in the state + return nil + } + + st.log.WithFields(logrus.Fields{ + "action": "replace_states_node_name", + "old_single_cluster_node_name": exNode.ID, + "new_single_cluster_node_name": newNode.ID, + }).Info("perform cluster recovery") + st.schemaManager.ReplaceStatesNodeName(string(newNode.ID)) + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/store_apply.go b/platform/dbops/binaries/weaviate-src/cluster/store_apply.go new file mode 100644 index 0000000000000000000000000000000000000000..782970377a020e733acf31e53ae15dcb14dd3bc1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/store_apply.go @@ -0,0 +1,405 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "fmt" + "sync" + "time" + + "github.com/hashicorp/raft" + "github.com/sirupsen/logrus" + "google.golang.org/protobuf/proto" + + "github.com/weaviate/weaviate/cluster/proto/api" + enterrors "github.com/weaviate/weaviate/entities/errors" +) + +func (st *Store) Execute(req *api.ApplyRequest) (uint64, error) { + st.log.WithFields(logrus.Fields{ + "type": api.ApplyRequest_Type_name[int32(req.Type)], + "class": req.Class, + }).Debug("server.execute") + + // Parse the underlying command before pre execute filtering to avoid queryinf the schema is the underlying command + // is invalid + cmdBytes, err := proto.Marshal(req) + if err != nil { + return 0, fmt.Errorf("marshal command: %w", err) + } + + // Call the filtering to avoid committing to the FSM unnecessary updates + if err := st.schemaManager.PreApplyFilter(req); err != nil { + return 0, err + } + + // The change is validated, we can apply it in RAFT + fut := st.raft.Apply(cmdBytes, st.applyTimeout) + + // Always call Error first otherwise the response can't be read from the future + if err := fut.Error(); err != nil { + return 0, err + } + + // Always wait for the response + futureResponse := fut.Response() + resp, ok := futureResponse.(Response) + if !ok { + // This should not happen, but it's better to log an error *if* it happens than panic and crash. + return 0, fmt.Errorf("response returned from raft apply is not of type Response instead got: %T, this should not happen", futureResponse) + } + return resp.Version, resp.Error +} + +// StoreConfiguration is invoked once a log entry containing a configuration +// change is committed. It takes the index at which the configuration was +// written and the configuration value. + +// We implemented this to keep `lastAppliedIndex` metric to correct value +// to also handle `LogConfiguration` type of Raft command. 
+func (st *Store) StoreConfiguration(index uint64, _ raft.Configuration) { + st.metrics.raftLastAppliedIndex.Set(float64(index)) +} + +// Apply is called once a log entry is committed by a majority of the cluster. +// Apply should apply the log to the FSM. Apply must be deterministic and +// produce the same result on all peers in the cluster. +// The returned value is returned to the client as the ApplyFuture.Response. +func (st *Store) Apply(l *raft.Log) any { + ret := Response{Version: l.Index} + + start := time.Now() + defer func() { + // this defer is final one that called before returning and thus capturing the + // applyDuration correctly. + st.metrics.applyDuration.Observe(float64(time.Since(start).Seconds())) + }() + + if l.Type != raft.LogCommand { + st.log.WithFields(logrus.Fields{ + "type": l.Type, + "index": l.Index, + }).Warn("not a valid command") + return ret + } + cmd := api.ApplyRequest{} + if err := proto.Unmarshal(l.Data, &cmd); err != nil { + st.log.WithError(err).Error("decode command") + panic("error proto un-marshalling log data") + } + + // schemaOnly is necessary so that on restart when we are re-applying RAFT log entries to our in-memory schema we + // don't update the database. This can lead to data loss for example if we drop then re-add a class. + // If we don't have any last applied index on start, schema only is always false. + // we check for index !=0 to force apply of the 1st index in both db and schema + catchingUp := l.Index != 0 && l.Index <= st.lastAppliedIndexToDB.Load() + schemaOnly := catchingUp || st.cfg.MetadataOnlyVoters + defer func() { + // If we have an applied index from the previous store (i.e from disk). Then reload the DB once we catch up as + // that means we're done doing schema only. 
+ // we do this at the beginning to handle situation were schema was catching up + // and to make sure no matter is the error status we are going to open the db on startup + // we reload the db only if we have a previous state and the db is not loaded + dbReloadRequired := st.lastAppliedIndexToDB.Load() != 0 && !st.dbLoaded.Load() + if dbReloadRequired && l.Index != 0 && l.Index >= st.lastAppliedIndexToDB.Load() { + st.log.WithFields(logrus.Fields{ + "log_type": l.Type, + "log_name": l.Type.String(), + "log_index": l.Index, + "last_store_log_applied_index": st.lastAppliedIndexToDB.Load(), + }).Info("reloading local DB as RAFT and local DB are now caught up") + st.reloadDBFromSchema() + } + + // we update no mater the error status to avoid any edge cases in the DB layer for already released versions, + // however we do not update the metrics so the metric will be the source of truth + // about AppliedIndex + st.lastAppliedIndex.Store(l.Index) + + if ret.Error != nil { + st.metrics.applyFailures.Inc() + st.log.WithFields(logrus.Fields{ + "log_type": l.Type, + "log_name": l.Type.String(), + "log_index": l.Index, + "cmd_type": cmd.Type, + "cmd_type_name": cmd.Type.String(), + "cmd_class": cmd.Class, + }).WithError(ret.Error).Error("apply command") + return + } + + st.metrics.fsmLastAppliedIndex.Set(float64(l.Index)) + st.metrics.raftLastAppliedIndex.Set(float64(l.Index)) + }() + + cmd.Version = l.Index + // Report only when not ready the progress made on applying log entries. This help users with big schema and long + // startup time to keep track of progress. + // We check for ready state and index <= lastAppliedIndexToDB because just checking ready state would mean this log line + // would keep printing if the node has caught up but there's no leader in the cluster. + // This can happen for example if quorum is lost briefly. 
+ // By checking lastAppliedIndexToDB we ensure that we never print past that index + if !st.Ready() && l.Index <= st.lastAppliedIndexToDB.Load() { + st.log.Debugf("Schema catching up: applying log entry: [%d/%d]", l.Index, st.lastAppliedIndexToDB.Load()) + } + st.log.WithFields(logrus.Fields{ + "log_type": l.Type, + "log_name": l.Type.String(), + "log_index": l.Index, + "cmd_type": cmd.Type, + "cmd_type_name": cmd.Type.String(), + "cmd_class": cmd.Class, + "cmd_schema_only": schemaOnly, + }).Debug("server.apply") + + f := func() {} + + switch cmd.Type { + + case api.ApplyRequest_TYPE_ADD_CLASS: + f = func() { + ret.Error = st.schemaManager.AddClass(&cmd, st.cfg.NodeID, schemaOnly, !catchingUp) + } + + case api.ApplyRequest_TYPE_RESTORE_CLASS: + f = func() { + ret.Error = st.schemaManager.RestoreClass(&cmd, st.cfg.NodeID, schemaOnly, !catchingUp) + } + + case api.ApplyRequest_TYPE_UPDATE_CLASS: + f = func() { + ret.Error = st.schemaManager.UpdateClass(&cmd, st.cfg.NodeID, schemaOnly, !catchingUp) + } + + case api.ApplyRequest_TYPE_DELETE_CLASS: + f = func() { + ret.Error = st.schemaManager.DeleteClass(&cmd, schemaOnly, !catchingUp) + } + + case api.ApplyRequest_TYPE_ADD_PROPERTY: + f = func() { + ret.Error = st.schemaManager.AddProperty(&cmd, schemaOnly, !catchingUp) + } + case api.ApplyRequest_TYPE_CREATE_ALIAS: + f = func() { + ret.Error = st.schemaManager.CreateAlias(&cmd) + } + case api.ApplyRequest_TYPE_REPLACE_ALIAS: + f = func() { + ret.Error = st.schemaManager.ReplaceAlias(&cmd) + } + case api.ApplyRequest_TYPE_DELETE_ALIAS: + f = func() { + ret.Error = st.schemaManager.DeleteAlias(&cmd) + } + case api.ApplyRequest_TYPE_UPDATE_SHARD_STATUS: + f = func() { + ret.Error = st.schemaManager.UpdateShardStatus(&cmd, schemaOnly) + } + case api.ApplyRequest_TYPE_ADD_REPLICA_TO_SHARD: + f = func() { + ret.Error = st.schemaManager.AddReplicaToShard(&cmd, schemaOnly) + } + case api.ApplyRequest_TYPE_DELETE_REPLICA_FROM_SHARD: + f = func() { + ret.Error = 
st.schemaManager.DeleteReplicaFromShard(&cmd, schemaOnly) + } + + case api.ApplyRequest_TYPE_ADD_TENANT: + f = func() { + ret.Error = st.schemaManager.AddTenants(&cmd, schemaOnly) + } + + case api.ApplyRequest_TYPE_UPDATE_TENANT: + f = func() { + ret.Error = st.schemaManager.UpdateTenants(&cmd, schemaOnly) + } + + case api.ApplyRequest_TYPE_DELETE_TENANT: + f = func() { + ret.Error = st.schemaManager.DeleteTenants(&cmd, schemaOnly) + } + + case api.ApplyRequest_TYPE_TENANT_PROCESS: + f = func() { + ret.Error = st.schemaManager.UpdateTenantsProcess(&cmd, schemaOnly) + } + + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_SYNC_SHARD: + f = func() { + ret.Error = st.schemaManager.SyncShard(&cmd, schemaOnly) + } + + case api.ApplyRequest_TYPE_STORE_SCHEMA_V1: + f = func() { + ret.Error = st.StoreSchemaV1() + } + case api.ApplyRequest_TYPE_UPSERT_ROLES_PERMISSIONS: + f = func() { + ret.Error = st.authZManager.UpsertRolesPermissions(&cmd) + } + case api.ApplyRequest_TYPE_DELETE_ROLES: + f = func() { + ret.Error = st.authZManager.DeleteRoles(&cmd) + } + case api.ApplyRequest_TYPE_REMOVE_PERMISSIONS: + f = func() { + ret.Error = st.authZManager.RemovePermissions(&cmd) + } + case api.ApplyRequest_TYPE_ADD_ROLES_FOR_USER: + f = func() { + ret.Error = st.authZManager.AddRolesForUser(&cmd) + } + case api.ApplyRequest_TYPE_REVOKE_ROLES_FOR_USER: + f = func() { + ret.Error = st.authZManager.RevokeRolesForUser(&cmd) + } + + case api.ApplyRequest_TYPE_UPSERT_USER: + f = func() { + ret.Error = st.dynUserManager.CreateUser(&cmd) + } + case api.ApplyRequest_TYPE_DELETE_USER: + f = func() { + ret.Error = st.dynUserManager.DeleteUser(&cmd) + } + case api.ApplyRequest_TYPE_ROTATE_USER_API_KEY: + f = func() { + ret.Error = st.dynUserManager.RotateKey(&cmd) + } + case api.ApplyRequest_TYPE_SUSPEND_USER: + f = func() { + ret.Error = st.dynUserManager.SuspendUser(&cmd) + } + case api.ApplyRequest_TYPE_ACTIVATE_USER: + f = func() { + ret.Error = st.dynUserManager.ActivateUser(&cmd) + } + 
case api.ApplyRequest_TYPE_CREATE_USER_WITH_KEY: + f = func() { + ret.Error = st.dynUserManager.CreateUserWithKeyRequest(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE: + f = func() { + ret.Error = st.replicationManager.Replicate(l.Index, &cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_REGISTER_ERROR: + f = func() { + ret.Error = st.replicationManager.RegisterError(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_UPDATE_STATE: + f = func() { + ret.Error = st.replicationManager.UpdateReplicateOpState(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_CANCEL: + f = func() { + ret.Error = st.replicationManager.CancelReplication(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_DELETE: + f = func() { + ret.Error = st.replicationManager.DeleteReplication(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_DELETE_ALL: + f = func() { + ret.Error = st.replicationManager.DeleteAllReplications(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_REMOVE: + f = func() { + ret.Error = st.replicationManager.RemoveReplicaOp(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_CANCELLATION_COMPLETE: + f = func() { + ret.Error = st.replicationManager.ReplicationCancellationComplete(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_DELETE_BY_COLLECTION: + f = func() { + ret.Error = st.replicationManager.DeleteReplicationsByCollection(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_DELETE_BY_TENANTS: + f = func() { + ret.Error = st.replicationManager.DeleteReplicationsByTenants(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REGISTER_SCHEMA_VERSION: + f = func() { + ret.Error = st.replicationManager.StoreSchemaVersion(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_ADD_REPLICA_TO_SHARD: + f = func() { + ret.Error = st.schemaManager.ReplicationAddReplicaToShard(&cmd, schemaOnly) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_ALL: + f = 
func() { + ret.Error = st.replicationManager.ForceDeleteAll(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_COLLECTION: + f = func() { + ret.Error = st.replicationManager.ForceDeleteByCollection(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_COLLECTION_AND_SHARD: + f = func() { + ret.Error = st.replicationManager.ForceDeleteByCollectionAndShard(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_TARGET_NODE: + f = func() { + ret.Error = st.replicationManager.ForceDeleteByTargetNode(&cmd) + } + case api.ApplyRequest_TYPE_REPLICATION_REPLICATE_FORCE_DELETE_BY_UUID: + f = func() { + ret.Error = st.replicationManager.ForceDeleteByUuid(&cmd) + } + + case api.ApplyRequest_TYPE_DISTRIBUTED_TASK_ADD: + f = func() { + ret.Error = st.distributedTasksManager.AddTask(&cmd, l.Index) + } + case api.ApplyRequest_TYPE_DISTRIBUTED_TASK_RECORD_NODE_COMPLETED: + f = func() { + ret.Error = st.distributedTasksManager.RecordNodeCompletion(&cmd, st.numberOfNodesInTheCluster()) + } + case api.ApplyRequest_TYPE_DISTRIBUTED_TASK_CANCEL: + f = func() { + ret.Error = st.distributedTasksManager.CancelTask(&cmd) + } + case api.ApplyRequest_TYPE_DISTRIBUTED_TASK_CLEAN_UP: + f = func() { + ret.Error = st.distributedTasksManager.CleanUpTask(&cmd) + } + + default: + // This could occur when a new command has been introduced in a later app version + // At this point, we need to panic so that the app undergo an upgrade during restart + const msg = "consider upgrading to newer version" + st.log.WithFields(logrus.Fields{ + "type": cmd.Type, + "class": cmd.Class, + "more": msg, + }).Error("unknown command") + } + + // Wrap the function in a go routine to ensure panic recovery. 
This is necessary as this function is run in an + // unwrapped goroutine in the raft library + wg := sync.WaitGroup{} + wg.Add(1) + g := func() { + f() + wg.Done() + } + enterrors.GoWrapper(g, st.log) + wg.Wait() + + return ret +} + +func (st *Store) numberOfNodesInTheCluster() int { + return len(st.raft.GetConfiguration().Configuration().Servers) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/store_apply_index_test.go b/platform/dbops/binaries/weaviate-src/cluster/store_apply_index_test.go new file mode 100644 index 0000000000000000000000000000000000000000..39b2e392182ed6ecd9495ecded364b29263f1625 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/store_apply_index_test.go @@ -0,0 +1,101 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "errors" + "testing" + + "github.com/hashicorp/raft" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/sharding" +) + +// setupTestData creates common test data for store apply index tests +func setupTestData(t *testing.T, initialIndex uint64) (MockStore, *raft.Log) { + mockStore := NewMockStore(t, "Node-1", 0) + mockStore.store.lastAppliedIndex.Store(initialIndex) + mockStore.store.metrics = newStoreMetrics("Node-1", prometheus.NewPedanticRegistry()) + mockStore.store.metrics.fsmLastAppliedIndex.Set(float64(initialIndex)) + + cls := &models.Class{ + Class: "TestClass", + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + + ss := 
&sharding.State{ + Physical: map[string]sharding.Physical{ + "T1": { + Name: "T1", + BelongsToNodes: []string{"Node-1"}, + Status: "HOT", + }, + }, + } + + log := &raft.Log{ + Index: initialIndex + 1, + Type: raft.LogCommand, + Data: cmdAsBytes("TestClass", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{Class: cls, State: ss}, nil), + } + + return mockStore, log +} + +func TestStore_ApplyIndex(t *testing.T) { + // Test case 1: Apply index is greater than last applied index + t.Run("Apply index is greater than last applied index", func(t *testing.T) { + mockStore, log := setupTestData(t, 100) + mockStore.parser.On("ParseClass", mock.Anything).Return(nil) + mockStore.indexer.On("AddClass", mock.Anything).Return(nil) + mockStore.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + result := mockStore.store.Apply(log) + + // Verify that the result contains no error + resp, ok := result.(Response) + assert.True(t, ok) + assert.NoError(t, resp.Error) + + // Verify that lastAppliedIndex was updated + currentIndex := mockStore.store.lastAppliedIndex.Load() + assert.Equal(t, uint64(101), log.Index) + assert.Equal(t, uint64(101), currentIndex, "lastAppliedIndex should be updated on success") + }) + + // Test case 2: Apply index fails due to parse error + t.Run("Apply index fails due to parse error", func(t *testing.T) { + mockStore, log := setupTestData(t, 100) + mockStore.parser.On("ParseClass", mock.Anything).Return(errors.New("parse error")) + result := mockStore.store.Apply(log) + + // Verify that the result contains an error + resp, ok := result.(Response) + assert.True(t, ok) + assert.Error(t, resp.Error) + + // Verify that lastAppliedIndex was not updated + assert.Equal(t, uint64(101), log.Index) + // we depend on metrics to check if the index was updated + // as we allow lastAppliedIndex to be updated even when there's an error + // to handle edge cases in the DB layer for already released versions + assert.Equal(t, float64(100), 
testutil.ToFloat64(mockStore.store.metrics.fsmLastAppliedIndex), "lastAppliedIndex should not be updated when there's an error") + }) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/store_apply_test.go b/platform/dbops/binaries/weaviate-src/cluster/store_apply_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c5e95ea76245f741115e0927de66da2a21a6f3c8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/store_apply_test.go @@ -0,0 +1,470 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "errors" + "testing" + + "github.com/hashicorp/raft" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/weaviate/weaviate/cluster/proto/api" + clusterschema "github.com/weaviate/weaviate/cluster/schema" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/sharding" +) + +func TestStore_Apply_LogTypes(t *testing.T) { + ms, _ := setupApplyTest(t) + + tests := []struct { + name string + logType raft.LogType + expected bool + }{ + { + name: "Valid LogCommand type", + logType: raft.LogCommand, + expected: true, + }, + { + name: "LogNoop type", + logType: raft.LogNoop, + expected: true, // Noop logs are valid but don't do anything + }, + { + name: "LogBarrier type", + logType: raft.LogBarrier, + expected: true, // Barrier logs are valid but don't do anything + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + log := &raft.Log{ + Index: 1, + Type: tt.logType, + Data: []byte{}, + } + + result := ms.store.Apply(log) + resp, ok := result.(Response) + 
assert.True(t, ok) + assert.NoError(t, resp.Error) // All log types return no error, but LogCommand is the only one that processes data + + // Verify all mock expectations + ms.parser.AssertExpectations(t) + ms.indexer.AssertExpectations(t) + }) + } +} + +func TestStore_Apply_CommandTypes(t *testing.T) { + // Create test data that will be reused + cls := &models.Class{ + Class: "TestClass", + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + + ss := &sharding.State{ + Physical: map[string]sharding.Physical{ + "T1": { + Name: "T1", + BelongsToNodes: []string{"Node-1"}, + Status: "HOT", + }, + }, + } + + tests := []struct { + name string + cmdType api.ApplyRequest_Type + setupMocks func(MockStore) + expectError bool + cmdData interface{} + preApply func(MockStore) // Function to run before applying the command + }{ + { + name: "AddClass command", + cmdType: api.ApplyRequest_TYPE_ADD_CLASS, + setupMocks: func(ms MockStore) { + ms.parser.On("ParseClass", mock.Anything).Return(nil) + ms.indexer.On("AddClass", mock.Anything).Return(nil) + ms.indexer.On("TriggerSchemaUpdateCallbacks").Return() + }, + expectError: false, + cmdData: api.AddClassRequest{Class: cls, State: ss}, + }, + { + name: "UpdateClass command", + cmdType: api.ApplyRequest_TYPE_UPDATE_CLASS, + setupMocks: func(ms MockStore) { + // For UpdateClass, we need to set up ParseClassUpdate + // Note: ParseClass is called by the schema manager during update + ms.parser.On("ParseClass", mock.Anything).Return(nil) + ms.parser.On("ParseClassUpdate", mock.Anything, mock.Anything).Return(cls, nil) + ms.indexer.On("UpdateClass", mock.Anything).Return(nil) + ms.indexer.On("TriggerSchemaUpdateCallbacks").Return() + }, + expectError: false, + cmdData: api.UpdateClassRequest{Class: cls, State: ss}, + preApply: func(ms MockStore) { + // First add the class so it exists for update + addLog := &raft.Log{ + Index: 1, + Type: raft.LogCommand, + Data: cmdAsBytes("TestClass", 
api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{Class: cls, State: ss}, nil), + } + // Set up separate mock expectations for the add operation + ms.parser.On("ParseClass", mock.Anything).Return(nil) + ms.indexer.On("AddClass", mock.Anything).Return(nil) + ms.indexer.On("TriggerSchemaUpdateCallbacks").Return() + ms.store.Apply(addLog) + // Reset mock expectations after add operation + ms.parser.ExpectedCalls = nil + ms.indexer.ExpectedCalls = nil + }, + }, + { + name: "DeleteClass command", + cmdType: api.ApplyRequest_TYPE_DELETE_CLASS, + setupMocks: func(ms MockStore) { + ms.indexer.On("DeleteClass", mock.Anything).Return(nil) + ms.indexer.On("TriggerSchemaUpdateCallbacks").Return() + ms.replicationFSM.On("DeleteReplicationsByCollection", mock.Anything).Return(nil) + }, + expectError: false, + cmdData: nil, + }, + { + name: "Unknown command type", + cmdType: api.ApplyRequest_Type(999), // Non-existent type + setupMocks: func(ms MockStore) { + // No mocks needed for unknown type + }, + expectError: false, // Unknown commands don't return errors, they just log + cmdData: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ms, log := setupApplyTest(t) + + // Run pre-apply setup if needed + if tt.preApply != nil { + tt.preApply(ms) + } + + // Set up mocks after pre-apply to ensure they're not affected by pre-apply operations + tt.setupMocks(ms) + + // Update log with test command type and data + log.Data = cmdAsBytes("TestClass", tt.cmdType, tt.cmdData, nil) + + result := ms.store.Apply(log) + resp, ok := result.(Response) + assert.True(t, ok) + + if tt.expectError { + assert.Error(t, resp.Error) + } else { + assert.NoError(t, resp.Error) + } + + // Verify all mock expectations + ms.parser.AssertExpectations(t) + ms.indexer.AssertExpectations(t) + }) + } +} + +func TestStore_Apply_CatchingUp(t *testing.T) { + tests := []struct { + name string + logIndex uint64 + schemaOnly bool + }{ + { + name: "Catching up (index <= 
lastAppliedIndexToDB)", + logIndex: 50, + schemaOnly: true, + }, + { + name: "Caught up (index > lastAppliedIndexToDB)", + logIndex: 150, + schemaOnly: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ms, log := setupApplyTest(t) + // Set lastAppliedIndexToDB to simulate catching up scenario + ms.store.lastAppliedIndexToDB.Store(uint64(100)) + log.Index = tt.logIndex + + // Setup mock to verify schemaOnly parameter + ms.parser.On("ParseClass", mock.Anything).Return(nil) + if !tt.schemaOnly { + ms.indexer.On("AddClass", mock.Anything).Return(nil) + ms.indexer.On("TriggerSchemaUpdateCallbacks").Return() + } + + // The snapshot store is now properly initialized in setupApplyTest + result := ms.store.Apply(log) + resp, ok := result.(Response) + assert.True(t, ok) + assert.NoError(t, resp.Error) + + // Verify schemaOnly was set correctly by checking if the class was added + class := ms.store.SchemaReader().ReadOnlyClass("TestClass") + if tt.schemaOnly { + assert.NotNil(t, class, "Class should be added in schema-only mode") + } else { + assert.NotNil(t, class, "Class should be added in full mode") + } + + // Verify all mock expectations + ms.parser.AssertExpectations(t) + ms.indexer.AssertExpectations(t) + }) + } +} + +func TestStore_Apply_ReloadDB(t *testing.T) { + t.Run("Reload DB when caught up", func(t *testing.T) { + ms, log := setupApplyTest(t) + // Set lastAppliedIndexToDB to trigger DB reload + ms.store.lastAppliedIndexToDB.Store(100) + log.Index = 150 // Greater than lastAppliedIndexToDB + + // Setup mocks + ms.parser.On("ParseClass", mock.Anything).Return(nil) + ms.indexer.On("AddClass", mock.Anything).Return(nil) + ms.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + result := ms.store.Apply(log) + resp, ok := result.(Response) + assert.True(t, ok) + assert.NoError(t, resp.Error) + + // Verify lastAppliedIndexToDB was reset to 0 + assert.Equal(t, uint64(0), ms.store.lastAppliedIndexToDB.Load()) + + // Verify all mock 
expectations + ms.parser.AssertExpectations(t) + ms.indexer.AssertExpectations(t) + }) + + t.Run("No reload on subsequent higher indices", func(t *testing.T) { + ms, log := setupApplyTest(t) + // Set lastAppliedIndexToDB to trigger initial DB reload + ms.store.lastAppliedIndexToDB.Store(100) + log.Index = 150 // Greater than lastAppliedIndexToDB + + // Setup mocks for first apply + ms.parser.On("ParseClass", mock.Anything).Return(nil) + ms.indexer.On("AddClass", mock.Anything).Return(nil) + ms.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + // First apply should trigger reload + result := ms.store.Apply(log) + resp, ok := result.(Response) + assert.True(t, ok) + assert.NoError(t, resp.Error) + assert.Equal(t, uint64(0), ms.store.lastAppliedIndexToDB.Load()) + + // Reset mocks for second apply + ms.parser.ExpectedCalls = nil + ms.indexer.ExpectedCalls = nil + + // Create a different class for the second apply + cls2 := &models.Class{ + Class: "TestClass2", + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + + // Create sharding state for the second class + ss2 := &sharding.State{ + Physical: map[string]sharding.Physical{ + "T1": { + Name: "T1", + BelongsToNodes: []string{"Node-1"}, + Status: "HOT", + }, + }, + } + + // Setup mocks for second apply + ms.parser.On("ParseClass", mock.Anything).Return(nil) + ms.indexer.On("AddClass", mock.Anything).Return(nil) + ms.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + // Second apply with higher index should not trigger reload + log.Index = 200 + log.Data = cmdAsBytes("TestClass2", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{Class: cls2, State: ss2}, nil) + result = ms.store.Apply(log) + resp, ok = result.(Response) + assert.True(t, ok) + assert.NoError(t, resp.Error) + + // Verify lastAppliedIndexToDB is still 0 + assert.Equal(t, uint64(0), ms.store.lastAppliedIndexToDB.Load()) + + // Verify all mock expectations + ms.parser.AssertExpectations(t) + 
ms.indexer.AssertExpectations(t) + }) +} + +func TestStore_Apply_Metrics(t *testing.T) { + t.Run("Metrics are updated correctly", func(t *testing.T) { + ms, log := setupApplyTest(t) + + // Setup mocks + ms.parser.On("ParseClass", mock.Anything).Return(nil) + ms.indexer.On("AddClass", mock.Anything).Return(nil) + ms.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + // Apply the log + result := ms.store.Apply(log) + resp, ok := result.(Response) + assert.True(t, ok) + assert.NoError(t, resp.Error) + + // Verify metrics were updated + assert.Equal(t, float64(1), testutil.ToFloat64(ms.store.metrics.fsmLastAppliedIndex)) + assert.Equal(t, float64(1), testutil.ToFloat64(ms.store.metrics.raftLastAppliedIndex)) + assert.Equal(t, float64(0), testutil.ToFloat64(ms.store.metrics.applyFailures)) + + // Verify all mock expectations + ms.parser.AssertExpectations(t) + ms.indexer.AssertExpectations(t) + }) +} + +func TestStore_Apply_ErrorHandling(t *testing.T) { + tests := []struct { + name string + setupMocks func(MockStore) + expectError bool + expectPanic bool + }{ + { + name: "Schema manager error", + setupMocks: func(ms MockStore) { + ms.parser.On("ParseClass", mock.Anything).Return(errors.New("schema error")) + }, + expectError: true, + expectPanic: false, + }, + { + name: "Invalid proto data", + setupMocks: func(ms MockStore) { + // No mocks needed, we'll modify the log data directly + }, + expectError: true, + expectPanic: true, // The Apply function panics on invalid proto data + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ms, log := setupApplyTest(t) + tt.setupMocks(ms) + + if tt.name == "Invalid proto data" { + log.Data = []byte("invalid proto data") + } + + if tt.expectPanic { + // Use assert.Panics to verify that the function panics as expected + assert.PanicsWithValue(t, "error proto un-marshalling log data", func() { + ms.store.Apply(log) + }) + // For panic cases, we still want to verify mock expectations + 
ms.parser.AssertExpectations(t) + ms.indexer.AssertExpectations(t) + return + } + + result := ms.store.Apply(log) + resp, ok := result.(Response) + assert.True(t, ok) + + if tt.expectError { + assert.Error(t, resp.Error) + assert.Equal(t, float64(1), testutil.ToFloat64(ms.store.metrics.applyFailures)) + } else { + assert.NoError(t, resp.Error) + assert.Equal(t, float64(0), testutil.ToFloat64(ms.store.metrics.applyFailures)) + } + + // Verify all mock expectations + ms.parser.AssertExpectations(t) + ms.indexer.AssertExpectations(t) + }) + } +} + +func setupApplyTest(t *testing.T) (MockStore, *raft.Log) { + mockStore := NewMockStore(t, "Node-1", 0) + mockStore.store.metrics = newStoreMetrics("Node-1", prometheus.NewPedanticRegistry()) + + // Create a temporary directory for the snapshot store + tmpDir := t.TempDir() + snapshotStore, err := raft.NewFileSnapshotStore(tmpDir, 3, nil) + if err != nil { + t.Fatalf("failed to create snapshot store: %v", err) + } + mockStore.store.snapshotStore = snapshotStore + + // Create a basic class for testing + cls := &models.Class{ + Class: "TestClass", + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + + ss := &sharding.State{ + Physical: map[string]sharding.Physical{ + "T1": { + Name: "T1", + BelongsToNodes: []string{"Node-1"}, + Status: "HOT", + }, + }, + } + + // Create a basic log entry + log := &raft.Log{ + Index: 1, + Type: raft.LogCommand, + Data: cmdAsBytes("TestClass", api.ApplyRequest_TYPE_ADD_CLASS, api.AddClassRequest{Class: cls, State: ss}, nil), + } + + // Initialize the schema manager with replication FSM + mockStore.store.schemaManager = clusterschema.NewSchemaManager("Node-1", mockStore.indexer, mockStore.parser, prometheus.NewPedanticRegistry(), mockStore.logger) + mockStore.store.schemaManager.SetReplicationFSM(mockStore.replicationFSM) + + return mockStore, log +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/store_cluster_rpc.go 
b/platform/dbops/binaries/weaviate-src/cluster/store_cluster_rpc.go new file mode 100644 index 0000000000000000000000000000000000000000..eb323e895e43a8a5f659bcd178441c2474e28412 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/store_cluster_rpc.go @@ -0,0 +1,104 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "errors" + + "github.com/hashicorp/raft" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/cluster/types" +) + +// Join adds the given peer to the cluster. +// This operation must be executed on the leader, otherwise, it will fail with ErrNotLeader. +// If the cluster has not been opened yet, it will return ErrNotOpen. +func (st *Store) Join(id, addr string, voter bool) error { + if !st.open.Load() { + return types.ErrNotOpen + } + if st.raft.State() != raft.Leader { + return types.ErrNotLeader + } + + rID, rAddr := raft.ServerID(id), raft.ServerAddress(addr) + + if !voter { + return st.assertFuture(st.raft.AddNonvoter(rID, rAddr, 0, 0)) + } + return st.assertFuture(st.raft.AddVoter(rID, rAddr, 0, 0)) +} + +// Remove removes this peer from the cluster +func (st *Store) Remove(id string) error { + if !st.open.Load() { + return types.ErrNotOpen + } + if st.raft.State() != raft.Leader { + return types.ErrNotLeader + } + return st.assertFuture(st.raft.RemoveServer(raft.ServerID(id), 0, 0)) +} + +// Notify signals this Store that a node is ready for bootstrapping at the specified address. +// Bootstrapping will be initiated once the number of known nodes reaches the expected level, +// which includes this node. 
+func (st *Store) Notify(id, addr string) (err error) { + if !st.open.Load() { + return types.ErrNotOpen + } + // peer is not voter or already bootstrapped or belong to an existing cluster + if !st.cfg.Voter || st.cfg.BootstrapExpect == 0 || st.bootstrapped.Load() || st.Leader() != "" { + return nil + } + + st.bootstrapMutex.Lock() + defer st.bootstrapMutex.Unlock() + + st.candidates[id] = addr + if len(st.candidates) < st.cfg.BootstrapExpect { + st.log.WithFields(logrus.Fields{ + "action": "bootstrap", + "expect": st.cfg.BootstrapExpect, + "got": st.candidates, + }).Debug("number of candidates lower than bootstrap expect param, stopping notify") + return nil + } + candidates := make([]raft.Server, 0, len(st.candidates)) + for id, addr := range st.candidates { + candidates = append(candidates, raft.Server{ + Suffrage: raft.Voter, + ID: raft.ServerID(id), + Address: raft.ServerAddress(addr), + }) + delete(st.candidates, id) + } + + st.log.WithFields(logrus.Fields{ + "action": "bootstrap", + "candidates": candidates, + }).Info("starting cluster bootstrapping") + + fut := st.raft.BootstrapCluster(raft.Configuration{Servers: candidates}) + if err := fut.Error(); err != nil { + if !errors.Is(err, raft.ErrCantBootstrap) { + st.log.WithField("action", "bootstrap").WithError(err).Error("could not bootstrapping cluster") + return err + } + st.log.WithFields(logrus.Fields{ + "action": "bootstrap", + "warn": err, + }).Warn("bootstrapping cluster") + } + st.bootstrapped.Store(true) + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/store_query.go b/platform/dbops/binaries/weaviate-src/cluster/store_query.go new file mode 100644 index 0000000000000000000000000000000000000000..b06832da021a87a797c46e8a6ff73c64d9a00969 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/store_query.go @@ -0,0 +1,170 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ 
\___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "context" + "fmt" + + "github.com/sirupsen/logrus" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" +) + +func (st *Store) Query(req *cmd.QueryRequest) (*cmd.QueryResponse, error) { + st.log.WithFields(logrus.Fields{"type": req.Type, "type_name": req.Type.String()}).Debug("server.query") + + var payload []byte + var err error + switch req.Type { + case cmd.QueryRequest_TYPE_GET_CLASSES: + payload, err = st.schemaManager.QueryReadOnlyClasses(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get read only class: %w", err) + } + case cmd.QueryRequest_TYPE_GET_SCHEMA: + payload, err = st.schemaManager.QuerySchema() + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get schema: %w", err) + } + case cmd.QueryRequest_TYPE_GET_COLLECTIONS_COUNT: + payload, err = st.schemaManager.QueryCollectionsCount() + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get schema: %w", err) + } + case cmd.QueryRequest_TYPE_GET_TENANTS: + payload, err = st.schemaManager.QueryTenants(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get tenants: %w", err) + } + case cmd.QueryRequest_TYPE_RESOLVE_ALIAS: + payload, err = st.schemaManager.ResolveAlias(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not resolve alias: %w", err) + } + case cmd.QueryRequest_TYPE_GET_ALIASES: + payload, err = st.schemaManager.GetAliases(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get aliases: %w", err) + } + case cmd.QueryRequest_TYPE_GET_SHARD_OWNER: + payload, err = st.schemaManager.QueryShardOwner(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get shard owner: %w", err) + } + case cmd.QueryRequest_TYPE_GET_TENANTS_SHARDS: + payload, err = 
st.schemaManager.QueryTenantsShards(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get tenant shard: %w", err) + } + case cmd.QueryRequest_TYPE_GET_SHARDING_STATE: + payload, err = st.schemaManager.QueryShardingState(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get sharding state: %w", err) + } + case cmd.QueryRequest_TYPE_HAS_PERMISSION: + payload, err = st.authZManager.HasPermission(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get RBAC permissions: %w", err) + } + case cmd.QueryRequest_TYPE_GET_ROLES: + payload, err = st.authZManager.GetRoles(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get RBAC permissions: %w", err) + } + case cmd.QueryRequest_TYPE_GET_ROLES_FOR_USER: + payload, err = st.authZManager.GetRolesForUserOrGroup(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get RBAC permissions: %w", err) + } + case cmd.QueryRequest_TYPE_GET_USERS_OR_GROUPS_WITH_ROLES: + payload, err = st.authZManager.GetUsersOrGroupsWithRoles(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get users and groups with roles: %w", err) + } + case cmd.QueryRequest_TYPE_GET_USERS_FOR_ROLE: + payload, err = st.authZManager.GetUsersForRole(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get RBAC permissions: %w", err) + } + case cmd.QueryRequest_TYPE_GET_CLASS_VERSIONS: + payload, err = st.schemaManager.QueryClassVersions(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get class versions: %w", err) + } + case cmd.QueryRequest_TYPE_GET_USERS: + payload, err = st.dynUserManager.GetUsers(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get dynamic user: %w", err) + } + case cmd.QueryRequest_TYPE_USER_IDENTIFIER_EXISTS: + payload, err = st.dynUserManager.GetUsers(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could 
not check user identifier: %w", err) + } + case cmd.QueryRequest_TYPE_GET_REPLICATION_DETAILS: + payload, err = st.replicationManager.GetReplicationDetailsByReplicationId(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get replication operation details: %w", err) + } + case cmd.QueryRequest_TYPE_GET_REPLICATION_DETAILS_BY_COLLECTION: + payload, err = st.replicationManager.GetReplicationDetailsByCollection(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get replication operation details by collection: %w", err) + } + case cmd.QueryRequest_TYPE_GET_REPLICATION_DETAILS_BY_COLLECTION_AND_SHARD: + payload, err = st.replicationManager.GetReplicationDetailsByCollectionAndShard(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get replication operation details by collection and shards: %w", err) + } + case cmd.QueryRequest_TYPE_GET_REPLICATION_DETAILS_BY_TARGET_NODE: + payload, err = st.replicationManager.GetReplicationDetailsByTargetNode(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get replication operation details by target node: %w", err) + } + case cmd.QueryRequest_TYPE_GET_SHARDING_STATE_BY_COLLECTION: + payload, err = st.replicationManager.QueryShardingStateByCollection(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get sharding state by collection: %w", err) + } + case cmd.QueryRequest_TYPE_GET_SHARDING_STATE_BY_COLLECTION_AND_SHARD: + payload, err = st.replicationManager.QueryShardingStateByCollectionAndShard(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get sharding state by collection and shard: %w", err) + } + case cmd.QueryRequest_TYPE_GET_ALL_REPLICATION_DETAILS: + payload, err = st.replicationManager.GetAllReplicationDetails(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get all replication operation details: %w", err) + } + case 
cmd.QueryRequest_TYPE_DISTRIBUTED_TASK_LIST: + payload, err = st.distributedTasksManager.ListDistributedTasksPayload(context.Background()) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get distributed task list: %w", err) + } + case cmd.QueryRequest_TYPE_GET_REPLICATION_OPERATION_STATE: + payload, err = st.replicationManager.GetReplicationOperationState(req) + if err != nil { + return &cmd.QueryResponse{}, fmt.Errorf("could not get replication operation state: %w", err) + } + default: + // This could occur when a new command has been introduced in a later app version + // At this point, we need to panic so that the app undergo an upgrade during restart + const msg = "consider upgrading to newer version" + st.log.WithFields(logrus.Fields{ + "type": req.Type, + "more": msg, + }).Error("unknown command") + return &cmd.QueryResponse{}, fmt.Errorf("unknown command type %s: %s", req.Type, msg) + } + return &cmd.QueryResponse{Payload: payload}, nil +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/store_raft_config_test.go b/platform/dbops/binaries/weaviate-src/cluster/store_raft_config_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1e14756cf1326237c008fc3a5a6d3b25c1205234 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/store_raft_config_test.go @@ -0,0 +1,166 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "testing" + "time" + + "github.com/hashicorp/raft" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRaftConfig(t *testing.T) { + tests := []struct { + name string + config Config + expectedConfig func(*raft.Config) + }{ + { + name: "default configuration", + config: Config{ + NodeID: "node1", + Logger: logrus.New(), + Voter: true, + WorkDir: t.TempDir(), + Host: "localhost", + RaftPort: 9090, + RPCPort: 9091, + BootstrapExpect: 1, + }, + expectedConfig: func(cfg *raft.Config) { + assert.Equal(t, raft.ServerID("node1"), cfg.LocalID) + assert.Equal(t, "info", cfg.LogLevel) + assert.True(t, cfg.NoLegacyTelemetry) + assert.NotNil(t, cfg.Logger) + // Default timeouts should be used since none were specified + assert.Equal(t, raft.DefaultConfig().HeartbeatTimeout, cfg.HeartbeatTimeout) + assert.Equal(t, raft.DefaultConfig().ElectionTimeout, cfg.ElectionTimeout) + assert.Equal(t, raft.DefaultConfig().LeaderLeaseTimeout, cfg.LeaderLeaseTimeout) + assert.Equal(t, raft.DefaultConfig().SnapshotInterval, cfg.SnapshotInterval) + assert.Equal(t, raft.DefaultConfig().SnapshotThreshold, cfg.SnapshotThreshold) + assert.Equal(t, raft.DefaultConfig().TrailingLogs, cfg.TrailingLogs) + }, + }, + { + name: "custom timeouts with multiplier", + config: Config{ + NodeID: "node1", + Logger: logrus.New(), + Voter: true, + WorkDir: t.TempDir(), + Host: "localhost", + RaftPort: 9090, + RPCPort: 9091, + BootstrapExpect: 1, + HeartbeatTimeout: 2 * time.Second, + ElectionTimeout: 3 * time.Second, + LeaderLeaseTimeout: 4 * time.Second, + TimeoutsMultiplier: 2, + }, + expectedConfig: func(cfg *raft.Config) { + assert.Equal(t, raft.ServerID("node1"), cfg.LocalID) + assert.Equal(t, "info", cfg.LogLevel) + assert.True(t, cfg.NoLegacyTelemetry) + assert.NotNil(t, cfg.Logger) + // Timeouts should be 
multiplied by 2 + assert.Equal(t, 4*time.Second, cfg.HeartbeatTimeout) + assert.Equal(t, 6*time.Second, cfg.ElectionTimeout) + assert.Equal(t, 8*time.Second, cfg.LeaderLeaseTimeout) + }, + }, + { + name: "custom timeouts without multiplier", + config: Config{ + NodeID: "node1", + Logger: logrus.New(), + Voter: true, + WorkDir: t.TempDir(), + Host: "localhost", + RaftPort: 9090, + RPCPort: 9091, + BootstrapExpect: 1, + HeartbeatTimeout: 2 * time.Second, + ElectionTimeout: 3 * time.Second, + LeaderLeaseTimeout: 4 * time.Second, + // TimeoutsMultiplier not set, should default to 1 + }, + expectedConfig: func(cfg *raft.Config) { + assert.Equal(t, raft.ServerID("node1"), cfg.LocalID) + assert.Equal(t, "info", cfg.LogLevel) + assert.True(t, cfg.NoLegacyTelemetry) + assert.NotNil(t, cfg.Logger) + // Timeouts should remain unchanged since multiplier is not set + assert.Equal(t, 2*time.Second, cfg.HeartbeatTimeout) + assert.Equal(t, 3*time.Second, cfg.ElectionTimeout) + assert.Equal(t, 4*time.Second, cfg.LeaderLeaseTimeout) + }, + }, + { + name: "custom snapshot settings", + config: Config{ + NodeID: "node1", + Logger: logrus.New(), + Voter: true, + WorkDir: t.TempDir(), + Host: "localhost", + RaftPort: 9090, + RPCPort: 9091, + BootstrapExpect: 1, + SnapshotInterval: 5 * time.Second, + SnapshotThreshold: 100, + TrailingLogs: 200, + }, + expectedConfig: func(cfg *raft.Config) { + assert.Equal(t, raft.ServerID("node1"), cfg.LocalID) + assert.Equal(t, "info", cfg.LogLevel) + assert.True(t, cfg.NoLegacyTelemetry) + assert.NotNil(t, cfg.Logger) + // Snapshot settings should be set to custom values + assert.Equal(t, 5*time.Second, cfg.SnapshotInterval) + assert.Equal(t, uint64(100), cfg.SnapshotThreshold) + assert.Equal(t, uint64(200), cfg.TrailingLogs) + }, + }, + { + name: "debug log level", + config: Config{ + NodeID: "node1", + Logger: func() *logrus.Logger { l := logrus.New(); l.SetLevel(logrus.DebugLevel); return l }(), + Voter: true, + WorkDir: t.TempDir(), + Host: 
"localhost", + RaftPort: 9090, + RPCPort: 9091, + BootstrapExpect: 1, + }, + expectedConfig: func(cfg *raft.Config) { + assert.Equal(t, raft.ServerID("node1"), cfg.LocalID) + assert.Equal(t, "debug", cfg.LogLevel) + assert.True(t, cfg.NoLegacyTelemetry) + assert.NotNil(t, cfg.Logger) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + store := NewFSM(tt.config, nil, nil, prometheus.NewPedanticRegistry()) + cfg := store.raftConfig() + require.NotNil(t, cfg) + tt.expectedConfig(cfg) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/store_snapshot.go b/platform/dbops/binaries/weaviate-src/cluster/store_snapshot.go new file mode 100644 index 0000000000000000000000000000000000000000..8ecd5ca4ab24a5bc63f26bacd804ebd1ea6cff5d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/store_snapshot.go @@ -0,0 +1,201 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "encoding/json" + "fmt" + "io" + "sync" + + "github.com/hashicorp/raft" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/cluster/fsm" + enterrors "github.com/weaviate/weaviate/entities/errors" +) + +// Persist should dump all necessary state to the WriteCloser 'sink', +// and call sink.Close() when finished or call sink.Cancel() on error. 
+func (s *Store) Persist(sink raft.SnapshotSink) (err error) { + defer sink.Close() + schemaSnapshot, err := s.schemaManager.SchemaSnapshot() + if err != nil { + return fmt.Errorf("schema snapshot: %w", err) + } + + aliasSnapshot, err := s.schemaManager.AliasSnapshot() + if err != nil { + return fmt.Errorf("alias snapshot: %w", err) + } + + rbacSnapshot, err := s.authZManager.Snapshot() + if err != nil { + return fmt.Errorf("rbac snapshot: %w", err) + } + + dbUserSnapshot, err := s.dynUserManager.Snapshot() + if err != nil { + return fmt.Errorf("db user snapshot: %w", err) + } + + tasksSnapshot, err := s.distributedTasksManager.Snapshot() + if err != nil { + return fmt.Errorf("tasks snapshot: %w", err) + } + + replicationSnapshot, err := s.replicationManager.Snapshot() + if err != nil { + return fmt.Errorf("replication snapshot: %w", err) + } + + snap := fsm.Snapshot{ + NodeID: s.cfg.NodeID, + SnapshotID: sink.ID(), + Schema: schemaSnapshot, + Aliases: aliasSnapshot, + RBAC: rbacSnapshot, + DbUsers: dbUserSnapshot, + DistributedTasks: tasksSnapshot, + ReplicationOps: replicationSnapshot, + } + if err := json.NewEncoder(sink).Encode(&snap); err != nil { + return fmt.Errorf("encode: %w", err) + } + + return nil +} + +// Release is invoked when we are finished with the snapshot. +// Satisfy the interface for raft.FSMSnapshot +func (s *Store) Release() { +} + +// Snapshot returns an FSMSnapshot used to: support log compaction, to +// restore the FSM to a previous state, or to bring out-of-date followers up +// to a recent log index. +// +// The Snapshot implementation should return quickly, because Apply can not +// be called while Snapshot is running. Generally this means Snapshot should +// only capture a pointer to the state, and any expensive IO should happen +// as part of FSMSnapshot.Persist. +// +// Apply and Snapshot are always called from the same thread, but Apply will +// be called concurrently with FSMSnapshot.Persist. 
This means the FSM should +// be implemented to allow for concurrent updates while a snapshot is happening. +func (st *Store) Snapshot() (raft.FSMSnapshot, error) { + st.log.Info("persisting snapshot") + return st, nil +} + +// Restore is used to restore an FSM from a snapshot. It is not called +// concurrently with any other command. The FSM must discard all previous +// state before restoring the snapshot. +func (st *Store) Restore(rc io.ReadCloser) error { + f := func() error { + st.log.Info("restoring schema from snapshot") + defer func() { + if err := rc.Close(); err != nil { + st.log.WithError(err).Error("restore snapshot: close reader") + } + }() + + snap := fsm.Snapshot{} + if err := json.NewDecoder(rc).Decode(&snap); err != nil { + return fmt.Errorf("restore snapshot: decode json: %w", err) + } + + if snap.Schema != nil { + if err := st.schemaManager.Restore(snap.Schema, st.cfg.Parser); err != nil { + st.log.WithError(err).Error("restoring schema from snapshot") + return fmt.Errorf("restore schema from snapshot: %w", err) + } + } else { + // old snapshot format + jsonBytes, err := json.Marshal(snap) + if err != nil { + return fmt.Errorf("restore snapshot: marshal json: %w", err) + } + + if err := st.schemaManager.RestoreLegacy(jsonBytes, st.cfg.Parser); err != nil { + st.log.WithError(err).Error("restoring schema from snapshot") + return fmt.Errorf("restore schema from snapshot: %w", err) + } + } + + st.log.Info("successfully restored schema from snapshot") + + if snap.Aliases != nil { + if err := st.schemaManager.RestoreAliases(snap.Aliases); err != nil { + return fmt.Errorf("restore aliases from snapshot: %w", err) + } + } + + if snap.RBAC != nil { + if err := st.authZManager.Restore(snap.RBAC); err != nil { + st.log.WithError(err).Error("restoring rbac from snapshot") + return fmt.Errorf("restore rbac from snapshot: %w", err) + } + } + + if snap.DistributedTasks != nil { + if err := st.distributedTasksManager.Restore(snap.DistributedTasks); err != nil { 
+ st.log.WithError(err).Error("restoring distributed tasks from snapshot") + return fmt.Errorf("restore distributed tasks from snapshot: %w", err) + } + } + + if snap.ReplicationOps != nil { + if err := st.replicationManager.Restore(snap.ReplicationOps); err != nil { + st.log.WithError(err).Error("restoring replication ops from snapshot") + return fmt.Errorf("restore replication ops from snapshot: %w", err) + } + } + + if snap.DbUsers != nil { + if err := st.dynUserManager.Restore(snap.DbUsers); err != nil { + st.log.WithError(err).Error("restoring db user from snapshot") + return fmt.Errorf("restore db user from snapshot: %w", err) + } + } + + if st.cfg.MetadataOnlyVoters { + return nil + } + + snapIndex := lastSnapshotIndex(st.snapshotStore) + if st.lastAppliedIndexToDB.Load() <= snapIndex { + // db shall reload after snapshot applied to schema + st.reloadDBFromSchema() + } + + st.log.WithFields(logrus.Fields{ + "last_applied_index": st.lastIndex(), + "last_store_log_applied_index": st.lastAppliedIndexToDB.Load(), + "last_snapshot_index": snapIndex, + "n": st.schemaManager.NewSchemaReader().Len(), + }).Info("successfully reloaded indexes from snapshot") + + return nil + } + + wg := sync.WaitGroup{} + wg.Add(1) + var err error + g := func() { + err = f() + wg.Done() + } + enterrors.GoWrapper(g, st.log) + wg.Wait() + return err +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/store_snapshot_test.go b/platform/dbops/binaries/weaviate-src/cluster/store_snapshot_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f8ff99c5f8ae8601f0b371f34e5e88b4e2822444 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/store_snapshot_test.go @@ -0,0 +1,1120 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "sync" + "testing" + "time" + + "github.com/hashicorp/raft" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/cluster/mocks" + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/utils" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/sharding" +) + +// TestSchemaSnapshotPersistAndRestore tests snapshot persistence and restoration +func TestSchemaSnapshotPersistAndRestore(t *testing.T) { + // Setup test schema data directly using the Store's Apply method + source := NewMockStore(t, "source-node", utils.MustGetFreeTCPPort()) + setupTestSchema(t, source) + snapshotSink := &mocks.SnapshotSink{ + Buffer: bytes.NewBuffer(nil), + } + + snapshot, err := source.store.Snapshot() + assert.NoError(t, err) + + err = snapshot.Persist(snapshotSink) + assert.NoError(t, err) + + target := NewMockStore(t, "target-node", utils.MustGetFreeTCPPort()) + target.store.init() + target.parser.On("ParseClass", mock.Anything).Return(nil) + target.indexer.On("TriggerSchemaUpdateCallbacks").Return() + target.indexer.On("RestoreClassDir", mock.Anything).Return(nil) + target.indexer.On("UpdateShardStatus", mock.Anything).Return(nil) + target.indexer.On("AddClass", mock.Anything).Return(nil) + + snapshotReader := io.NopCloser(bytes.NewReader(snapshotSink.Buffer.Bytes())) + + err = target.store.Restore(snapshotReader) + assert.NoError(t, err) + + verifySchemaRestoration(t, source, target) +} + +// TestSchemaSnapshotEmptyStore tests snapshot persistence and restoration with an empty store +func TestSchemaSnapshotEmptyStore(t *testing.T) { + source := NewMockStore(t, "empty-source-node", utils.MustGetFreeTCPPort()) + + snapshotSink := &mocks.SnapshotSink{ + Buffer: bytes.NewBuffer(nil), + } + + snapshot, 
err := source.store.Snapshot() + assert.NoError(t, err) + + err = snapshot.Persist(snapshotSink) + assert.NoError(t, err) + + target := NewMockStore(t, "empty-target-node", utils.MustGetFreeTCPPort()) + target.store.init() + + target.parser.On("ParseClass", mock.Anything).Return(nil) + target.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + snapshotReader := io.NopCloser(bytes.NewReader(snapshotSink.Buffer.Bytes())) + + err = target.store.Restore(snapshotReader) + assert.NoError(t, err) + + assert.Equal(t, 0, source.store.SchemaReader().Len(), "Source schema should be empty") + assert.Equal(t, 0, target.store.SchemaReader().Len(), "Target schema should be empty") +} + +// TestSchemaSnapshotPersistError tests handling errors during snapshot persistence +func TestSchemaSnapshotPersistError(t *testing.T) { + // Create source store with schema data + source := NewMockStore(t, "error-source-node", utils.MustGetFreeTCPPort()) + + errorSink := &mocks.SnapshotSink{ + Buffer: bytes.NewBuffer(nil), + WriteError: errors.New("simulated write error"), + } + + snapshot, err := source.store.Snapshot() + assert.NoError(t, err) + + // Persist the snapshot - should return an error + err = snapshot.Persist(errorSink) + assert.Error(t, err, "Expected an error during snapshot persistence") + assert.Contains(t, err.Error(), "simulated write error", "Error should contain the specific write error") +} + +// TestSchemaSnapshotRestoreError tests handling errors during snapshot restoration +func TestSchemaSnapshotRestoreError(t *testing.T) { + // Create source store with schema data + source := NewMockStore(t, "restore-error-source-node", utils.MustGetFreeTCPPort()) + setupTestSchema(t, source) + + snapshotSink := &mocks.SnapshotSink{ + Buffer: bytes.NewBuffer(nil), + } + + snapshot, err := source.store.Snapshot() + assert.NoError(t, err) + + err = snapshot.Persist(snapshotSink) + assert.NoError(t, err) + + target := NewMockStore(t, "restore-error-target-node", 
utils.MustGetFreeTCPPort()) + target.store.init() + + snapshotReader := io.NopCloser(bytes.NewReader(snapshotSink.Buffer.Bytes())) + + // Restore from snapshot - should return an error + target.parser.On("ParseClass", mock.Anything).Return(errors.New("simulated parse error")) + err = target.store.Restore(snapshotReader) + assert.Error(t, err, "Expected an error during snapshot restoration") + assert.Contains(t, err.Error(), "simulated parse error", "Error should contain the specific parse error") +} + +// TestSchemaSnapshotCorruptedData tests restoration from corrupted snapshot data +func TestSchemaSnapshotCorruptedData(t *testing.T) { + target := NewMockStore(t, "corrupt-target-node", utils.MustGetFreeTCPPort()) + + target.parser.On("ParseClass", mock.Anything).Return(nil) + target.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + corruptedData := bytes.NewBufferString(`{invalid`) + snapshotReader := io.NopCloser(corruptedData) + + // Restore from snapshot - should return an error + err := target.store.Restore(snapshotReader) + assert.Error(t, err, "Expected an error when restoring from corrupted data") +} + +// TestConcurrentSnapshotOperations tests the thread safety of snapshot operations +// when performed concurrently +func TestConcurrentSnapshotOperations(t *testing.T) { + source := NewMockStore(t, "concurrent-snapshot-node", utils.MustGetFreeTCPPort()) + setupTestSchema(t, source) + source.store.init() + const numGoroutines = 10 + const iterations = 100 + + var wg sync.WaitGroup + wg.Add(numGoroutines * 3) + + // Test concurrent Persist operations + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + sink := &mocks.SnapshotSink{ + Buffer: &bytes.Buffer{}, + } + err := source.store.Persist(sink) + assert.NoError(t, err) + time.Sleep(time.Microsecond) + } + }() + } + target := NewMockStore(t, "concurrent-snapshot-node", utils.MustGetFreeTCPPort()) + target.store.init() + // Test concurrent Restore 
operations + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + sink := &mocks.SnapshotSink{ + Buffer: &bytes.Buffer{}, + } + err := source.store.Persist(sink) + assert.NoError(t, err) + + target.parser.On("ParseClass", mock.Anything).Return(nil) + target.indexer.On("TriggerSchemaUpdateCallbacks").Return() + // Restore from the snapshot + err = target.store.Restore(io.NopCloser(bytes.NewBuffer(sink.Buffer.Bytes()))) + assert.NoError(t, err) + time.Sleep(time.Microsecond) + verifySchemaRestoration(t, source, target) + sourceSchema := source.store.SchemaReader().ReadOnlySchema() + targetSchema := target.store.SchemaReader().ReadOnlySchema() + assert.Greater(t, len(sourceSchema.Classes), 0) + assert.Equal(t, len(sourceSchema.Classes), len(targetSchema.Classes)) + } + }() + } + + // Test concurrent reads while snapshot operations are happening + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + for j := 0; j < iterations; j++ { + schema := source.store.SchemaReader().ReadOnlySchema() + assert.NotNil(t, schema) + time.Sleep(time.Microsecond) + } + }() + } + + wg.Wait() +} + +func setupTestSchema(t *testing.T, ms MockStore) { + // Set up mock behaviors + ms.parser.On("ParseClass", mock.Anything).Return(nil) + ms.indexer.On("AddClass", mock.Anything).Return(nil) + ms.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + productClass := &models.Class{ + Class: "Product", + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + + shardingState := &sharding.State{ + Physical: map[string]sharding.Physical{ + "tenant1": { + Name: "tenant1", + BelongsToNodes: []string{ms.cfg.NodeID}, + Status: models.TenantActivityStatusHOT, + }, + "tenant2": { + Name: "tenant2", + BelongsToNodes: []string{ms.cfg.NodeID}, + Status: models.TenantActivityStatusCOLD, + }, + }, + } + + categoryClass := &models.Class{ + Class: "Category", + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: 
true, + }, + } + + categoryShardingState := &sharding.State{ + Physical: map[string]sharding.Physical{ + "tenant3": { + Name: "tenant3", + BelongsToNodes: []string{ms.cfg.NodeID}, + Status: models.TenantActivityStatusHOT, + }, + }, + } + + // Create mock raft logs to use with the Store's Apply method + productLog := raft.Log{ + Data: cmdAsBytes("Product", + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: productClass, + State: shardingState, + }, nil), + } + + categoryLog := raft.Log{ + Data: cmdAsBytes("Category", + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: categoryClass, + State: categoryShardingState, + }, nil), + } + + // Apply the logs to add the classes + ms.store.Apply(&productLog) + ms.store.Apply(&categoryLog) + + // Verify classes were added + assert.NotNil(t, ms.store.SchemaReader().ReadOnlyClass("Product"), "Product class should be added") + assert.NotNil(t, ms.store.SchemaReader().ReadOnlyClass("Category"), "Category class should be added") +} + +func verifySchemaRestoration(t *testing.T, source, target MockStore) { + // Get source and target schema readers + sourceSchema := source.store.SchemaReader() + targetSchema := target.store.SchemaReader() + + // Verify classes count + assert.Equal(t, sourceSchema.Len(), targetSchema.Len(), "Schema class count should match") + + // Get all classes via ReadOnlySchema + sourceClasses := sourceSchema.ReadOnlySchema().Classes + targetClasses := targetSchema.ReadOnlySchema().Classes + + // Verify class count + assert.Equal(t, len(sourceClasses), len(targetClasses), "Number of classes should match") + + // Create a map for easier lookup of target classes + targetClassMap := make(map[string]*models.Class) + for _, class := range targetClasses { + targetClassMap[class.Class] = class + } + + // Verify class properties and configuration + for _, sourceClass := range sourceClasses { + targetClass, exists := targetClassMap[sourceClass.Class] + assert.True(t, exists, "Class %s should 
exist in target", sourceClass.Class) + + if exists { + // Compare properties + assert.Equal(t, len(sourceClass.Properties), len(targetClass.Properties), + "Number of properties should match for class %s", sourceClass.Class) + + // Compare vector configs + assert.Equal(t, len(sourceClass.VectorConfig), len(targetClass.VectorConfig), + "Vector config count should match for class %s", sourceClass.Class) + + // Compare sharding state + sourceShardingState, err := readShardingState(sourceSchema, sourceClass.Class) + require.Nil(t, err) + + targetShardingState, err := readShardingState(targetSchema, targetClass.Class) + require.Nil(t, err) + + if sourceShardingState != nil && targetShardingState != nil { + assert.Equal(t, len(sourceShardingState.Physical), len(targetShardingState.Physical), + "Number of tenants should match for class %s", sourceClass.Class) + + assert.Equal(t, sourceShardingState.ReplicationFactor, targetShardingState.ReplicationFactor, + "Replication factor should match for class %s", sourceClass.Class) + // Compare each tenant's configuration + for tenantName, sourceTenant := range sourceShardingState.Physical { + targetTenant, exists := targetShardingState.Physical[tenantName] + assert.True(t, exists, "Tenant %s should exist in target for class %s", tenantName, sourceClass.Class) + + if exists { + assert.Equal(t, sourceTenant.Status, targetTenant.Status, + "Tenant status should match for %s in class %s", tenantName, sourceClass.Class) + assert.Equal(t, len(sourceTenant.BelongsToNodes), len(targetTenant.BelongsToNodes), + "Node count should match for tenant %s in class %s", tenantName, sourceClass.Class) + sourceTenantNumberOfReplicas, err := sourceShardingState.NumberOfReplicas(sourceTenant.Name) + assert.Nil(t, err, "error while getting number of replicas for source tenant %s", sourceTenant.Name) + targetTenantNumberOfReplicas, err := targetShardingState.NumberOfReplicas(targetTenant.Name) + assert.Nil(t, err, "error while getting number of replicas 
for target tenant %s", targetTenant.Name) + assert.Equal(t, sourceTenantNumberOfReplicas, targetTenantNumberOfReplicas) + } + } + } + } + } +} + +func TestReplicationFactorMigration(t *testing.T) { + t.Run("copy sharding state with uninitialized replication factor and partitioning disabled", func(t *testing.T) { + source := NewMockStore(t, "replication-source-node", utils.MustGetFreeTCPPort()) + + source.parser.On("ParseClass", mock.Anything).Return(nil) + source.indexer.On("AddClass", mock.Anything).Return(nil) + source.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + className := "TestClass" + class := &models.Class{ + Class: className, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + + shardState := &sharding.State{ + IndexID: className, + Physical: map[string]sharding.Physical{ + "tenant1": { + Name: "tenant1", + BelongsToNodes: []string{source.cfg.NodeID}, + Status: models.TenantActivityStatusHOT, + }, + }, + PartitioningEnabled: false, + // uninitialized ReplicationFactor + } + + createClassLog := raft.Log{ + Data: cmdAsBytes( + className, + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: class, + State: shardState, + }, nil), + } + + source.store.Apply(&createClassLog) + + require.NotNil(t, source.store.SchemaReader().ReadOnlyClass(className), + "error while reading class schema") + + sourceState, err := readShardingState(source.store.SchemaReader(), class.Class) + require.Nil(t, err) + require.Equal(t, int64(1), sourceState.ReplicationFactor, + "error while copying sharding state") + + snapshotSink := &mocks.SnapshotSink{ + Buffer: bytes.NewBuffer(nil), + } + + snapshot, err := source.store.Snapshot() + require.NoError(t, err, "error while creating snapshot") + + err = snapshot.Persist(snapshotSink) + require.NoError(t, err, "error while persisting snapshot") + + target := NewMockStore(t, "replication-target-node", utils.MustGetFreeTCPPort()) + target.store.init() + + target.parser.On("ParseClass", 
mock.Anything).Return(nil) + target.indexer.On("TriggerSchemaUpdateCallbacks").Return() + target.indexer.On("RestoreClassDir", mock.Anything).Return(nil) + target.indexer.On("UpdateShardStatus", mock.Anything).Return(nil) + target.indexer.On("AddClass", mock.Anything).Return(nil) + + snapshotReader := io.NopCloser(bytes.NewReader(snapshotSink.Buffer.Bytes())) + err = target.store.Restore(snapshotReader) + require.NoError(t, err) + + targetClass := target.store.SchemaReader().ReadOnlyClass(className) + require.NotNil(t, targetClass, "Class should be restored") + + targetState, err := readShardingState(target.store.SchemaReader(), class.Class) + require.Nil(t, err) + require.NotNil(t, targetState, "Sharding state should be restored") + require.Equal(t, int64(1), targetState.ReplicationFactor, + "Replication factor should be migrated to match the number of nodes (1)") + + for tenantName, targetTenant := range targetState.Physical { + require.Equal(t, 1, len(targetTenant.BelongsToNodes), + "Tenant %s should still have 1 replicas", tenantName) + } + }) + + t.Run("copy sharding state with uninitialized replication factor and partitioning enabled", func(t *testing.T) { + source := NewMockStore(t, "partitioning-source-node", utils.MustGetFreeTCPPort()) + + source.parser.On("ParseClass", mock.Anything).Return(nil) + source.indexer.On("AddClass", mock.Anything).Return(nil) + source.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + className := "TestClass" + class := &models.Class{ + Class: className, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + + shardState := &sharding.State{ + IndexID: className, + Physical: map[string]sharding.Physical{}, + PartitioningEnabled: true, + // uninitialized ReplicationFactor + } + + createClassLog := raft.Log{ + Data: cmdAsBytes( + className, + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: class, + State: shardState, + }, nil), + } + + source.store.Apply(&createClassLog) + + // Verify 
class was added + require.NotNil(t, source.store.SchemaReader().ReadOnlyClass(className), + "error while reading class schema") + + sourceState, err := readShardingState(source.store.SchemaReader(), class.Class) + require.Nil(t, err) + require.Equal(t, int64(1), sourceState.ReplicationFactor, + "source replication factor should be 1 before snapshot") + require.True(t, sourceState.PartitioningEnabled, + "partitioning should be enabled") + + snapshotSink := &mocks.SnapshotSink{ + Buffer: bytes.NewBuffer(nil), + } + + snapshot, err := source.store.Snapshot() + require.NoError(t, err, "error while creating snapshot") + + err = snapshot.Persist(snapshotSink) + require.NoError(t, err, "error while persisting snapshot") + + target := NewMockStore(t, "partitioning-target-node", utils.MustGetFreeTCPPort()) + err = target.store.init() + require.NoError(t, err, "error while initializing target store") + + target.parser.On("ParseClass", mock.Anything).Return(nil) + target.indexer.On("TriggerSchemaUpdateCallbacks").Return() + target.indexer.On("RestoreClassDir", mock.Anything).Return(nil) + target.indexer.On("UpdateShardStatus", mock.Anything).Return(nil) + target.indexer.On("AddClass", mock.Anything).Return(nil) + + snapshotReader := io.NopCloser(bytes.NewReader(snapshotSink.Buffer.Bytes())) + err = target.store.Restore(snapshotReader) + require.NoError(t, err, "error while restoring snapshot") + + targetClass := target.store.SchemaReader().ReadOnlyClass(className) + require.NotNil(t, targetClass, "error while reading class") + + targetState, err := readShardingState(target.store.SchemaReader(), class.Class) + require.Nil(t, err) + require.NotNil(t, targetState, "Sharding state should be restored") + require.True(t, targetState.PartitioningEnabled, + "partitioning should still be enabled after restoring the snapshot") + require.Equal(t, int64(1), targetState.ReplicationFactor, + "replication factor should be 1 as a result of migrating a sharding state which is missing the 
replication factor") + }) + + t.Run("copy sharding state with default replication factor and partitioning disabled", func(t *testing.T) { + source := NewMockStore(t, "replication-source-node", utils.MustGetFreeTCPPort()) + + source.parser.On("ParseClass", mock.Anything).Return(nil) + source.indexer.On("AddClass", mock.Anything).Return(nil) + source.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + className := "TestClass" + class := &models.Class{ + Class: className, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + + shardState := &sharding.State{ + IndexID: className, + Physical: map[string]sharding.Physical{ + "tenant1": { + Name: "tenant1", + BelongsToNodes: []string{source.cfg.NodeID}, + Status: models.TenantActivityStatusHOT, + }, + }, + PartitioningEnabled: false, + ReplicationFactor: 0, + } + + createClassLog := raft.Log{ + Data: cmdAsBytes( + className, + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: class, + State: shardState, + }, nil), + } + + source.store.Apply(&createClassLog) + + require.NotNil(t, source.store.SchemaReader().ReadOnlyClass(className), + "error while reading class schema") + + sourceState, err := readShardingState(source.store.SchemaReader(), class.Class) + require.Nil(t, err) + require.Equal(t, int64(1), sourceState.ReplicationFactor, + "error while copying sharding state") + + snapshotSink := &mocks.SnapshotSink{ + Buffer: bytes.NewBuffer(nil), + } + + snapshot, err := source.store.Snapshot() + require.NoError(t, err, "error while creating snapshot") + + err = snapshot.Persist(snapshotSink) + require.NoError(t, err, "error while persisting snapshot") + + target := NewMockStore(t, "replication-target-node", utils.MustGetFreeTCPPort()) + target.store.init() + + target.parser.On("ParseClass", mock.Anything).Return(nil) + target.indexer.On("TriggerSchemaUpdateCallbacks").Return() + target.indexer.On("RestoreClassDir", mock.Anything).Return(nil) + target.indexer.On("UpdateShardStatus", 
mock.Anything).Return(nil) + target.indexer.On("AddClass", mock.Anything).Return(nil) + + snapshotReader := io.NopCloser(bytes.NewReader(snapshotSink.Buffer.Bytes())) + err = target.store.Restore(snapshotReader) + require.NoError(t, err) + + targetClass := target.store.SchemaReader().ReadOnlyClass(className) + require.NotNil(t, targetClass, "Class should be restored") + + targetState, err := readShardingState(target.store.SchemaReader(), class.Class) + require.Nil(t, err) + require.NotNil(t, targetState, "Sharding state should be restored") + require.Equal(t, int64(1), targetState.ReplicationFactor, + "Replication factor should be migrated to match the number of nodes (1)") + + for tenantName, targetTenant := range targetState.Physical { + require.Equal(t, 1, len(targetTenant.BelongsToNodes), + "Tenant %s should still have 1 replicas", tenantName) + } + }) + + t.Run("copy sharding state with default replication factor and partitioning enabled", func(t *testing.T) { + source := NewMockStore(t, "partitioning-source-node", utils.MustGetFreeTCPPort()) + + source.parser.On("ParseClass", mock.Anything).Return(nil) + source.indexer.On("AddClass", mock.Anything).Return(nil) + source.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + className := "TestClass" + class := &models.Class{ + Class: className, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + + shardState := &sharding.State{ + IndexID: className, + Physical: map[string]sharding.Physical{}, + PartitioningEnabled: true, + ReplicationFactor: 0, + } + + createClassLog := raft.Log{ + Data: cmdAsBytes( + className, + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: class, + State: shardState, + }, nil), + } + + source.store.Apply(&createClassLog) + + // Verify class was added + require.NotNil(t, source.store.SchemaReader().ReadOnlyClass(className), + "error while reading class schema") + + sourceState, err := readShardingState(source.store.SchemaReader(), class.Class) + 
require.Nil(t, err) + require.Equal(t, int64(1), sourceState.ReplicationFactor, + "source replication factor should be 1 before snapshot") + require.True(t, sourceState.PartitioningEnabled, + "partitioning should be enabled") + + snapshotSink := &mocks.SnapshotSink{ + Buffer: bytes.NewBuffer(nil), + } + + snapshot, err := source.store.Snapshot() + require.NoError(t, err, "error while creating snapshot") + + err = snapshot.Persist(snapshotSink) + require.NoError(t, err, "error while persisting snapshot") + + target := NewMockStore(t, "partitioning-target-node", utils.MustGetFreeTCPPort()) + err = target.store.init() + require.NoError(t, err, "error while initializing target store") + + target.parser.On("ParseClass", mock.Anything).Return(nil) + target.indexer.On("TriggerSchemaUpdateCallbacks").Return() + target.indexer.On("RestoreClassDir", mock.Anything).Return(nil) + target.indexer.On("UpdateShardStatus", mock.Anything).Return(nil) + target.indexer.On("AddClass", mock.Anything).Return(nil) + + snapshotReader := io.NopCloser(bytes.NewReader(snapshotSink.Buffer.Bytes())) + err = target.store.Restore(snapshotReader) + require.NoError(t, err, "error while restoring snapshot") + + targetClass := target.store.SchemaReader().ReadOnlyClass(className) + require.NotNil(t, targetClass, "error while reading class") + + targetState, err := readShardingState(target.store.SchemaReader(), class.Class) + require.Nil(t, err) + require.NotNil(t, targetState, "Sharding state should be restored") + require.True(t, targetState.PartitioningEnabled, + "partitioning should still be enabled after restoring the snapshot") + require.Equal(t, int64(1), targetState.ReplicationFactor, + "replication factor should be 1 as a result of migrating a sharding state which is missing the replication factor") + }) + + t.Run("copy sharding state with non-default replication factor and partitioning disabled", func(t *testing.T) { + source := NewMockStore(t, "replication-source-node", 
utils.MustGetFreeTCPPort()) + + source.parser.On("ParseClass", mock.Anything).Return(nil) + source.indexer.On("AddClass", mock.Anything).Return(nil) + source.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + className := "TestClass" + class := &models.Class{ + Class: className, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + + shardState := &sharding.State{ + IndexID: className, + Physical: map[string]sharding.Physical{ + "tenant1": { + Name: "tenant1", + BelongsToNodes: []string{source.cfg.NodeID}, + Status: models.TenantActivityStatusHOT, + }, + }, + PartitioningEnabled: false, + ReplicationFactor: 4, + } + + createClassLog := raft.Log{ + Data: cmdAsBytes( + className, + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: class, + State: shardState, + }, nil), + } + + source.store.Apply(&createClassLog) + + require.NotNil(t, source.store.SchemaReader().ReadOnlyClass(className), + "error while reading class schema") + + sourceState, err := readShardingState(source.store.SchemaReader(), class.Class) + require.Nil(t, err) + require.Equal(t, int64(4), sourceState.ReplicationFactor, + "error while copying sharding state") + + snapshotSink := &mocks.SnapshotSink{ + Buffer: bytes.NewBuffer(nil), + } + + snapshot, err := source.store.Snapshot() + require.NoError(t, err, "error while creating snapshot") + + err = snapshot.Persist(snapshotSink) + require.NoError(t, err, "error while persisting snapshot") + + target := NewMockStore(t, "replication-target-node", utils.MustGetFreeTCPPort()) + target.store.init() + + target.parser.On("ParseClass", mock.Anything).Return(nil) + target.indexer.On("TriggerSchemaUpdateCallbacks").Return() + target.indexer.On("RestoreClassDir", mock.Anything).Return(nil) + target.indexer.On("UpdateShardStatus", mock.Anything).Return(nil) + target.indexer.On("AddClass", mock.Anything).Return(nil) + + snapshotReader := io.NopCloser(bytes.NewReader(snapshotSink.Buffer.Bytes())) + err = 
target.store.Restore(snapshotReader) + require.NoError(t, err) + + targetClass := target.store.SchemaReader().ReadOnlyClass(className) + require.NotNil(t, targetClass, "Class should be restored") + + targetState, err := readShardingState(target.store.SchemaReader(), class.Class) + require.Nil(t, err) + require.NotNil(t, targetState, "Sharding state should be restored") + require.Equal(t, int64(4), targetState.ReplicationFactor, + "Replication factor should be migrated to match the number of nodes (1)") + + for tenantName, targetTenant := range targetState.Physical { + require.Equal(t, 1, len(targetTenant.BelongsToNodes), + "Tenant %s should still have 1 replicas", tenantName) + } + }) + + t.Run("copy sharding state with non-default replication factor and partitioning enabled", func(t *testing.T) { + source := NewMockStore(t, "partitioning-source-node", utils.MustGetFreeTCPPort()) + + source.parser.On("ParseClass", mock.Anything).Return(nil) + source.indexer.On("AddClass", mock.Anything).Return(nil) + source.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + className := "TestClass" + class := &models.Class{ + Class: className, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + + shardState := &sharding.State{ + IndexID: className, + Physical: map[string]sharding.Physical{}, // Empty physical map for partitioning + PartitioningEnabled: true, + ReplicationFactor: 3, + } + + createClassLog := raft.Log{ + Data: cmdAsBytes( + className, + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: class, + State: shardState, + }, nil), + } + + source.store.Apply(&createClassLog) + + // Verify class was added + require.NotNil(t, source.store.SchemaReader().ReadOnlyClass(className), + "error while reading class schema") + + sourceState, err := readShardingState(source.store.SchemaReader(), class.Class) + require.Nil(t, err) + require.Equal(t, int64(3), sourceState.ReplicationFactor, + "source replication factor should be 1 before 
snapshot") + require.True(t, sourceState.PartitioningEnabled, + "partitioning should be enabled") + + snapshotSink := &mocks.SnapshotSink{ + Buffer: bytes.NewBuffer(nil), + } + + snapshot, err := source.store.Snapshot() + require.NoError(t, err, "error while creating snapshot") + + err = snapshot.Persist(snapshotSink) + require.NoError(t, err, "error while persisting snapshot") + + target := NewMockStore(t, "partitioning-target-node", utils.MustGetFreeTCPPort()) + err = target.store.init() + require.NoError(t, err, "error while initializing target store") + + target.parser.On("ParseClass", mock.Anything).Return(nil) + target.indexer.On("TriggerSchemaUpdateCallbacks").Return() + target.indexer.On("RestoreClassDir", mock.Anything).Return(nil) + target.indexer.On("UpdateShardStatus", mock.Anything).Return(nil) + target.indexer.On("AddClass", mock.Anything).Return(nil) + + snapshotReader := io.NopCloser(bytes.NewReader(snapshotSink.Buffer.Bytes())) + err = target.store.Restore(snapshotReader) + require.NoError(t, err, "error while restoring snapshot") + + targetClass := target.store.SchemaReader().ReadOnlyClass(className) + require.NotNil(t, targetClass, "error while reading class") + + targetState, err := readShardingState(target.store.SchemaReader(), class.Class) + require.Nil(t, err) + require.NotNil(t, targetState, "Sharding state should be restored") + require.True(t, targetState.PartitioningEnabled, + "partitioning should still be enabled after restoring the snapshot") + require.Equal(t, int64(3), targetState.ReplicationFactor, + "replication factor should be 1 as a result of migrating a sharding state which is missing the replication factor") + }) + + t.Run("sharding state after snapshot restore with undefined replication factor", func(t *testing.T) { + source := NewMockStore(t, "snapshot-source-node", utils.MustGetFreeTCPPort()) + + source.parser.On("ParseClass", mock.Anything).Return(nil) + source.indexer.On("AddClass", mock.Anything).Return(nil) + 
source.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + className := "TestClass" + class := &models.Class{ + Class: className, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + + shardState := &sharding.State{ + IndexID: className, + Physical: map[string]sharding.Physical{ + "tenant1": { + Name: "tenant1", + BelongsToNodes: []string{source.cfg.NodeID, "another-node"}, // 2 replicas + Status: models.TenantActivityStatusHOT, + }, + }, + } + + createClassLog := raft.Log{ + Data: cmdAsBytes( + className, + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: class, + State: shardState, + }, nil), + } + + source.store.Apply(&createClassLog) + require.NotNil(t, source.store.SchemaReader().ReadOnlyClass(className), + "Class should be added") + + snapshotSink := &mocks.SnapshotSink{ + Buffer: bytes.NewBuffer(nil), + } + + snapshot, err := source.store.Snapshot() + require.NoError(t, err, "Error creating snapshot") + + err = snapshot.Persist(snapshotSink) + require.NoError(t, err, "Error persisting snapshot") + + target := NewMockStore(t, "snapshot-target-node", utils.MustGetFreeTCPPort()) + err = target.store.init() + require.NoError(t, err, "error while initializing target store") + + target.parser.On("ParseClass", mock.Anything).Return(nil) + target.indexer.On("TriggerSchemaUpdateCallbacks").Return() + target.indexer.On("RestoreClassDir", mock.Anything).Return(nil) + target.indexer.On("UpdateShardStatus", mock.Anything).Return(nil) + target.indexer.On("AddClass", mock.Anything).Return(nil) + + snapshotReader := io.NopCloser(bytes.NewReader(snapshotSink.Buffer.Bytes())) + err = target.store.Restore(snapshotReader) + require.NoError(t, err, "Error restoring snapshot") + + req := cmd.QueryReadOnlyClassesRequest{Classes: []string{className}} + subCommand, err := json.Marshal(&req) + require.NoErrorf(t, err, "Error marshaling subcommand") + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_SHARDING_STATE, + 
SubCommand: subCommand, + } + queryResp, err := target.store.Query(command) + require.NoErrorf(t, err, "error while querying class from restored shanpshot") + + resp := cmd.QueryReadOnlyClassResponse{} + err = json.Unmarshal(queryResp.Payload, &resp) + require.NoErrorf(t, err, "error while unmarshalling query response") + + for _, restoredClass := range resp.Classes { + require.Equal(t, int64(1), restoredClass.ReplicationConfig.Factor) + } + }) + + t.Run("sharding state after snapshot restore with default replication factor", func(t *testing.T) { + source := NewMockStore(t, "snapshot-source-node", utils.MustGetFreeTCPPort()) + + source.parser.On("ParseClass", mock.Anything).Return(nil) + source.indexer.On("AddClass", mock.Anything).Return(nil) + source.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + className := "TestClass" + class := &models.Class{ + Class: className, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + + shardState := &sharding.State{ + IndexID: className, + Physical: map[string]sharding.Physical{ + "tenant1": { + Name: "tenant1", + BelongsToNodes: []string{source.cfg.NodeID, "another-node"}, // 2 replicas + Status: models.TenantActivityStatusHOT, + }, + }, + ReplicationFactor: 0, + } + + createClassLog := raft.Log{ + Data: cmdAsBytes( + className, + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: class, + State: shardState, + }, nil), + } + + source.store.Apply(&createClassLog) + require.NotNil(t, source.store.SchemaReader().ReadOnlyClass(className), + "Class should be added") + + snapshotSink := &mocks.SnapshotSink{ + Buffer: bytes.NewBuffer(nil), + } + + snapshot, err := source.store.Snapshot() + require.NoError(t, err, "Error creating snapshot") + + err = snapshot.Persist(snapshotSink) + require.NoError(t, err, "Error persisting snapshot") + + target := NewMockStore(t, "snapshot-target-node", utils.MustGetFreeTCPPort()) + err = target.store.init() + require.NoError(t, err, "error while 
initializing target store") + + target.parser.On("ParseClass", mock.Anything).Return(nil) + target.indexer.On("TriggerSchemaUpdateCallbacks").Return() + target.indexer.On("RestoreClassDir", mock.Anything).Return(nil) + target.indexer.On("UpdateShardStatus", mock.Anything).Return(nil) + target.indexer.On("AddClass", mock.Anything).Return(nil) + + snapshotReader := io.NopCloser(bytes.NewReader(snapshotSink.Buffer.Bytes())) + err = target.store.Restore(snapshotReader) + require.NoError(t, err, "Error restoring snapshot") + + req := cmd.QueryReadOnlyClassesRequest{Classes: []string{className}} + subCommand, err := json.Marshal(&req) + require.NoErrorf(t, err, "Error marshaling subcommand") + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_SHARDING_STATE, + SubCommand: subCommand, + } + queryResp, err := target.store.Query(command) + require.NoErrorf(t, err, "error while querying class from restored shanpshot") + + resp := cmd.QueryReadOnlyClassResponse{} + err = json.Unmarshal(queryResp.Payload, &resp) + require.NoErrorf(t, err, "error while unmarshalling query response") + + for _, restoredClass := range resp.Classes { + require.Equal(t, int64(1), restoredClass.ReplicationConfig.Factor) + } + }) + + t.Run("sharding state after snapshot restore with non-default replication factor", func(t *testing.T) { + source := NewMockStore(t, "snapshot-source-node", utils.MustGetFreeTCPPort()) + + source.parser.On("ParseClass", mock.Anything).Return(nil) + source.indexer.On("AddClass", mock.Anything).Return(nil) + source.indexer.On("TriggerSchemaUpdateCallbacks").Return() + + className := "TestClass" + class := &models.Class{ + Class: className, + MultiTenancyConfig: &models.MultiTenancyConfig{ + Enabled: true, + }, + } + + shardState := &sharding.State{ + IndexID: className, + Physical: map[string]sharding.Physical{ + "tenant1": { + Name: "tenant1", + BelongsToNodes: []string{source.cfg.NodeID, "another-node"}, // 2 replicas + Status: 
models.TenantActivityStatusHOT, + }, + }, + ReplicationFactor: 3, + } + + createClassLog := raft.Log{ + Data: cmdAsBytes( + className, + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: class, + State: shardState, + }, nil), + } + + source.store.Apply(&createClassLog) + require.NotNil(t, source.store.SchemaReader().ReadOnlyClass(className), + "Class should be added") + + snapshotSink := &mocks.SnapshotSink{ + Buffer: bytes.NewBuffer(nil), + } + + snapshot, err := source.store.Snapshot() + require.NoError(t, err, "Error creating snapshot") + + err = snapshot.Persist(snapshotSink) + require.NoError(t, err, "Error persisting snapshot") + + target := NewMockStore(t, "snapshot-target-node", utils.MustGetFreeTCPPort()) + err = target.store.init() + require.NoError(t, err, "error while initializing target store") + + target.parser.On("ParseClass", mock.Anything).Return(nil) + target.indexer.On("TriggerSchemaUpdateCallbacks").Return() + target.indexer.On("RestoreClassDir", mock.Anything).Return(nil) + target.indexer.On("UpdateShardStatus", mock.Anything).Return(nil) + target.indexer.On("AddClass", mock.Anything).Return(nil) + + snapshotReader := io.NopCloser(bytes.NewReader(snapshotSink.Buffer.Bytes())) + err = target.store.Restore(snapshotReader) + require.NoError(t, err, "Error restoring snapshot") + + req := cmd.QueryReadOnlyClassesRequest{Classes: []string{className}} + subCommand, err := json.Marshal(&req) + require.NoErrorf(t, err, "Error marshaling subcommand") + command := &cmd.QueryRequest{ + Type: cmd.QueryRequest_TYPE_GET_SHARDING_STATE, + SubCommand: subCommand, + } + queryResp, err := target.store.Query(command) + require.NoErrorf(t, err, "error while querying class from restored shanpshot") + + resp := cmd.QueryReadOnlyClassResponse{} + err = json.Unmarshal(queryResp.Payload, &resp) + require.NoErrorf(t, err, "error while unmarshalling query response") + + for _, restoredClass := range resp.Classes { + require.Equal(t, int64(3), 
restoredClass.ReplicationConfig.Factor) + } + }) +} diff --git a/platform/dbops/binaries/weaviate-src/cluster/store_test.go b/platform/dbops/binaries/weaviate-src/cluster/store_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e6087e1ccce3411f9354f1685f448b33507e2564 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/cluster/store_test.go @@ -0,0 +1,1466 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package cluster + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "slices" + "testing" + "time" + + "github.com/hashicorp/raft" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + dto "github.com/prometheus/client_model/go" + "github.com/sirupsen/logrus" + logrustest "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + gproto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + cmd "github.com/weaviate/weaviate/cluster/proto/api" + "github.com/weaviate/weaviate/cluster/schema" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/cluster/mocks" + "github.com/weaviate/weaviate/usecases/fakes" + "github.com/weaviate/weaviate/usecases/sharding" +) + +var ( + errAny = errors.New("any error") + Anything = mock.Anything +) + +func TestStoreApply(t *testing.T) { + doFirst := func(m *MockStore) { + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + } + + cls := &models.Class{Class: "C1", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: true}} + ss := &sharding.State{Physical: 
map[string]sharding.Physical{"T1": { + Name: "T1", + BelongsToNodes: []string{"THIS"}, + }, "T2": { + Name: "T2", + BelongsToNodes: []string{"THIS"}, + }}} + + tests := []struct { + name string + req raft.Log + resp Response + doBefore func(*MockStore) + doAfter func(*MockStore) error + }{ + { + name: "AddClass/Unmarshal", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, + nil, &cmd.AddTenantsRequest{})}, + resp: Response{Error: schema.ErrBadRequest}, + doBefore: doFirst, + }, + { + name: "AddClass/StateIsNil", + req: raft.Log{Data: cmdAsBytes("C2", + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{Class: cls, State: nil}, + nil)}, + resp: Response{Error: schema.ErrBadRequest}, + doBefore: func(m *MockStore) { + m.indexer.On("Open", mock.Anything).Return(nil) + }, + }, + { + name: "AddClass/ParseClass", + req: raft.Log{Data: cmdAsBytes("C2", + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{Class: cls, State: ss}, + nil)}, + resp: Response{Error: schema.ErrBadRequest}, + doBefore: func(m *MockStore) { + m.indexer.On("Open", mock.Anything).Return(nil) + m.parser.On("ParseClass", mock.Anything).Return(errAny) + }, + }, + { + name: "AddClass/Success", + req: raft.Log{Data: cmdAsBytes("C1", + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{Class: cls, State: ss}, + nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + }, + doAfter: func(ms *MockStore) error { + class := ms.store.SchemaReader().ReadOnlyClass("C1") + if class == nil { + return fmt.Errorf("class is missing") + } + return nil + }, + }, + { + name: "AddClass/Success/MetadataOnly", + req: raft.Log{Data: cmdAsBytes("C1", + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{Class: cls, State: ss}, + nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + 
m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.store.cfg.MetadataOnlyVoters = true + }, + doAfter: func(ms *MockStore) error { + class := ms.store.SchemaReader().ReadOnlyClass("C1") + if class == nil { + return fmt.Errorf("class is missing") + } + return nil + }, + }, + { + name: "AddClass/Success/CatchingUp", + req: raft.Log{ + // Fake the index to higher than 0 as we are always applying the first log entry + Index: 2, + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + m.parser.On("ParseClass", mock.Anything).Return(nil) + // Set a high enough last applied index to fake applying a log entry when catching up + m.store.lastAppliedIndexToDB.Store(3) + }, + doAfter: func(ms *MockStore) error { + class := ms.store.SchemaReader().ReadOnlyClass("C1") + if class == nil { + return fmt.Errorf("class is missing") + } + return nil + }, + }, + { + name: "AddClass/DBError", + req: raft.Log{ + Index: 3, + Data: cmdAsBytes("C1", + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{Class: cls, State: ss}, + nil), + }, + resp: Response{Error: errAny}, + doBefore: func(ms *MockStore) { + doFirst(ms) + ms.indexer.On("AddClass", mock.Anything).Return(errAny) + }, + }, + { + name: "AddClass/AlreadyExists", + req: raft.Log{Data: cmdAsBytes("C1", + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{Class: cls, State: ss}, + nil)}, + resp: Response{Error: schema.ErrSchema}, + doBefore: func(m *MockStore) { + m.indexer.On("Open", mock.Anything).Return(nil) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + }, + }, + { + name: 
"RestoreClass/Success", + req: raft.Log{Data: cmdAsBytes("C1", + cmd.ApplyRequest_TYPE_RESTORE_CLASS, + cmd.AddClassRequest{Class: cls, State: ss}, + nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("RestoreClassDir", cls.Class).Return(nil) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + }, + doAfter: func(ms *MockStore) error { + class := ms.store.SchemaReader().ReadOnlyClass("C1") + if class == nil { + return fmt.Errorf("class is missing") + } + return nil + }, + }, + { + name: "UpdateClass/Unmarshal", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_UPDATE_CLASS, + nil, &cmd.AddTenantsRequest{})}, + resp: Response{Error: schema.ErrBadRequest}, + doBefore: doFirst, + }, + { + name: "UpdateClass/ClassNotFound", + req: raft.Log{Data: cmdAsBytes("C1", + cmd.ApplyRequest_TYPE_UPDATE_CLASS, + cmd.UpdateClassRequest{Class: cls, State: nil}, + nil)}, + resp: Response{Error: schema.ErrSchema}, + doBefore: func(m *MockStore) { + m.indexer.On("Open", mock.Anything).Return(nil) + m.parser.On("ParseClassUpdate", mock.Anything, mock.Anything).Return(mock.Anything, nil) + }, + }, + { + name: "UpdateClass/ParseUpdate", + req: raft.Log{Data: cmdAsBytes("C2", + cmd.ApplyRequest_TYPE_UPDATE_CLASS, + cmd.UpdateClassRequest{Class: cls, State: nil}, + nil)}, + resp: Response{Error: schema.ErrBadRequest}, + doBefore: func(m *MockStore) { + doFirst(m) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.indexer.On("Open", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + m.parser.On("ParseClassUpdate", mock.Anything, mock.Anything).Return(nil, errAny) + }, + }, + { + name: "UpdateClass/Success", + req: raft.Log{Data: cmdAsBytes("C1", + cmd.ApplyRequest_TYPE_UPDATE_CLASS, + 
cmd.UpdateClassRequest{Class: cls, State: nil}, + nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + doFirst(m) + m.indexer.On("Open", mock.Anything).Return(nil) + m.parser.On("ParseClassUpdate", mock.Anything, mock.Anything).Return(mock.Anything, nil) + m.indexer.On("UpdateClass", mock.Anything).Return(nil) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + }, + }, + { + name: "DeleteClass/Success/NoErrorDeletingReplications", + req: raft.Log{Data: cmdAsBytes("C1", + cmd.ApplyRequest_TYPE_DELETE_CLASS, nil, + nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + m.indexer.On("DeleteClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.replicationFSM.On("DeleteReplicationsByCollection", mock.Anything).Return(nil) + }, + doAfter: func(ms *MockStore) error { + class := ms.store.SchemaReader().ReadOnlyClass("C1") + if class != nil { + return fmt.Errorf("class still exists") + } + return nil + }, + }, + { + name: "DeleteClass/Success/ErrorDeletingReplications", + req: raft.Log{Data: cmdAsBytes("C1", + cmd.ApplyRequest_TYPE_DELETE_CLASS, nil, + nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + m.indexer.On("DeleteClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.replicationFSM.On("DeleteReplicationsByCollection", mock.Anything).Return(fmt.Errorf("any error")) + }, + doAfter: func(ms *MockStore) error { + class := ms.store.SchemaReader().ReadOnlyClass("C1") + if class != nil { + return fmt.Errorf("class still exists") + } + return nil + }, + }, + { + name: "AddProperty/Unmarshal", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_PROPERTY, + nil, &cmd.AddTenantsRequest{})}, + resp: Response{Error: 
schema.ErrBadRequest}, + doBefore: doFirst, + }, + { + name: "AddProperty/ClassNotFound", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_PROPERTY, + cmd.AddPropertyRequest{Properties: []*models.Property{{Name: "P1"}}}, nil)}, + resp: Response{Error: schema.ErrSchema}, + doBefore: doFirst, + }, + { + name: "AddProperty/Nil", + req: raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_PROPERTY, + cmd.AddPropertyRequest{Properties: nil}, nil), + }, + resp: Response{Error: schema.ErrBadRequest}, + doBefore: func(m *MockStore) { + doFirst(m) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + }, + }, + { + name: "AddProperty/Success", + req: raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_PROPERTY, + cmd.AddPropertyRequest{Properties: []*models.Property{{Name: "P1"}}}, nil), + }, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + doFirst(m) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + m.indexer.On("AddProperty", mock.Anything, mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + }, + doAfter: func(ms *MockStore) error { + class := ms.store.SchemaReader().ReadOnlyClass("C1") + if class == nil { + return fmt.Errorf("class not found") + } + + ok := false + for _, p := range class.Properties { + if p.Name == "P1" { + ok = true + break + } + } + if !ok { + return fmt.Errorf("property is missing") + } + return nil + }, + }, + { + name: "UpdateShard/Unmarshal", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_UPDATE_SHARD_STATUS, + nil, &cmd.AddTenantsRequest{})}, + resp: Response{Error: schema.ErrBadRequest}, + doBefore: doFirst, + }, + { + name: "UpdateShard/Success", + req: raft.Log{Data: 
cmdAsBytes("C1", cmd.ApplyRequest_TYPE_UPDATE_SHARD_STATUS, + cmd.UpdateShardStatusRequest{Class: "C1"}, nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("UpdateShardStatus", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + }, + }, + { + name: "AddTenant/Unmarshal", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_TENANT, cmd.AddClassRequest{}, nil)}, + resp: Response{Error: schema.ErrBadRequest}, + doBefore: doFirst, + }, + { + name: "AddTenant/ClassNotFound", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_TENANT, nil, &cmd.AddTenantsRequest{ + Tenants: []*cmd.Tenant{nil, {Name: "T1"}, nil}, + })}, + resp: Response{Error: schema.ErrSchema}, + doBefore: doFirst, + }, + { + name: "AddTenant/Success", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_TENANT, nil, &cmd.AddTenantsRequest{ + ClusterNodes: []string{"THIS"}, + Tenants: []*cmd.Tenant{nil, {Name: "T1"}, nil}, + })}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + doFirst(m) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{ + Class: cls, State: &sharding.State{ + Physical: map[string]sharding.Physical{"T1": {}}, + }, + }, nil), + }) + m.indexer.On("AddTenants", mock.Anything, mock.Anything).Return(nil) + }, + doAfter: func(ms *MockStore) error { + shardingState, err := readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if shardingState == nil { + return fmt.Errorf("sharding state not found") + } + if _, ok := shardingState.Physical["T1"]; !ok { + return fmt.Errorf("tenant is missing") + } + return nil + }, + }, + { + name: "UpdateTenant/Unmarshal", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_UPDATE_TENANT, cmd.AddClassRequest{}, nil)}, + resp: Response{Error: 
schema.ErrBadRequest}, + doBefore: doFirst, + }, + { + name: "UpdateTenant/ClassNotFound", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_UPDATE_TENANT, + nil, &cmd.UpdateTenantsRequest{Tenants: []*cmd.Tenant{nil, {Name: "T1"}, nil}})}, + resp: Response{Error: schema.ErrSchema}, + doBefore: doFirst, + }, + { + name: "UpdateTenant/NoFound", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_UPDATE_TENANT, + nil, &cmd.UpdateTenantsRequest{Tenants: []*cmd.Tenant{ + {Name: "T1", Status: models.TenantActivityStatusCOLD}, + }})}, + resp: Response{Error: schema.ErrSchema}, + doBefore: func(m *MockStore) { + ss := &sharding.State{Physical: map[string]sharding.Physical{}} + doFirst(m) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + }, + }, + { + name: "UpdateTenant/HasOngoingReplication/true", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_UPDATE_TENANT, + nil, &cmd.UpdateTenantsRequest{Tenants: []*cmd.Tenant{ + {Name: "T1", Status: models.TenantActivityStatusCOLD}, + }})}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + doFirst(m) + m.indexer.On("AddClass", mock.Anything).Return(nil) + ss := &sharding.State{Physical: map[string]sharding.Physical{"T1": { + Name: "T1", + BelongsToNodes: []string{"Node-1"}, + Status: models.TenantActivityStatusHOT, + }}} + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + m.replicationFSM.EXPECT().HasOngoingReplication("C1", "T1", "Node-1").Return(true) + m.indexer.On("UpdateTenants", mock.Anything, mock.Anything).Return(nil) + }, + doAfter: func(ms *MockStore) error { + want := map[string]sharding.Physical{"T1": { + Name: "T1", + BelongsToNodes: []string{"Node-1"}, + Status: models.TenantActivityStatusHOT, + }} + + shardingState, err := 
readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if got := shardingState.Physical; !reflect.DeepEqual(got, want) { + return fmt.Errorf("physical state want: %v got: %v", want, got) + } + return nil + }, + }, + { + name: "UpdateTenant/HasOngoingReplication/false", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_UPDATE_TENANT, + nil, &cmd.UpdateTenantsRequest{Tenants: []*cmd.Tenant{ + {Name: "T1", Status: models.TenantActivityStatusCOLD}, + }})}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + doFirst(m) + m.indexer.On("AddClass", mock.Anything).Return(nil) + ss := &sharding.State{Physical: map[string]sharding.Physical{"T1": { + Name: "T1", + BelongsToNodes: []string{"Node-1"}, + Status: models.TenantActivityStatusHOT, + }}} + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + m.replicationFSM.EXPECT().HasOngoingReplication("C1", "T1", "Node-1").Return(false) + m.indexer.On("UpdateTenants", mock.Anything, mock.Anything).Return(nil) + }, + doAfter: func(ms *MockStore) error { + want := map[string]sharding.Physical{"T1": { + Name: "T1", + BelongsToNodes: []string{"Node-1"}, + Status: models.TenantActivityStatusCOLD, + }} + + shardingState, err := readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if got := shardingState.Physical; !reflect.DeepEqual(got, want) { + return fmt.Errorf("physical state want: %v got: %v", want, got) + } + return nil + }, + }, + { + name: "UpdateTenant/Success", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_UPDATE_TENANT, + nil, &cmd.UpdateTenantsRequest{Tenants: []*cmd.Tenant{ + {Name: "T1", Status: models.TenantActivityStatusCOLD}, + {Name: "T2", Status: models.TenantActivityStatusCOLD}, + {Name: "T3", Status: models.TenantActivityStatusCOLD}, + }})}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + doFirst(m) + ss := &sharding.State{Physical: 
map[string]sharding.Physical{"T1": { + Name: "T1", + BelongsToNodes: []string{"THIS"}, + Status: models.TenantActivityStatusHOT, + }, "T2": { + Name: "T2", + BelongsToNodes: []string{"THIS"}, + Status: models.TenantActivityStatusCOLD, + }, "T3": { + Name: "T3", + BelongsToNodes: []string{"NODE-2"}, + Status: models.TenantActivityStatusHOT, + }}} + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + m.indexer.On("UpdateTenants", mock.Anything, mock.Anything).Return(nil) + m.replicationFSM.EXPECT().HasOngoingReplication(Anything, Anything, Anything).Return(false) + }, + doAfter: func(ms *MockStore) error { + want := map[string]sharding.Physical{"T1": { + Name: "T1", + BelongsToNodes: []string{"THIS"}, + Status: models.TenantActivityStatusCOLD, + }, "T2": { + Name: "T2", + BelongsToNodes: []string{"THIS"}, + Status: models.TenantActivityStatusCOLD, + }, "T3": { + Name: "T3", + BelongsToNodes: []string{"NODE-2"}, + Status: models.TenantActivityStatusCOLD, + }} + + shardingState, err := readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if got := shardingState.Physical; !reflect.DeepEqual(got, want) { + return fmt.Errorf("physical state want: %v got: %v", want, got) + } + return nil + }, + }, + { + name: "DeleteTenant/Unmarshal", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_DELETE_TENANT, cmd.AddClassRequest{}, nil)}, + resp: Response{Error: schema.ErrBadRequest}, + doBefore: doFirst, + }, + { + name: "DeleteTenant/ClassNotFound", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_DELETE_TENANT, + nil, &cmd.DeleteTenantsRequest{Tenants: []string{"T1", "T2"}})}, + resp: Response{Error: schema.ErrSchema}, + doBefore: func(m *MockStore) { + doFirst(m) + }, + }, + { + name: "DeleteTenant/Success/NoErrorDeletingReplications", + req: raft.Log{Data: cmdAsBytes("C1", 
cmd.ApplyRequest_TYPE_DELETE_TENANT, + nil, &cmd.DeleteTenantsRequest{Tenants: []string{"T1", "T2"}})}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + doFirst(m) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{ + Class: cls, State: &sharding.State{ + Physical: map[string]sharding.Physical{"T1": {}}, + }, + }, nil), + }) + m.indexer.On("DeleteTenants", mock.Anything, mock.Anything).Return(nil) + m.replicationFSM.On("DeleteReplicationsByTenants", mock.Anything, mock.Anything).Return(nil) + }, + doAfter: func(ms *MockStore) error { + shardingState, err := readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if len(shardingState.Physical) != 0 { + return fmt.Errorf("sharding state mus be empty after deletion") + } + return nil + }, + }, + { + name: "DeleteTenant/Success/ErrorDeletingReplications", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_DELETE_TENANT, + nil, &cmd.DeleteTenantsRequest{Tenants: []string{"T1", "T2"}})}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + doFirst(m) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{ + Class: cls, State: &sharding.State{ + Physical: map[string]sharding.Physical{"T1": {}}, + }, + }, nil), + }) + m.indexer.On("DeleteTenants", mock.Anything, mock.Anything).Return(nil) + m.replicationFSM.On("DeleteReplicationsByTenants", mock.Anything, mock.Anything).Return(fmt.Errorf("any error")) + }, + doAfter: func(ms *MockStore) error { + shardingState, err := readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if len(shardingState.Physical) != 0 { + return fmt.Errorf("sharding state mus be empty after deletion") + } + return nil + }, + }, + { + name: "DeleteReplicaFromShard/Success/UpdateDB", + req: raft.Log{Data: cmdAsBytes("C1", 
cmd.ApplyRequest_TYPE_DELETE_REPLICA_FROM_SHARD, cmd.DeleteReplicaFromShard{Class: "C1", Shard: "T1", TargetNode: "Node-1"}, nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + ss := &sharding.State{Physical: map[string]sharding.Physical{"T1": { + Name: "T1", + BelongsToNodes: []string{"Node-1", "Node-2"}, + }}, ReplicationFactor: 1} + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.indexer.On("DeleteReplicaFromShard", mock.Anything, mock.Anything, mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + }, + doAfter: func(ms *MockStore) error { + replicas, err := ms.store.SchemaReader().ShardReplicas("C1", "T1") + if err != nil { + return err + } + if len(replicas) != 1 { + return fmt.Errorf("sharding state should have 1 shard for class C1 after deleting a shard") + } + + return nil + }, + }, + { + name: "DeleteReplicaFromShard/Success/NotUpdateDB", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_DELETE_REPLICA_FROM_SHARD, cmd.DeleteReplicaFromShard{Class: "C1", Shard: "T1", TargetNode: "Node-2"}, nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + ss := &sharding.State{Physical: map[string]sharding.Physical{"T1": { + Name: "T1", + BelongsToNodes: []string{"Node-2", "Node-3"}, + }}, ReplicationFactor: 1} + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + }, + doAfter: func(ms *MockStore) error { + replicas, err := ms.store.SchemaReader().ShardReplicas("C1", "T1") + if err != nil { + return err + } + if len(replicas) != 1 { + return 
fmt.Errorf("sharding state should have 1 shard for class C1 after deleting a shard") + } + + return nil + }, + }, + { + name: "DeleteReplicaFromShard/Fail/ClassNotFound", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_DELETE_REPLICA_FROM_SHARD, cmd.DeleteReplicaFromShard{Class: "C1", Shard: "T1", TargetNode: "Node-2"}, nil)}, + resp: Response{Error: schema.ErrSchema}, + }, + { + name: "DeleteReplicaFromShard/Fail/ShardNotFound", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_DELETE_REPLICA_FROM_SHARD, cmd.DeleteReplicaFromShard{Class: "C1", Shard: "T1", TargetNode: "Node-2"}, nil)}, + resp: Response{Error: schema.ErrSchema}, + doBefore: func(m *MockStore) { + ss := &sharding.State{Physical: map[string]sharding.Physical{"T2": { + Name: "T2", + BelongsToNodes: []string{"Node-2"}, + }}} + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + }, + doAfter: func(ms *MockStore) error { + replicas, err := ms.store.SchemaReader().ShardReplicas("C1", "T2") + if err != nil { + return err + } + if len(replicas) != 1 { + return fmt.Errorf("sharding state should have 1 shard for class C1") + } + + return nil + }, + }, + { + name: "DeleteReplicaFromShard/Fail/BelowMinimumReplicationFactor/SingleReplica", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_DELETE_REPLICA_FROM_SHARD, cmd.DeleteReplicaFromShard{Class: "C1", Shard: "T2", TargetNode: "Node-1"}, nil)}, + resp: Response{Error: schema.ErrSchema}, // Expect an error + doBefore: func(m *MockStore) { + ss := &sharding.State{ + Physical: map[string]sharding.Physical{"T2": { + Name: "T2", + BelongsToNodes: []string{"Node-1"}, + }}, + // ReplicationFactor will be migrated to 1 as the default minimum + } + m.parser.On("ParseClass", 
mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + }, + doAfter: func(ms *MockStore) error { + replicas, err := ms.store.SchemaReader().ShardReplicas("C1", "T2") + if err != nil { + return err + } + if len(replicas) != 1 { + return fmt.Errorf("sharding state should still have 1 replica for class C1, shard T2") + } + + shardingState, err := readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if shardingState.ReplicationFactor != 1 { + return fmt.Errorf("replication factor should be 1, got %d", shardingState.ReplicationFactor) + } + + return nil + }, + }, + { + name: "DeleteReplicaFromShard/Success/AboveMinimumReplicationFactor/DefaultReplicationFactor", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_DELETE_REPLICA_FROM_SHARD, cmd.DeleteReplicaFromShard{Class: "C1", Shard: "T2", TargetNode: "Node-2"}, nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + ss := &sharding.State{ + Physical: map[string]sharding.Physical{"T2": { + Name: "T2", + BelongsToNodes: []string{"Node-1", "Node-2", "Node-3"}, + }}, + // ReplicationFactor will be migrated to 1 as the default minimum + } + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + }, + doAfter: func(ms *MockStore) error { + replicas, err := ms.store.SchemaReader().ShardReplicas("C1", "T2") + if err != nil { + return err + } + if len(replicas) != 2 { + return fmt.Errorf("sharding state should have 2 replicas after deletion, got %d", len(replicas)) + } + + shardingState, err := 
readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if shardingState.ReplicationFactor != 1 { + return fmt.Errorf("replication factor should be 1, got %d", shardingState.ReplicationFactor) + } + + return nil + }, + }, + { + name: "DeleteReplicaFromShard/Fail/BelowCustomReplicationFactor", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_DELETE_REPLICA_FROM_SHARD, cmd.DeleteReplicaFromShard{Class: "C1", Shard: "T2", TargetNode: "Node-2"}, nil)}, + resp: Response{Error: schema.ErrSchema}, + doBefore: func(m *MockStore) { + ss := &sharding.State{ + Physical: map[string]sharding.Physical{"T2": { + Name: "T2", + BelongsToNodes: []string{"Node-1", "Node-2"}, + }}, + ReplicationFactor: 2, + } + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + }, + doAfter: func(ms *MockStore) error { + replicas, err := ms.store.SchemaReader().ShardReplicas("C1", "T2") + if err != nil { + return err + } + if len(replicas) != 2 { + return fmt.Errorf("sharding state should still have 2 replicas for class C1, shard T2") + } + + shardingState, err := readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if shardingState.ReplicationFactor != 2 { + return fmt.Errorf("replication factor should be 2, got %d", shardingState.ReplicationFactor) + } + + return nil + }, + }, + { + name: "DeleteReplicaFromShard/Success/AboveCustomReplicationFactor", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_DELETE_REPLICA_FROM_SHARD, cmd.DeleteReplicaFromShard{Class: "C1", Shard: "T2", TargetNode: "Node-3"}, nil)}, + resp: Response{Error: nil}, // Should succeed + doBefore: func(m *MockStore) { + ss := &sharding.State{ + Physical: map[string]sharding.Physical{"T2": { + Name: "T2", + BelongsToNodes: 
[]string{"Node-1", "Node-2", "Node-3", "Node-4"}, + }}, + ReplicationFactor: 3, + } + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + }, + doAfter: func(ms *MockStore) error { + replicas, err := ms.store.SchemaReader().ShardReplicas("C1", "T2") + if err != nil { + return err + } + if len(replicas) != 3 { + return fmt.Errorf("sharding state should have 3 replicas after deletion, got %d", len(replicas)) + } + + shardingState, err := readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if shardingState.ReplicationFactor != 3 { + return fmt.Errorf("replication factor should be 3, got %d", shardingState.ReplicationFactor) + } + + return nil + }, + }, + { + name: "AddReplicaToShard/Success/UpdateDB", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_REPLICA_TO_SHARD, cmd.AddReplicaToShard{Class: "C1", Shard: "T1", TargetNode: "Node-1"}, nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.indexer.On("AddReplicaToShard", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + }, + doAfter: func(ms *MockStore) error { + replicas, err := ms.store.SchemaReader().ShardReplicas("C1", "T1") + if err != nil { + return err + } + if len(replicas) != 2 { + return fmt.Errorf("sharding state should have 2 shards for class C1") + } + if !slices.Contains(replicas, "THIS") || !slices.Contains(replicas, "Node-1") { + return fmt.Errorf("replias for coll C1 shard T1 
is missing the correct replicas got=%v want=[\"THIS\", \"Node-1\"]", replicas) + } + + return nil + }, + }, + { + name: "AddReplicaToShard/Success/NotUpdateDB", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_REPLICA_TO_SHARD, cmd.AddReplicaToShard{Class: "C1", Shard: "T1", TargetNode: "Node-3"}, nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + }, + doAfter: func(ms *MockStore) error { + replicas, err := ms.store.SchemaReader().ShardReplicas("C1", "T1") + if err != nil { + return err + } + if len(replicas) != 2 { + return fmt.Errorf("sharding state should have 2 shards for class C1") + } + if !slices.Contains(replicas, "THIS") || !slices.Contains(replicas, "Node-3") { + return fmt.Errorf("replias for coll C1 shard T1 is missing the correct replicas got=%v want=[\"THIS\", \"Node-3\"]", replicas) + } + + return nil + }, + }, + { + name: "AddReplicaToShard/FailClassNotFound", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_REPLICA_TO_SHARD, cmd.AddReplicaToShard{Class: "C1", Shard: "T1", TargetNode: "Node-3"}, nil)}, + resp: Response{Error: schema.ErrSchema}, + }, + { + name: "AddReplicaToShard/FailShardNotFound", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_REPLICA_TO_SHARD, cmd.AddReplicaToShard{Class: "C1", Shard: "T1000", TargetNode: "Node-3"}, nil)}, + resp: Response{Error: schema.ErrSchema}, + doBefore: func(m *MockStore) { + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, 
cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + }, + }, + { + name: "AddReplicaToShard/FailReplicaAlreadyExists", + req: raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_REPLICA_TO_SHARD, cmd.AddReplicaToShard{Class: "C1", Shard: "T1", TargetNode: "THIS"}, nil)}, + resp: Response{Error: schema.ErrSchema}, + doBefore: func(m *MockStore) { + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.store.Apply(&raft.Log{ + Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, cmd.AddClassRequest{Class: cls, State: ss}, nil), + }) + }, + }, + { + name: "AddClass/MigrateReplicationFactor/Uninitialized", + req: raft.Log{Data: cmdAsBytes("C1", + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: cls, + State: &sharding.State{ + IndexID: "C1", + Physical: map[string]sharding.Physical{ + "T1": { + Name: "T1", + BelongsToNodes: []string{"THIS", "THAT"}, + Status: models.TenantActivityStatusHOT, + }, + }, + // ReplicationFactor intentionally not set (uninitialized) + }, + }, + nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + }, + doAfter: func(ms *MockStore) error { + class := ms.store.SchemaReader().ReadOnlyClass("C1") + if class == nil { + return fmt.Errorf("class is missing") + } + + shardingState, err := readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if shardingState == nil { + return fmt.Errorf("sharding state is missing") + } + + if shardingState.ReplicationFactor != 1 { + return fmt.Errorf("replication factor not properly migrated, expected 1, got %d", + shardingState.ReplicationFactor) + } + + for tenantName, tenant := range shardingState.Physical { + if len(tenant.BelongsToNodes) != 2 { + return fmt.Errorf("tenant 
%s should have 2 replicas, got %d", + tenantName, len(tenant.BelongsToNodes)) + } + } + + return nil + }, + }, + { + name: "AddClass/MigrateReplicationFactor/ExplicitZero", + req: raft.Log{Data: cmdAsBytes("C1", + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: cls, + State: &sharding.State{ + IndexID: "C1", + Physical: map[string]sharding.Physical{ + "T1": { + Name: "T1", + BelongsToNodes: []string{"THIS", "THAT", "ANOTHER"}, + Status: models.TenantActivityStatusHOT, + }, + }, + ReplicationFactor: 0, + }, + }, + nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + }, + doAfter: func(ms *MockStore) error { + class := ms.store.SchemaReader().ReadOnlyClass("C1") + if class == nil { + return fmt.Errorf("class is missing") + } + + shardingState, err := readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if shardingState == nil { + return fmt.Errorf("sharding state is missing") + } + + if shardingState.ReplicationFactor != 1 { + return fmt.Errorf("replication factor not properly migrated, expected 1, got %d", + shardingState.ReplicationFactor) + } + + for tenantName, tenant := range shardingState.Physical { + if len(tenant.BelongsToNodes) != 3 { + return fmt.Errorf("tenant %s should have 3 replicas, got %d", + tenantName, len(tenant.BelongsToNodes)) + } + } + + return nil + }, + }, + { + name: "AddClass/MigrateReplicationFactor/Partitioned", + req: raft.Log{Data: cmdAsBytes("C1", + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: cls, + State: &sharding.State{ + IndexID: "C1", + Physical: map[string]sharding.Physical{}, + PartitioningEnabled: true, + // ReplicationFactor intentionally not set (uninitialized) + }, + }, + nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + m.parser.On("ParseClass", 
mock.Anything).Return(nil) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + }, + doAfter: func(ms *MockStore) error { + class := ms.store.SchemaReader().ReadOnlyClass("C1") + if class == nil { + return fmt.Errorf("class is missing") + } + + shardingState, err := readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if shardingState == nil { + return fmt.Errorf("sharding state is missing") + } + + if shardingState.ReplicationFactor != 1 { + return fmt.Errorf("replication factor for partitioned state not properly migrated, expected 1, got %d", + shardingState.ReplicationFactor) + } + + if !shardingState.PartitioningEnabled { + return fmt.Errorf("partitioning should still be enabled") + } + + return nil + }, + }, + { + name: "AddClass/PreserveReplicationFactor/NonDefault", + req: raft.Log{Data: cmdAsBytes("C1", + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{ + Class: cls, + State: &sharding.State{ + IndexID: "C1", + Physical: map[string]sharding.Physical{ + "T1": { + Name: "T1", + BelongsToNodes: []string{"THIS", "THAT"}, + Status: models.TenantActivityStatusHOT, + }, + }, + ReplicationFactor: 5, + }, + }, + nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + }, + doAfter: func(ms *MockStore) error { + class := ms.store.SchemaReader().ReadOnlyClass("C1") + if class == nil { + return fmt.Errorf("class is missing") + } + + shardingState, err := readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if shardingState == nil { + return fmt.Errorf("sharding state is missing") + } + + if shardingState.ReplicationFactor != 5 { + return fmt.Errorf("non-default replication factor not preserved, expected 5, got %d", + shardingState.ReplicationFactor) + } + + return nil + }, + }, + { + 
name: "RestoreClass/MigrateReplicationFactor/Uninitialized", + req: raft.Log{Data: cmdAsBytes("C1", + cmd.ApplyRequest_TYPE_RESTORE_CLASS, + cmd.AddClassRequest{ + Class: cls, + State: &sharding.State{ + IndexID: "C1", + Physical: map[string]sharding.Physical{ + "T1": { + Name: "T1", + BelongsToNodes: []string{"THIS", "THAT"}, + Status: models.TenantActivityStatusHOT, + }, + }, + // ReplicationFactor intentionally not set (uninitialized) + }, + }, + nil)}, + resp: Response{Error: nil}, + doBefore: func(m *MockStore) { + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("RestoreClassDir", cls.Class).Return(nil) + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + }, + doAfter: func(ms *MockStore) error { + class := ms.store.SchemaReader().ReadOnlyClass("C1") + if class == nil { + return fmt.Errorf("class is missing") + } + + shardingState, err := readShardingState(ms.store.SchemaReader(), "C1") + require.Nil(t, err) + if shardingState == nil { + return fmt.Errorf("sharding state is missing") + } + + if shardingState.ReplicationFactor != 1 { + return fmt.Errorf("replication factor not properly migrated during restore, expected 1, got %d", + shardingState.ReplicationFactor) + } + + return nil + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + m := NewMockStore(t, "Node-1", 9091) + store := m.Store(tc.doBefore) + ret := store.Apply(&tc.req) + resp, ok := ret.(Response) + if !ok { + t.Errorf("%s: response has wrong type", tc.name) + } + if got, want := resp.Error, tc.resp.Error; want != nil { + if !errors.Is(resp.Error, tc.resp.Error) { + t.Errorf("%s: error want: %v got: %v", tc.name, want, got) + } + } else if got != nil { + t.Errorf("%s: error want: nil got: %v", tc.name, got) + } + if tc.doAfter != nil { + if err := tc.doAfter(&m); err != nil { + t.Errorf("%s check updates: %v", tc.name, err) + } + m.indexer.AssertExpectations(t) + 
m.parser.AssertExpectations(t) + m.replicationFSM.AssertExpectations(t) + } + }) + } +} + +func TestStoreMetrics(t *testing.T) { + t.Run("store_apply_duration", func(t *testing.T) { + doBefore := func(m *MockStore) { + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + } + nodeID := t.Name() + cls := &models.Class{Class: "C1", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: true}} + ss := &sharding.State{Physical: map[string]sharding.Physical{"T1": { + Name: "T1", + BelongsToNodes: []string{"THIS"}, + }, "T2": { + Name: "T2", + BelongsToNodes: []string{"THIS"}, + }}} + ms := NewMockStore(t, nodeID, 9092) + store := ms.Store(doBefore) + m := dto.Metric{} + require.NoError(t, store.metrics.applyDuration.Write(&m)) + // before + assert.Equal(t, 0, int(*m.Histogram.SampleCount)) + store.Apply( + &raft.Log{ + Data: cmdAsBytes("CI", + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{Class: cls, State: ss}, nil), + }, + ) + // after + require.NoError(t, store.metrics.applyDuration.Write(&m)) + assert.Equal(t, 1, int(*m.Histogram.SampleCount)) + assert.Equal(t, 0, int(testutil.ToFloat64(store.metrics.applyFailures))) + }) + t.Run("fsm_last_applied_index", func(t *testing.T) { + appliedIndex := 34 // after successful apply, this node should have 34 as last applied index metric + + doBefore := func(m *MockStore) { + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + } + nodeID := t.Name() + cls := &models.Class{Class: "C1", MultiTenancyConfig: &models.MultiTenancyConfig{Enabled: true}} + ss := &sharding.State{Physical: map[string]sharding.Physical{"T1": { + Name: "T1", + BelongsToNodes: []string{"THIS"}, + }, "T2": { + Name: "T2", + BelongsToNodes: []string{"THIS"}, + }}} + ms := NewMockStore(t, nodeID, 9092) + store := 
ms.Store(doBefore) + + // before + require.Equal(t, 0, int(testutil.ToFloat64(store.metrics.fsmLastAppliedIndex))) + require.Equal(t, 0, int(testutil.ToFloat64(store.metrics.raftLastAppliedIndex))) + + store.Apply( + &raft.Log{ + Index: uint64(appliedIndex), + Data: cmdAsBytes("CI", + cmd.ApplyRequest_TYPE_ADD_CLASS, + cmd.AddClassRequest{Class: cls, State: ss}, nil), + }, + ) + // after + require.Equal(t, appliedIndex, int(testutil.ToFloat64(store.metrics.fsmLastAppliedIndex))) + require.Equal(t, appliedIndex, int(testutil.ToFloat64(store.metrics.raftLastAppliedIndex))) + }) + + t.Run("last_applied_index on Configuration LogType", func(t *testing.T) { + appliedIndex := 34 // after successful apply, this node should have 34 as last applied index metric + + doBefore := func(m *MockStore) { + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + } + nodeID := t.Name() + + ms := NewMockStore(t, nodeID, 9092) + store := ms.Store(doBefore) + + // before + require.Equal(t, 0, int(testutil.ToFloat64(store.metrics.fsmLastAppliedIndex))) + require.Equal(t, 0, int(testutil.ToFloat64(store.metrics.raftLastAppliedIndex))) + + store.StoreConfiguration(uint64(appliedIndex), raft.Configuration{}) + + // after + require.Equal(t, 0, int(testutil.ToFloat64(store.metrics.fsmLastAppliedIndex))) // fsm index should staty the same because it counts non-config commands. 
+ require.Equal(t, appliedIndex, int(testutil.ToFloat64(store.metrics.raftLastAppliedIndex))) + }) + + t.Run("apply_failures", func(t *testing.T) { + doBefore := func(m *MockStore) { + m.indexer.On("AddClass", mock.Anything).Return(nil) + m.parser.On("ParseClass", mock.Anything).Return(nil) + m.indexer.On("TriggerSchemaUpdateCallbacks").Return() + } + + nodeID := t.Name() + ms := NewMockStore(t, nodeID, 9092) + store := ms.Store(doBefore) + + // before + require.Equal(t, 0, int(testutil.ToFloat64(store.metrics.applyFailures))) + + // this apply will trigger failure with BadRequest as we pass empty (nil) AddClassRequest. + store.Apply( + &raft.Log{Data: cmdAsBytes("C1", cmd.ApplyRequest_TYPE_ADD_CLASS, + nil, &cmd.AddTenantsRequest{})}, + ) + // after + require.Equal(t, 1, int(testutil.ToFloat64(store.metrics.applyFailures))) + }) +} + +type MockStore struct { + indexer *fakes.MockSchemaExecutor + parser *fakes.MockParser + logger *logrus.Logger + cfg Config + store *Store + replicationFSM *schema.MockreplicationFSM +} + +func NewMockStore(t *testing.T, nodeID string, raftPort int) MockStore { + indexer := fakes.NewMockSchemaExecutor() + parser := fakes.NewMockParser() + logger, _ := logrustest.NewNullLogger() + ms := MockStore{ + indexer: indexer, + parser: parser, + logger: logger, + cfg: Config{ + WorkDir: t.TempDir(), + NodeID: nodeID, + Host: "localhost", + RaftPort: raftPort, + Voter: true, + BootstrapExpect: 1, + HeartbeatTimeout: 1 * time.Second, + ElectionTimeout: 1 * time.Second, + SnapshotInterval: 2 * time.Second, + SnapshotThreshold: 125, + DB: indexer, + Parser: parser, + NodeSelector: mocks.NewMockNodeSelector("localhost"), + Logger: logger, + ConsistencyWaitTimeout: time.Millisecond * 50, + }, + replicationFSM: schema.NewMockreplicationFSM(t), + } + + s := NewFSM(ms.cfg, nil, nil, prometheus.NewPedanticRegistry()) + s.schemaManager.SetReplicationFSM(ms.replicationFSM) + ms.store = &s + return ms +} + +func (m *MockStore) Store(doBefore 
func(*MockStore)) *Store { + if doBefore != nil { + doBefore(m) + } + return m.store +} + +// Runs the provided function `predicate` up to `n` times, sleeping `sleepDuration` between each +// function call until `f` returns true or returns false if all `n` calls return false. +// Useful in tests which require an unknown but bounded delay where the component under test has +// a way to indicate when it's ready to proceed. +func tryNTimesWithWait(n int, sleepDuration time.Duration, predicate func() bool) bool { + for i := 0; i < n; i++ { + if predicate() { + return true + } + time.Sleep(sleepDuration) + } + return false +} + +func cmdAsBytes(class string, + cmdType cmd.ApplyRequest_Type, + jsonSubCmd interface{}, + rpcSubCmd protoreflect.ProtoMessage, +) []byte { + var ( + subData []byte + err error + ) + if rpcSubCmd != nil { + subData, err = gproto.Marshal(rpcSubCmd) + if err != nil { + panic("proto.Marshal: " + err.Error()) + } + } else if jsonSubCmd != nil { + subData, err = json.Marshal(jsonSubCmd) + if err != nil { + panic("json.Marshal( " + err.Error()) + } + } + + cmd := cmd.ApplyRequest{ + Type: cmdType, + Class: class, + SubCommand: subData, + } + data, err := gproto.Marshal(&cmd) + if err != nil { + panic(err) + } + + return data +} diff --git a/platform/dbops/binaries/weaviate-src/deprecations/data.go b/platform/dbops/binaries/weaviate-src/deprecations/data.go new file mode 100644 index 0000000000000000000000000000000000000000..53666afa502e86d11ef79b2844b3491ba39f870a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/deprecations/data.go @@ -0,0 +1,109 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// Code generated by go generate; DO NOT EDIT. 
+// This file was generated by go generate ./deprecations +package deprecations + +import ( + "time" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/models" +) + +func timeMust(t time.Time, err error) strfmt.DateTime { + if err != nil { + panic(err) + } + + return strfmt.DateTime(t) +} + +func timeMustPtr(t time.Time, err error) *strfmt.DateTime { + if err != nil { + panic(err) + } + + parsed := strfmt.DateTime(t) + return &parsed +} + +func ptString(in string) *string { + return &in +} + +var ByID = map[string]models.Deprecation{ + "rest-meta-prop": { + ID: "rest-meta-prop", + Locations: []string{ + "GET /v1/thing/{id}", + "GET /v1/things", + "GET /v1/action/{id}", + "GET /v1/actions", + }, + Status: "deprecated", + APIType: "REST", + Mitigation: "Use ?include=, e.g. ?include=_classification for classification meta or ?include=_vector to show the vector position or ?include=_classification,_vector for both. When consuming the response use the additional fields such as _vector, as the meta object in the response, such as meta.vector will be removed.", + Msg: "use of deprecated property ?meta=true/false", + SinceVersion: "0.22.8", + SinceTime: timeMust(time.Parse(time.RFC3339, "2020-06-15T16:18:06.000Z")), + RemovedIn: ptString("0.23.0"), + RemovedTime: timeMustPtr(time.Parse(time.RFC3339, "2020-06-15T16:18:06.000Z")), + }, + "config-files": { + ID: "config-files", + Locations: []string{ + "--config-file=\"\"", + }, + Status: "deprecated", + APIType: "Configuration", + Mitigation: "Configure Weaviate using environment variables.", + Msg: "use of deprecated command line argument --config-file", + SinceVersion: "0.22.16", + SinceTime: timeMust(time.Parse(time.RFC3339, "2020-09-08T09:46:00.000Z")), + }, + "cardinality": { + ID: "cardinality", + Locations: []string{ + "GET /v1/schema", + "POST /v1/schema/things", + "POST /v1/schema/actions", + "POST /v1/schema/things/{className}/properties", + "POST /v1/schema/actions/{className}/properties", 
+ }, + Status: "deprecated", + APIType: "REST", + Mitigation: "Omit this field. Starting in 0.22.7 it no longer has any effect.", + Msg: "use of deprecated property option 'cardinality'", + SinceVersion: "0.22.17", + SinceTime: timeMust(time.Parse(time.RFC3339, "2020-09-16T09:06:00.000Z")), + RemovedIn: ptString("0.23.0"), + RemovedTime: timeMustPtr(time.Parse(time.RFC3339, "2020-09-16T09:06:00.000Z")), + }, + "ref-meta-deprecated-fields": { + ID: "ref-meta-deprecated-fields", + Locations: []string{ + "GET /v1/thing/{id}", + "GET /v1/things", + "GET /v1/action/{id}", + "GET /v1/actions", + }, + Status: "deprecated", + APIType: "REST", + Mitigation: "when using _classification the reference meta after a successful\nclassification contains various counts and distances. Starting in 0.22.20\nthe fields winningDistance and losingDistance are considered deprecated.\nNew fields were added and they have more descriptive names. User\nmeanWinningDistance instead of winningDistance and use meanLosingDistance\ninstead of losingDistance", + Msg: "response contains deprecated fields winningDistance and losingDistance", + SinceVersion: "0.22.20", + SinceTime: timeMust(time.Parse(time.RFC3339, "2020-11-26T14:58:00.000Z")), + RemovedIn: ptString("0.23.0"), + RemovedTime: timeMustPtr(time.Parse(time.RFC3339, "2020-11-26T14:58:00.000Z")), + }, +} diff --git a/platform/dbops/binaries/weaviate-src/deprecations/deprecations.yml b/platform/dbops/binaries/weaviate-src/deprecations/deprecations.yml new file mode 100644 index 0000000000000000000000000000000000000000..2312520103202d141f72825a7a8f8040ce70af72 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/deprecations/deprecations.yml @@ -0,0 +1,65 @@ +deprecations: + - id: rest-meta-prop + status: deprecated # switch to removed once feature is completely removed + apiType: REST + locations: + - GET /v1/thing/{id} + - GET /v1/things + - GET /v1/action/{id} + - GET /v1/actions + msg: "use of deprecated property ?meta=true/false" + 
mitigation: "Use ?include=, e.g. ?include=_classification for classification meta or ?include=_vector to show the vector position or ?include=_classification,_vector for both. When consuming the response use the additional fields such as _vector, as the meta object in the response, such as meta.vector will be removed." + sinceVersion: "0.22.8" + sinceTime: "2020-06-15T16:18:06+00:00" + plannedRemovalVersion: "0.23.0" + removedIn: "0.23.0" + removedTime: "2020-12-18T18:00:00+00:00" + - id: config-files + status: deprecated # switch to removed once feature is completely removed + apiType: Configuration + locations: + - --config-file="" + msg: "use of deprecated command line argument --config-file" + mitigation: "Configure Weaviate using environment variables." + sinceVersion: "0.22.16" + sinceTime: "2020-09-08T09:46:00+00:00" + plannedRemovalVersion: "0.23.0" + removedIn: null + removedTime: null + - id: cardinality + status: deprecated # switch to removed once feature is completely removed + apiType: REST + locations: + - GET /v1/schema + - POST /v1/schema/things + - POST /v1/schema/actions + - POST /v1/schema/things/{className}/properties + - POST /v1/schema/actions/{className}/properties + msg: "use of deprecated property option 'cardinality'" + mitigation: "Omit this field. Starting in 0.22.7 it no longer has any effect." + sinceVersion: "0.22.17" + sinceTime: "2020-09-16T09:06:00+00:00" + plannedRemovalVersion: "0.23.0" + removedIn: "0.23.0" + removedTime: "2020-12-18T18:00:00+00:00" + - id: ref-meta-deprecated-fields + status: deprecated # switch to removed once feature is completely removed + apiType: REST + locations: + - GET /v1/thing/{id} + - GET /v1/things + - GET /v1/action/{id} + - GET /v1/actions + msg: "response contains deprecated fields winningDistance and losingDistance" + mitigation: |- + when using _classification the reference meta after a successful + classification contains various counts and distances. 
Starting in 0.22.20 + the fields winningDistance and losingDistance are considered deprecated. + New fields were added and they have more descriptive names. User + meanWinningDistance instead of winningDistance and use meanLosingDistance + instead of losingDistance + sinceVersion: "0.22.20" + sinceTime: "2020-11-26T14:58:00+00:00" + plannedRemovalVersion: "0.23.0" + removedIn: "0.23.0" + removedTime: "2020-12-18T18:00:00+00:00" diff --git a/platform/dbops/binaries/weaviate-src/deprecations/gen.go b/platform/dbops/binaries/weaviate-src/deprecations/gen.go new file mode 100644 index 0000000000000000000000000000000000000000..940d53e97dc226446de7081c0bb0998528c45671 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/deprecations/gen.go @@ -0,0 +1,176 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +//go:build ignore +// +build ignore + +// The following directive is necessary to make the package coherent: +// This program generates data.go. 
+// +// + +package main + +import ( + "fmt" + "log" + "os" + "text/template" + "time" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/models" + "gopkg.in/yaml.v3" +) + +func main() { + fmt.Println("Generating deprecations code") + + fd, err := os.Open("deprecations.yml") + fatal(err) + defer fd.Close() + + var deprecations struct { + // yaml tags not working on the go-swagger model, so we need to do the + // map[string]interface{} workaround + Deprecations []map[string]interface{} `yaml:"deprecations"` + } + err = yaml.NewDecoder(fd).Decode(&deprecations) + fatal(err) + + parsed, err := parseDeprecations(deprecations.Deprecations) + fatal(err) + + f, err := os.Create("data.go") + fatal(err) + defer f.Close() + err = packageTemplate.Execute(f, struct { + Deprecations []models.Deprecation + }{ + Deprecations: parsed, + }) + fatal(err) +} + +func parseDeprecations(in []map[string]interface{}) ([]models.Deprecation, error) { + out := make([]models.Deprecation, len(in)) + + for i, d := range in { + out[i] = models.Deprecation{ + ID: d["id"].(string), + Status: d["status"].(string), + APIType: d["apiType"].(string), + Msg: d["msg"].(string), + Mitigation: d["mitigation"].(string), + SinceVersion: d["sinceVersion"].(string), + PlannedRemovalVersion: d["plannedRemovalVersion"].(string), + Locations: parseStringSlice(d["locations"].([]interface{})), + SinceTime: timeMust(time.Parse(time.RFC3339, d["sinceTime"].(string))), + } + + if t, ok := d["removedTime"]; ok && t != nil { + parsed := timeMust(time.Parse(time.RFC3339, t.(string))) + out[i].RemovedTime = &parsed + } + + if v, ok := d["removedIn"]; ok && v != nil { + parsed := v.(string) + out[i].RemovedIn = &parsed + } + } + + return out, nil +} + +func timeMust(t time.Time, err error) strfmt.DateTime { + if err != nil { + panic(err) + } + + return strfmt.DateTime(t) +} + +func parseStringSlice(in []interface{}) []string { + out := make([]string, len(in)) + for i, elem := range in { + out[i] = 
elem.(string) + } + + return out +} + +func fatal(err error) { + if err != nil { + log.Fatal(err) + } +} + +var packageTemplate = template.Must(template.New("").Funcs( + template.FuncMap{ + "DerefString": func(i *string) string { return *i }, + }, +).Parse(`// Code generated by go generate; DO NOT EDIT. +// This file was generated by go generate ./deprecations +package deprecations + +import ( + "time" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/models" +) + +func timeMust(t time.Time, err error) strfmt.DateTime { + if err != nil { + panic(err) + } + + return strfmt.DateTime(t) +} + +func timeMustPtr(t time.Time, err error) *strfmt.DateTime { + if err != nil { + panic(err) + } + + parsed := strfmt.DateTime(t) + return &parsed +} + +func ptString(in string) *string { + return &in +} + +var ByID = map[string]models.Deprecation{ +{{- range .Deprecations }} + {{ printf "%q" .ID }}: { + ID: {{ printf "%q" .ID }}, + Locations: []string{ + {{- range $index, $element := .Locations }} + {{ printf "%q," $element }} + {{- end }} + }, + Status: {{ printf "%q" .Status }}, + APIType: {{ printf "%q" .APIType }}, + Mitigation: {{ printf "%q" .Mitigation }}, + Msg: {{ printf "%q" .Msg }}, + SinceVersion: {{ printf "%q" .SinceVersion }}, + SinceTime: timeMust(time.Parse(time.RFC3339, {{ printf "%q" .SinceTime }})), + {{ if .RemovedIn -}} + RemovedIn: ptString({{ printf "%q" (DerefString .RemovedIn) }}), + {{- end }} + {{ if .RemovedTime -}} + RemovedTime: timeMustPtr(time.Parse(time.RFC3339, {{ printf "%q" .SinceTime }})), + {{- end }} + }, +{{- end }} +} +`)) diff --git a/platform/dbops/binaries/weaviate-src/deprecations/main.go b/platform/dbops/binaries/weaviate-src/deprecations/main.go new file mode 100644 index 0000000000000000000000000000000000000000..07faf4cde684226a9e26572aaf8d4306450fbf6d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/deprecations/main.go @@ -0,0 +1,21 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ 
\/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package deprecations + +import "github.com/sirupsen/logrus" + +//go:generate go run gen.go +//go:generate goimports -w data.go + +func Log(logger logrus.FieldLogger, id string) { + logger.WithField("deprecation", ByID[id]).Warning(ByID[id].Msg) +} diff --git a/platform/dbops/binaries/weaviate-src/docker-compose-raft/docker-compose-raft.yml.j2 b/platform/dbops/binaries/weaviate-src/docker-compose-raft/docker-compose-raft.yml.j2 new file mode 100644 index 0000000000000000000000000000000000000000..0a904fabb02e8791469927907818f4fe24d2adc2 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/docker-compose-raft/docker-compose-raft.yml.j2 @@ -0,0 +1,85 @@ +version: '3.4' +name: weaviate +services: +{% set raft_join = [] -%} +{% for i in range(NUMBER_VOTERS|int) -%} + {% do raft_join.append('weaviate-voter-' ~ i ~ ':830' ~ i) -%} +{% endfor -%} +{% set raft_join = raft_join | join(',') -%} +{% for i in range(NUMBER_VOTERS|int) %} + weaviate-voter-{{ i }}: + image: weaviate/test-server + build: + context: . 
+ dockerfile: Dockerfile + target: weaviate + restart: on-failure:0 + ports: + - "808{{ i }}:8080" + - "606{{ i }}:6060" + - "5005{{ i+1 }}:50051" + environment: + LOG_LEVEL: "debug" + CONTEXTIONARY_URL: contextionary:9999 + QUERY_DEFAULTS_LIMIT: 20 + AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'true' + PERSISTENCE_DATA_PATH: "./data" + DEFAULT_VECTORIZER_MODULE: text2vec-contextionary + ENABLE_MODULES: "text2vec-contextionary" + PROMETHEUS_MONITORING_ENABLED: 'true' + PROMETHEUS_MONITORING_GROUP_CLASSES: 'true' + CLUSTER_HOSTNAME: "weaviate-voter-{{ i }}" + CLUSTER_JOIN: weaviate-voter-0:7100 + CLUSTER_GOSSIP_BIND_PORT: "7100" + CLUSTER_DATA_BIND_PORT: "7101" + ASYNC_INDEXING: ${ASYNC_INDEXING:-false} + RAFT_BOOTSTRAP_EXPECT: {{ NUMBER_VOTERS }} + RAFT_JOIN: "{{ raft_join }}" + # necessary for the metrics tests, some metrics only exist once segments + # are flushed. If we wait to long the before run is completely in + # memtables, the after run has some flushed which leads to some metrics + # diffs in the before and after + PERSISTENCE_MEMTABLES_FLUSH_IDLE_AFTER_SECONDS: 2 +{% endfor %} + weaviate: + image: weaviate/test-server + build: + context: . + dockerfile: Dockerfile + target: weaviate + restart: on-failure:0 + ports: + - "8080" + - "6060" + - "50051" + environment: + LOG_LEVEL: "debug" + CONTEXTIONARY_URL: contextionary:9999 + QUERY_DEFAULTS_LIMIT: 20 + AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'true' + PERSISTENCE_DATA_PATH: "./data" + DEFAULT_VECTORIZER_MODULE: text2vec-contextionary + ENABLE_MODULES: "text2vec-contextionary" + PROMETHEUS_MONITORING_ENABLED: 'true' + PROMETHEUS_MONITORING_GROUP_CLASSES: 'true' + CLUSTER_GOSSIP_BIND_PORT: "7100" + CLUSTER_DATA_BIND_PORT: "7101" + CLUSTER_JOIN: weaviate-voter-0:7100 + ASYNC_INDEXING: ${ASYNC_INDEXING:-false} + RAFT_BOOTSTRAP_EXPECT: {{ NUMBER_VOTERS }} + RAFT_JOIN: "{{ raft_join }}" + # necessary for the metrics tests, some metrics only exist once segments + # are flushed. 
If we wait to long the before run is completely in + # memtables, the after run has some flushed which leads to some metrics + # diffs in the before and after + PERSISTENCE_MEMTABLES_FLUSH_IDLE_AFTER_SECONDS: 2 + depends_on: + - weaviate-voter-{{ NUMBER_VOTERS|int - 1}} + contextionary: + image: semitechnologies/contextionary:en0.16.0-v1.2.1 + ports: + - "9999:9999" + environment: + OCCURRENCE_WEIGHT_LINEAR_FACTOR: 0.75 + EXTENSIONS_STORAGE_MODE: weaviate + EXTENSIONS_STORAGE_ORIGIN: http://weaviate-voter-0:8080 \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/docker-compose-raft/raft_cluster.sh b/platform/dbops/binaries/weaviate-src/docker-compose-raft/raft_cluster.sh new file mode 100644 index 0000000000000000000000000000000000000000..9e80d70ce3580253f291da310e0f97c85fe00e71 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/docker-compose-raft/raft_cluster.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# Function to display help message +display_help() { + echo "Usage: $0 [VOTERS]" + echo + echo "Generate a docker-compose file for a Weaviate raft cluster." + echo + echo "VOTERS is the number of voters in the raft cluster. Default is 2." + echo + echo "Options:" + echo " --help Display this help message." +} + +# Check command-line arguments +case "$1" in + --help) + display_help + exit 0 + ;; +esac + +# Set default value for VOTERS +VOTERS=${1:-2} +FILE_NAME="docker-compose-raft.yml" +# Get the directory of the script +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +# Check if jinja2 is installed +if ! command -v jinja2 &> /dev/null; then + # Check if running on Mac + if [[ $(uname) == "Darwin" ]]; then + # Install jinja2-cli via Homebrew + brew install jinja2-cli + else + # Prompt user to install jinja2-cli via pip + echo "jinja2-cli is not installed. Please install it using 'pip install jinja2-cli'." 
+ exit 1 + fi +fi + +# Generate docker-compose-raft.yml using jinja2 +jinja2 ${SCRIPT_DIR}/docker-compose-raft.yml.j2 -D NUMBER_VOTERS=${VOTERS} -o ${FILE_NAME} + +echo -e "You can now start your multinode Weaviate compose! To do so, run the following command:\n\ + docker-compose -f ${FILE_NAME} up -d\n\ +This command will start $((VOTERS + 1)) nodes.\n\ +If you want to start more nodes, for example 10 nodes. You can use the following command:\n\ + docker-compose -f ${FILE_NAME} up -d --scale weaviate=$((10 - VOTERS))\n\ +To stop the nodes, you can use the following command:\n\ + docker-compose -f ${FILE_NAME} down\n" diff --git a/platform/dbops/binaries/weaviate-src/docker-compose/Readme.md b/platform/dbops/binaries/weaviate-src/docker-compose/Readme.md new file mode 100644 index 0000000000000000000000000000000000000000..535eb8656a7d5b041b8459a0865c223c3ceb9857 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/docker-compose/Readme.md @@ -0,0 +1,8 @@ +## Docker-compose runtime files + +This folder used to contain example `docker-compose.yml` files for various +languages. Instead you can now configure your own docker-compose file - on the +fly - using Weaviate's documentation. + +Click here to [generate a `docker-compose.yml` file with your desired +configuration](https://weaviate.io/developers/weaviate/installation/docker-compose). diff --git a/platform/dbops/binaries/weaviate-src/openapi-specs/extendresponses.js b/platform/dbops/binaries/weaviate-src/openapi-specs/extendresponses.js new file mode 100644 index 0000000000000000000000000000000000000000..99e20eee1dd8541f1d1047c0d3ac6d521165f978 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/openapi-specs/extendresponses.js @@ -0,0 +1,26 @@ +/** + * A simple script to add a new response to every single API path. This was + * built for the purpose of adding 500 Internal Server Error to everything, but + * could potentially also be used for other purposes in the future. 
+ */ + +const fs = require('fs') + +const file = fs.readFileSync('./schema.json', 'utf-8') +const parsed = JSON.parse(file) + +for (const [pathKey, pathValue] of Object.entries(parsed.paths)) { + for (const [path, value] of Object.entries(pathValue)) { + if (!value.responses) { + continue + } + + value.responses['500'] = { + description: "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + schema: { + "$ref": "#/definitions/ErrorResponse" + } + } + } +} +fs.writeFileSync('./schema.json', JSON.stringify(parsed, null, 2)) diff --git a/platform/dbops/binaries/weaviate-src/openapi-specs/schema.json b/platform/dbops/binaries/weaviate-src/openapi-specs/schema.json new file mode 100644 index 0000000000000000000000000000000000000000..7a4618db8cabb01a9f3b963e8b8613203513d6f0 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/openapi-specs/schema.json @@ -0,0 +1,8869 @@ +{ + "basePath": "/v1", + "consumes": [ + "application/yaml", + "application/json" + ], + "definitions": { + "UserTypeInput": { + "type": "string", + "enum": [ + "db", + "oidc" + ], + "description": "the type of user" + }, + "GroupType": { + "type": "string", + "enum": [ + "db", + "oidc" + ], + "description": "If the group contains OIDC or database users." 
+ }, + "UserTypeOutput": { + "type": "string", + "enum": [ + "db_user", + "db_env_user", + "oidc" + ], + "description": "the type of user" + }, + "UserOwnInfo": { + "type": "object", + "properties": { + "groups": { + "type": "array", + "description": "The groups associated to the user", + "items": { + "type": "string" + } + }, + "roles": { + "type": "array", + "items": { + "type": "object", + "description": "The roles assigned to own user", + "$ref": "#/definitions/Role" + } + }, + "username": { + "type": "string", + "description": "The username associated with the provided key" + } + }, + "required": [ + "username" + ] + }, + "DBUserInfo": { + "type": "object", + "properties": { + "roles": { + "type": "array", + "description": "The role names associated to the user", + "items": { + "type": "string" + } + }, + "userId": { + "type": "string", + "description": "The user id of the given user" + }, + "dbUserType": { + "type": "string", + "enum": [ + "db_user", + "db_env_user" + ], + "description": "type of the returned user" + }, + "active": { + "type": "boolean", + "description": "activity status of the returned user" + }, + "createdAt": { + "type": [ + "string", + "null" + ], + "format": "date-time", + "description": "Date and time in ISO 8601 format (YYYY-MM-DDTHH:MM:SSZ)" + }, + "apiKeyFirstLetters": { + "type": [ + "string", + "null" + ], + "maxLength": 3, + "description": "First 3 letters of the associated API-key" + }, + "lastUsedAt": { + "type": [ + "string", + "null" + ], + "format": "date-time", + "description": "Date and time in ISO 8601 format (YYYY-MM-DDTHH:MM:SSZ)" + } + }, + "required": [ + "userId", + "dbUserType", + "roles", + "active" + ] + }, + "UserApiKey": { + "type": "object", + "properties": { + "apikey": { + "type": "string", + "description": "The apikey" + } + }, + "required": [ + "apikey" + ] + }, + "Role": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "role name" + }, + "permissions": { + "type": 
"array", + "items": { + "type": "object", + "description": "list of permissions (level, action, resource)", + "$ref": "#/definitions/Permission" + } + } + }, + "required": [ + "name", + "permissions" + ] + }, + "Permission": { + "type": "object", + "description": "permissions attached to a role.", + "properties": { + "backups": { + "type": "object", + "description": "resources applicable for backup actions", + "properties": { + "collection": { + "type": "string", + "default": "*", + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *" + } + } + }, + "data": { + "type": "object", + "description": "resources applicable for data actions", + "properties": { + "collection": { + "type": "string", + "default": "*", + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *" + }, + "tenant": { + "type": "string", + "default": "*", + "description": "string or regex. if a specific tenant name, if left empty it will be ALL or *" + }, + "object": { + "type": "string", + "default": "*", + "description": "string or regex. if a specific object ID, if left empty it will be ALL or *" + } + } + }, + "nodes": { + "type": "object", + "description": "resources applicable for cluster actions", + "properties": { + "verbosity": { + "type": "string", + "default": "minimal", + "enum": [ + "verbose", + "minimal" + ], + "description": "whether to allow (verbose) returning shards and stats data in the response" + }, + "collection": { + "type": "string", + "default": "*", + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *" + } + } + }, + "users": { + "type": "object", + "description": "resources applicable for user actions", + "properties": { + "users": { + "type": "string", + "default": "*", + "description": "string or regex. 
if a specific name, if left empty it will be ALL or *" + } + } + }, + "groups": { + "type": "object", + "description": "Resources applicable for group actions.", + "properties": { + "group": { + "type": "string", + "default": "*", + "description": "A string that specifies which groups this permission applies to. Can be an exact group name or a regex pattern. The default value `*` applies the permission to all groups." + }, + "groupType": { + "$ref": "#/definitions/GroupType" + } + } + }, + "tenants": { + "type": "object", + "description": "resources applicable for tenant actions", + "properties": { + "collection": { + "type": "string", + "default": "*", + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *" + }, + "tenant": { + "type": "string", + "default": "*", + "description": "string or regex. if a specific tenant name, if left empty it will be ALL or *" + } + } + }, + "roles": { + "type": "object", + "description": "resources applicable for role actions", + "properties": { + "role": { + "type": "string", + "default": "*", + "description": "string or regex. if a specific role name, if left empty it will be ALL or *" + }, + "scope": { + "enum": [ + "all", + "match" + ], + "type": "string", + "default": "match", + "description": "set the scope for the manage role permission" + } + } + }, + "collections": { + "type": "object", + "description": "resources applicable for collection and/or tenant actions", + "properties": { + "collection": { + "type": "string", + "default": "*", + "description": "string or regex. if a specific collection name, if left empty it will be ALL or *" + } + } + }, + "replicate": { + "type": "object", + "description": "resources applicable for replicate actions", + "properties": { + "collection": { + "type": "string", + "default": "*", + "description": "string or regex. 
if a specific collection name, if left empty it will be ALL or *" + }, + "shard": { + "type": "string", + "default": "*", + "description": "string or regex. if a specific shard name, if left empty it will be ALL or *" + } + } + }, + "aliases": { + "type": "object", + "description": "Resource definition for alias-related actions and permissions. Used to specify which aliases and collections can be accessed or modified.", + "properties": { + "collection": { + "type": "string", + "default": "*", + "description": "A string that specifies which collections this permission applies to. Can be an exact collection name or a regex pattern. The default value `*` applies the permission to all collections." + }, + "alias": { + "type": "string", + "default": "*", + "description": "A string that specifies which aliases this permission applies to. Can be an exact alias name or a regex pattern. The default value `*` applies the permission to all aliases." + } + } + }, + "action": { + "type": "string", + "description": "allowed actions in weaviate.", + "enum": [ + "manage_backups", + "read_cluster", + "create_data", + "read_data", + "update_data", + "delete_data", + "read_nodes", + "create_roles", + "read_roles", + "update_roles", + "delete_roles", + "create_collections", + "read_collections", + "update_collections", + "delete_collections", + "assign_and_revoke_users", + "create_users", + "read_users", + "update_users", + "delete_users", + "create_tenants", + "read_tenants", + "update_tenants", + "delete_tenants", + "create_replicate", + "read_replicate", + "update_replicate", + "delete_replicate", + "create_aliases", + "read_aliases", + "update_aliases", + "delete_aliases", + "assign_and_revoke_groups", + "read_groups" + ] + } + }, + "required": [ + "action" + ] + }, + "RolesListResponse": { + "type": "array", + "description": "list of roles", + "items": { + "$ref": "#/definitions/Role" + } + }, + "Link": { + "type": "object", + "properties": { + "href": { + "type": "string", + 
"description": "target of the link" + }, + "rel": { + "type": "string", + "description": "relationship if both resources are related, e.g. 'next', 'previous', 'parent', etc." + }, + "name": { + "type": "string", + "description": "human readable name of the resource group" + }, + "documentationHref": { + "type": "string", + "description": "weaviate documentation about this resource group" + } + } + }, + "Principal": { + "type": "object", + "properties": { + "username": { + "type": "string", + "description": "The username that was extracted either from the authentication information" + }, + "groups": { + "type": "array", + "items": { + "type": "string" + } + }, + "userType": { + "$ref": "#/definitions/UserTypeInput" + } + } + }, + "C11yWordsResponse": { + "description": "An array of available words and contexts.", + "properties": { + "concatenatedWord": { + "description": "Weighted results for all words", + "type": "object", + "properties": { + "concatenatedWord": { + "type": "string" + }, + "singleWords": { + "type": "array", + "items": { + "format": "string" + } + }, + "concatenatedVector": { + "$ref": "#/definitions/C11yVector" + }, + "concatenatedNearestNeighbors": { + "$ref": "#/definitions/C11yNearestNeighbors" + } + } + }, + "individualWords": { + "description": "Weighted results for per individual word", + "type": "array", + "items": { + "type": "object", + "properties": { + "word": { + "type": "string" + }, + "present": { + "type": "boolean" + }, + "info": { + "type": "object", + "properties": { + "vector": { + "$ref": "#/definitions/C11yVector" + }, + "nearestNeighbors": { + "$ref": "#/definitions/C11yNearestNeighbors" + } + } + } + } + } + } + } + }, + "C11yExtension": { + "description": "A resource describing an extension to the contextinoary, containing both the identifier and the definition of the extension", + "properties": { + "concept": { + "description": "The new concept you want to extend. 
Must be an all-lowercase single word, or a space delimited compound word. Examples: 'foobarium', 'my custom concept'", + "type": "string", + "example": "foobarium" + }, + "definition": { + "description": "A list of space-delimited words or a sentence describing what the custom concept is about. Avoid using the custom concept itself. An Example definition for the custom concept 'foobarium': would be 'a naturally occurring element which can only be seen by programmers'", + "type": "string" + }, + "weight": { + "description": "Weight of the definition of the new concept where 1='override existing definition entirely' and 0='ignore custom definition'. Note that if the custom concept is not present in the contextionary yet, the weight cannot be less than 1.", + "type": "number", + "format": "float" + } + } + }, + "C11yNearestNeighbors": { + "description": "C11y function to show the nearest neighbors to a word.", + "type": "array", + "items": { + "type": "object", + "properties": { + "word": { + "type": "string" + }, + "distance": { + "type": "number", + "format": "float" + } + } + } + }, + "C11yVector": { + "description": "A vector representation of the object in the Contextionary. If provided at object creation, this wil take precedence over any vectorizer setting.", + "type": "array", + "items": { + "type": "number", + "format": "float" + } + }, + "Vector": { + "description": "A vector representation of the object. 
If provided at object creation, this wil take precedence over any vectorizer setting.", + "type": "object" + }, + "Vectors": { + "description": "A map of named vectors for multi-vector representations.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Vector" + } + }, + "C11yVectorBasedQuestion": { + "description": "Receive question based on array of classes, properties and values.", + "type": "array", + "items": { + "type": "object", + "properties": { + "classVectors": { + "description": "Vectorized classname.", + "type": "array", + "items": { + "type": "number", + "format": "float" + }, + "minItems": 300, + "maxItems": 300 + }, + "classProps": { + "description": "Vectorized properties.", + "type": "array", + "items": { + "type": "object", + "properties": { + "propsVectors": { + "type": "array", + "items": { + "type": "number", + "format": "float" + } + }, + "value": { + "description": "String with valuename.", + "type": "string" + } + } + }, + "minItems": 300, + "maxItems": 300 + } + } + } + }, + "Deprecation": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The id that uniquely identifies this particular deprecations (mostly used internally)" + }, + "status": { + "type": "string", + "description": "Whether the problematic API functionality is deprecated (planned to be removed) or already removed" + }, + "apiType": { + "type": "string", + "description": "Describes which API is effected, usually one of: REST, GraphQL" + }, + "msg": { + "type": "string", + "description": "What this deprecation is about" + }, + "mitigation": { + "type": "string", + "description": "User-required object to not be affected by the (planned) removal" + }, + "sinceVersion": { + "type": "string", + "description": "The deprecation was introduced in this version" + }, + "plannedRemovalVersion": { + "type": "string", + "description": "A best-effort guess of which upcoming version will remove the feature entirely" + }, + 
"removedIn": { + "type": "string", + "description": "If the feature has already been removed, it was removed in this version", + "x-nullable": true + }, + "removedTime": { + "type": "string", + "format": "date-time", + "description": "If the feature has already been removed, it was removed at this timestamp", + "x-nullable": true + }, + "sinceTime": { + "type": "string", + "format": "date-time", + "description": "The deprecation was introduced in this version" + }, + "locations": { + "type": "array", + "description": "The locations within the specified API affected by this deprecation", + "items": { + "type": "string" + } + } + } + }, + "ErrorResponse": { + "description": "An error response given by Weaviate end-points.", + "properties": { + "error": { + "items": { + "properties": { + "message": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "GraphQLError": { + "description": "An error response caused by a GraphQL query.", + "properties": { + "locations": { + "items": { + "properties": { + "column": { + "format": "int64", + "type": "integer" + }, + "line": { + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "type": "string" + }, + "path": { + "items": { + "type": "string" + }, + "type": "array" + } + } + }, + "GraphQLQuery": { + "description": "GraphQL query based on: http://facebook.github.io/graphql/.", + "properties": { + "operationName": { + "description": "The name of the operation if multiple exist in the query.", + "type": "string" + }, + "query": { + "description": "Query based on GraphQL syntax.", + "type": "string" + }, + "variables": { + "description": "Additional variables for the query.", + "type": "object" + } + }, + "type": "object" + }, + "GraphQLQueries": { + "description": "A list of GraphQL queries.", + "items": { + "$ref": "#/definitions/GraphQLQuery" + }, + "type": "array" + }, + "GraphQLResponse": { + "description": 
"GraphQL based response: http://facebook.github.io/graphql/.", + "properties": { + "data": { + "additionalProperties": { + "$ref": "#/definitions/JsonObject" + }, + "description": "GraphQL data object.", + "type": "object" + }, + "errors": { + "description": "Array with errors.", + "items": { + "$ref": "#/definitions/GraphQLError" + }, + "x-omitempty": true, + "type": "array" + } + } + }, + "GraphQLResponses": { + "description": "A list of GraphQL responses.", + "items": { + "$ref": "#/definitions/GraphQLResponse" + }, + "type": "array" + }, + "InvertedIndexConfig": { + "description": "Configure the inverted index built into Weaviate (default: 60).", + "properties": { + "cleanupIntervalSeconds": { + "description": "Asynchronous index clean up happens every n seconds", + "format": "int", + "type": "number" + }, + "bm25": { + "$ref": "#/definitions/BM25Config" + }, + "stopwords": { + "$ref": "#/definitions/StopwordConfig" + }, + "indexTimestamps": { + "description": "Index each object by its internal timestamps (default: 'false').", + "type": "boolean" + }, + "indexNullState": { + "description": "Index each object with the null state (default: 'false').", + "type": "boolean" + }, + "indexPropertyLength": { + "description": "Index length of properties (default: 'false').", + "type": "boolean" + }, + "usingBlockMaxWAND": { + "description": "Using BlockMax WAND for query execution (default: 'false', will be 'true' for new collections created after 1.30).", + "type": "boolean" + } + }, + "type": "object" + }, + "ReplicationConfig": { + "description": "Configure how replication is executed in a cluster", + "properties": { + "factor": { + "description": "Number of times a class is replicated (default: 1).", + "type": "integer" + }, + "asyncEnabled": { + "description": "Enable asynchronous replication (default: false).", + "type": "boolean", + "x-omitempty": false + }, + "deletionStrategy": { + "description": "Conflict resolution strategy for deleted objects.", + "type": 
"string", + "enum": [ + "NoAutomatedResolution", + "DeleteOnConflict", + "TimeBasedResolution" + ], + "x-omitempty": true + } + }, + "type": "object" + }, + "BM25Config": { + "description": "tuning parameters for the BM25 algorithm", + "properties": { + "k1": { + "description": "Calibrates term-weight scaling based on the term frequency within a document (default: 1.2).", + "format": "float", + "type": "number" + }, + "b": { + "description": "Calibrates term-weight scaling based on the document length (default: 0.75).", + "format": "float", + "type": "number" + } + }, + "type": "object" + }, + "StopwordConfig": { + "description": "fine-grained control over stopword list usage", + "properties": { + "preset": { + "description": "Pre-existing list of common words by language (default: 'en'). Options: ['en', 'none'].", + "type": "string" + }, + "additions": { + "description": "Stopwords to be considered additionally (default: []). Can be any array of custom strings.", + "type": "array", + "items": { + "type": "string" + } + }, + "removals": { + "description": "Stopwords to be removed from consideration (default: []). 
Can be any array of custom strings.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "type": "object" + }, + "MultiTenancyConfig": { + "description": "Configuration related to multi-tenancy within a class", + "properties": { + "enabled": { + "description": "Whether or not multi-tenancy is enabled for this class (default: false).", + "type": "boolean", + "x-omitempty": false + }, + "autoTenantCreation": { + "description": "Nonexistent tenants should (not) be created implicitly (default: false).", + "type": "boolean", + "x-omitempty": false + }, + "autoTenantActivation": { + "description": "Existing tenants should (not) be turned HOT implicitly when they are accessed and in another activity status (default: false).", + "type": "boolean", + "x-omitempty": false + } + } + }, + "JsonObject": { + "description": "JSON object value.", + "type": "object" + }, + "Meta": { + "description": "Contains meta information of the current Weaviate instance.", + "properties": { + "hostname": { + "description": "The url of the host.", + "format": "url", + "type": "string" + }, + "version": { + "description": "The Weaviate server version.", + "type": "string" + }, + "modules": { + "description": "Module-specific meta information.", + "type": "object" + }, + "grpcMaxMessageSize": { + "description": "Max message size for GRPC connection in bytes.", + "type": "integer" + } + }, + "type": "object" + }, + "MultipleRef": { + "description": "Multiple instances of references to other objects.", + "items": { + "$ref": "#/definitions/SingleRef" + }, + "type": "array" + }, + "PatchDocumentObject": { + "description": "Either a JSONPatch document as defined by RFC 6902 (from, op, path, value), or a merge document (RFC 7396).", + "properties": { + "from": { + "description": "A string containing a JSON Pointer value.", + "type": "string" + }, + "op": { + "description": "The operation to be performed.", + "enum": [ + "add", + "remove", + "replace", + "move", + "copy", + "test" + ], + 
"type": "string" + }, + "path": { + "description": "A JSON-Pointer.", + "type": "string" + }, + "value": { + "description": "The value to be used within the operations.", + "type": "object" + }, + "merge": { + "$ref": "#/definitions/Object" + } + }, + "required": [ + "op", + "path" + ] + }, + "PatchDocumentAction": { + "description": "Either a JSONPatch document as defined by RFC 6902 (from, op, path, value), or a merge document (RFC 7396).", + "properties": { + "from": { + "description": "A string containing a JSON Pointer value.", + "type": "string" + }, + "op": { + "description": "The operation to be performed.", + "enum": [ + "add", + "remove", + "replace", + "move", + "copy", + "test" + ], + "type": "string" + }, + "path": { + "description": "A JSON-Pointer.", + "type": "string" + }, + "value": { + "description": "The value to be used within the operations.", + "type": "object" + }, + "merge": { + "$ref": "#/definitions/Object" + } + }, + "required": [ + "op", + "path" + ] + }, + "ReplicationReplicateReplicaRequest": { + "description": "Specifies the parameters required to initiate a shard replica movement operation between two nodes for a given collection and shard. This request defines the source and target node, the collection and type of transfer.", + "properties": { + "sourceNode": { + "description": "The name of the Weaviate node currently hosting the shard replica that needs to be moved or copied.", + "type": "string" + }, + "targetNode": { + "description": "The name of the Weaviate node where the new shard replica will be created as part of the movement or copy operation.", + "type": "string" + }, + "collection": { + "description": "The name of the collection to which the target shard belongs.", + "type": "string" + }, + "shard": { + "description": "The name of the shard whose replica is to be moved or copied.", + "type": "string" + }, + "type": { + "description": "Specifies the type of replication operation to perform. 
'COPY' creates a new replica on the target node while keeping the source replica. 'MOVE' creates a new replica on the target node and then removes the source replica upon successful completion. Defaults to 'COPY' if omitted.", + "type": "string", + "enum": ["COPY", "MOVE"], + "default": "COPY" + } + }, + "type": "object", + "required": [ + "sourceNode", + "targetNode", + "collection", + "shard" + ] + }, + "ReplicationReplicateReplicaResponse": { + "description": "Contains the unique identifier for a successfully initiated asynchronous replica movement operation. This ID can be used to track the progress of the operation.", + "properties": { + "id": { + "description": "The unique identifier (ID) assigned to the registered replication operation.", + "format": "uuid", + "type": "string" + } + }, + "type": "object", + "required": [ + "id" + ] + }, + "ReplicationShardingStateResponse": { + "description": "Provides the detailed sharding state for one or more collections, including the distribution of shards and their replicas across the cluster nodes.", + "properties": { + "shardingState": { + "$ref": "#/definitions/ReplicationShardingState" + } + }, + "type": "object" + }, + "ReplicationDisableReplicaRequest": { + "description": "Specifies the parameters required to mark a specific shard replica as inactive (soft-delete) on a particular node. 
This action typically prevents the replica from serving requests but does not immediately remove its data.", + "properties": { + "node": { + "description": "The name of the Weaviate node hosting the shard replica that is to be disabled.", + "type": "string" + }, + "collection": { + "description": "The name of the collection to which the shard replica belongs.", + "type": "string" + }, + "shard": { + "description": "The ID of the shard whose replica is to be disabled.", + "type": "string" + } + }, + "type": "object", + "required": [ + "node", + "collection", + "shard" + ] + }, + "ReplicationDeleteReplicaRequest": { + "description": "Specifies the parameters required to permanently delete a specific shard replica from a particular node. This action will remove the replica's data from the node.", + "properties": { + "node": { + "description": "The name of the Weaviate node from which the shard replica will be deleted.", + "type": "string" + }, + "collection": { + "description": "The name of the collection to which the shard replica belongs.", + "type": "string" + }, + "shard": { + "description": "The ID of the shard whose replica is to be deleted.", + "type": "string" + } + }, + "type": "object", + "required": [ + "node", + "collection", + "shard" + ] + }, + "ReplicationShardReplicas": { + "description": "Represents a shard and lists the nodes that currently host its replicas.", + "type": "object", + "properties": { + "shard": { + "type": "string" + }, + "replicas": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "ReplicationShardingState": { + "description": "Details the sharding layout for a specific collection, mapping each shard to its set of replicas across the cluster.", + "type": "object", + "properties": { + "collection": { + "description": "The name of the collection.", + "type": "string" + }, + "shards": { + "description": "An array detailing each shard within the collection and the nodes hosting its replicas.", + "type": "array", + 
"items": { + "$ref": "#/definitions/ReplicationShardReplicas" + } + } + } + }, + "ReplicationReplicateDetailsReplicaStatusError": { + "description": "Represents an error encountered during a replication operation, including its timestamp and a human-readable message.", + "type": "object", + "properties": { + "whenErroredUnixMs": { + "description": "The unix timestamp in ms when the error occurred. This is an approximate time and so should not be used for precise timing.", + "format": "int64", + "type": "integer" + }, + "message": { + "description": "A human-readable message describing the error.", + "type": "string" + } + } + }, + "ReplicationReplicateDetailsReplicaStatus": { + "description": "Represents the current or historical status of a shard replica involved in a replication operation, including its operational state and any associated errors.", + "type": "object", + "properties": { + "state": { + "description": "The current operational state of the replica during the replication process.", + "type": "string", + "enum": [ + "REGISTERED", + "HYDRATING", + "FINALIZING", + "DEHYDRATING", + "READY", + "CANCELLED" + ] + }, + "whenStartedUnixMs": { + "description": "The UNIX timestamp in ms when this state was first entered. 
This is an approximate time and so should not be used for precise timing.", + "format": "int64", + "type": "integer" + }, + "errors": { + "description": "A list of error messages encountered by this replica during the replication operation, if any.", + "type": "array", + "items": { + "$ref": "#/definitions/ReplicationReplicateDetailsReplicaStatusError" + } + } + } + }, + "ReplicationReplicateDetailsReplicaResponse": { + "description": "Provides a comprehensive overview of a specific replication operation, detailing its unique ID, the involved collection, shard, source and target nodes, transfer type, current status, and optionally, its status history.", + "properties": { + "id": { + "description": "The unique identifier (ID) of this specific replication operation.", + "format": "uuid", + "type": "string" + }, + "shard": { + "description": "The name of the shard involved in this replication operation.", + "type": "string" + }, + "collection": { + "description": "The name of the collection to which the shard being replicated belongs.", + "type": "string" + }, + "sourceNode": { + "description": "The identifier of the node from which the replica is being moved or copied (the source node).", + "type": "string" + }, + "targetNode": { + "description": "The identifier of the node to which the replica is being moved or copied (the target node).", + "type": "string" + }, + "type": { + "description": "Indicates whether the operation is a 'COPY' (source replica remains) or a 'MOVE' (source replica is removed after successful transfer).", + "type": "string", + "enum": [ + "COPY", + "MOVE" + ] + }, + "uncancelable": { + "description": "Whether the replica operation is uncancelable.", + "type": "boolean" + }, + "scheduledForCancel": { + "description": "Whether the replica operation is scheduled for cancellation.", + "type": "boolean" + }, + "scheduledForDelete": { + "description": "Whether the replica operation is scheduled for deletion.", + "type": "boolean" + }, + "status": { + 
"description": "An object detailing the current operational state of the replica movement and any errors encountered.", + "type": "object", + "$ref": "#/definitions/ReplicationReplicateDetailsReplicaStatus" + }, + "statusHistory": { + "description": "An array detailing the historical sequence of statuses the replication operation has transitioned through, if requested and available.", + "type": "array", + "items": { + "$ref": "#/definitions/ReplicationReplicateDetailsReplicaStatus" + } + }, + "whenStartedUnixMs": { + "description": "The UNIX timestamp in ms when the replication operation was initiated. This is an approximate time and so should not be used for precise timing.", + "format": "int64", + "type": "integer" + } + }, + "required": ["id", "shard", "sourceNode", "targetNode", "collection", "status", "type"] + }, + "ReplicationReplicateForceDeleteRequest": { + "description": "Specifies the parameters available when force deleting replication operations.", + "properties": { + "id": { + "description": "The unique identifier (ID) of the replication operation to be forcefully deleted.", + "format": "uuid", + "type": "string" + }, + "collection": { + "description": "The name of the collection to which the shard being replicated belongs.", + "type": "string" + }, + "shard": { + "description": "The identifier of the shard involved in the replication operations.", + "type": "string" + }, + "node": { + "description": "The name of the target node where the replication operations are registered.", + "type": "string" + }, + "dryRun": { + "description": "If true, the operation will not actually delete anything but will return the expected outcome of the deletion.", + "type": "boolean", + "default": false + } + }, + "type": "object" + }, + "ReplicationReplicateForceDeleteResponse": { + "description": "Provides the UUIDs that were successfully force deleted as part of the replication operation. 
If dryRun is true, this will return the expected outcome without actually deleting anything.", + "properties": { + "deleted": { + "description": "The unique identifiers (IDs) of the replication operations that were forcefully deleted.", + "type": "array", + "items": { + "format": "uuid", + "type": "string" + } + }, + "dryRun": { + "description": "Indicates whether the operation was a dry run (true) or an actual deletion (false).", + "type": "boolean" + } + }, + "type": "object" + }, + "PeerUpdate": { + "description": "A single peer in the network.", + "properties": { + "id": { + "description": "The session ID of the peer.", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "Human readable name.", + "type": "string" + }, + "uri": { + "description": "The location where the peer is exposed to the internet.", + "type": "string", + "format": "uri" + }, + "schemaHash": { + "description": "The latest known hash of the peer's schema.", + "type": "string" + } + } + }, + "PeerUpdateList": { + "description": "List of known peers.", + "items": { + "$ref": "#/definitions/PeerUpdate" + }, + "type": "array" + }, + "VectorWeights": { + "description": "Allow custom overrides of vector weights as math expressions. E.g. \"pancake\": \"7\" will set the weight for the word pancake to 7 in the vectorization, whereas \"w * 3\" would triple the originally calculated word. This is an open object, with OpenAPI Specification 3.0 this will be more detailed. See Weaviate docs for more info. In the future this will become a key/value (string/string) object.", + "type": "object" + }, + "PropertySchema": { + "description": "Names and values of an individual property. A returned response may also contain additional metadata, such as from classification or feature projection.", + "type": "object" + }, + "SchemaHistory": { + "description": "This is an open object, with OpenAPI Specification 3.0 this will be more detailed. See Weaviate docs for more info. 
In the future this will become a key/value OR a SingleRef definition.", + "type": "object" + }, + "Schema": { + "description": "Definitions of semantic schemas (also see: https://github.com/weaviate/weaviate-semantic-schemas).", + "properties": { + "classes": { + "description": "Semantic classes that are available.", + "items": { + "$ref": "#/definitions/Class" + }, + "type": "array" + }, + "maintainer": { + "description": "Email of the maintainer.", + "format": "email", + "type": "string" + }, + "name": { + "description": "Name of the schema.", + "type": "string" + } + }, + "type": "object" + }, + "SchemaClusterStatus": { + "description": "Indicates the health of the schema in a cluster.", + "properties": { + "healthy": { + "description": "True if the cluster is in sync, false if there is an issue (see error).", + "type": "boolean", + "x-omitempty": false + }, + "error": { + "description": "Contains the sync check error if one occurred", + "type": "string", + "x-omitempty": true + }, + "hostname": { + "description": "Hostname of the coordinating node, i.e. the one that received the cluster. This can be useful information if the error message contains phrases such as 'other nodes agree, but local does not', etc.", + "type": "string" + }, + "nodeCount": { + "description": "Number of nodes that participated in the sync check", + "type": "number", + "format": "int" + }, + "ignoreSchemaSync": { + "description": "The cluster check at startup can be ignored (to recover from an out-of-sync situation).", + "type": "boolean", + "x-omitempty": false + } + }, + "type": "object" + }, + "Class": { + "properties": { + "class": { + "description": "Name of the class (a.k.a. 'collection') (required). Multiple words should be concatenated in CamelCase, e.g. `ArticleAuthor`.", + "type": "string" + }, + "vectorConfig": { + "description": "Configure named vectors. Either use this field or `vectorizer`, `vectorIndexType`, and `vectorIndexConfig` fields. 
Available from `v1.24.0`.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/VectorConfig" + } + }, + "vectorIndexType": { + "description": "Name of the vector index to use, eg. (HNSW)", + "type": "string" + }, + "vectorIndexConfig": { + "description": "Vector-index config, that is specific to the type of index selected in vectorIndexType", + "type": "object" + }, + "shardingConfig": { + "description": "Manage how the index should be sharded and distributed in the cluster", + "type": "object" + }, + "replicationConfig": { + "$ref": "#/definitions/ReplicationConfig" + }, + "invertedIndexConfig": { + "$ref": "#/definitions/InvertedIndexConfig" + }, + "multiTenancyConfig": { + "$ref": "#/definitions/MultiTenancyConfig" + }, + "vectorizer": { + "description": "Specify how the vectors for this class should be determined. The options are either 'none' - this means you have to import a vector with each object yourself - or the name of a module that provides vectorization capabilities, such as 'text2vec-contextionary'. If left empty, it will use the globally configured default which can itself either be 'none' or a specific module.", + "type": "string" + }, + "moduleConfig": { + "description": "Configuration specific to modules in a collection context.", + "type": "object" + }, + "description": { + "description": "Description of the collection for metadata purposes.", + "type": "string" + }, + "properties": { + "description": "Define properties of the collection.", + "items": { + "$ref": "#/definitions/Property" + }, + "type": "array" + } + }, + "type": "object" + }, + "Property": { + "properties": { + "dataType": { + "description": "Data type of the property (required). 
If it starts with a capital (for example Person), may be a reference to another type.", + "items": { + "type": "string" + }, + "type": "array" + }, + "description": { + "description": "Description of the property.", + "type": "string" + }, + "moduleConfig": { + "description": "Configuration specific to modules this Weaviate instance has installed", + "type": "object" + }, + "name": { + "description": "The name of the property (required). Multiple words should be concatenated in camelCase, e.g. `nameOfAuthor`.", + "type": "string" + }, + "indexInverted": { + "description": "(Deprecated). Whether to include this property in the inverted index. If `false`, this property cannot be used in `where` filters, `bm25` or `hybrid` search.

    Unrelated to vectorization behavior (deprecated as of v1.19; use indexFilterable or/and indexSearchable instead)", + "type": "boolean", + "x-nullable": true + }, + "indexFilterable": { + "description": "Whether to include this property in the filterable, Roaring Bitmap index. If `false`, this property cannot be used in `where` filters.

    Note: Unrelated to vectorization behavior.", + "type": "boolean", + "x-nullable": true + }, + "indexSearchable": { + "description": "Optional. Should this property be indexed in the inverted index. Defaults to true. Applicable only to properties of data type text and text[]. If you choose false, you will not be able to use this property in bm25 or hybrid search. This property has no effect on vectorization decisions done by modules", + "type": "boolean", + "x-nullable": true + }, + "indexRangeFilters": { + "description": "Whether to include this property in the filterable, range-based Roaring Bitmap index. Provides better performance for range queries compared to filterable index in large datasets. Applicable only to properties of data type int, number, date.", + "type": "boolean", + "x-nullable": true + }, + "tokenization": { + "description": "Determines tokenization of the property as separate words or whole field. Optional. Applies to text and text[] data types. Allowed values are `word` (default; splits on any non-alphanumerical, lowercases), `lowercase` (splits on white spaces, lowercases), `whitespace` (splits on white spaces), `field` (trims). Not supported for remaining data types", + "type": "string", + "enum": [ + "word", + "lowercase", + "whitespace", + "field", + "trigram", + "gse", + "kagome_kr", + "kagome_ja", + "gse_ch" + ] + }, + "nestedProperties": { + "description": "The properties of the nested object(s). Applies to object and object[] data types.", + "items": { + "$ref": "#/definitions/NestedProperty" + }, + "type": "array", + "x-omitempty": true + } + }, + "type": "object" + }, + "VectorConfig": { + "properties": { + "vectorizer": { + "description": "Configuration of a specific vectorizer used by this vector", + "type": "object" + }, + "vectorIndexType": { + "description": "Name of the vector index to use, eg. 
(HNSW)", + "type": "string" + }, + "vectorIndexConfig": { + "description": "Vector-index config, that is specific to the type of index selected in vectorIndexType", + "type": "object" + } + }, + "type": "object" + }, + "NestedProperty": { + "properties": { + "dataType": { + "items": { + "type": "string" + }, + "type": "array" + }, + "description": { + "type": "string" + }, + "name": { + "type": "string" + }, + "indexFilterable": { + "type": "boolean", + "x-nullable": true + }, + "indexSearchable": { + "type": "boolean", + "x-nullable": true + }, + "indexRangeFilters": { + "type": "boolean", + "x-nullable": true + }, + "tokenization": { + "type": "string", + "enum": [ + "word", + "lowercase", + "whitespace", + "field", + "trigram", + "gse", + "kagome_kr", + "kagome_ja", + "gse_ch" + ] + }, + "nestedProperties": { + "description": "The properties of the nested object(s). Applies to object and object[] data types.", + "items": { + "$ref": "#/definitions/NestedProperty" + }, + "type": "array", + "x-omitempty": true + } + }, + "type": "object" + }, + "ShardStatusList": { + "description": "The status of all the shards of a Class", + "items": { + "$ref": "#/definitions/ShardStatusGetResponse" + }, + "type": "array" + }, + "ShardStatusGetResponse": { + "description": "Response body of shard status get request", + "properties": { + "name": { + "description": "Name of the shard", + "type": "string" + }, + "status": { + "description": "Status of the shard", + "type": "string" + }, + "vectorQueueSize": { + "description": "Size of the vector queue of the shard", + "type": "integer", + "x-omitempty": false + } + } + }, + "ShardStatus": { + "description": "The status of a single shard", + "properties": { + "status": { + "description": "Status of the shard", + "type": "string" + } + } + }, + "BackupCreateStatusResponse": { + "description": "The definition of a backup create metadata", + "properties": { + "id": { + "description": "The ID of the backup. 
Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "backend": { + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "type": "string" + }, + "path": { + "description": "destination path of backup files proper to selected backend", + "type": "string" + }, + "error": { + "description": "error message if creation failed", + "type": "string" + }, + "status": { + "description": "phase of backup creation process", + "type": "string", + "default": "STARTED", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + }, + "BackupRestoreStatusResponse": { + "description": "The definition of a backup restore metadata", + "properties": { + "id": { + "description": "The ID of the backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "backend": { + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "type": "string" + }, + "path": { + "description": "destination path of backup files proper to selected backup backend, contains bucket and path", + "type": "string" + }, + "error": { + "description": "error message if restoration failed", + "type": "string" + }, + "status": { + "description": "phase of backup restoration process", + "type": "string", + "default": "STARTED", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + }, + "BackupConfig": { + "description": "Backup custom configuration", + "type": "object", + "properties": { + "Endpoint": { + "type": "string", + "description": "name of the endpoint, e.g. 
s3.amazonaws.com" + }, + "Bucket": { + "type": "string", + "description": "Name of the bucket, container, volume, etc" + }, + "Path": { + "type": "string", + "description": "Path or key within the bucket" + }, + "CPUPercentage": { + "description": "Desired CPU core utilization ranging from 1%-80%", + "type": "integer", + "default": 50, + "minimum": 1, + "maximum": 80, + "x-nullable": false + }, + "ChunkSize": { + "description": "Aimed chunk size, with a minimum of 2MB, default of 128MB, and a maximum of 512MB. The actual chunk size may vary.", + "type": "integer", + "default": 128, + "minimum": 2, + "maximum": 512, + "x-nullable": false + }, + "CompressionLevel": { + "description": "compression level used by compression algorithm", + "type": "string", + "default": "DefaultCompression", + "x-nullable": false, + "enum": [ + "DefaultCompression", + "BestSpeed", + "BestCompression" + ] + } + } + }, + "RestoreConfig": { + "description": "Backup custom configuration", + "type": "object", + "properties": { + "Endpoint": { + "type": "string", + "description": "name of the endpoint, e.g. s3.amazonaws.com" + }, + "Bucket": { + "type": "string", + "description": "Name of the bucket, container, volume, etc" + }, + "Path": { + "type": "string", + "description": "Path within the bucket" + }, + "CPUPercentage": { + "description": "Desired CPU core utilization ranging from 1%-80%", + "type": "integer", + "default": 50, + "minimum": 1, + "maximum": 80, + "x-nullable": false + }, + "rolesOptions" : { + "description": "How roles should be restored", + "type": "string", + "enum": [ + "noRestore", + "all" + ], + "default": "noRestore" + }, + "usersOptions" : { + "description": "How users should be restored", + "type": "string", + "enum": [ + "noRestore", + "all" + ], + "default": "noRestore" + } + } + }, + "BackupCreateRequest": { + "description": "Request body for creating a backup of a set of classes", + "properties": { + "id": { + "description": "The ID of the backup (required). 
Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "config": { + "description": "Custom configuration for the backup creation process", + "type": "object", + "$ref": "#/definitions/BackupConfig" + }, + "include": { + "description": "List of collections to include in the backup creation process. If not set, all collections are included. Cannot be used together with `exclude`.", + "type": "array", + "items": { + "type": "string" + } + }, + "exclude": { + "description": "List of collections to exclude from the backup creation process. If not set, all collections are included. Cannot be used together with `include`.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "BackupCreateResponse": { + "description": "The definition of a backup create response body", + "properties": { + "id": { + "description": "The ID of the backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "classes": { + "description": "The list of classes for which the backup creation process was started", + "type": "array", + "items": { + "type": "string" + } + }, + "backend": { + "description": "Backup backend name e.g. 
filesystem, gcs, s3.", + "type": "string" + }, + "bucket": { + "description": "Name of the bucket, container, volume, etc", + "type": "string" + }, + "path": { + "description": "Path within bucket of backup", + "type": "string" + }, + "error": { + "description": "error message if creation failed", + "type": "string" + }, + "status": { + "description": "phase of backup creation process", + "type": "string", + "default": "STARTED", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + }, + "BackupListResponse": { + "description": "The definition of a backup list response body", + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "description": "The ID of the backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "classes": { + "description": "The list of classes included in the existing backup process", + "type": "array", + "items": { + "type": "string" + } + }, + "status": { + "description": "status of backup process", + "type": "string", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + } + }, + "BackupRestoreRequest": { + "description": "Request body for restoring a backup for a set of classes", + "properties": { + "config": { + "description": "Custom configuration for the backup restoration process", + "type": "object", + "$ref": "#/definitions/RestoreConfig" + }, + "include": { + "description": "List of classes to include in the backup restoration process", + "type": "array", + "items": { + "type": "string" + } + }, + "exclude": { + "description": "List of classes to exclude from the backup restoration process", + "type": "array", + "items": { + "type": "string" + } + }, + "node_mapping": { + "description": "Allows overriding the node names stored in the backup with different ones. 
Useful when restoring backups to a different environment.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "overwriteAlias": { + "description": "Allows overwriting the collection alias if there is a conflict", + "type": "boolean" + } + } + }, + "BackupRestoreResponse": { + "description": "The definition of a backup restore response body", + "properties": { + "id": { + "description": "The ID of the backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed.", + "type": "string" + }, + "classes": { + "description": "The list of classes for which the backup restoration process was started", + "type": "array", + "items": { + "type": "string" + } + }, + "backend": { + "description": "Backup backend name e.g. filesystem, gcs, s3.", + "type": "string" + }, + "path": { + "description": "destination path of backup files proper to selected backend", + "type": "string" + }, + "error": { + "description": "error message if restoration failed", + "type": "string" + }, + "status": { + "description": "phase of backup restoration process", + "type": "string", + "default": "STARTED", + "enum": [ + "STARTED", + "TRANSFERRING", + "TRANSFERRED", + "SUCCESS", + "FAILED", + "CANCELED" + ] + } + } + }, + "NodeStats": { + "description": "The summary of Weaviate's statistics.", + "properties": { + "shardCount": { + "description": "The count of Weaviate's shards. 
To see this value, set `output` to `verbose`.", + "format": "int", + "type": "number", + "x-omitempty": false + }, + "objectCount": { + "description": "The total number of objects in DB.", + "format": "int64", + "type": "number", + "x-omitempty": false + } + } + }, + "BatchStats": { + "description": "The summary of a nodes batch queue congestion status.", + "properties": { + "queueLength": { + "description": "How many objects are currently in the batch queue.", + "format": "int", + "type": "number", + "x-omitempty": true, + "x-nullable": true + }, + "ratePerSecond": { + "description": "How many objects are approximately processed from the batch queue per second.", + "format": "int", + "type": "number", + "x-omitempty": false + } + } + }, + "NodeShardStatus": { + "description": "The definition of a node shard status response body", + "properties": { + "name": { + "description": "The name of the shard.", + "type": "string", + "x-omitempty": false + }, + "class": { + "description": "The name of shard's class.", + "type": "string", + "x-omitempty": false + }, + "objectCount": { + "description": "The number of objects in shard.", + "format": "int64", + "type": "number", + "x-omitempty": false + }, + "vectorIndexingStatus": { + "description": "The status of the vector indexing process.", + "format": "string", + "x-omitempty": false + }, + "compressed": { + "description": "The status of vector compression/quantization.", + "format": "boolean", + "x-omitempty": false + }, + "vectorQueueLength": { + "description": "The length of the vector indexing queue.", + "format": "int64", + "type": "number", + "x-omitempty": false + }, + "loaded": { + "description": "The load status of the shard.", + "type": "boolean", + "x-omitempty": false + }, + "asyncReplicationStatus": { + "description": "The status of the async replication.", + "type": "array", + "items": { + "$ref": "#/definitions/AsyncReplicationStatus" + } + }, + "numberOfReplicas": { + "description": "Number of replicas for 
the shard.", + "type": ["integer", "null"], + "format": "int64", + "x-omitempty": true + }, + "replicationFactor": { + "description": "Minimum number of replicas for the shard.", + "type": ["integer", "null"], + "format": "int64", + "x-omitempty": true + } + } + }, + "AsyncReplicationStatus": { + "description": "The status of the async replication.", + "properties": { + "objectsPropagated": { + "description": "The number of objects propagated in the most recent iteration.", + "type": "number", + "format": "uint64" + }, + "startDiffTimeUnixMillis": { + "description": "The start time of the most recent iteration.", + "type": "number", + "format": "int64" + }, + "targetNode": { + "description": "The target node of the replication, if set, otherwise empty.", + "type": "string" + } + } + }, + "NodeStatus": { + "description": "The definition of a backup node status response body", + "properties": { + "name": { + "description": "The name of the node.", + "type": "string" + }, + "status": { + "description": "Node's status.", + "type": "string", + "default": "HEALTHY", + "enum": [ + "HEALTHY", + "UNHEALTHY", + "UNAVAILABLE", + "TIMEOUT" + ] + }, + "version": { + "description": "The version of Weaviate.", + "type": "string" + }, + "gitHash": { + "description": "The gitHash of Weaviate.", + "type": "string" + }, + "stats": { + "description": "Weaviate overall statistics.", + "type": "object", + "$ref": "#/definitions/NodeStats" + }, + "batchStats": { + "description": "Weaviate batch statistics.", + "type": "object", + "$ref": "#/definitions/BatchStats" + }, + "shards": { + "description": "The list of the shards with its statistics.", + "type": "array", + "items": { + "$ref": "#/definitions/NodeShardStatus" + } + } + } + }, + "NodesStatusResponse": { + "description": "The status of all of the Weaviate nodes", + "type": "object", + "properties": { + "nodes": { + "type": "array", + "items": { + "$ref": "#/definitions/NodeStatus" + } + } + } + }, + "DistributedTask": { + 
"description": "Distributed task metadata.", + "type": "object", + "properties": { + "id": { + "description": "The ID of the task.", + "type": "string" + }, + "version": { + "description": "The version of the task.", + "type": "integer" + }, + "status": { + "description": "The status of the task.", + "type": "string" + }, + "startedAt": { + "description": "The time when the task was created.", + "type": "string", + "format": "date-time" + }, + "finishedAt": { + "description": "The time when the task was finished.", + "type": "string", + "format": "date-time" + }, + "finishedNodes": { + "description": "The nodes that finished the task.", + "type": "array", + "items": { + "type": "string" + } + }, + "error": { + "description": "The high level reason why the task failed.", + "type": "string", + "x-omitempty": true + }, + "payload": { + "description": "The payload of the task.", + "type": "object" + } + } + }, + "DistributedTasks": { + "description": "Active distributed tasks by namespace.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/definitions/DistributedTask" + } + } + }, + "RaftStatistics": { + "description": "The definition of Raft statistics.", + "properties": { + "appliedIndex": { + "type": "string" + }, + "commitIndex": { + "type": "string" + }, + "fsmPending": { + "type": "string" + }, + "lastContact": { + "type": "string" + }, + "lastLogIndex": { + "type": "string" + }, + "lastLogTerm": { + "type": "string" + }, + "lastSnapshotIndex": { + "type": "string" + }, + "lastSnapshotTerm": { + "type": "string" + }, + "latestConfiguration": { + "description": "Weaviate Raft nodes.", + "type": "object" + }, + "latestConfigurationIndex": { + "type": "string" + }, + "numPeers": { + "type": "string" + }, + "protocolVersion": { + "type": "string" + }, + "protocolVersionMax": { + "type": "string" + }, + "protocolVersionMin": { + "type": "string" + }, + "snapshotVersionMax": { + "type": "string" + }, + "snapshotVersionMin": { 
+ "type": "string" + }, + "state": { + "type": "string" + }, + "term": { + "type": "string" + } + } + }, + "Statistics": { + "description": "The definition of node statistics.", + "properties": { + "name": { + "description": "The name of the node.", + "type": "string" + }, + "status": { + "description": "Node's status.", + "type": "string", + "default": "HEALTHY", + "enum": [ + "HEALTHY", + "UNHEALTHY", + "UNAVAILABLE", + "TIMEOUT" + ] + }, + "bootstrapped": { + "type": "boolean" + }, + "dbLoaded": { + "type": "boolean" + }, + "initialLastAppliedIndex": { + "type": "number", + "format": "uint64" + }, + "lastAppliedIndex": { + "type": "number" + }, + "isVoter": { + "type": "boolean" + }, + "leaderId": { + "type": "object" + }, + "leaderAddress": { + "type": "object" + }, + "open": { + "type": "boolean" + }, + "ready": { + "type": "boolean" + }, + "candidates": { + "type": "object" + }, + "raft": { + "description": "Weaviate Raft statistics.", + "type": "object", + "$ref": "#/definitions/RaftStatistics" + } + } + }, + "ClusterStatisticsResponse": { + "description": "The cluster statistics of all of the Weaviate nodes", + "type": "object", + "properties": { + "statistics": { + "type": "array", + "items": { + "$ref": "#/definitions/Statistics" + } + }, + "synchronized": { + "type": "boolean", + "x-omitempty": false + } + } + }, + "SingleRef": { + "description": "Either set beacon (direct reference) or set class and schema (concept reference)", + "properties": { + "class": { + "description": "If using a concept reference (rather than a direct reference), specify the desired class name here", + "format": "uri", + "type": "string" + }, + "schema": { + "description": "If using a concept reference (rather than a direct reference), specify the desired properties here", + "$ref": "#/definitions/PropertySchema" + }, + "beacon": { + "description": "If using a direct reference, specify the URI to point to the cross-ref here. 
Should be in the form of weaviate://localhost/ for the example of a local cross-ref to an object", + "format": "uri", + "type": "string" + }, + "href": { + "description": "If using a direct reference, this read-only field provides a link to the referenced resource. If 'origin' is globally configured, an absolute URI is shown - a relative URI otherwise.", + "format": "uri", + "type": "string" + }, + "classification": { + "description": "Additional Meta information about classifications if the item was part of one", + "$ref": "#/definitions/ReferenceMetaClassification" + } + } + }, + "AdditionalProperties": { + "description": "(Response only) Additional meta information about a single object.", + "type": "object", + "additionalProperties": { + "type": "object" + } + }, + "ReferenceMetaClassification": { + "description": "This meta field contains additional info about the classified reference property", + "properties": { + "overallCount": { + "description": "overall neighbors checked as part of the classification. 
In most cases this will equal k, but could be lower than k - for example if not enough data was present", + "type": "number", + "format": "int64" + }, + "winningCount": { + "description": "size of the winning group, a number between 1..k", + "type": "number", + "format": "int64" + }, + "losingCount": { + "description": "size of the losing group, can be 0 if the winning group size equals k", + "type": "number", + "format": "int64" + }, + "closestOverallDistance": { + "description": "The lowest distance of any neighbor, regardless of whether they were in the winning or losing group", + "type": "number", + "format": "float32" + }, + "winningDistance": { + "description": "deprecated - do not use, to be removed in 0.23.0", + "type": "number", + "format": "float32" + }, + "meanWinningDistance": { + "description": "Mean distance of all neighbors from the winning group", + "type": "number", + "format": "float32" + }, + "closestWinningDistance": { + "description": "Closest distance of a neighbor from the winning group", + "type": "number", + "format": "float32" + }, + "closestLosingDistance": { + "description": "The lowest distance of a neighbor in the losing group. Optional. If k equals the size of the winning group, there is no losing group", + "type": "number", + "format": "float32", + "x-nullable": true + }, + "losingDistance": { + "description": "deprecated - do not use, to be removed in 0.23.0", + "type": "number", + "format": "float32", + "x-nullable": true + }, + "meanLosingDistance": { + "description": "Mean distance of all neighbors from the losing group. Optional. If k equals the size of the winning group, there is no losing group.", + "type": "number", + "format": "float32", + "x-nullable": true + } + } + }, + "BatchReference": { + "properties": { + "from": { + "description": "Long-form beacon-style URI to identify the source of the cross-ref including the property name. 
Should be in the form of weaviate://localhost////, where must be one of 'objects', 'objects' and and must represent the cross-ref property of source class to be used.", + "format": "uri", + "type": "string", + "example": "weaviate://localhost/Zoo/a5d09582-4239-4702-81c9-92a6e0122bb4/hasAnimals" + }, + "to": { + "description": "Short-form URI to point to the cross-ref. Should be in the form of weaviate://localhost/ for the example of a local cross-ref to an object", + "example": "weaviate://localhost/97525810-a9a5-4eb0-858a-71449aeb007f", + "format": "uri", + "type": "string" + }, + "tenant": { + "type": "string", + "description": "Name of the reference tenant." + } + } + }, + "BatchReferenceResponse": { + "allOf": [ + { + "$ref": "#/definitions/BatchReference" + }, + { + "properties": { + "result": { + "description": "Results for this specific reference.", + "format": "object", + "properties": { + "status": { + "type": "string", + "default": "SUCCESS", + "enum": [ + "SUCCESS", + "FAILED" + ] + }, + "errors": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + } + ], + "type": "object" + }, + "GeoCoordinates": { + "properties": { + "latitude": { + "description": "The latitude of the point on earth in decimal form", + "format": "float", + "type": "number", + "x-nullable": true + }, + "longitude": { + "description": "The longitude of the point on earth in decimal form", + "format": "float", + "type": "number", + "x-nullable": true + } + } + }, + "PhoneNumber": { + "properties": { + "input": { + "description": "The raw input as the phone number is present in your raw data set. It will be parsed into the standardized formats if valid.", + "type": "string" + }, + "internationalFormatted": { + "description": "Read-only. Parsed result in the international format (e.g. +49 123 ...)", + "type": "string" + }, + "defaultCountry": { + "description": "Optional. The ISO 3166-1 alpha-2 country code. 
This is used to figure out the correct countryCode and international format if only a national number (e.g. 0123 4567) is provided", + "type": "string" + }, + "countryCode": { + "description": "Read-only. The numerical country code (e.g. 49)", + "format": "uint64", + "type": "number" + }, + "national": { + "description": "Read-only. The numerical representation of the national part", + "format": "uint64", + "type": "number" + }, + "nationalFormatted": { + "description": "Read-only. Parsed result in the national format (e.g. 0123 456789)", + "type": "string" + }, + "valid": { + "description": "Read-only. Indicates whether the parsed number is a valid phone number", + "type": "boolean" + } + } + }, + "Object": { + "properties": { + "class": { + "description": "Class of the Object, defined in the schema.", + "type": "string" + }, + "vectorWeights": { + "$ref": "#/definitions/VectorWeights" + }, + "properties": { + "$ref": "#/definitions/PropertySchema" + }, + "id": { + "description": "ID of the Object.", + "format": "uuid", + "type": "string" + }, + "creationTimeUnix": { + "description": "(Response only) Timestamp of creation of this object in milliseconds since epoch UTC.", + "format": "int64", + "type": "integer" + }, + "lastUpdateTimeUnix": { + "description": "(Response only) Timestamp of the last object update in milliseconds since epoch UTC.", + "format": "int64", + "type": "integer" + }, + "vector": { + "description": "This field returns vectors associated with the Object. 
C11yVector, Vector or Vectors values are possible.", + "$ref": "#/definitions/C11yVector" + }, + "vectors": { + "description": "This field returns vectors associated with the Object.", + "$ref": "#/definitions/Vectors" + }, + "tenant": { + "description": "Name of the Objects tenant.", + "type": "string" + }, + "additional": { + "$ref": "#/definitions/AdditionalProperties" + } + }, + "type": "object" + }, + "ObjectsGetResponse": { + "allOf": [ + { + "$ref": "#/definitions/Object" + }, + { + "properties": { + "deprecations": { + "type": "array", + "items": { + "$ref": "#/definitions/Deprecation" + } + } + } + }, + { + "properties": { + "result": { + "description": "Results for this specific Object.", + "format": "object", + "properties": { + "status": { + "type": "string", + "default": "SUCCESS", + "enum": [ + "SUCCESS", + "FAILED" + ] + }, + "errors": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + } + ], + "type": "object" + }, + "BatchDelete": { + "type": "object", + "properties": { + "match": { + "description": "Outlines how to find the objects to be deleted.", + "type": "object", + "properties": { + "class": { + "description": "Class (name) which objects will be deleted.", + "type": "string", + "example": "City" + }, + "where": { + "description": "Filter to limit the objects to be deleted.", + "type": "object", + "$ref": "#/definitions/WhereFilter" + } + } + }, + "output": { + "description": "Controls the verbosity of the output, possible values are: \"minimal\", \"verbose\". Defaults to \"minimal\".", + "type": "string", + "default": "minimal" + }, + "deletionTimeUnixMilli": { + "description": "Timestamp of deletion in milliseconds since epoch UTC.", + "format": "int64", + "type": "integer", + "x-nullable": true + }, + "dryRun": { + "description": "If true, the call will show which objects would be matched using the specified filter without deleting any objects.

    Depending on the configured verbosity, you will either receive a count of affected objects, or a list of IDs.", + "type": "boolean", + "default": false + } + } + }, + "BatchDeleteResponse": { + "description": "Delete Objects response.", + "type": "object", + "properties": { + "match": { + "description": "Outlines how to find the objects to be deleted.", + "type": "object", + "properties": { + "class": { + "description": "Class (name) which objects will be deleted.", + "type": "string", + "example": "City" + }, + "where": { + "description": "Filter to limit the objects to be deleted.", + "type": "object", + "$ref": "#/definitions/WhereFilter" + } + } + }, + "output": { + "description": "Controls the verbosity of the output, possible values are: \"minimal\", \"verbose\". Defaults to \"minimal\".", + "type": "string", + "default": "minimal" + }, + "deletionTimeUnixMilli": { + "description": "Timestamp of deletion in milliseconds since epoch UTC.", + "format": "int64", + "type": "integer", + "x-nullable": true + }, + "dryRun": { + "description": "If true, objects will not be deleted yet, but merely listed. 
Defaults to false.", + "type": "boolean", + "default": false + }, + "results": { + "type": "object", + "properties": { + "matches": { + "description": "How many objects were matched by the filter.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "limit": { + "description": "The maximum number of objects that can be deleted in a single query, equals QUERY_MAXIMUM_RESULTS.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "successful": { + "description": "How many objects were successfully deleted in this round.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "failed": { + "description": "How many objects should have been deleted but could not be deleted.", + "type": "number", + "format": "int64", + "x-omitempty": false + }, + "objects": { + "description": "With output set to \"minimal\", only objects for which an error occurred will be described. Successfully deleted objects would be omitted. Output set to \"verbose\" will list all of the objects with their respective statuses.", + "type": "array", + "items": { + "description": "Results for this specific Object.", + "format": "object", + "properties": { + "id": { + "description": "ID of the Object.", + "format": "uuid", + "type": "string" + }, + "status": { + "type": "string", + "default": "SUCCESS", + "enum": [ + "SUCCESS", + "DRYRUN", + "FAILED" + ] + }, + "errors": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + } + } + } + }, + "ObjectsListResponse": { + "description": "List of Objects.", + "properties": { + "objects": { + "description": "The actual list of Objects.", + "items": { + "$ref": "#/definitions/Object" + }, + "type": "array" + }, + "deprecations": { + "type": "array", + "items": { + "$ref": "#/definitions/Deprecation" + } + }, + "totalResults": { + "description": "The total number of Objects for the query. 
The number of items in a response may be smaller due to paging.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "Classification": { + "description": "Manage classifications, trigger them and view status of past classifications.", + "properties": { + "id": { + "description": "ID to uniquely identify this classification run", + "format": "uuid", + "type": "string", + "example": "ee722219-b8ec-4db1-8f8d-5150bb1a9e0c" + }, + "class": { + "description": "class (name) which is used in this classification", + "type": "string", + "example": "City" + }, + "classifyProperties": { + "description": "which ref-property to set as part of the classification", + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "inCountry" + ] + }, + "basedOnProperties": { + "description": "base the text-based classification on these fields (of type text)", + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "description" + ] + }, + "status": { + "description": "status of this classification", + "type": "string", + "enum": [ + "running", + "completed", + "failed" + ], + "example": "running" + }, + "meta": { + "description": "additional meta information about the classification", + "type": "object", + "$ref": "#/definitions/ClassificationMeta" + }, + "type": { + "description": "which algorithm to use for classifications", + "type": "string" + }, + "settings": { + "description": "classification-type specific settings", + "type": "object" + }, + "error": { + "description": "error message if status == failed", + "type": "string", + "default": "", + "example": "classify xzy: something went wrong" + }, + "filters": { + "type": "object", + "properties": { + "sourceWhere": { + "description": "limit the objects to be classified", + "type": "object", + "$ref": "#/definitions/WhereFilter" + }, + "trainingSetWhere": { + "description": "Limit the training objects to be considered during the classification. 
Can only be used on types with explicit training sets, such as 'knn'", + "type": "object", + "$ref": "#/definitions/WhereFilter" + }, + "targetWhere": { + "description": "Limit the possible sources when using an algorithm which doesn't rely on training data, e.g. 'contextual'. When using an algorithm with a training set, such as 'knn', limit the training set instead", + "type": "object", + "$ref": "#/definitions/WhereFilter" + } + } + } + }, + "type": "object" + }, + "ClassificationMeta": { + "description": "Additional information about a specific classification", + "properties": { + "started": { + "description": "time when this classification was started", + "type": "string", + "format": "date-time", + "example": "2017-07-21T17:32:28Z" + }, + "completed": { + "description": "time when this classification finished", + "type": "string", + "format": "date-time", + "example": "2017-07-21T17:32:28Z" + }, + "count": { + "description": "number of objects which were taken into consideration for classification", + "type": "integer", + "example": 147 + }, + "countSucceeded": { + "description": "number of objects successfully classified", + "type": "integer", + "example": 140 + }, + "countFailed": { + "description": "number of objects which could not be classified - see error message for details", + "type": "integer", + "example": 7 + } + }, + "type": "object" + }, + "WhereFilter": { + "description": "Filter search results using a where filter", + "properties": { + "operands": { + "description": "combine multiple where filters, requires 'And' or 'Or' operator", + "type": "array", + "items": { + "$ref": "#/definitions/WhereFilter" + } + }, + "operator": { + "description": "operator to use", + "type": "string", + "enum": [ + "And", + "Or", + "Equal", + "Like", + "NotEqual", + "GreaterThan", + "GreaterThanEqual", + "LessThan", + "LessThanEqual", + "WithinGeoRange", + "IsNull", + "ContainsAny", + "ContainsAll", + "ContainsNone", + "Not" + ], + "example": "GreaterThanEqual" + }, + 
"path": { + "description": "path to the property currently being filtered", + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "inCity", + "City", + "name" + ] + }, + "valueInt": { + "description": "value as integer", + "type": "integer", + "format": "int64", + "example": 2000, + "x-nullable": true + }, + "valueNumber": { + "description": "value as number/float", + "type": "number", + "format": "float64", + "example": 3.14, + "x-nullable": true + }, + "valueBoolean": { + "description": "value as boolean", + "type": "boolean", + "example": false, + "x-nullable": true + }, + "valueString": { + "description": "value as text (deprecated as of v1.19; alias for valueText)", + "type": "string", + "example": "my search term", + "x-nullable": true + }, + "valueText": { + "description": "value as text", + "type": "string", + "example": "my search term", + "x-nullable": true + }, + "valueDate": { + "description": "value as date (as string)", + "type": "string", + "example": "TODO", + "x-nullable": true + }, + "valueIntArray": { + "description": "value as integer", + "type": "array", + "items": { + "type": "integer", + "format": "int64" + }, + "example": "[100, 200]", + "x-nullable": true, + "x-omitempty": true + }, + "valueNumberArray": { + "description": "value as number/float", + "type": "array", + "items": { + "type": "number", + "format": "float64" + }, + "example": [ + 3.14 + ], + "x-nullable": true, + "x-omitempty": true + }, + "valueBooleanArray": { + "description": "value as boolean", + "type": "array", + "items": { + "type": "boolean" + }, + "example": [ + true, + false + ], + "x-nullable": true, + "x-omitempty": true + }, + "valueStringArray": { + "description": "value as text (deprecated as of v1.19; alias for valueText)", + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "my search term" + ], + "x-nullable": true, + "x-omitempty": true + }, + "valueTextArray": { + "description": "value as text", + "type": "array", + 
"items": { + "type": "string" + }, + "example": [ + "my search term" + ], + "x-nullable": true, + "x-omitempty": true + }, + "valueDateArray": { + "description": "value as date (as string)", + "type": "array", + "items": { + "type": "string" + }, + "example": "TODO", + "x-nullable": true, + "x-omitempty": true + }, + "valueGeoRange": { + "description": "value as geo coordinates and distance", + "type": "object", + "$ref": "#/definitions/WhereFilterGeoRange", + "x-nullable": true + } + }, + "type": "object" + }, + "WhereFilterGeoRange": { + "type": "object", + "description": "filter within a distance of a georange", + "properties": { + "geoCoordinates": { + "$ref": "#/definitions/GeoCoordinates", + "x-nullable": false + }, + "distance": { + "type": "object", + "properties": { + "max": { + "type": "number", + "format": "float64" + } + } + } + } + }, + "Tenant": { + "type": "object", + "description": "attributes representing a single tenant within weaviate", + "properties": { + "name": { + "description": "The name of the tenant (required).", + "type": "string" + }, + "activityStatus": { + "description": "activity status of the tenant's shard. Optional for creating tenant (implicit `ACTIVE`) and required for updating tenant. For creation, allowed values are `ACTIVE` - tenant is fully active and `INACTIVE` - tenant is inactive; no actions can be performed on tenant, tenant's files are stored locally. For updating, `ACTIVE`, `INACTIVE` and also `OFFLOADED` - as INACTIVE, but files are stored on cloud storage. The following values are read-only and are set by the server for internal use: `OFFLOADING` - tenant is transitioning from ACTIVE/INACTIVE to OFFLOADED, `ONLOADING` - tenant is transitioning from OFFLOADED to ACTIVE/INACTIVE. 
We still accept deprecated names `HOT` (now `ACTIVE`), `COLD` (now `INACTIVE`), `FROZEN` (now `OFFLOADED`), `FREEZING` (now `OFFLOADING`), `UNFREEZING` (now `ONLOADING`).", + "type": "string", + "enum": [ + "ACTIVE", + "INACTIVE", + "OFFLOADED", + "OFFLOADING", + "ONLOADING", + "HOT", + "COLD", + "FROZEN", + "FREEZING", + "UNFREEZING" + ] + } + } + }, + "Alias": { + "type": "object", + "description": "Represents the mapping between an alias name and a collection. An alias provides an alternative name for accessing a collection.", + "properties": { + "alias": { + "description": "The unique name of the alias that serves as an alternative identifier for the collection.", + "type": "string" + }, + "class": { + "description": "The name of the collection (class) to which this alias is mapped.", + "type": "string" + } + } + }, + "AliasResponse": { + "description": "Response object containing a list of alias mappings.", + "type": "object", + "properties": { + "aliases": { + "description": "Array of alias objects, each containing an alias-to-collection mapping.", + "type": "array", + "items": { + "$ref": "#/definitions/Alias" + } + } + } + } + }, + "externalDocs": { + "url": "https://github.com/weaviate/weaviate" + }, + "info": { + "contact": { + "email": "hello@weaviate.io", + "name": "Weaviate", + "url": "https://github.com/weaviate" + }, + "description": "# Introduction\n Weaviate is an open source, AI-native vector database that helps developers create intuitive and reliable AI-powered applications. \n ### Base Path \nThe base path for the Weaviate server is structured as `[YOUR-WEAVIATE-HOST]:[PORT]/v1`. As an example, if you wish to access the `schema` endpoint on a local instance, you would navigate to `http://localhost:8080/v1/schema`. Ensure you replace `[YOUR-WEAVIATE-HOST]` and `[PORT]` with your actual server host and port number respectively. \n ### Questions? 
\nIf you have any comments or questions, please feel free to reach out to us at the community forum [https://forum.weaviate.io/](https://forum.weaviate.io/). \n### Issues? \nIf you find a bug or want to file a feature request, please open an issue on our GitHub repository for [Weaviate](https://github.com/weaviate/weaviate). \n### Want more documentation? \nFor a quickstart, code examples, concepts and more, please visit our [documentation page](https://weaviate.io/developers/weaviate).", + "title": "Weaviate", + "version": "1.33.0-rc.1" + }, + "parameters": { + "CommonAfterParameterQuery": { + "description": "A threshold UUID of the objects to retrieve after, using an UUID-based ordering. This object is not part of the set.

    Must be used with `class`, typically in conjunction with `limit`.

    Note `after` cannot be used with `offset` or `sort`.

    For a null value similar to offset=0, set an empty string in the request, i.e. `after=` or `after`.", + "in": "query", + "name": "after", + "required": false, + "type": "string" + }, + "CommonOffsetParameterQuery": { + "description": "The starting index of the result window. Note `offset` will retrieve `offset+limit` results and return `limit` results from the object with index `offset` onwards. Limited by the value of `QUERY_MAXIMUM_RESULTS`.

    Should be used in conjunction with `limit`.

    Cannot be used with `after`.", + "format": "int64", + "in": "query", + "name": "offset", + "required": false, + "type": "integer", + "default": 0 + }, + "CommonLimitParameterQuery": { + "description": "The maximum number of items to be returned per page. The default is 25 unless set otherwise as an environment variable.", + "format": "int64", + "in": "query", + "name": "limit", + "required": false, + "type": "integer" + }, + "CommonIncludeParameterQuery": { + "description": "Include additional information, such as classification infos. Allowed values include: classification, vector, interpretation", + "in": "query", + "name": "include", + "required": false, + "type": "string" + }, + "CommonConsistencyLevelParameterQuery": { + "description": "Determines how many replicas must acknowledge a request before it is considered successful", + "in": "query", + "name": "consistency_level", + "required": false, + "type": "string" + }, + "CommonTenantParameterQuery": { + "description": "Specifies the tenant in a request targeting a multi-tenant class", + "in": "query", + "name": "tenant", + "required": false, + "type": "string" + }, + "CommonNodeNameParameterQuery": { + "description": "The target node which should fulfill the request", + "in": "query", + "name": "node_name", + "required": false, + "type": "string" + }, + "CommonSortParameterQuery": { + "description": "Name(s) of the property to sort by - e.g. `city`, or `country,city`.", + "in": "query", + "name": "sort", + "required": false, + "type": "string" + }, + "CommonOrderParameterQuery": { + "description": "Order parameter to tell how to order (asc or desc) data within given field. Should be used in conjunction with `sort` parameter. 
If providing multiple `sort` values, provide multiple `order` values in corresponding order, e.g.: `sort=author_name,title&order=desc,asc`.", + "in": "query", + "name": "order", + "required": false, + "type": "string" + }, + "CommonClassParameterQuery": { + "description": "The collection from which to query objects.

    Note that if `class` is not provided, the response will not include any objects.", + "in": "query", + "name": "class", + "required": false, + "type": "string" + }, + "CommonOutputVerbosityParameterQuery": { + "description": "Controls the verbosity of the output, possible values are: \"minimal\", \"verbose\". Defaults to \"minimal\".", + "in": "query", + "name": "output", + "required": false, + "type": "string", + "default": "minimal" + } + }, + "paths": { + "/": { + "get": { + "description": "Get links to other endpoints to help discover the REST API", + "summary": "List available endpoints", + "operationId": "weaviate.root", + "responses": { + "200": { + "description": "Weaviate is alive and ready to serve content", + "schema": { + "type": "object", + "properties": { + "links": { + "type": "array", + "items": { + "$ref": "#/definitions/Link" + } + } + } + } + } + } + } + }, + "/.well-known/live": { + "get": { + "summary": "Get application liveness.", + "description": "Determines whether the application is alive. Can be used for kubernetes liveness probe", + "operationId": "weaviate.wellknown.liveness", + "responses": { + "200": { + "description": "The application is able to respond to HTTP requests" + } + } + } + }, + "/.well-known/ready": { + "get": { + "summary": "Get application readiness.", + "description": "Determines whether the application is ready to receive traffic. Can be used for kubernetes readiness probe.", + "operationId": "weaviate.wellknown.readiness", + "responses": { + "200": { + "description": "The application has completed its start-up routine and is ready to accept traffic." + }, + "503": { + "description": "The application is currently not able to serve traffic. If other horizontal replicas of weaviate are available and they are capable of receiving traffic, all traffic should be redirected there instead." 
+ } + } + } + }, + "/.well-known/openid-configuration": { + "get": { + "description": "OIDC Discovery page, redirects to the token issuer if one is configured", + "responses": { + "200": { + "description": "Successful response, inspect body", + "schema": { + "type": "object", + "properties": { + "href": { + "description": "The Location to redirect to", + "type": "string" + }, + "clientId": { + "description": "OAuth Client ID", + "type": "string" + }, + "scopes": { + "description": "OAuth Scopes", + "type": "array", + "items": { + "type": "string" + }, + "x-omitempty": true + } + } + } + }, + "404": { + "description": "Not found, no oidc provider present" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "OIDC discovery information if OIDC auth is enabled", + "tags": [ + "well-known", + "oidc", + "discovery" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + } + }, + "/replication/replicate": { + "post": { + "summary": "Initiate a replica movement", + "description": "Begins an asynchronous operation to move or copy a specific shard replica from its current node to a designated target node. The operation involves copying data, synchronizing, and potentially decommissioning the source replica.", + "operationId": "replicate", + "x-serviceIds": [ + "weaviate.replication.replicate" + ], + "tags": [ + "replication" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ReplicationReplicateReplicaRequest" + } + } + ], + "responses": { + "200": { + "description": "Replication operation registered successfully. 
ID of the operation is returned.", + "schema": { + "$ref": "#/definitions/ReplicationReplicateReplicaResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "delete": { + "summary": "Schedules all replication operations for deletion across all collections, shards, and nodes.", + "operationId": "deleteAllReplications", + "x-serviceIds": [ + "weaviate.replication.deleteAllReplications" + ], + "tags": [ + "replication" + ], + "responses": { + "204": { + "description": "Replication operation registered successfully" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/replication/replicate/force-delete": { + "post": { + "summary": "Force delete replication operations", + "description": "USE AT OWN RISK! Synchronously force delete operations from the FSM. This will not perform any checks on which state the operation is in so may lead to data corruption or loss. It is recommended to first scale the number of replication engine workers to 0 before calling this endpoint to ensure no operations are in-flight.", + "operationId": "forceDeleteReplications", + "x-serviceIds": [ + "weaviate.replication.forceDeleteReplications" + ], + "tags": [ + "replication" + ], + "parameters": [ + { + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/ReplicationReplicateForceDeleteRequest" + } + } + ], + "responses": { + "200": { + "description": "Replication operations force deleted successfully.", + "schema": { + "$ref": "#/definitions/ReplicationReplicateForceDeleteResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/replication/replicate/{id}": { + "get": { + "summary": "Retrieve a replication operation", + "description": "Fetches the current status and detailed information for a specific replication operation, identified by its unique ID. Optionally includes historical data of the operation's progress if requested.", + "operationId": "replicationDetails", + "x-serviceIds": [ + "weaviate.replication.replicate.details" + ], + "tags": [ + "replication" + ], + "parameters": [ + { + "name": "id", + "in": "path", + "format": "uuid", + "description": "The ID of the replication operation to get details for.", + "required": true, + "type": "string" + }, + { + "name": "includeHistory", + "in": "query", + "description": "Whether to include the history of the replication operation.", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "The details of the replication operation.", + "schema": { + "$ref": "#/definitions/ReplicationReplicateDetailsReplicaResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Shard replica operation not found." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "delete": { + "summary": "Delete a replication operation", + "description": "Removes a specific replication operation. If the operation is currently active, it will be cancelled and its resources cleaned up before the operation is deleted.", + "operationId": "deleteReplication", + "x-serviceIds": [ + "weaviate.replication.replicate.delete" + ], + "tags": [ + "replication" + ], + "parameters": [ + { + "name": "id", + "in": "path", + "format": "uuid", + "description": "The ID of the replication operation to delete.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Shard replica operation not found." + }, + "409": { + "description": "The operation is not in a deletable state, e.g. it is a MOVE op in the DEHYDRATING state.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/replication/replicate/list": { + "get": { + "summary": "List replication operations", + "description": "Retrieves a list of currently registered replication operations, optionally filtered by collection, shard, or node ID.", + "operationId": "listReplication", + "x-serviceIds": [ + "weaviate.replication.replicate.details" + ], + "tags": [ + "replication" + ], + "parameters": [ + { + "name": "targetNode", + "in": "query", + "description": "The name of the target node to get details for.", + "required": false, + "type": "string" + }, + { + "name": "collection", + "in": "query", + "description": "The name of the collection to get details for.", + "required": false, + "type": "string" + }, + { + "name": "shard", + "in": "query", + "description": "The shard to get details for.", + "required": false, + "type": "string" + }, + { + "name": "includeHistory", + "in": "query", + "description": "Whether to include the history of the replication operation.", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "The details of the replication operations.", + "schema": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/ReplicationReplicateDetailsReplicaResponse" + } + } + }, + "400": { + "description": "Bad request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/replication/replicate/{id}/cancel": { + "post": { + "summary": "Cancel a replication operation", + "description": "Requests the cancellation of an active replication operation identified by its ID. The operation will be stopped, but its record will remain in the 'CANCELLED' state (can't be resumed) and will not be automatically deleted.", + "operationId": "cancelReplication", + "x-serviceIds": [ + "weaviate.replication.replicate.cancel" + ], + "tags": [ + "replication" + ], + "parameters": [ + { + "name": "id", + "in": "path", + "format": "uuid", + "description": "The ID of the replication operation to cancel.", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully cancelled." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Shard replica operation not found." + }, + "409": { + "description": "The operation is not in a cancellable state, e.g. it is READY or is a MOVE op in the DEHYDRATING state.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/replication/sharding-state": { + "get": { + "summary": "Get sharding state", + "description": "Fetches the current sharding state, including replica locations and statuses, for all collections or a specified collection. If a shard name is provided along with a collection, the state for that specific shard is returned.", + "operationId": "getCollectionShardingState", + "x-serviceIds": [ + "weaviate.replication.shardingstate.collection.get" + ], + "tags": [ + "replication" + ], + "parameters": [ + { + "name": "collection", + "in": "query", + "description": "The collection name to get the sharding state for.", + "required": false, + "type": "string" + }, + { + "name": "shard", + "in": "query", + "description": "The shard to get the sharding state for.", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved sharding state.", + "schema": { + "$ref": "#/definitions/ReplicationShardingStateResponse" + } + }, + "400": { + "description": "Bad request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Collection or shard not found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/users/own-info": { + "get": { + "summary": "get info relevant to own user, e.g. username, roles", + "operationId": "getOwnInfo", + "x-serviceIds": [ + "weaviate.users.get.own-info" + ], + "tags": [ + "users" + ], + "responses": { + "200": { + "description": "Info about the user", + "schema": { + "$ref": "#/definitions/UserOwnInfo" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "501": { + "description": "Replica movement operations are disabled.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/users/db": { + "get": { + "summary": "list all db users", + "operationId": "listAllUsers", + "x-serviceIds": [ + "weaviate.users.db.list_all" + ], + "tags": [ + "users" + ], + "parameters": [ + { + "description": "Whether to include the last used time of the users", + "in": "query", + "name": "includeLastUsedTime", + "required": false, + "type": "boolean", + "default": false + } + ], + "responses": { + "200": { + "description": "Info about the users", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/DBUserInfo" + } + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/users/db/{user_id}": { + "get": { + "summary": "get info relevant to user, e.g. username, roles", + "operationId": "getUserInfo", + "x-serviceIds": [ + "weaviate.users.db.get" + ], + "tags": [ + "users" + ], + "parameters": [ + { + "description": "user id", + "in": "path", + "name": "user_id", + "required": true, + "type": "string" + }, + { + "description": "Whether to include the last used time of the given user", + "in": "query", + "name": "includeLastUsedTime", + "required": false, + "type": "boolean", + "default": false + } + ], + "responses": { + "200": { + "description": "Info about the user", + "schema": { + "$ref": "#/definitions/DBUserInfo" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "post": { + "summary": "create new user", + "operationId": "createUser", + "x-serviceIds": [ + "weaviate.users.db.create" + ], + "tags": [ + "users" + ], + "parameters": [ + { + "description": "user id", + "in": "path", + "name": "user_id", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": false, + "schema": { + "type": "object", + "properties": { + "import": { + "type":"boolean", + "description": "EXPERIMENTAL, DONT USE. THIS WILL BE REMOVED AGAIN. 
- import api key from static user", + "default": false + }, + "createTime": { + "type":"string", + "format": "date-time", + "description": "EXPERIMENTAL, DONT USE. THIS WILL BE REMOVED AGAIN. - set the given time as creation time" + } + } + } + } + ], + "responses": { + "201": { + "description": "User created successfully", + "schema": { + "$ref": "#/definitions/UserApiKey" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "409": { + "description": "User already exists", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "delete": { + "summary": "Delete User", + "operationId": "deleteUser", + "x-serviceIds": [ + "weaviate.users.db.delete" + ], + "tags": [ + "users" + ], + "parameters": [ + { + "description": "user name", + "in": "path", + "name": "user_id", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/users/db/{user_id}/rotate-key": { + "post": { + "summary": "rotate user api key", + "operationId": "rotateUserApiKey", + "x-serviceIds": [ + "weaviate.users.db.rotateApiKey" + ], + "tags": [ + "users" + ], + "parameters": [ + { + "description": "user id", + "in": "path", + "name": "user_id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "ApiKey successfully changed", + "schema": { + "$ref": "#/definitions/UserApiKey" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/users/db/{user_id}/activate": { + "post": { + "summary": "activate a deactivated user", + "operationId": "activateUser", + "x-serviceIds": [ + "weaviate.users.db.activateUser" + ], + "tags": [ + "users" + ], + "parameters": [ + { + "description": "user id", + "in": "path", + "name": "user_id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "User successfully activated" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "409": { + "description": "user already activated" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/users/db/{user_id}/deactivate": { + "post": { + "summary": "deactivate a user", + "operationId": "deactivateUser", + "x-serviceIds": [ + "weaviate.users.db.deactivateUser" + ], + "tags": [ + "users" + ], + "parameters": [ + { + "description": "user id", + "in": "path", + "name": "user_id", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": false, + "schema": { + "type": "object", + "properties": { + "revoke_key": { + "type": "boolean", + "description": "if the key should be revoked when deactivating the user", + "default": false + } + } + } + } + ], + "responses": { + "200": { + "description": "users successfully deactivated" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "user not found" + }, + "409": { + "description": "user already deactivated" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/roles": { + "get": { + "summary": "Get all roles", + "operationId": "getRoles", + "x-serviceIds": [ + "weaviate.authz.get.roles" + ], + "tags": [ + "authz" + ], + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/RolesListResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "post": { + "summary": "create new role", + "operationId": "createRole", + "x-serviceIds": [ + "weaviate.authz.create.role" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Role" + } + } + ], + "responses": { + "201": { + "description": "Role created successfully" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "409": { + "description": "Role already exists", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/roles/{id}/add-permissions": { + "post": { + "summary": "Add permission to a given role.", + "operationId": "addPermissions", + "x-serviceIds": [ + "weaviate.authz.add.role.permissions" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "description": "role name", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "permissions": { + "type": "array", + "items": { + "$ref": "#/definitions/Permission" + }, + "description": "permissions to be added to the role" + } + }, + "required": [ + "permissions" + ] + } + } + ], + "responses": { + "200": { + "description": "Permissions added successfully" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/roles/{id}/remove-permissions": { + "post": { + "summary": "Remove permissions from a role. If this results in an empty role, the role will be deleted.", + "operationId": "removePermissions", + "x-serviceIds": [ + "weaviate.authz.remove.role.permissions" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "description": "role name", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "permissions": { + "type": "array", + "items": { + "$ref": "#/definitions/Permission" + }, + "description": "permissions to remove from the role" + } + }, + "required": [ + "permissions" + ] + } + } + ], + "responses": { + "200": { + "description": "Permissions removed successfully" + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/roles/{id}": { + "get": { + "summary": "Get a role", + "operationId": "getRole", + "x-serviceIds": [ + "weaviate.authz.get.role" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "description": "role name", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/Role" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "delete": { + "summary": "Delete role", + "operationId": "deleteRole", + "x-serviceIds": [ + "weaviate.authz.delete.role" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "description": "role name", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/roles/{id}/has-permission": { + "post": { + "summary": "Check whether role possesses this permission.", + "operationId": "hasPermission", + "x-serviceIds": [ + "weaviate.authz.has.role.permission" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "description": "role name", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Permission" + } + } + ], + "responses": { + "200": { + "description": "Permission check was successful", + "schema": { + "type": "boolean" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/roles/{id}/users": { + "get": { + "deprecated": true, + "summary": "get users (db + OIDC) assigned to role. 
Deprecated, will be removed when 1.29 is not supported anymore", + "operationId": "getUsersForRoleDeprecated", + "x-serviceIds": [ + "weaviate.authz.get.roles.users" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "description": "role name", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Users assigned to this role", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/roles/{id}/user-assignments": { + "get": { + "summary": "get users assigned to role", + "operationId": "getUsersForRole", + "x-serviceIds": [ + "weaviate.authz.get.roles.users" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "description": "role name", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Users assigned to this role", + "schema": { + "type": "array", + "items": { + "type": "object", + "properties": { + "userId": { + "type": "string" + }, + "userType": { + "$ref": "#/definitions/UserTypeOutput" + } + }, + "required": [ + "userId", + "userType" + ] + } + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/roles/{id}/group-assignments": { + "get": { + "summary": "Get groups that have a specific role assigned", + "description": "Retrieves a list of all groups that have been assigned a specific role, identified by its name.", + "operationId": "getGroupsForRole", + "x-serviceIds": [ + "weaviate.authz.get.roles.groups" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "description": "The unique name of the role.", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the list of groups that have the role assigned.", + "schema": { + "type": "array", + "items": { + "type": "object", + "properties": { + "groupId": { + "type": "string" + }, + "groupType": { + "$ref": "#/definitions/GroupType" + } + }, + "required": [ + "name", + "groupType" + ] + } + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "The specified role was not found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/users/{id}/roles": { + "get": { + "deprecated": true, + "summary": "get roles assigned to user (DB + OIDC). 
Deprecated, will be removed when 1.29 is not supported anymore", + "operationId": "getRolesForUserDeprecated", + "x-serviceIds": [ + "weaviate.authz.get.users.roles" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "description": "user name", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Role assigned users", + "schema": { + "$ref": "#/definitions/RolesListResponse" + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found for user" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/users/{id}/roles/{userType}": { + "get": { + "summary": "get roles assigned to user", + "operationId": "getRolesForUser", + "x-serviceIds": [ + "weaviate.authz.get.users.roles" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "description": "user name", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "in": "path", + "name": "userType", + "required": true, + "type": "string", + "enum": [ + "oidc", + "db" + ], + "description": "The type of user" + }, + { + "in": "query", + "name": "includeFullRoles", + "required": false, + "type": "boolean", + "default": false, + "description": "Whether to include detailed role information needed the roles permission" + } + ], + "responses": { + "200": { + "description": "Role assigned users", + "schema": { + "$ref": "#/definitions/RolesListResponse" + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "no role found for user" + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/users/{id}/assign": { + "post": { + "summary": "Assign a role to a user", + "operationId": "assignRoleToUser", + "x-serviceIds": [ + "weaviate.authz.assign.role.user" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "description": "user name", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "roles": { + "type": "array", + "description": "the roles that assigned to user", + "items": { + "type": "string" + } + }, + "userType": { + "$ref": "#/definitions/UserTypeInput" + } + } + } + } + ], + "responses": { + "200": { + "description": "Role assigned successfully" + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "role or user is not found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/users/{id}/revoke": { + "post": { + "summary": "Revoke a role from a user", + "operationId": "revokeRoleFromUser", + "x-serviceIds": [ + "weaviate.authz.revoke.role.user" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "description": "user name", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "roles": { + "type": "array", + "description": "the roles that revoked from the key or user", + "items": { + "type": "string" + } + }, + "userType": { + "$ref": "#/definitions/UserTypeInput" + } + } + } + } + ], + "responses": { + "200": { + "description": "Role revoked successfully" + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "role or user is not found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/groups/{id}/assign": { + "post": { + "summary": "Assign a role to a group", + "operationId": "assignRoleToGroup", + "x-serviceIds": [ + "weaviate.authz.assign.role" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "description": "group name", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "roles": { + "type": "array", + "description": "the roles that assigned to group", + "items": { + "type": "string" + } + }, + "groupType": { + "$ref": "#/definitions/GroupType" + } + } + } + } + ], + "responses": { + "200": { + "description": "Role assigned successfully" + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "role or group is not found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/groups/{id}/revoke": { + "post": { + "summary": "Revoke a role from a group", + "operationId": "revokeRoleFromGroup", + "x-serviceIds": [ + "weaviate.authz.revoke.role.group" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "description": "group name", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "roles": { + "type": "array", + "description": "the roles that revoked from group", + "items": { + "type": "string" + } + }, + "groupType": { + "$ref": "#/definitions/GroupType" + } + } + } + } + ], + "responses": { + "200": { + "description": "Role revoked successfully" + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "role or group is not found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/groups/{id}/roles/{groupType}": { + "get": { + "summary": "Get roles assigned to a specific group", + "description": "Retrieves a list of all roles assigned to a specific group. 
The group must be identified by both its name (`id`) and its type (currently only `oidc` is supported).
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/authz/groups/{groupType}": { + "get": { + "summary": "List all groups of a specific type", + "description": "Retrieves a list of all available group names for a specified group type (`oidc` or `db`).", + "operationId": "getGroups", + "x-serviceIds": [ + "weaviate.authz.get.groups" + ], + "tags": [ + "authz" + ], + "parameters": [ + { + "in": "path", + "name": "groupType", + "required": true, + "type": "string", + "enum": [ + "oidc" + ], + "description": "The type of group to retrieve." + } + ], + "responses": { + "200": { + "description": "A list of group names for the specified type.", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "400": { + "description": "Bad request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "The request syntax is correct, but the server couldn't process it due to semantic issues.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/objects": { + "get": { + "description": "Lists all Objects in reverse order of creation, owned by the user that belongs to the used token.", + "operationId": "objects.list", + "x-serviceIds": [ + "weaviate.local.query" + ], + "parameters": [ + { + "$ref": "#/parameters/CommonAfterParameterQuery" + }, + { + "$ref": "#/parameters/CommonOffsetParameterQuery" + }, + { + "$ref": "#/parameters/CommonLimitParameterQuery" + }, + { + "$ref": "#/parameters/CommonIncludeParameterQuery" + }, + { + "$ref": "#/parameters/CommonSortParameterQuery" + }, + { + "$ref": "#/parameters/CommonOrderParameterQuery" + }, + { + "$ref": "#/parameters/CommonClassParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successful response.

    If `class` is not provided, the response will not include any objects.", + "schema": { + "$ref": "#/definitions/ObjectsListResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Get a list of Objects.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + }, + "post": { + "description": "Create a new object.

    Meta-data and schema values are validated.

    **Note: Use `/batch` for importing many objects**:
    If you plan on importing a large number of objects, it's much more efficient to use the `/batch` endpoint. Otherwise, sending multiple single requests sequentially would incur a large performance penalty.

    **Note: idempotence of `/objects`**:
    POST /objects will fail if an id is provided which already exists in the class. To update an existing object with the objects endpoint, use the PUT or PATCH method.", + "operationId": "objects.create", + "x-serviceIds": [ + "weaviate.local.add" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + } + ], + "responses": { + "200": { + "description": "Object created.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Create a new object.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + } + }, + "/objects/{id}": { + "delete": { + "description": "Deletes an object from the database based on its UUID.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead.", + "operationId": "objects.delete", + "x-serviceIds": [ + "weaviate.local.manipulate" + ], + "parameters": [ + { + "description": "Unique ID of the Object.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Delete an Object based on its UUID.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": true, + "x-available-in-websocket": true, + "deprecated": true + }, + "get": { + "description": "Get a specific object based on its UUID. Also available as Websocket bus.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead.", + "operationId": "objects.get", + "x-serviceIds": [ + "weaviate.local.query" + ], + "parameters": [ + { + "description": "Unique ID of the Object.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "$ref": "#/parameters/CommonIncludeParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Get a specific Object based on its UUID.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "deprecated": true + }, + "patch": { + "description": "Update an object based on its UUID (using patch semantics). This method supports json-merge style patch semantics (RFC 7396). Provided meta-data and schema values are validated. LastUpdateTime is set to the time this function is called.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead.", + "operationId": "objects.patch", + "x-serviceIds": [ + "weaviate.local.manipulate" + ], + "parameters": [ + { + "description": "Unique ID of the Object.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "description": "RFC 7396-style patch, the body contains the object to merge into the existing object.", + "in": "body", + "name": "body", + "required": false, + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + } + ], + "responses": { + "204": { + "description": "Successfully applied. No content provided." + }, + "400": { + "description": "The patch-JSON is malformed." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "The patch-JSON is valid but unprocessable.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Update an Object based on its UUID (using patch semantics).", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "deprecated": true + }, + "put": { + "description": "Updates an object based on its UUID. Given meta-data and schema values are validated. LastUpdateTime is set to the time this function is called.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead.", + "operationId": "objects.update", + "x-serviceIds": [ + "weaviate.local.manipulate" + ], + "parameters": [ + { + "description": "Unique ID of the Object.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successfully received.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Update an Object based on its UUID.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "deprecated": true + }, + "head": { + "description": "Checks if an object exists in the system based on its UUID.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}` endpoint instead.", + "operationId": "objects.head", + "x-serviceIds": [ + "weaviate.objects.check" + ], + "parameters": [ + { + "description": "Unique ID of the Object.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Object exists." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Object doesn't exist." + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Checks Object's existence based on its UUID.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": true, + "x-available-in-websocket": true, + "deprecated": true + } + }, + "/objects/{className}/{id}": { + "get": { + "description": "Get a data object based on its collection and UUID.", + "operationId": "objects.class.get", + "x-serviceIds": [ + "weaviate.local.query" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "description": "Unique ID of the Object.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "$ref": "#/parameters/CommonIncludeParameterQuery" + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonNodeNameParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "400": { + "description": "Malformed 
request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "Request is well-formed (i.e., syntactically correct), but erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Get a specific Object based on its class and UUID. Also available as Websocket bus.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + }, + "delete": { + "description": "Delete an object based on its collection and UUID.

    Note: For backward compatibility, beacons also support an older, deprecated format without the collection name. As a result, when deleting a reference, the beacon specified has to match the beacon to be deleted exactly. In other words, if a beacon is present using the old format (without collection name) you also need to specify it the same way.

    In the beacon format, you need to always use `localhost` as the host, rather than the actual hostname. `localhost` here refers to the fact that the beacon's target is on the same Weaviate instance, as opposed to a foreign instance.", + "operationId": "objects.class.delete", + "x-serviceIds": [ + "weaviate.local.manipulate" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "description": "Unique ID of the Object.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "Request is well-formed (i.e., syntactically correct), but erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Delete object based on its class and UUID.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": true, + "x-available-in-websocket": true + }, + "put": { + "description": "Update an object based on its uuid and collection. 
This (`put`) method replaces the object with the provided object.", + "operationId": "objects.class.put", + "x-serviceIds": [ + "weaviate.local.manipulate" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "description": "The uuid of the data object to update.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successfully received.", + "schema": { + "$ref": "#/definitions/Object" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Update a class object based on its uuid", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + }, + "patch": { + "description": "Update an individual data object based on its class and uuid. This method supports json-merge style patch semantics (RFC 7396). Provided meta-data and schema values are validated. 
LastUpdateTime is set to the time this function is called.", + "operationId": "objects.class.patch", + "x-serviceIds": [ + "weaviate.local.manipulate" + ], + "parameters": [ + { + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "description": "The uuid of the data object to update.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "description": "RFC 7396-style patch, the body contains the object to merge into the existing object.", + "in": "body", + "name": "body", + "required": false, + "schema": { + "$ref": "#/definitions/Object" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + } + ], + "responses": { + "204": { + "description": "Successfully applied. No content provided." + }, + "400": { + "description": "The patch-JSON is malformed.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found." + }, + "422": { + "description": "The patch-JSON is valid but unprocessable.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Update an Object based on its UUID (using patch semantics).", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + }, + "head": { + "description": "Checks if a data object exists based on its collection and uuid without retrieving it.

    Internally it skips reading the object from disk other than checking if it is present. Thus it does not use resources on marshalling, parsing, etc., and is faster. Note the resulting HTTP request has no body; the existence of an object is indicated solely by the status code.", + "operationId": "objects.class.head", + "x-serviceIds": [ + "weaviate.local.manipulate" + ], + "parameters": [ + { + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "description": "The uuid of the data object", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "204": { + "description": "Object exists." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Object doesn't exist." + }, + "422": { + "description": "Request is well-formed (i.e., syntactically correct), but erroneous.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Checks object's existence based on its class and uuid.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": true, + "x-available-in-websocket": true + } + }, + "/objects/{id}/references/{propertyName}": { + "post": { + "description": "Add a cross-reference.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}/references/{propertyName}` endpoint instead.", + "operationId": "objects.references.create", + "x-serviceIds": [ + "weaviate.local.manipulate" + ], + "parameters": [ + { + "description": "Unique ID of the Object.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "description": "Unique name of the property related to the Object.", + "in": "path", + "name": "propertyName", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/SingleRef" + } + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successfully added the reference." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Add a single reference to a class-property.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "deprecated": true + }, + "put": { + "description": "Replace all references in cross-reference property of an object.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}/references/{propertyName}` endpoint instead.", + "operationId": "objects.references.update", + "x-serviceIds": [ + "weaviate.local.manipulate" + ], + "parameters": [ + { + "description": "Unique ID of the Object.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "description": "Unique name of the property related to the Object.", + "in": "path", + "name": "propertyName", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/MultipleRef" + } + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successfully replaced all the references." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Replace all references to a class-property.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "deprecated": true + }, + "delete": { + "description": "Delete the single reference that is given in the body from the list of references that this property has.

    **Note**: This endpoint is deprecated and will be removed in a future version. Use the `/objects/{className}/{id}/references/{propertyName}` endpoint instead.", + "operationId": "objects.references.delete", + "x-serviceIds": [ + "weaviate.local.manipulate" + ], + "parameters": [ + { + "description": "Unique ID of the Object.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "description": "Unique name of the property related to the Object.", + "in": "path", + "name": "propertyName", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/SingleRef" + } + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Delete a single reference from the list of references.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false, + "deprecated": true + } + }, + "/objects/{className}/{id}/references/{propertyName}": { + "post": { + "description": "Add a single reference to an object. 
This adds a reference to the array of cross-references of the given property in the source object specified by its collection name and id", + "operationId": "objects.class.references.create", + "x-serviceIds": [ + "weaviate.local.manipulate" + ], + "parameters": [ + { + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "description": "Unique ID of the Object.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "description": "Unique name of the property related to the Object.", + "in": "path", + "name": "propertyName", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/SingleRef" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successfully added the reference." + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Source object doesn't exist." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Add a single reference to a class-property.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + }, + "put": { + "description": "Replace **all** references in cross-reference property of an object.", + "operationId": "objects.class.references.put", + "x-serviceIds": [ + "weaviate.local.manipulate" + ], + "parameters": [ + { + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "description": "Unique ID of the Object.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "description": "Unique name of the property related to the Object.", + "in": "path", + "name": "propertyName", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/MultipleRef" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "200": { + "description": "Successfully replaced all the references." + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Source object doesn't exist." + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. 
Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Replace all references to a class-property.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + }, + "delete": { + "description": "Delete the single reference that is given in the body from the list of references that this property has.", + "operationId": "objects.class.references.delete", + "x-serviceIds": [ + "weaviate.local.manipulate" + ], + "parameters": [ + { + "description": "The class name as defined in the schema", + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "description": "Unique ID of the Object.", + "format": "uuid", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "description": "Unique name of the property related to the Object.", + "in": "path", + "name": "propertyName", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/SingleRef" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Successful query result but no resource was found.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the property exists or that it is a class?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Delete a single reference from the list of references.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + } + }, + "/objects/validate": { + "post": { + "description": "Validate an object's schema and meta-data without creating it.

    If the schema of the object is valid, the request should return nothing with a plain RESTful request. Otherwise, an error object will be returned.", + "operationId": "objects.validate", + "x-serviceIds": [ + "weaviate.local.query.meta" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Object" + } + } + ], + "responses": { + "200": { + "description": "Successfully validated." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Validate an Object based on a schema.", + "tags": [ + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + } + }, + "/batch/objects": { + "post": { + "description": "Create new objects in bulk.

    Meta-data and schema values are validated.

    **Note: idempotence of `/batch/objects`**:
    `POST /batch/objects` is idempotent, and will overwrite any existing object given the same id.", + "operationId": "batch.objects.create", + "x-serviceIds": [ + "weaviate.local.add" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "fields": { + "description": "Define which fields need to be returned. Default value is ALL", + "type": "array", + "items": { + "type": "string", + "default": "ALL", + "enum": [ + "ALL", + "class", + "schema", + "id", + "creationTimeUnix" + ] + } + }, + "objects": { + "type": "array", + "items": { + "$ref": "#/definitions/Object" + } + } + } + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + } + ], + "responses": { + "200": { + "description": "Request succeeded, see response body to get detailed information about each batched item.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/ObjectsGetResponse" + } + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Creates new Objects based on a Object template as a batch.", + "tags": [ + "batch", + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + }, + "delete": { + "description": "Batch delete objects that match a particular filter.

    The request body takes a single `where` filter and will delete all objects matched.

    Note that there is a limit to the number of objects to be deleted at once using this filter, in order to protect against unexpected memory surges and very-long-running requests. The default limit is 10,000 and may be configured by setting the `QUERY_MAXIMUM_RESULTS` environment variable.

    Objects are deleted in the same order that they would be returned in an equivalent Get query. To delete more objects than the limit, run the same query multiple times.", + "operationId": "batch.objects.delete", + "x-serviceIds": [ + "weaviate.local.manipulate" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/BatchDelete" + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + }, + { + "$ref": "#/parameters/CommonTenantParameterQuery" + } + ], + "responses": { + "200": { + "description": "Request succeeded, see response body to get detailed information about each batched item.", + "schema": { + "$ref": "#/definitions/BatchDeleteResponse" + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Deletes Objects based on a match filter as a batch.", + "tags": [ + "batch", + "objects" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + } + }, + "/batch/references": { + "post": { + "description": "Batch create cross-references between collection items (objects) in bulk.", + "operationId": "batch.references.create", + "x-serviceIds": [ + "weaviate.local.add" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "A list of references to be batched. The ideal size depends on the used database connector. Please see the documentation of the used connector for help", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/BatchReference" + } + } + }, + { + "$ref": "#/parameters/CommonConsistencyLevelParameterQuery" + } + ], + "responses": { + "200": { + "description": "Request Successful. Warning: A successful request does not guarantee that every batched reference was successfully created. Inspect the response body to see which references succeeded and which failed.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/BatchReferenceResponse" + } + } + }, + "400": { + "description": "Malformed request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Creates new Cross-References between arbitrary classes in bulk.", + "tags": [ + "batch", + "references" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + } + }, + "/graphql": { + "post": { + "description": "Get a response based on a GraphQL query", + "operationId": "graphql.post", + "x-serviceIds": [ + "weaviate.local.query", + "weaviate.local.query.meta", + "weaviate.network.query", + "weaviate.network.query.meta" + ], + "parameters": [ + { + "description": "The GraphQL query request parameters.", + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/GraphQLQuery" + } + } + ], + "responses": { + "200": { + "description": "Successful query (with select).", + "schema": { + "$ref": "#/definitions/GraphQLResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Get a response based on GraphQL", + "tags": [ + "graphql" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + } + }, + "/graphql/batch": { + "post": { + "description": "Perform a batched GraphQL query", + "operationId": "graphql.batch", + "x-serviceIds": [ + "weaviate.local.query", + "weaviate.local.query.meta", + "weaviate.network.query", + "weaviate.network.query.meta" + ], + "parameters": [ + { + "description": "The GraphQL queries.", + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/GraphQLQueries" + } + } + ], + "responses": { + "200": { + "description": "Successful query (with select).", + "schema": { + "$ref": "#/definitions/GraphQLResponses" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Request body is well-formed (i.e., syntactically correct), but semantically erroneous. Are you sure the class is defined in the configuration file?", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Get a response based on GraphQL.", + "tags": [ + "graphql" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + } + }, + "/meta": { + "get": { + "description": "Returns meta information about the server. 
Can be used to provide information to another Weaviate instance that wants to interact with the current instance.", + "operationId": "meta.get", + "x-serviceIds": [ + "weaviate.local.query.meta" + ], + "responses": { + "200": { + "description": "Successful response.", + "schema": { + "$ref": "#/definitions/Meta" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Returns meta information of the current Weaviate instance.", + "tags": [ + "meta" + ], + "x-available-in-mqtt": false, + "x-available-in-websocket": false + } + }, + "/schema": { + "get": { + "summary": "Dump the current database schema.", + "description": "Fetch an array of all collection definitions from the schema.", + "operationId": "schema.dump", + "x-serviceIds": [ + "weaviate.local.query.meta" + ], + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "consistency", + "in": "header", + "required": false, + "default": true, + "type": "boolean", + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency" + } + ], + "responses": { + "200": { + "description": "Successfully dumped the database schema.", + "schema": { + "$ref": "#/definitions/Schema" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "post": { + "summary": "Create a new Object class in the schema.", + "description": "Create a new data object collection.

    If AutoSchema is enabled, Weaviate will attempt to infer the schema from the data at import time. However, manual schema definition is recommended for production environments.", + "operationId": "schema.objects.create", + "x-serviceIds": [ + "weaviate.local.add.meta" + ], + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "objectClass", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Class" + } + } + ], + "responses": { + "200": { + "description": "Added the new Object class to the schema.", + "schema": { + "$ref": "#/definitions/Class" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Object class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/schema/{className}": { + "get": { + "summary": "Get a single class from the schema", + "operationId": "schema.objects.get", + "x-serviceIds": [ + "weaviate.local.get.meta" + ], + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "consistency", + "in": "header", + "required": false, + "default": true, + "type": "boolean", + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency" + } + ], + "responses": { + "200": { + "description": "Found the Class, returned as body", + "schema": { + "$ref": "#/definitions/Class" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "This class does not exist" + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "delete": { + "summary": "Remove an Object class (and all data in the instances) from the schema.", + "description": "Remove a collection from the schema. This will also delete all the objects in the collection.", + "operationId": "schema.objects.delete", + "x-serviceIds": [ + "weaviate.local.manipulate.meta" + ], + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Removed the Object class from the schema." + }, + "400": { + "description": "Could not delete the Object class.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "put": { + "summary": "Update settings of an existing schema class", + "description": "Add a property to an existing collection.", + "operationId": "schema.objects.update", + "x-serviceIds": [ + "weaviate.local.manipulate.meta" + ], + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "objectClass", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Class" + } + } + ], + "responses": { + "200": { + "description": "Class was updated successfully", + "schema": { + "$ref": "#/definitions/Class" + } + }, + "422": { + "description": "Invalid update attempt", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Class to be updated does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/schema/{className}/properties": { + "post": { + "summary": "Add a property to an Object class.", + "operationId": "schema.objects.properties.add", + "x-serviceIds": [ + "weaviate.local.manipulate.meta" + ], + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Property" + } + } + ], + "responses": { + "200": { + "description": "Added the property.", + "schema": { + "$ref": "#/definitions/Property" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid property.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/schema/{className}/shards": { + "get": { + "summary": "Get the shards status of an Object class", + "description": "Get the status of every shard in the cluster.", + "operationId": "schema.objects.shards.get", + "x-serviceIds": [ + "weaviate.local.get.meta" + ], + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "tenant", + "in": "query", + "type": "string" + } + ], + "responses": { + "200": { + "description": "Found the status of the shards, returned as body", + "schema": { + "$ref": "#/definitions/ShardStatusList" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "This class does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/schema/{className}/shards/{shardName}": { + "put": { + "summary": "Update a shard status.", + "description": "Update a shard status for a collection. For example, a shard may have been marked as `READONLY` because its disk was full. After providing more disk space, use this endpoint to set the shard status to `READY` again. There is also a convenience function in each client to set the status of all shards of a collection.", + "operationId": "schema.objects.shards.update", + "x-serviceIds": [ + "weaviate.local.manipulate.meta" + ], + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "shardName", + "in": "path", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ShardStatus" + } + } + ], + "responses": { + "200": { + "description": "Shard status was updated successfully", + "schema": { + "$ref": "#/definitions/ShardStatus" + } + }, + "422": { + "description": "Invalid update attempt", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Shard to be updated does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/schema/{className}/tenants": { + "post": { + "summary": "Create a new tenant", + "description": "Create a new tenant for a collection. Multi-tenancy must be enabled in the collection definition.", + "operationId": "tenants.create", + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + } + ], + "responses": { + "200": { + "description": "Added new tenants to the specified class", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "put": { + "summary": "Update a tenant.", + "description": "Update tenant of a specific class", + "operationId": "tenants.update", + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + } + ], + "responses": { + "200": { + "description": "Updated tenants of the specified class", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "delete": { + "description": "delete tenants from a specific class", + "operationId": "tenants.delete", + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "tenants", + "required": true, + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + ], + "responses": { + "200": { + "description": "Deleted tenants from specified class." + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "get": { + "summary": "Get the list of tenants.", + "description": "get all tenants from a specific class", + "operationId": "tenants.get", + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "consistency", + "in": "header", + "required": false, + "default": true, + "type": "boolean", + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency" + } + ], + "responses": { + "200": { + "description": "tenants from specified class.", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Tenant" + } + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/schema/{className}/tenants/{tenantName}": { + "head": { + "summary": "Check whether a tenant exists", + "description": "Check if a tenant exists for a specific class", + "operationId": "tenant.exists", + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "tenantName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "consistency", + "in": "header", + "required": false, + "default": true, + "type": "boolean", + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency" + } + ], + "responses": { + "200": { + "description": "The tenant exists in the specified class" + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "The tenant not found" + }, + "422": { + "description": "Invalid Tenant class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "get": { + "summary": "Get a specific tenant", + "description": "get a specific tenant for the given class", + "operationId": "tenants.get.one", + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "tenantName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "consistency", + "in": "header", + "required": false, + "default": true, + "type": "boolean", + "description": "If consistency is true, the request will be proxied to the leader to ensure strong schema consistency" + } + ], + "responses": { + "200": { + "description": "load the tenant given the specified class", + "schema": { + "$ref": "#/definitions/Tenant" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Tenant not found" + }, + "422": { + "description": "Invalid tenant or class", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/aliases": { + "get": { + "summary": "List aliases", + "description": "Retrieve a list of all aliases in the system. Results can be filtered by specifying a collection (class) name to get aliases for a specific collection only.", + "operationId": "aliases.get", + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "class", + "description": "Optional filter to retrieve aliases for a specific collection (class) only. 
If not provided, returns all aliases.", + "in": "query", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the list of aliases", + "schema": { + "$ref": "#/definitions/AliasResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid collection (class) parameter provided", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "post": { + "summary": "Create a new alias", + "description": "Create a new alias mapping between an alias name and a collection (class). The alias acts as an alternative name for accessing the collection.", + "operationId": "aliases.create", + "tags": [ + "schema" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/Alias" + } + } + ], + "responses": { + "200": { + "description": "Successfully created a new alias for the specified collection (class)", + "schema": { + "$ref": "#/definitions/Alias" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid create alias request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/aliases/{aliasName}": { + "get": { + "summary": "Get an alias", + "description": "Retrieve details about a specific alias by its name, including which collection (class) it points to.", + "operationId": "aliases.get.alias", + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "aliasName", + "in": "path", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Successfully retrieved the alias details.", + "schema": { + "$ref": "#/definitions/Alias" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Alias does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid alias name provided.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "put": { + "summary": "Update an alias", + "description": "Update an existing alias to point to a different collection (class). 
This allows you to redirect an alias from one collection to another without changing the alias name.", + "operationId": "aliases.update", + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "aliasName", + "in": "path", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "class": { + "description": "The new collection (class) that the alias should point to.", + "type": "string" + } + } + } + } + ], + "responses": { + "200": { + "description": "Successfully updated the alias to point to the new collection (class).", + "schema": { + "$ref": "#/definitions/Alias" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Alias does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid update alias request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "delete": { + "summary": "Delete an alias", + "description": "Remove an existing alias from the system. This will delete the alias mapping but will not affect the underlying collection (class).", + "operationId": "aliases.delete", + "tags": [ + "schema" + ], + "parameters": [ + { + "name": "aliasName", + "in": "path", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "Successfully deleted the alias." + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Alias does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid delete alias request.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/backups/{backend}": { + "post": { + "summary": "Start a backup process", + "description": "Start creating a backup for a set of collections.

    Notes:
    - Weaviate uses gzip compression by default.
    - Weaviate stays usable while a backup process is ongoing.", + "operationId": "backups.create", + "x-serviceIds": [ + "weaviate.local.backup" + ], + "tags": [ + "backups" + ], + "parameters": [ + { + "name": "backend", + "in": "path", + "required": true, + "type": "string", + "description": "Backup backend name e.g. `filesystem`, `gcs`, `s3`, `azure`." + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/BackupCreateRequest" + } + } + ], + "responses": { + "200": { + "description": "Backup create process successfully started.", + "schema": { + "$ref": "#/definitions/BackupCreateResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup creation attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "get": { + "summary": "List backups in progress", + "description": "[Coming soon] List all backups in progress not implemented yet.", + "operationId": "backups.list", + "x-serviceIds": [ + "weaviate.local.backup" + ], + "tags": [ + "backups" + ], + "parameters": [ + { + "name": "backend", + "in": "path", + "required": true, + "type": "string", + "description": "Backup backend name e.g. filesystem, gcs, s3." + } + ], + "responses": { + "200": { + "description": "Existed backups", + "schema": { + "$ref": "#/definitions/BackupListResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup list.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/backups/{backend}/{id}": { + "get": { + "summary": "Get backup process status", + "description": "Returns status of backup creation attempt for a set of collections.

    All client implementations have a `wait for completion` option which will poll the backup status in the background and only return once the backup has completed (successfully or unsuccessfully). If you set the `wait for completion` option to false, you can also check the status yourself using this endpoint.", + "operationId": "backups.create.status", + "x-serviceIds": [ + "weaviate.local.backup" + ], + "tags": [ + "backups" + ], + "parameters": [ + { + "name": "backend", + "in": "path", + "required": true, + "type": "string", + "description": "Backup backend name e.g. filesystem, gcs, s3." + }, + { + "name": "id", + "in": "path", + "required": true, + "type": "string", + "description": "The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed." + }, + { + "name": "bucket", + "in": "query", + "required": false, + "type": "string", + "description": "Name of the bucket, container, volume, etc" + }, + { + "name": "path", + "in": "query", + "required": false, + "type": "string", + "description": "The path within the bucket" + } + ], + "responses": { + "200": { + "description": "Backup creation status successfully returned", + "schema": { + "$ref": "#/definitions/BackupCreateStatusResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration status attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "delete": { + "summary": "Cancel backup", + "description": "Cancel created backup with specified ID", + "operationId": "backups.cancel", + "x-serviceIds": [ + "weaviate.local.backup" + ], + "tags": [ + "backups" + ], + "parameters": [ + { + "name": "backend", + "in": "path", + "required": true, + "type": "string", + "description": "Backup backend name e.g. filesystem, gcs, s3." + }, + { + "name": "id", + "in": "path", + "required": true, + "type": "string", + "description": "The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed." + }, + { + "name": "bucket", + "in": "query", + "required": false, + "type": "string", + "description": "Name of the bucket, container, volume, etc" + }, + { + "name": "path", + "in": "query", + "required": false, + "type": "string", + "description": "The path within the bucket" + } + ], + "responses": { + "204": { + "description": "Successfully deleted." + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup cancellation attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/backups/{backend}/{id}/restore": { + "post": { + "summary": "Start a restoration process", + "description": "Starts a process of restoring a backup for a set of collections.

    Any backup can be restored to any machine, as long as the number of nodes between source and target are identical.

    Requirements:

    - None of the collections to be restored already exist on the target restoration node(s).
    - The node names of the backed-up collections' must match those of the target restoration node(s).", + "operationId": "backups.restore", + "x-serviceIds": [ + "weaviate.local.backup" + ], + "tags": [ + "backups" + ], + "parameters": [ + { + "name": "backend", + "in": "path", + "required": true, + "type": "string", + "description": "Backup backend name e.g. `filesystem`, `gcs`, `s3`, `azure`." + }, + { + "name": "id", + "in": "path", + "required": true, + "type": "string", + "description": "The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed." + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/BackupRestoreRequest" + } + } + ], + "responses": { + "200": { + "description": "Backup restoration process successfully started.", + "schema": { + "$ref": "#/definitions/BackupRestoreResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + }, + "get": { + "summary": "Get restore process status", + "description": "Returns status of a backup restoration attempt for a set of classes.

    All client implementations have a `wait for completion` option which will poll the backup status in the background and only return once the backup has completed (successfully or unsuccessfully). If you set the `wait for completion` option to false, you can also check the status yourself using the this endpoint.", + "operationId": "backups.restore.status", + "x-serviceIds": [ + "weaviate.local.backup" + ], + "tags": [ + "backups" + ], + "parameters": [ + { + "name": "backend", + "in": "path", + "required": true, + "type": "string", + "description": "Backup backend name e.g. `filesystem`, `gcs`, `s3`, `azure`." + }, + { + "name": "id", + "in": "path", + "required": true, + "type": "string", + "description": "The ID of a backup. Must be URL-safe and work as a filesystem path, only lowercase, numbers, underscore, minus characters allowed." + }, + { + "name": "bucket", + "in": "query", + "required": false, + "type": "string", + "description": "Name of the bucket, container, volume, etc" + }, + { + "name": "path", + "in": "query", + "required": false, + "type": "string", + "description": "The path within the bucket" + } + ], + "responses": { + "200": { + "description": "Backup restoration status successfully returned", + "schema": { + "$ref": "#/definitions/BackupRestoreStatusResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/cluster/statistics": { + "get": { + "summary": "See Raft cluster statistics", + "description": "Returns Raft cluster statistics of Weaviate DB.", + "operationId": "cluster.get.statistics", + "x-serviceIds": [ + "weaviate.cluster.statistics.get" + ], + "tags": [ + "cluster" + ], + "responses": { + "200": { + "description": "Cluster statistics successfully returned", + "schema": { + "$ref": "#/definitions/ClusterStatisticsResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration status attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/nodes": { + "get": { + "summary": "Node information for the database.", + "description": "Returns node information for the entire database.", + "operationId": "nodes.get", + "x-serviceIds": [ + "weaviate.nodes.status.get" + ], + "tags": [ + "nodes" + ], + "parameters": [ + { + "$ref": "#/parameters/CommonOutputVerbosityParameterQuery" + } + ], + "responses": { + "200": { + "description": "Nodes status successfully returned", + "schema": { + "$ref": "#/definitions/NodesStatusResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." 
+ }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration status attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/nodes/{className}": { + "get": { + "summary": "Node information for a collection.", + "description": "Returns node information for the nodes relevant to the collection.", + "operationId": "nodes.get.class", + "x-serviceIds": [ + "weaviate.nodes.status.get.class" + ], + "tags": [ + "nodes" + ], + "parameters": [ + { + "name": "className", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "shardName", + "in": "query", + "required": false, + "type": "string" + }, + { + "$ref": "#/parameters/CommonOutputVerbosityParameterQuery" + } + ], + "responses": { + "200": { + "description": "Nodes status successfully returned", + "schema": { + "$ref": "#/definitions/NodesStatusResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "404": { + "description": "Not Found - Backup does not exist", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "422": { + "description": "Invalid backup restoration status attempt.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/tasks": { + "get": { + "summary": "Lists all distributed tasks in the cluster.", + "operationId": "distributedTasks.get", + "x-serviceIds": [ + "weaviate.distributedTasks.get" + ], + "tags": [ + "distributedTasks" + ], + "responses": { + "200": { + "description": "Distributed tasks successfully returned", + "schema": { + "$ref": "#/definitions/DistributedTasks" + } + }, + "403": { + "description": "Unauthorized or invalid credentials.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + } + } + }, + "/classifications/": { + "post": { + "description": "Trigger a classification based on the specified params. Classifications will run in the background, use GET /classifications/ to retrieve the status of your classification.", + "operationId": "classifications.post", + "x-serviceIds": [ + "weaviate.classifications.post" + ], + "parameters": [ + { + "description": "parameters to start a classification", + "in": "body", + "schema": { + "$ref": "#/definitions/Classification" + }, + "name": "params", + "required": true + } + ], + "responses": { + "201": { + "description": "Successfully started classification.", + "schema": { + "$ref": "#/definitions/Classification" + } + }, + "400": { + "description": "Incorrect request", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. 
Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "Starts a classification.", + "tags": [ + "classifications" + ] + } + }, + "/classifications/{id}": { + "get": { + "description": "Get status, results and metadata of a previously created classification", + "operationId": "classifications.get", + "x-serviceIds": [ + "weaviate.classifications.get" + ], + "parameters": [ + { + "description": "classification id", + "in": "path", + "type": "string", + "name": "id", + "required": true + } + ], + "responses": { + "200": { + "description": "Found the classification, returned as body", + "schema": { + "$ref": "#/definitions/Classification" + } + }, + "404": { + "description": "Not Found - Classification does not exist" + }, + "401": { + "description": "Unauthorized or invalid credentials." + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + }, + "500": { + "description": "An error has occurred while trying to fulfill the request. Most likely the ErrorResponse will contain more information about the error.", + "schema": { + "$ref": "#/definitions/ErrorResponse" + } + } + }, + "summary": "View previously created classification", + "tags": [ + "classifications" + ] + } + } + }, + "produces": [ + "application/json" + ], + "schemes": [ + "https" + ], + "security": [ + {}, + { + "oidc": [] + } + ], + "securityDefinitions": { + "oidc": { + "type": "oauth2", + "description": "OIDC (OpenConnect ID - based on OAuth2)", + "flow": "implicit", + "authorizationUrl": "http://to-be-configured-in-the-application-config" + } + }, + "swagger": "2.0", + "tags": [ + { + "name": "objects" + }, + { + "name": "batch", + "description": "These operations allow to execute batch requests for Objects and Objects. Mostly used for importing large datasets." 
+ }, + { + "name": "graphql" + }, + { + "name": "meta" + }, + { + "name": "P2P" + }, + { + "name": "contextionary-API", + "description": "All functions related to the Contextionary." + }, + { + "name": "schema", + "description": "These operations enable manipulation of the schema in Weaviate schema." + }, + { + "name": "replication", + "description": "Operations related to managing data replication, including initiating and monitoring shard replica movements between nodes, querying current sharding states, and managing the lifecycle of replication tasks." + } + ] +} diff --git a/platform/dbops/binaries/weaviate-src/test/README.md b/platform/dbops/binaries/weaviate-src/test/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1d13440026164d7f522b76bfc400d7b7a0d3bdc5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/test/README.md @@ -0,0 +1,23 @@ +## Testing + + +we use shell to run our (unit/ acceptance / integration) tests + +```shell +./test/run.sh ${command} +``` +run that command with any of the following available commands, make sure you're in the project root folder. 
+ +### available commands + - `--unit-only` | `-u` + - `--unit-and-integration-only` | `-ui` + - `--integration-only` | `-i` + - `--acceptance-only` | `-a` + - `--acceptance-only-fast` | `-aof` + - `--acceptance-only-graphql` | `-aog` + - `--acceptance-only-replication` | `-aor` + - `--acceptance-only-async-replication` | `-aoar` + - `--acceptance-module-tests-only` | `--modules-only` | `-m` + - `--acceptance-module-tests-only-backup` | `--modules-backup-only` | `-mob` + - `--acceptance-module-tests-except-backup` | `--modules-except-backup` | `-meb` + - `--only-module-{moduleName}` \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/test/__init__.py b/platform/dbops/binaries/weaviate-src/test/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/platform/dbops/binaries/weaviate-src/test/run.sh b/platform/dbops/binaries/weaviate-src/test/run.sh new file mode 100644 index 0000000000000000000000000000000000000000..aa8c02c987950eb1e6e62ba2120eb12f9ef98217 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/test/run.sh @@ -0,0 +1,518 @@ +#!/bin/bash + +set -eou pipefail + +function main() { + # This script runs all non-benchmark tests if no CMD switch is given and the respective tests otherwise. 
+ run_all_tests=true + run_acceptance_tests=false + run_acceptance_only_fast=false + run_acceptance_only_authz=false + run_acceptance_only_python=false + run_acceptance_go_client=false + run_acceptance_graphql_tests=false + run_acceptance_replication_tests=false + run_acceptance_replica_replication_fast_tests=false + run_acceptance_replica_replication_slow_tests=false + run_acceptance_async_replication_tests=false + run_acceptance_objects=false + only_acceptance=false + run_module_tests=false + only_module=false + only_module_value=false + run_unit_and_integration_tests=false + run_unit_tests=false + run_integration_tests=false + run_integration_tests_only_vector_package=false + run_integration_tests_without_vector_package=false + run_benchmark=false + run_module_only_backup_tests=false + run_module_only_offload_tests=false + run_module_except_backup_tests=false + run_module_except_offload_tests=false + run_cleanup=false + run_acceptance_go_client_only_fast=false + run_acceptance_go_client_named_vectors_single_node=false + run_acceptance_go_client_named_vectors_cluster=false + run_acceptance_lsmkv=false + + while [[ "$#" -gt 0 ]]; do + case $1 in + --unit-only|-u) run_all_tests=false; run_unit_tests=true;; + --unit-and-integration-only|-ui) run_all_tests=false; run_unit_and_integration_tests=true;; + --integration-only|-i) run_all_tests=false; run_integration_tests=true;; + --integration-vector-package-only|-ivpo) run_all_tests=false; run_integration_tests=true; run_integration_tests_only_vector_package=true;; + --integration-without-vector-package|-iwvp) run_all_tests=false; run_integration_tests=true; run_integration_tests_without_vector_package=true;; + --acceptance-only|--e2e-only|-a) run_all_tests=false; run_acceptance_tests=true ;; + --acceptance-only-fast|-aof) run_all_tests=false; run_acceptance_only_fast=true;; + --acceptance-only-python|-aop) run_all_tests=false; run_acceptance_only_python=true;; + --acceptance-go-client|-ag) run_all_tests=false; 
run_acceptance_go_client=true;;
+    --acceptance-go-client-only-fast|-agof) run_all_tests=false; run_acceptance_go_client=false; run_acceptance_go_client_only_fast=true;;
+    --acceptance-go-client-named-vectors-single-node|-agnv) run_all_tests=false; run_acceptance_go_client=false; run_acceptance_go_client_named_vectors_single_node=true;;
+    --acceptance-go-client-named-vectors-cluster|-agnvc) run_all_tests=false; run_acceptance_go_client=false; run_acceptance_go_client_named_vectors_cluster=true;; # short flag was -agnv, duplicating the single-node arm above; first match wins in case, so this arm was unreachable via its short form
+    --acceptance-only-graphql|-aog) run_all_tests=false; run_acceptance_graphql_tests=true ;;
+    --acceptance-only-authz|-aoa) run_all_tests=false; run_acceptance_only_authz=true;;
+    --acceptance-only-replication|-aor) run_all_tests=false; run_acceptance_replication_tests=true ;;
+    --acceptance-only-replica-replication-fast|-aorrf) run_all_tests=false; run_acceptance_replica_replication_fast_tests=true ;;
+    --acceptance-only-replica-replication-slow|-aorrs) run_all_tests=false; run_acceptance_replica_replication_slow_tests=true ;;
+    --acceptance-only-async-replication|-aoar) run_all_tests=false; run_acceptance_async_replication_tests=true ;;
+    --acceptance-only-objects|-aoob) run_all_tests=false; run_acceptance_objects=true ;;
+    --only-acceptance-*|-oa)run_all_tests=false; only_acceptance=true;only_acceptance_value=$1;;
+    --only-module-*|-om)run_all_tests=false; only_module=true;only_module_value=$1;;
+    --acceptance-module-tests-only|--modules-only|-m) run_all_tests=false; run_module_tests=true; run_module_only_backup_tests=true; run_module_except_backup_tests=true;run_module_only_offload_tests=true;run_module_except_offload_tests=true;;
+    --acceptance-module-tests-only-backup|--modules-backup-only|-mob) run_all_tests=false; run_module_tests=true; run_module_only_backup_tests=true;;
+    --acceptance-module-tests-only-offload|--modules-offload-only|-moo) run_all_tests=false; run_module_tests=true; run_module_only_offload_tests=true;;
+    --acceptance-module-tests-except-backup|--modules-except-backup|-meb) run_all_tests=false; run_module_tests=true; run_module_except_backup_tests=true; echo $run_module_except_backup_tests ;;
+    --acceptance-module-tests-except-offload|--modules-except-offload|-meo) run_all_tests=false; run_module_tests=true; run_module_except_offload_tests=true; echo $run_module_except_offload_tests ;;
+    --acceptance-lsmkv|--lsmkv) run_all_tests=false; run_acceptance_lsmkv=true;;
+    --benchmark-only|-b) run_all_tests=false; run_benchmark=true;;
+    --cleanup) run_all_tests=false; run_cleanup=true;;
+    --help|-h) printf '%s\n' \
+      "Options:"\
+      "--unit-only | -u"\
+      "--unit-and-integration-only | -ui"\
+      "--integration-only | -i"\
+      "--acceptance-only | -a"\
+      "--acceptance-only-fast | -aof"\
+      "--acceptance-only-python | -aop"\
+      "--acceptance-go-client | -ag"\
+      "--acceptance-go-client-only-fast | -agof"\
+      "--acceptance-go-client-named-vectors | -agnv"\
+      "--acceptance-only-graphql | -aog"\
+      "--acceptance-only-replication| -aor"\
+      "--acceptance-only-async-replication| -aoar"\
+      "--acceptance-only-replica-replication-fast/slow| -aorrf/-aorrs"\
+      "--acceptance-module-tests-only | --modules-only | -m"\
+      "--acceptance-module-tests-only-backup | --modules-backup-only | -mob"\
+      "--acceptance-module-tests-except-backup | --modules-except-backup | -meb"\
+      "--acceptance-lsmkv | --lsmkv"\
+      "--only-acceptance-{packageName}"\
+      "--only-module-{moduleName}"\
+      "--benchmark-only | -b" \
+      "--help | -h"; exit 1;;
+    *) echo "Unknown parameter passed: $1"; exit 1 ;;
+  esac
+  shift
+  done
+
+  # Jump to root directory
+  cd "$( dirname "${BASH_SOURCE[0]}" )"/..
+
+  echo "INFO: In directory $PWD"
+
+  echo "INFO: This script will suppress most output, unless a command ultimately fails"
+  echo "      Then it will print the output of the failed command."
+
+  echo_green "Prepare workspace..."
+
+  # Remove data directory in case of previous runs
+  rm -rf data
+  echo "Done!"
+ + if $run_unit_and_integration_tests || $run_unit_tests || $run_all_tests + then + echo_green "Run all unit tests..." + run_unit_tests "$@" + echo_green "Unit tests successful" + fi + + if $run_unit_and_integration_tests || $run_integration_tests || $run_all_tests + then + echo_green "Run integration tests..." + run_integration_tests "$@" + echo_green "Integration tests successful" + fi + + if $run_acceptance_tests || $run_acceptance_only_fast || $run_acceptance_only_authz || $run_acceptance_go_client || $run_acceptance_graphql_tests || $run_acceptance_replication_tests || $run_acceptance_replica_replication_fast_tests || $run_acceptance_replica_replication_slow_tests || $run_acceptance_async_replication_tests || $run_acceptance_only_python || $run_all_tests || $run_benchmark || $run_acceptance_go_client_only_fast || $run_acceptance_go_client_named_vectors_single_node || $run_acceptance_go_client_named_vectors_cluster || $only_acceptance || $run_acceptance_objects + then + echo "Start docker container needed for acceptance and/or benchmark test" + echo_green "Stop any running docker-compose containers..." + suppress_on_success docker compose -f docker-compose-test.yml down --remove-orphans + + echo_green "Start up weaviate and backing dbs in docker-compose..." + echo "This could take some time..." + if $run_acceptance_only_authz || $run_acceptance_only_python + then + tools/test/run_ci_server.sh --with-auth + build_mockoidc_docker_image_for_tests + else + tools/test/run_ci_server.sh + fi + + # echo_green "Import required schema and test fixtures..." + # # Note: It's not best practice to do this as part of the test script + # # It would be better if each test independently prepared (and also + # # cleaned up) the test fixtures it needs, but one step at a time ;) + # suppress_on_success import_test_fixtures + + if $run_benchmark + then + echo_green "Run performance tracker..." 
+      ./test/benchmark/run_performance_tracker.sh
+    fi
+
+    if $run_acceptance_tests || $run_acceptance_only_fast || $run_acceptance_only_authz || $run_acceptance_go_client || $run_acceptance_graphql_tests || $run_acceptance_replication_tests || $run_acceptance_replica_replication_fast_tests || $run_acceptance_replica_replication_slow_tests || $run_acceptance_async_replication_tests || $run_acceptance_go_client_only_fast || $run_acceptance_go_client_named_vectors_single_node || $run_acceptance_go_client_named_vectors_cluster || $run_all_tests || $only_acceptance || $run_acceptance_objects
+    then
+      echo_green "Run acceptance tests..."
+      run_acceptance_tests "$@"
+    fi
+  fi
+
+  if $run_acceptance_only_python || $run_all_tests
+  then
+    echo_green "Run python acceptance tests..."
+    ./test/acceptance_with_python/run.sh
+    echo_green "Python tests successful"
+  fi
+
+  if $only_module; then
+    mod=${only_module_value//--only-module-/}
+    echo_green "Running module acceptance tests for $mod..."
+    for pkg in $(go list ./test/modules/... | grep "/modules/${mod}"); do
+      build_docker_image_for_tests
+      echo_green "Weaviate image successfully built, run module tests for $mod..."
+      if ! go test -count 1 -race -timeout 15m -v "$pkg"; then
+        echo "Test for $pkg failed" >&2
+        return 1
+      fi
+      echo_green "Module acceptance tests for $mod successful"
+    done
+  fi
+  if $run_module_tests; then
+    echo_green "Running module acceptance tests..."
+    build_docker_image_for_tests
+    echo_green "Weaviate image successfully built, run module tests..."
+    run_module_tests "$@"
+    echo_green "Module acceptance tests successful"
+  fi
+  if $run_cleanup; then
+    echo_green "Cleaning up all running docker containers..."
+    if [ -n "$(docker ps -aq)" ]; then docker rm -f $(docker ps -aq); fi # guard: bare 'docker rm -f' errors when no containers exist, aborting under set -e
+  fi
+
+  if $run_acceptance_lsmkv || $run_acceptance_tests || $run_all_tests; then
+    echo "running lsmkv acceptance tests"
+    run_acceptance_lsmkv "$@"
+  fi
+  echo "Done!"
+} + +function build_docker_image_for_tests() { + local module_test_image=weaviate:module-tests + echo_green "Stop any running docker-compose containers..." + suppress_on_success docker compose -f docker-compose-test.yml down --remove-orphans + echo_green "Building weaviate image for module acceptance tests..." + echo "This could take some time..." + GIT_REVISION=$(git rev-parse --short HEAD) + GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD) + + docker build --build-arg GIT_REVISION="$GIT_REVISION" --build-arg GIT_BRANCH="$GIT_BRANCH" --target weaviate -t $module_test_image . + export "TEST_WEAVIATE_IMAGE"=$module_test_image +} + +function build_mockoidc_docker_image_for_tests() { + local mockoidc_test_image=mockoidc:module-tests + echo_green "Building MockOIDC image for module acceptance tests..." + docker build -t $mockoidc_test_image test/docker/mockoidc + export "TEST_MOCKOIDC_IMAGE"=$mockoidc_test_image + local mockoidc_helper_test_image=mockoidchelper:module-tests + echo_green "MockOIDC image successfully built" + echo_green "Building MockOIDC Helper image for module acceptance tests..." + docker build -t $mockoidc_helper_test_image test/docker/mockoidchelper + export "TEST_MOCKOIDC_HELPER_IMAGE"=$mockoidc_helper_test_image + echo_green "MockOIDC Helper image successfully built" +} + +function run_unit_tests() { + if [[ "$*" == *--acceptance-only* ]]; then + echo "Skipping unit test" + return + fi + go test -race -coverprofile=coverage-unit.txt -covermode=atomic -count 1 $(go list ./... 
| grep -v 'test/acceptance' | grep -v 'test/modules') | grep -v '\[no test files\]' +} + +function run_integration_tests() { + if [[ "$*" == *--acceptance-only* ]]; then + echo "Skipping integration test" + return + fi + + if $run_integration_tests_only_vector_package; then + ./test/integration/run.sh --include-slow --only-vector-pkg + elif $run_integration_tests_without_vector_package; then + ./test/integration/run.sh --include-slow --without-vector-pkg + else + ./test/integration/run.sh --include-slow + fi +} + +function run_acceptance_lsmkv() { + echo "This test runs without the race detector because it asserts performance" + cd 'test/acceptance_lsmkv' + for pkg in $(go list ./...); do + if ! go test -timeout=15m -count 1 "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done + cd - +} + +function run_acceptance_tests() { + if $run_acceptance_only_fast || $run_acceptance_tests || $run_all_tests; then + echo "running acceptance fast only" + run_acceptance_only_fast "$@" + fi + if $run_acceptance_only_authz || $run_acceptance_tests || $run_all_tests; then + echo "running acceptance authz" + run_acceptance_only_authz "$@" + fi + if $run_acceptance_graphql_tests || $run_acceptance_tests || $run_all_tests; then + echo "running acceptance graphql" + run_acceptance_graphql_tests "$@" + fi + if $run_acceptance_replication_tests || $run_acceptance_tests || $run_all_tests; then + echo "running acceptance replication" + run_acceptance_replication_tests "$@" + fi + if $run_acceptance_replica_replication_fast_tests || $run_acceptance_tests || $run_all_tests; then + echo "running acceptance replica replication replication fast" + run_acceptance_replica_replication_fast_tests "$@" + fi + if $run_acceptance_replica_replication_slow_tests || $run_acceptance_tests || $run_all_tests; then + echo "running acceptance replica replication replication slow" + run_acceptance_replica_replication_slow_tests "$@" + fi + if $run_acceptance_async_replication_tests || 
$run_acceptance_tests || $run_all_tests; then + echo "running acceptance async replication" + run_acceptance_async_replication_tests "$@" + fi + if $only_acceptance; then + echo "running only acceptance" + run_acceptance_only_tests + fi + if $run_acceptance_go_client_only_fast || $run_acceptance_go_client || $run_acceptance_tests || $run_all_tests; then + echo "running acceptance go client only fast" + run_acceptance_go_client_only_fast "$@" + fi + if $run_acceptance_go_client_named_vectors_single_node || $run_acceptance_go_client || $run_acceptance_tests || $run_all_tests; then + echo "running acceptance go client named vectors for single node" + run_acceptance_go_client_named_vectors_single_node "$@" + fi + if $run_acceptance_go_client_named_vectors_cluster || $run_acceptance_go_client || $run_acceptance_tests || $run_all_tests; then + echo "running acceptance go client named vectors for cluster" + run_acceptance_go_client_named_vectors_cluster "$@" + fi + if $run_acceptance_objects || $run_acceptance_tests || $run_all_tests; then + echo "running acceptance objects" + run_acceptance_objects "$@" + fi +} + +function run_acceptance_only_fast() { + # needed for test/docker package during replication tests + export TEST_WEAVIATE_IMAGE=weaviate/test-server + # to make sure all tests are run and the script fails if one of them fails + # but after all tests ran + testFailed=0 + # for now we need to run the tests sequentially, there seems to be some sort of issues with running them in parallel + for pkg in $(go list ./... | grep 'test/acceptance' | grep -v 'test/acceptance/stress_tests' | grep -v 'test/acceptance/replication' | grep -v 'test/acceptance/graphql_resolvers' | grep -v 'test/acceptance_lsmkv' | grep -v 'test/acceptance/authz'); do + if ! go test -count 1 -timeout=20m -race "$pkg"; then + echo "Test for $pkg failed" >&2 + testFailed=1 + fi + done + if [ "$testFailed" -eq 1 ]; then + return 1 + fi + for pkg in $(go list ./... 
| grep 'test/acceptance/stress_tests' ); do + if ! go test -count 1 "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done +} + +function run_acceptance_go_client_only_fast() { + export TEST_WEAVIATE_IMAGE=weaviate/test-server + # tests with go client are in a separate package with its own dependencies to isolate them + cd 'test/acceptance_with_go_client' + for pkg in $(go list ./... | grep -v 'acceptance_tests_with_client/named_vectors_tests'); do + if ! go test -count 1 -race "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done + cd - +} + +function run_acceptance_go_client_named_vectors_single_node() { + export TEST_WEAVIATE_IMAGE=weaviate/test-server + # tests with go client are in a separate package with its own dependencies to isolate them + cd 'test/acceptance_with_go_client' + for pkg in $(go list ./... | grep 'acceptance_tests_with_client/named_vectors_tests/singlenode'); do + if ! go test -timeout=15m -count 1 -race "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done + cd - +} + +function run_acceptance_go_client_named_vectors_cluster() { + export TEST_WEAVIATE_IMAGE=weaviate/test-server + # tests with go client are in a separate package with its own dependencies to isolate them + cd 'test/acceptance_with_go_client' + for pkg in $(go list ./... | grep 'acceptance_tests_with_client/named_vectors_tests/cluster'); do + if ! go test -timeout=15m -count 1 -race "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done + cd - +} + +function run_acceptance_graphql_tests() { + for pkg in $(go list ./... | grep 'test/acceptance/graphql_resolvers'); do + if ! go test -timeout=15m -count 1 -race "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done +} + +function run_acceptance_only_authz() { + export TEST_WEAVIATE_IMAGE=weaviate/test-server + for pkg in $(go list ./.../ | grep 'test/acceptance/authz'); do + if ! 
go test -timeout=15m -count 1 -race "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done +} + +function run_acceptance_replica_replication_fast_tests() { + for pkg in $(go list ./.../ | grep 'test/acceptance/replication/replica_replication/fast'); do + if ! go test -timeout=30m -count 1 -race "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done +} + +function run_acceptance_replica_replication_slow_tests() { + for pkg in $(go list ./.../ | grep 'test/acceptance/replication/replica_replication/slow'); do + if ! go test -timeout=30m -count 1 -race "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done +} + +function run_acceptance_replication_tests() { + for pkg in $(go list ./.../ | grep 'test/acceptance/replication/read_repair'); do + if ! go test -count 1 -race "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done +} + +function run_acceptance_async_replication_tests() { + for pkg in $(go list ./.../ | grep 'test/acceptance/replication/async_replication'); do + if ! go test -count 1 -race "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done +} + +function run_acceptance_objects() { + for pkg in $(go list ./.../ | grep 'test/acceptance/objects'); do + if ! go test -count 1 -race -v "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done +} + +function run_acceptance_only_tests() { + package=${only_acceptance_value//--only-acceptance-/} + echo_green "Running acceptance tests for $package..." + for pkg in $(go list ./.../ | grep 'test/acceptance/'${package}); do + if ! go test -v -count 1 -race "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done +} + +function run_module_only_backup_tests() { + for pkg in $(go list ./... | grep 'test/modules' | grep 'test/modules/backup'); do + if ! go test -count 1 -race "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done +} + +function run_module_only_offload_tests() { + for pkg in $(go list ./... 
|grep 'test/modules/offload'); do + if ! go test -count 1 -race -v "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done +} + +function run_module_except_backup_tests() { + for pkg in $(go list ./... | grep 'test/modules' | grep -v 'test/modules/backup'); do + if ! go test -count 1 -race "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done +} + +function run_module_except_offload_tests() { + for pkg in $(go list ./... | grep 'test/modules' | grep -v 'test/modules/offload'); do + if ! go test -count 1 -race "$pkg"; then + echo "Test for $pkg failed" >&2 + return 1 + fi + done +} + +function run_module_tests() { + if $run_module_only_backup_tests; then + run_module_only_backup_tests "$@" + fi + if $run_module_only_offload_tests; then + run_module_only_offload_tests "$@" + fi + if $run_module_except_backup_tests; then + run_module_except_backup_tests "$@" + fi + if $run_module_except_offload_tests; then + run_module_except_offload_tests "$@" + fi +} + +suppress_on_success() { + out="$("${@}" 2>&1)" || { echo_red "FAILED!"; echo "$out"; return 1; } + echo "Done!" 
+} + +function echo_green() { + green='\033[0;32m' + nc='\033[0m' + echo -e "${green}${*}${nc}" +} + +function echo_red() { + red='\033[0;31m' + nc='\033[0m' + echo -e "${red}${*}${nc}" +} + +main "$@" diff --git a/platform/dbops/binaries/weaviate-src/tools/.gitignore b/platform/dbops/binaries/weaviate-src/tools/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..0ea61554f35e927193eb4c02efe090885c5fe958 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/tools/.gitignore @@ -0,0 +1 @@ +/swagger-* diff --git a/platform/dbops/binaries/weaviate-src/tools/gen-code-from-swagger.sh b/platform/dbops/binaries/weaviate-src/tools/gen-code-from-swagger.sh new file mode 100644 index 0000000000000000000000000000000000000000..af5bb9defbdc1b4c8da62b0793c43976890eba0f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/tools/gen-code-from-swagger.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +set -eou pipefail + +# Version of go-swagger to use. +version=v0.30.4 + +# Always points to the directory of this script. +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +SWAGGER=$DIR/swagger-${version} + +GOARCH=$(go env GOARCH) +GOOS=$(go env GOOS) +if [ ! -f "$SWAGGER" ]; then + if [ "$GOOS" = "linux" ]; then + curl -o "$SWAGGER" -L'#' https://github.com/go-swagger/go-swagger/releases/download/$version/swagger_"$(echo `uname`|tr '[:upper:]' '[:lower:]')"_"$GOARCH" + else + curl -o "$SWAGGER" -L'#' https://github.com/go-swagger/go-swagger/releases/download/$version/swagger_"$(echo `uname`|tr '[:upper:]' '[:lower:]')"_amd64 + fi + chmod +x "$SWAGGER" +fi + +# Always install goimports to ensure that all parties use the same version +go install golang.org/x/tools/cmd/goimports@v0.1.12 + +# Explicitly get yamplc package +(go get github.com/go-openapi/runtime/yamlpc@v0.24.2) + +# Remove old stuff. 
+(cd "$DIR"/..; rm -rf entities/models client adapters/handlers/rest/operations/) + +(cd "$DIR"/..; $SWAGGER generate server --name=weaviate --model-package=entities/models --server-package=adapters/handlers/rest --spec=openapi-specs/schema.json -P models.Principal --default-scheme=https --struct-tags=yaml --struct-tags=json) +(cd "$DIR"/..; $SWAGGER generate client --name=weaviate --model-package=entities/models --spec=openapi-specs/schema.json -P models.Principal --default-scheme=https) + +echo Generate Deprecation code... +(cd "$DIR"/..; GO111MODULE=on GOWORK=off go generate ./deprecations) + +echo Now add custom UnmarmarshalJSON code to models.Vectors swagger generated file. +(cd "$DIR"/..; GO111MODULE=on go run ./tools/swagger_custom_code/main.go) + +echo Now add the header to the generated code too. +(cd "$DIR"/..; GO111MODULE=on go run ./tools/license_headers/main.go) +# goimports and exclude hidden files and proto auto generated files, do this process in steps, first for regular go files, then only for test go files +(cd "$DIR"/..; goimports -w $(find . -type f -name '*.go' -not -name '*_test.go' -not -name '*pb.go' -not -path './vendor/*' -not -path "./.*/*")) +(cd "$DIR"/..; goimports -w $(find . -type f -name '*_test.go' -not -name '*pb.go' -not -path './vendor/*' -not -path "./.*/*")) + +CHANGED=$(git status -s | wc -l) +if [ "$CHANGED" -gt 0 ]; then + echo "There are changes in the files that need to be committed:" + git status -s +fi + +echo Success diff --git a/platform/dbops/binaries/weaviate-src/tools/generate-release-artifacts.sh b/platform/dbops/binaries/weaviate-src/tools/generate-release-artifacts.sh new file mode 100644 index 0000000000000000000000000000000000000000..f6c4bbe51957ccafdcfc3b857d008266b2eaec16 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/tools/generate-release-artifacts.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash + +# NOTES: +# 1. 
This script is used only in case of emergencies when our CI pipelines fail to generate release binaries. +# 2. This script generates only linux binaries. We use different script for Mac binaries (`tools/dev/goreleaser_and_sign.sh`) + +set -eou pipefail + +OS=${GOOS:-"linux"} + +BUILD_ARTIFACTS_DIR="build_artifacts" +GIT_REVISION=$(git rev-parse --short HEAD) +GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD) + +VERSION="v$(jq -r '.info.version' openapi-specs/schema.json)" +VPREFIX="github.com/weaviate/weaviate/usecases/build" + +BUILD_TAGS="-X ${VPREFIX}.Branch=${GIT_BRANCH} -X ${VPREFIX}.Revision=${GIT_REVISION} -X ${VPREFIX}.BuildUser=$(whoami)@$(hostname) -X ${VPREFIX}.BuildDate=$(date -u +"%Y-%m-%dT%H:%M:%SZ")" + +function main() { + cd ./cmd/weaviate-server + + if [ -d $BUILD_ARTIFACTS_DIR ]; then + rm -fr $BUILD_ARTIFACTS_DIR + fi + + build_binary_arm64 + build_binary_amd64 + + echo_purp_bold "${VERSION} artifacts available here: $(pwd)/${BUILD_ARTIFACTS_DIR}" +} + +function echo_green() { + green='\033[0;32m' + nc='\033[0m' + echo -e "${green}${*}${nc}" +} + +function echo_purp_bold() { + purp='\033[1;35m' + nc='\033[0m' + echo -e "${purp}${*}${nc}" +} + +function step_complete() { + echo_green "==> Done!" +} + +function build_binary_arm64() { + build_binary "arm64" +} + +function build_binary_amd64() { + build_binary "amd64" +} + +function build_binary() { + arch=$1 + arch_dir="${BUILD_ARTIFACTS_DIR}/${arch}" + + echo_green "Building linux/${arch} binary..." + GOOS=linux GOARCH=$arch go build -o $BUILD_ARTIFACTS_DIR/$arch/weaviate -ldflags "-w -extldflags \"-static\" ${BUILD_TAGS}" + step_complete + + cd $arch_dir + + echo_green "Copy README.md and LICENSE file..." + cp ../../../../README.md . + cp ../../../../LICENSE . + + echo_green "Packing linux/${arch} distribution..." + LINUX_DIST="weaviate-${VERSION}-linux-${arch}.tar.gz" + tar cvfz "$LINUX_DIST" weaviate LICENSE README.md + step_complete + + echo_green "Calculating linux/${arch} checksums..." 
+ shasum -a 256 "$LINUX_DIST" | cut -d ' ' -f 1 > "${LINUX_DIST}.sha256" + md5 "$LINUX_DIST" | cut -d ' ' -f 4 > "${LINUX_DIST}.md5" + step_complete + + echo_green "Move linux/${arch} artifacts to ${BUILD_ARTIFACTS_DIR} directory..." + mv $LINUX_DIST* ../ + step_complete + + echo_green "Clean up ${arch} directory" + cd ../.. + rm -fr $arch_dir + step_complete +} + +main "$@" diff --git a/platform/dbops/binaries/weaviate-src/tools/linter_error_groups.sh b/platform/dbops/binaries/weaviate-src/tools/linter_error_groups.sh new file mode 100644 index 0000000000000000000000000000000000000000..fad0a460ebed879f34ac333813079efc9d81dfd8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/tools/linter_error_groups.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Search for all tracked non-test .go files in the Git repository +all_files=$(git ls-files | grep -E '\.go$' | grep -vE 'test') + +# Get all files with 'errgroup' in them. The only place where direct usage is allowed is in error_group_wrapper.go +files=$(grep -l 'errgroup' ${all_files}) + +found_error=false + +for file in $files; do + # Check if the file is not one of the permitted usages + if [ "$file" != "entities/errors/error_group_wrapper.go" ] && [ "$file" != "tools/linter_error_groups.sh" ]; then + echo "Error: $file directly uses error groups. Please use entities/errors/error_group_wrapper.go instead." 
+ found_error=true + fi +done + +if [ "$found_error" = true ]; then + exit 1 +fi \ No newline at end of file diff --git a/platform/dbops/binaries/weaviate-src/tools/linter_go_routines.sh b/platform/dbops/binaries/weaviate-src/tools/linter_go_routines.sh new file mode 100644 index 0000000000000000000000000000000000000000..225514050b2145c21ee277512a367feeff063722 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/tools/linter_go_routines.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +set -euo pipefail +# Search for all tracked non-test .go files in the Git repository +all_files=$(git ls-files | grep -E '\.go$' | grep -vE '_test\.go$') + +# Get all files with 'go ' in them, ignoring lines that start with a comment +files=$(grep -E -l '^[[:space:]]*[^/]*go ' ${all_files}) + +found_error=false + +# Define array of excluded files with their explanations +excluded_files=( + "entities/errors/go_wrapper.go" # wrapper + "adapters/handlers/rest/server.go" # autogenerated + "test/helper/sample-schema/books/books.go" # test file + # race condition in classification when replacing the direct goroutine with a wrapper. Not important enough to investigate as nobody is using classification. + "usecases/classification/classifier_run.go" + "usecases/auth/authorization/docs/generator.go" # docs generator + "tools/dev/generate_release_notes/main.go" # generate release notes tool + "test/docker/mockoidchelper/mockoidc_helper.go" # used only for OIDC tests +) + +# Check if file is in excluded list +for file in $files; do + if [[ " ${excluded_files[@]} " =~ " ${file} " ]]; then + continue + fi + + echo "Error: $file uses direct goroutines. Please use entities/errors/go_wrapper.go instead." 
+ found_error=true +done + +if [ "$found_error" = true ]; then + exit 1 +fi diff --git a/platform/dbops/binaries/weaviate-src/tools/prepare_release.sh b/platform/dbops/binaries/weaviate-src/tools/prepare_release.sh new file mode 100644 index 0000000000000000000000000000000000000000..9cfcfd24b5ae28b94b94d058fdedc78e1fa9bd72 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/tools/prepare_release.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -euo pipefail + +VERSION="$(jq -r '.info.version' openapi-specs/schema.json)" +LANGUAGES="en nl de cs it" +IMAGE_BASE="semitechnologies/weaviate:" +MSG=${1:-""} +REQUIRED_TOOLS="jq git" + +for tool in $REQUIRED_TOOLS; do + if ! hash "$tool" 2>/dev/null; then + echo "This script requires '$tool', but it is not installed." + exit 1 + fi +done + +if git rev-parse "v$VERSION" >/dev/null 2>&1; then + echo "Cannot prepare release, a release for v$VERSION already exists" + exit 1 +fi + +tools/gen-code-from-swagger.sh + +git commit -a -m "prepare release v$VERSION" + +git tag -a "v$VERSION" -m "release v$VERSION - $MSG" + +echo "You can use the following template for the release notes, copy/paste below the line" +echo "----------------------------" +VERSION="$VERSION" LANGUAGES="$LANGUAGES" go run ./tools/release_template